diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 58dba18..0000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,131 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -CommunityCodeOfConduct AT intel DOT com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. 
- -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.1, available at -[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. - -Community Impact Guidelines were inspired by -[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. - -For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at -[https://www.contributor-covenant.org/translations][translations]. - -[homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index a12cd8f..0000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,28 +0,0 @@ - -## Version - -What release version was the issue found in? - -## Hardware configuration - -List Hardware components used. -Ex. -Issue found with the following: -- Intel® xeon Platinum 8351N with Intel® Data Center GPU Flex 140 -- 12th Gen Intel® Core™ i7 with Intel® Arc™ A770M Graphics - -## Describe the bug - -A clear and concise description of what the bug is. - -## To Reproduce - -Steps to reproduce the behavior: -1. Go to '...' -2. Run with config '....' -3. Wait for results '....' -4. See error \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..ea58f93 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,31 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Version** +What release version was the issue found in? + +**Hardware configuration** +List Hardware components used. +Ex. 
+Issue found with the following: +- Intel® Xeon Platinum 8351N with Intel® Data Center GPU Flex 140 +- 12th Gen Intel® Core™ i7 with Intel® Arc™ A770M Graphics + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Additional context** +Add any other context about the problem here. \ No newline at end of file diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 0000000..3d08c17 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,23 @@ +changelog: + exclude: + labels: + - ignore-for-release + - dependencies + categories: + - title: Breaking Changes + labels: + - Semver-Major + - breaking-change + - title: New Features + labels: + - Semver-Minor + - enhancement + exclude: + labels: + - bug + - title: Issues + labels: + - bug + - title: Other Changes + labels: + - "*" \ No newline at end of file diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b90ca0e..b75f701 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -1,32 +1,37 @@ name: Build docker images -on: workflow_dispatch +on: + workflow_dispatch: + pull_request: + push: + branches: + - main permissions: contents: read jobs: - build_benchmark: + build_platform: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 - - name: Make benchmark Docker images + uses: actions/checkout@v4 + - name: Make platform Docker images run: | - make build-benchmark - build_XPU: + cd docker && make build-platform + build_XPUM: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 - - name: Make xpu Docker images + uses: actions/checkout@v4 + - name: Make xpum Docker images run: | - make build-xpu + cd docker && make build-xpum build_IGT: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Build igt Docker images run: | - make build-igt + cd docker && make build-igt diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml index 91f2827..9094c60 100644 --- a/.github/workflows/codeql.yaml +++ b/.github/workflows/codeql.yaml @@ -42,11 +42,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3.27.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -60,7 +60,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3.27.3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -73,6 +73,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3.27.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/reports.yaml b/.github/workflows/reports.yaml index 72a79ae..7c05e22 100644 --- a/.github/workflows/reports.yaml +++ b/.github/workflows/reports.yaml @@ -28,7 +28,7 @@ jobs: template: report token: ${{ secrets.SECURITY_TOKEN }} - name: GitHub Upload Release Artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4.4.3 with: name: report path: ./*.pdf \ No newline at end of file diff --git a/.github/workflows/reviewdog.yaml b/.github/workflows/reviewdog.yaml index fbcfbf0..e678cb3 100644 --- a/.github/workflows/reviewdog.yaml +++ b/.github/workflows/reviewdog.yaml @@ -13,11 +13,11 @@ jobs: pull-requests: write steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - name: Install go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5.1.0 with: go-version: '1.20' - name: Install golangci-lint @@ -32,7 +32,7 @@ jobs: pip install bandit && bandit --version - name: Install pep8 run: | - sudo apt install pep8 + sudo apt install pycodestyle - name: Install hadolint env: HADOLINT_VERSION: v1.16.3 @@ -49,7 +49,7 @@ jobs: run: | mkdir -p "$PWD"/scan_results ./bin/reviewdog -conf .github/.reviewdogConfig.yml -reporter=github-pr-check - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4.4.3 with: name: scan_results path: ${{ github.workspace }}/scan_results/ diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 97cbb54..9574c85 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -32,12 +32,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + uses: actions/checkout@v4 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # 2.3.1 + uses: ossf/scorecard-action@v2.4.0 with: results_file: results.sarif results_format: sarif @@ -59,7 +59,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + uses: actions/upload-artifact@v4.4.3 with: name: SARIF file path: results.sarif @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@v3.27.3 with: sarif_file: results.sarif diff --git a/.github/workflows/trivy-scan.yaml b/.github/workflows/trivy-scan.yaml index 27a7d84..d235777 100644 --- a/.github/workflows/trivy-scan.yaml +++ b/.github/workflows/trivy-scan.yaml @@ -2,7 +2,7 @@ name: trivy code scan on: push: branches: - - master + - main pull_request: permissions: contents: read @@ -13,13 +13,13 @@ jobs: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results name: trivy scan - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run Trivy vulnerability scanner in fs mode - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: scan-type: 'fs' scan-ref: '.' @@ -28,6 +28,6 @@ jobs: trivy-config: ./github/.trivyConf.yaml - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3.27.3 with: sarif_file: 'trivy-results.sarif' \ No newline at end of file diff --git a/.github/workflows/trivyimagescan.yaml b/.github/workflows/trivyimagescan.yaml index 7ffd525..02e6fbf 100644 --- a/.github/workflows/trivyimagescan.yaml +++ b/.github/workflows/trivyimagescan.yaml @@ -5,14 +5,18 @@ name: trivy_image_scan -on: workflow_dispatch - +on: + workflow_dispatch: + pull_request: + push: + branches: + - main permissions: contents: read jobs: - trivy_image_scan_benchmark: + trivy_image_scan_platform: permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results @@ -21,29 +25,29 @@ jobs: runs-on: "ubuntu-latest" steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: main - name: Build an image from Dockerfile run: | - echo "running make build benchmark" - make build-benchmark + echo "running make build platform benchmark" + cd docker && make build-platform - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe + uses: aquasecurity/trivy-action@0.28.0 with: - image-ref: 'benchmark:dev' + image-ref: 'benchmark:platform' format: 'template' template: '@/contrib/sarif.tpl' output: 'trivy-results.sarif' severity: 'CRITICAL,HIGH' - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3.27.3 with: sarif_file: 'trivy-results.sarif' - trivy_image_scan_xpu: + trivy_image_scan_xpum: permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results @@ -52,25 +56,25 @@ jobs: runs-on: "ubuntu-latest" steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: main - name: Build an image from Dockerfile run: | - echo "running make build benchmark" - make build-xpu + echo "running make build xpum benchmark" + cd docker && make build-xpum - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe + uses: aquasecurity/trivy-action@0.28.0 with: - image-ref: 'benchmark:xpu' + image-ref: 'benchmark:xpum' format: 'template' template: '@/contrib/sarif.tpl' output: 
'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v2
+        uses: github/codeql-action/upload-sarif@v3.27.3
         with:
           sarif_file: 'trivy-results.sarif'
 
@@ -83,16 +87,16 @@ jobs:
     runs-on: "ubuntu-latest"
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           ref: main
       - name: Build an image from Dockerfile
         run: |
-          echo "running make build benchmark"
-          make build-igt
+          echo "running make build igt benchmark"
+          cd docker && make build-igt
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe
+        uses: aquasecurity/trivy-action@0.28.0
         with:
           image-ref: 'benchmark:igt'
           format: 'template'
@@ -101,6 +105,6 @@ jobs:
           severity: 'CRITICAL,HIGH'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v2
+        uses: github/codeql-action/upload-sarif@v3.27.3
         with:
           sarif_file: 'trivy-results.sarif'
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index b045158..a547ca0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,8 +1,7 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
 1. Definitions.
diff --git a/benchmark-scripts/Makefile b/benchmark-scripts/Makefile
index 9787248..2bf9359 100644
--- a/benchmark-scripts/Makefile
+++ b/benchmark-scripts/Makefile
@@ -4,7 +4,10 @@
 
 ROOT_DIRECTORY ?= results
 
-consolidate:
+init-packages:
+	pip3 install -r requirements.txt
+
+consolidate: init-packages
 	python3 consolidate_multiple_run_of_metrics.py --root_directory $(ROOT_DIRECTORY)/ --output $(ROOT_DIRECTORY)/summary.csv
 
 python-test:
@@ -13,5 +16,6 @@ python-test:
 python-integration:
 	python -m coverage run -m unittest benchmark_integration.py
 
-python-coverage:
-	coverage report -m
\ No newline at end of file
+# for more up-to-date coverage, run python unit test first
+python-coverage: python-test
+	coverage report -m
diff --git a/benchmark-scripts/benchmark.py b/benchmark-scripts/benchmark.py
index fa1deb1..deacdb4 100644
--- a/benchmark-scripts/benchmark.py
+++ b/benchmark-scripts/benchmark.py
@@ -1,5 +1,5 @@
 '''
-* Copyright (C) 2024 Intel Corporation.
+* Copyright (C) 2025 Intel Corporation.
* * SPDX-License-Identifier: Apache-2.0 ''' @@ -31,8 +31,20 @@ def parse_args(print=False): description='runs benchmarking using docker compose') parser.add_argument('--pipelines', type=int, default=1, help='number of pipelines') - parser.add_argument('--target_fps', type=float, default=None, - help='stream density target FPS') + # allowed multiple inputs for target_fps: e.g.: --target_fps 14.95 8.5 + parser.add_argument('--target_fps', type=float, nargs='*', default=None, + help='stream density target FPS; ' + + 'can take multiple values for multiple different ' + + 'pipelines with 1-to-1 mapping with pipeline ' + + 'container name via --container_names') + # when multiple target_fps is specified, the 1-to-1 mapping between + # target_fps and container_names are used; examples are given below + # --target_fps 14.95 8.5 14.95 \ + # --container_names container1 container2 container3 + parser.add_argument('--container_names', type=str, nargs='*', + default=None, help='stream density target ' + + 'container names; used together with --target_fps ' + + 'to have 1-to-1 mapping with the pipeline') parser.add_argument('--density_increment', type=int, default=None, help='pipeline increment number for ' + 'stream density. If not specified, then ' + @@ -57,6 +69,14 @@ def parse_args(print=False): default=os.path.join( os.curdir, '..', '..'), help='full path to the retail-use-cases repo root') + parser.add_argument('--docker_log', default=None, + help='docker container name to get logs of and save to file') + parser.add_argument('--parser_script', + default=os.path.join(os.path.curdir, 'parse_csv_to_json.py'), + help='full path to the parsing script to obtain FPS') + parser.add_argument('--parser_args', default='-k device -k igt', + help='arguments to pass to the parser script, ' + + 'pass args with spaces in quotes: "args with spaces"') if print: parser.print_help() return @@ -110,27 +130,6 @@ def docker_compose_containers(command, compose_files=[], compose_pre_args="", (command, traceback.format_exc())) -def convert_csv_results_to_json(results_dir, log_name): - ''' - convert the csv output to json format for readability - - Args: - results_dir: directory containing the benchmark results - log_name: first portion of the log filename to search for - ''' - for entry in os.scandir(results_dir): - if entry.name.startswith(log_name) and entry.is_file(): - print(entry.path) - csv_file = open(entry.path) - json_file = json.dumps([dict(r) for r in csv.DictReader(csv_file)]) - device_name = entry.name.split('.') - json_result_path = os.path.join( - results_dir, device_name[0]+".json") - with open(json_result_path, "w") as outfile: - outfile.write(json_file) - outfile.close() - csv_file.close() - def main(): ''' @@ -138,6 +137,20 @@ def main(): ''' my_args = parse_args() + target_fps_list = my_args.target_fps if my_args.target_fps else [] + container_names_list = ( + my_args.container_names + if my_args.container_names else [] + ) + + if (len(target_fps_list) > 1 + and len(target_fps_list) != len(container_names_list)): + raise ValueError( + "For stream density, the number of target FPS " + "values must match the number of " + "container names provided." 
+ ) + results_dir = os.path.abspath(my_args.results_dir) if not os.path.exists(results_dir): os.mkdir(results_dir) @@ -159,19 +172,39 @@ def main(): env_vars["DEVICE"] = my_args.target_device retail_use_case_root = os.path.abspath(my_args.retail_use_case_root) env_vars["RETAIL_USE_CASE_ROOT"] = retail_use_case_root - if my_args.target_fps: - # stream density mode: + if my_args.density_increment: + env_vars["PIPELINE_INC"] = str(my_args.density_increment) + if len(target_fps_list) > 1 and container_names_list: + # stream density for multiple target FPS values and containers + print('starting stream density for multiple running pipelines...') + results = stream_density.run_stream_density(env_vars, compose_files, + target_fps_list, + container_names_list) + for result in results: + target_fps, container_name, num_pipelines, met_fps = result + print( + f"Completed stream density for target FPS: {target_fps} in " + f"container: {container_name}. " + f"Max pipelines: {num_pipelines}, " + f"Met target FPS? {met_fps}") + elif len(target_fps_list) == 1: + # single target_fps stream density mode: print('starting stream density...') - env_vars["TARGET_FPS"] = str(my_args.target_fps) + env_vars["TARGET_FPS"] = str(target_fps_list[0]) if my_args.density_increment: env_vars["PIPELINE_INC"] = str(my_args.density_increment) env_vars["INIT_DURATION"] = str(my_args.init_duration) - max_num_pipelines, met_fps = stream_density.run_stream_density( - env_vars, compose_files) + # use a default name since there is no + # --container_names provided in this case + container_name = (container_names_list[0] + if container_names_list else "default_container") + results = stream_density.run_stream_density( + env_vars, compose_files, [target_fps_list[0]], [container_name]) + target_fps, container_name, num_pipelines, met_fps = results[0] print( - f"Max number of pipelines in stream density found for " - f"target FPS {env_vars["TARGET_FPS"]} is " - f"{max_num_pipelines}. met target fps? {met_fps}") + f"Max number of pipelines in stream density found for target " + f"FPS = {target_fps} is {num_pipelines}. " + f"Met target FPS? 
{met_fps}")
     else:
         # regular --pipelines mode:
         if my_args.pipelines > 0:
@@ -179,14 +212,32 @@ def main():
             docker_compose_containers("up", compose_files=compose_files,
                                       compose_post_args="-d",
                                       env_vars=env_vars)
-    print("Waiting for init duration to complete...")
+    print("Waiting for %ds init duration to complete" % my_args.init_duration)
     time.sleep(my_args.init_duration)
 
     # use duration to sleep
     print(
-        "Waiting for %d seconds for workload to finish"
+        "Waiting for %ds for workload to finish"
        % my_args.duration)
     time.sleep(my_args.duration)
+
+    # grab the container logs if necessary
+    if my_args.docker_log:
+        try:
+            docker_log = ("docker logs %s" % my_args.docker_log)
+            docker_log_args = shlex.split(docker_log)
+            log_file = os.path.join(my_args.results_dir, "%s.log" % my_args.docker_log)
+            print("writing docker log to %s" % log_file)
+            with open(log_file, 'wb') as f:
+                subprocess.run(docker_log_args,
+                               stdout=f,
+                               stderr=subprocess.STDOUT,
+                               check=True, env=env_vars)  # nosec B404, B603
+
+        except subprocess.CalledProcessError:
+            print("Exception getting the docker log %s: %s" %
+                  (my_args.docker_log, traceback.format_exc()))
+
     # stop all containers and camera-simulator
     docker_compose_containers("down", compose_files=compose_files,
                               env_vars=env_vars)
@@ -194,10 +245,16 @@ def main():
     # collect metrics using copy-platform-metrics
     print("workloads finished...")
     # TODO: implement results handling based on what pipeline is run
-    # convert xpum results to json
-    convert_csv_results_to_json(results_dir, 'device')
-    # convert igt results to json
-    convert_csv_results_to_json(results_dir, 'igt')
+    try:
+        parser_string = ("python %s -d %s %s" % (my_args.parser_script, results_dir, my_args.parser_args))
+        # print("======DEBUG======: %s" % parser_string)
+        parser_args = shlex.split(parser_string)
+
+        subprocess.run(parser_args,
+                       check=True, env=env_vars)  # nosec B404, B603
+    except subprocess.CalledProcessError:
+        print("Exception calling %s\n parser %s: %s" %
+              (parser_string, my_args.parser_script, traceback.format_exc()))
 
 if __name__ == '__main__':
     main()
diff --git a/benchmark-scripts/consolidate_multiple_run_of_metrics.py b/benchmark-scripts/consolidate_multiple_run_of_metrics.py
index 5809ed0..a6be606 100644
--- a/benchmark-scripts/consolidate_multiple_run_of_metrics.py
+++ b/benchmark-scripts/consolidate_multiple_run_of_metrics.py
@@ -434,15 +434,15 @@ def return_blank(self):
 KPIExtractor_OPTION = {"meta_summary.txt":MetaExtractor,
                        "camera":FPSExtractor,
                        "pipeline":PIPELINEFPSExtractor,
-                       "(?:^r).*\.jsonl$":PIPELINLastModifiedExtractor,
+                       r"(?:^r).*\.jsonl$": PIPELINLastModifiedExtractor,
                        "cpu_usage.log":CPUUsageExtractor,
                        "memory_usage.log":MemUsageExtractor,
                        "memory_bandwidth.csv":MemBandwidthExtractor,
                        "disk_bandwidth.log":DiskBandwidthExtractor,
                        "power_usage.log":PowerUsageExtractor,
                        "pcm.csv":PCMExtractor,
-                       "(?:^xpum).*\.json$":XPUMUsageExtractor,
-                       '(?:^igt).*\\.json': GPUUsageExtractor, }
+                       r"(?:^xpum).*\.json$": XPUMUsageExtractor,
+                       r"(?:^igt).*\.json": GPUUsageExtractor, }
 
 def add_parser():
     parser = argparse.ArgumentParser(description='Consolidate data')
diff --git a/benchmark-scripts/parse_csv_to_json.py b/benchmark-scripts/parse_csv_to_json.py
new file mode 100644
index 0000000..a56bc77
--- /dev/null
+++ b/benchmark-scripts/parse_csv_to_json.py
@@ -0,0 +1,54 @@
+'''
+* Copyright (C) 2025 Intel Corporation.
+* +* SPDX-License-Identifier: Apache-2.0 +''' + +import argparse +import csv +import json +import os + +def parse_args(): + + parser = argparse.ArgumentParser( + prog='parse_csv_to_json', + description='parses csv output to json') + parser.add_argument('--directory', '-d', + default=os.path.join(os.curdir, 'results'), + help='full path to the directory with the results') + parser.add_argument('--keyword', '-k', default=['device'], action='append', + help='keyword that results file(s) start with, ' + + 'can be used multiple times') + return parser.parse_args() + + +def convert_csv_results_to_json(results_dir, log_name): + ''' + convert the csv output to json format for readability + + Args: + results_dir: directory containing the benchmark results + log_name: first portion of the log filename to search for + ''' + for entry in os.scandir(results_dir): + if entry.name.startswith(log_name) and entry.is_file(): + print(entry.path) + csv_file = open(entry.path) + json_file = json.dumps([dict(r) for r in csv.DictReader(csv_file)]) + device_name = entry.name.split('.') + json_result_path = os.path.join( + results_dir, device_name[0]+".json") + with open(json_result_path, "w") as outfile: + outfile.write(json_file) + outfile.close() + csv_file.close() + +def main(): + my_args = parse_args() + for k in my_args.keyword: + convert_csv_results_to_json(my_args.directory, k) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/benchmark-scripts/parse_docker_log.py b/benchmark-scripts/parse_docker_log.py new file mode 100644 index 0000000..9e2183c --- /dev/null +++ b/benchmark-scripts/parse_docker_log.py @@ -0,0 +1,72 @@ +''' +* Copyright (C) 2025 Intel Corporation. +* +* SPDX-License-Identifier: Apache-2.0 +''' + +import argparse +import csv +import json +import os +import pprint + +def parse_args(): + + parser = argparse.ArgumentParser( + prog='parse_docker_log', + description='parses docker output to json') + parser.add_argument('--directory', '-d', + default=os.path.join(os.curdir, 'results'), + help='full path to the directory with the results') + parser.add_argument('--keyword', '-k', default=['device'], action='append', + help='keyword that results file(s) start with, ' + + 'can be used multiple times') + return parser.parse_args() + + +def parse_fps_from_log(results_dir, log_name): + ''' + parses the log output for FPS information + + Args: + results_dir: directory containing the benchmark results + log_name: first portion of the log filename to search for + ''' + for entry in os.scandir(results_dir): + if entry.name.startswith(log_name) and entry.is_file() and not entry.name.endswith("json"): + print(entry.path) + fps_info = dict() + count = 0 + sum_list = list() + with open(entry, "r") as f: + for line in f: + words = line.split() + if "FPS" in line: + count += 1 + fps_indices = [i for i, x in enumerate(words) if x == "FPS"] + for fps in fps_indices: + #fps_info[words[fps - 1]] = float(words[fps + 1]) + if "sum_%s" % words[fps - 1] not in fps_info: + fps_info["sum_%s" % words[fps - 1]] = 0 + sum_list.append("sum_%s" % words[fps - 1]) + fps_info["sum_%s" % words[fps - 1]] += float(words[fps + 1]) + total_fps = 0.0 + for sum in sum_list: + name = sum.split('sum_')[-1] + fps_info["avg_%s" % name] = fps_info[sum]/count + total_fps += fps_info[sum] + fps_info.pop(sum) + fps_info["avg_fps"] = total_fps/(count * len(sum_list)) + pprint.pp(fps_info) + outfile = os.path.join(os.path.split(entry.path)[0], "%s.json" % entry.name.split(".")[0]) + with open(outfile, "w") as 
output: + json.dump(fps_info, output) + +def main(): + my_args = parse_args() + for k in my_args.keyword: + parse_fps_from_log(my_args.directory, k) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/benchmark-scripts/requirements.txt b/benchmark-scripts/requirements.txt index e2500b7..ef5e3bd 100644 --- a/benchmark-scripts/requirements.txt +++ b/benchmark-scripts/requirements.txt @@ -1,3 +1,6 @@ argparse==1.4.0 coverage==7.6.1 -mock==5.1.0 \ No newline at end of file +mock==5.1.0 +numpy>=1.25.0 +pandas>=2.1.0 +natsort>=8.4.0 diff --git a/benchmark-scripts/stream_density.py b/benchmark-scripts/stream_density.py index 65fe61b..d1237f1 100644 --- a/benchmark-scripts/stream_density.py +++ b/benchmark-scripts/stream_density.py @@ -13,9 +13,12 @@ # Constants: TARGET_FPS_KEY = "TARGET_FPS" +CONTAINER_NAME_KEY = "CONTAINER_NAME" PIPELINE_INCR_KEY = "PIPELINE_INC" INIT_DURATION_KEY = "INIT_DURATION" RESULTS_DIR_KEY = "RESULTS_DIR" +DEFAULT_TARGET_FPS = 14.95 +MAX_GUESS_INCREMENTS = 5 class ArgumentError(Exception): @@ -57,12 +60,16 @@ def clean_up_pipeline_logs(results_dir): print('INFO: no match files to clean up') -def check_non_empty_result_logs(num_pipelines, results_dir, max_retries=5): +def check_non_empty_result_logs(num_pipelines, results_dir, + container_name, max_retries=5): ''' checks the current non-empty pipeline log files with some retries upto max_retires if file not exists or empty Args: num_pipelines: number of currently running pipelines + container_name: the name of the container to match in log files, + expected to be part of the filename pattern + after the underscore (_) results_dir: directory holding the benchmark results max_retries: maximum number of retires, default 5 retires ''' @@ -76,11 +83,13 @@ def check_non_empty_result_logs(num_pipelines, results_dir, max_retries=5): print("INFO: checking presence of all pipeline log files... " + "retry: {}".format(retry)) matching_files = glob.glob(os.path.join( - results_dir, 'pipeline*_*.log')) + results_dir, f'pipeline*_{container_name}.log')) if len(matching_files) >= num_pipelines and all([ os.path.isfile(file) and os.path.getsize(file) > 0 for file in matching_files]): - print('found all non-empty log files') + print( + f'found all non-empty log files for container name ' + f'{container_name}') break else: # some log files still empty or not found, retry it @@ -113,12 +122,15 @@ def get_latest_pipeline_logs(num_pipelines, pipeline_log_files): return latest_files -def calculate_total_fps(num_pipelines, results_dir): +def calculate_total_fps(num_pipelines, results_dir, container_name): ''' calculates averaged fps from the current running num_pipelines Args: num_pipelines: number of currently running pipelines results_dir: directory holding the benchmark results + container_name: the name of the container to match in log files, + expected to be part of the filename pattern + after the underscore (_) Returns: total_fps: accumulative total fps from all pipelines total_fps_per_stream: the averaged fps for pipelines @@ -126,7 +138,7 @@ def calculate_total_fps(num_pipelines, results_dir): total_fps = 0 total_fps_per_stream = 0 matching_files = glob.glob(os.path.join( - results_dir, 'pipeline*_*.log')) + results_dir, f'pipeline*_{container_name}.log')) print(f"DEBUG: num. 
of matching_files = {len(matching_files)}") latest_pipeline_logs = get_latest_pipeline_logs( num_pipelines, matching_files) @@ -150,135 +162,233 @@ def calculate_total_fps(num_pipelines, results_dir): return total_fps, total_fps_per_stream -def run_stream_density(env_vars, compose_files): +def validate_and_setup_env(env_vars, target_fps_list): ''' - runs stream density using docker compose for the specified target FPS - with optional stream density pipeline increment numbers + Validates and sets up the environment variables needed for + running stream density. Args: - env_vars: the dict of current environment variables - compose_files: the list of compose files to run pipelines - Returns: - num_pipelines: maximum number of pipelines to achieve TARGET_FPS - meet_target_fps: boolean to indicate whether the returned - number_pipelines can achieve the TARGET_FPS goal or not + env_vars: dict of current environment variables + target_fps_list: list of target FPS values for stream density ''' if not is_env_non_empty(env_vars, RESULTS_DIR_KEY): raise ArgumentError('ERROR: missing ' + RESULTS_DIR_KEY + 'in env') - # use some default values is missing - if not is_env_non_empty(env_vars, TARGET_FPS_KEY): - env_vars[TARGET_FPS_KEY] = "15.0" - elif float(env_vars[TARGET_FPS_KEY]) <= 0.0: + # Set default values if missing + if not target_fps_list: + target_fps_list.append(DEFAULT_TARGET_FPS) + elif any(float(fps) <= 0.0 for fps in target_fps_list): raise ArgumentError( - 'ERROR: stream density target fps should be greater than 0') + 'ERROR: stream density target fps ' + + 'should be greater than 0') if is_env_non_empty(env_vars, PIPELINE_INCR_KEY) and int( env_vars[PIPELINE_INCR_KEY]) <= 0: raise ArgumentError( - 'ERROR: stream density increments should be greater than 0') + 'ERROR: stream density increments ' + + 'should be greater than 0') if not is_env_non_empty(env_vars, INIT_DURATION_KEY): env_vars[INIT_DURATION_KEY] = "120" - # set up: - TARGET_FPS = float(env_vars[TARGET_FPS_KEY]) + +def run_pipeline_iterations( + env_vars, compose_files, results_dir, + container_name, target_fps): + ''' + runs an iteration of stream density benchmarking for + a given container name and target FPS. + Args: + env_vars: Environment variables for docker compose. + compose_files: Docker compose files. + results_dir: Directory for storing results. + container_name: Name of the container to run. + target_fps: Target FPS to achieve. + Returns: + num_pipelines: Number of pipelines used. + meet_target_fps: Whether the target FPS was achieved. + ''' INIT_DURATION = int(env_vars[INIT_DURATION_KEY]) - MAX_GUESS_INCREMENTS = 5 num_pipelines = 1 + in_decrement = False + increments = 1 + meet_target_fps = False + + # clean up any residual pipeline log files before starts: + clean_up_pipeline_logs(results_dir) + print( + f"INFO: Stream density TARGET_FPS set for {target_fps} " + f"with container_name {container_name} " + f"and INIT_DURATION set for {INIT_DURATION} seconds") + + while not meet_target_fps: + env_vars["PIPELINE_COUNT"] = str(num_pipelines) + print(f"Starting num. of pipelines: {num_pipelines}") + benchmark.docker_compose_containers( + "up", compose_files=compose_files, + compose_post_args="-d", env_vars=env_vars) + print("waiting for pipelines to settle...") + time.sleep(INIT_DURATION) + # note: before reading the pipeline log files + # we want to give pipelines some time as the log files + # producing could be lagging behind... 
+ try: + check_non_empty_result_logs( + num_pipelines, results_dir, container_name, 50) + except ValueError as e: + print(f"ERROR: {e}") + # since we are not able to get all non-empty log + # the best we can do is to use the previous num_pipelines + # before this current num_pipelines + num_pipelines = num_pipelines - increments + if num_pipelines < 1: + num_pipelines = 1 + return num_pipelines, False + # once we have all non-empty pipeline log files + # we then can calculate the average fps + total_fps, total_fps_per_stream = calculate_total_fps( + num_pipelines, results_dir, container_name) + print('container name:', container_name) + print('Total FPS:', total_fps) + print(f"Total averaged FPS per stream: {total_fps_per_stream} " + f"for {num_pipelines} pipeline(s)") + + if not in_decrement: + if total_fps_per_stream >= target_fps: + # if the increments hint from $PIPELINE_INC is not empty + # we will use it as the increments + # otherwise, we will try to adjust increments dynamically + # based on the rate of {total_fps_per_stream} + # and target_fps + if is_env_non_empty(env_vars, PIPELINE_INCR_KEY): + increments = int(env_vars[PIPELINE_INCR_KEY]) + else: + increments = int( + total_fps_per_stream / target_fps) + if increments == 1: + increments = MAX_GUESS_INCREMENTS + print( + f"incrementing pipeline no. by {increments}") + else: + # below target_fps, start decrementing + increments = -1 + in_decrement = True + print( + f"Below target fps {target_fps}, " + f"starting to decrement pipelines by 1...") + else: + # in decrementing case: + if total_fps_per_stream >= target_fps: + print( + f"found maximum number of pipelines to reach " + f"target FPS {target_fps}") + meet_target_fps = True + print( + f"Max stream density achieved for target FPS " + f"{target_fps} is {num_pipelines}") + increments = 0 + elif num_pipelines <= 1: + print( + f"already reached num pipeline 1, and " + f"the fps per stream is {total_fps_per_stream} " + f"but target FPS is {target_fps}") + meet_target_fps = False + break + else: + print( + f"decrementing number of pipelines " + f"{num_pipelines} by 1") + # end of if not in_decrement: + num_pipelines += increments + if num_pipelines <= 0: + # we will keep the min. num_pipelines as 1 + num_pipelines = 1 + print( + f"already reached min. 
pipeline number, stopping...") + break + # end of while + print( + f"pipeline iterations done for " + f"container_name: {container_name} " + f"with input target_fps = {target_fps}" + ) + + return num_pipelines, meet_target_fps + + +def run_stream_density(env_vars, compose_files, target_fps_list, + container_names_list): + ''' + runs stream density using docker compose for the specified target FPS + values and the corresponding container names + with optional stream density pipeline increment numbers + Args: + env_vars: the dict of current environment variables + compose_files: the list of compose files to run pipelines + target_fps_list: list of target FPS values for stream density + container_names_list: list of container names for + the corresponding target FPS + Returns: + results as a list of tuples (target_fps, container_name, + num_pipelines, meet_target_fps) where + target_fps: the desire frames per second to maintain for pipeline + container_name: the corresponding container name for the pipeline + num_pipelines: maximum number of pipelines to achieve TARGET_FPS + meet_target_fps: boolean to indicate whether the returned + number_pipelines can achieve the TARGET_FPS goal or not + ''' + results = [] + validate_and_setup_env(env_vars, target_fps_list) results_dir = env_vars[RESULTS_DIR_KEY] log_file_path = os.path.join(results_dir, 'stream_density.log') orig_stdout = sys.stdout orig_stderr = sys.stderr try: - logger = open(log_file_path, 'w') - sys.stdout = logger - sys.stderr = logger - - # clean up any residual pipeline log files before starts: - clean_up_pipeline_logs(results_dir) + with open(log_file_path, 'a') as logger: + sys.stdout = logger + sys.stderr = logger - print(f"INFO: Stream density TARGET_FPS set for {TARGET_FPS} " - f"and INIT_DURATION set for {INIT_DURATION}") + # loop through the target_fps list and find out the stream density: + for target_fps, container_name in zip( + target_fps_list, container_names_list + ): + print( + f"DEBUG: in for-loop, target_fps={target_fps} " + f"container_name={container_name}") + env_vars[TARGET_FPS_KEY] = str(target_fps) + env_vars[CONTAINER_NAME_KEY] = container_name + # stream density main logic: + try: + num_pipelines, meet_target_fps = run_pipeline_iterations( + env_vars, compose_files, results_dir, + container_name, target_fps + ) + results.append( + ( + target_fps, + container_name, + num_pipelines, + meet_target_fps + ) + ) + finally: + # better to compose-down before the next iteration + benchmark.docker_compose_containers( + "down", + compose_files=compose_files, + env_vars=env_vars + ) + # give some time for processes to clean up: + time.sleep(10) - # stream density main logic: - in_decrement = False - increments = 1 - meet_target_fps = False - while not meet_target_fps: - total_fps_per_stream = 0.0 - total_fps = 0.0 - env_vars["PIPELINE_COUNT"] = str(num_pipelines) - print("Starting num. of pipelines: %d" % num_pipelines) - benchmark.docker_compose_containers( - "up", compose_files=compose_files, - compose_post_args="-d", env_vars=env_vars) - print("waiting for pipelines to settle...") - time.sleep(INIT_DURATION) - # note: before reading the pipeline log files - # we want to give pipelines some time as the log files - # producing could be lagging behind... 
- check_non_empty_result_logs(num_pipelines, results_dir, 50) - # once we have all non-empty pipeline log files - # we then can calculate the average fps - total_fps, total_fps_per_stream = calculate_total_fps( - num_pipelines, results_dir) - print('Total FPS:', total_fps) - print( - f"Total averaged FPS per stream: {total_fps_per_stream} " - f"for {num_pipelines} pipeline(s)") - if not in_decrement: - if total_fps_per_stream >= TARGET_FPS: - # if the increments hint from $PIPELINE_INC is not empty - # we will use it as the increments - # otherwise, we will try to adjust increments dynamically - # based on the rate of {total_fps_per_stream} - # and $TARGET_FPS - if is_env_non_empty(env_vars, PIPELINE_INCR_KEY): - increments = int(env_vars[PIPELINE_INCR_KEY]) - else: - increments = int( - total_fps_per_stream / TARGET_FPS) - if increments == 1: - increments = MAX_GUESS_INCREMENTS - print(f"incrementing pipeline no. by {increments}") - else: - # below TARGET_FPS, start decrementing - increments = -1 - in_decrement = True - print(f"Below target fps {TARGET_FPS}, " - f"starting to decrement pipelines by 1...") - else: - if total_fps_per_stream >= TARGET_FPS: - print(f"found maximum number of pipelines to reach " - f"target fps {TARGET_FPS}") - meet_target_fps = True - print(f"Max stream density achieved for target FPS " - f"{TARGET_FPS} is {num_pipelines}") - increments = 0 - print("Finished stream density benchmarking") - else: - if num_pipelines <= 1: - print(f"already reached num pipeline 1, and the " - f"fps per stream is {total_fps_per_stream} " - f"but target FPS is {TARGET_FPS}") - meet_target_fps = False - break - else: - print(f"decrementing number of pipelines " - f"{num_pipelines} by 1") - # end of if not in_decrement: - num_pipelines += increments - # end of while - print("stream_density done!") + # end of for-loop + print("stream_density done!") except Exception as ex: print(f'ERROR: found exception: {ex}') raise finally: - benchmark.docker_compose_containers( - "down", compose_files=compose_files, - env_vars=env_vars) # reset sys stdout and err back to it's own sys.stdout = orig_stdout sys.stderr = orig_stderr - return num_pipelines, meet_target_fps + + return results diff --git a/benchmark-scripts/stream_density_test.py b/benchmark-scripts/stream_density_test.py index 41097cb..04977f6 100644 --- a/benchmark-scripts/stream_density_test.py +++ b/benchmark-scripts/stream_density_test.py @@ -7,7 +7,15 @@ import mock import subprocess # nosec B404 import unittest +from unittest.mock import patch, mock_open, MagicMock import stream_density +from stream_density import validate_and_setup_env, ArgumentError +from stream_density import ( + RESULTS_DIR_KEY, + PIPELINE_INCR_KEY, + INIT_DURATION_KEY, + DEFAULT_TARGET_FPS +) import os @@ -33,7 +41,7 @@ def test_check_non_empty_result_logs_max_tries(self): # no file at all case: try: stream_density.check_non_empty_result_logs( - 1, './non-existing-results') + 1, './non-existing-results', 'abc') self.fail('expected ValueError exception') except ValueError as ex: self.assertTrue("""ERROR: cannot find all pipeline log files @@ -49,7 +57,7 @@ def test_check_non_empty_result_logs_max_tries(self): with open(testFile, 'w') as file: file.write('this is a test') stream_density.check_non_empty_result_logs( - 2, test_results_dir) + 2, test_results_dir, 'abc') self.fail('expected ValueError exception') except ValueError as ex: self.assertTrue("""ERROR: cannot find all pipeline log files @@ -65,7 +73,7 @@ def test_check_non_empty_result_logs_success(self): 
testFile1 = os.path.join( test_results_dir, 'pipeline12345656_abc.log') testFile2 = os.path.join( - test_results_dir, 'pipeline98765432_def.log') + test_results_dir, 'pipeline98765432_abc.log') try: os.makedirs(test_results_dir) # create two non empty temporary log files @@ -75,7 +83,9 @@ def test_check_non_empty_result_logs_success(self): file.write('another file for testing') stream_density.check_non_empty_result_logs( - 2, test_results_dir) + 2, test_results_dir, 'abc') + stream_density.check_non_empty_result_logs( + 2, test_results_dir, 'abc') except ValueError as ex: self.fail("""ERROR: cannot find all pipeline log files after max retries""") @@ -91,7 +101,7 @@ def test_calculate_total_fps_success(self): test_results_dir = './test_stream_density_results' try: fps, avg_fps = stream_density.calculate_total_fps( - 2, test_results_dir) + 2, test_results_dir, 'gst') self.assertTrue( fps > 0.0, f"total_fps is expected > 0.0 but found {fps}") @@ -132,6 +142,324 @@ def test_clean_up_pipeline_logs(self): if not os.listdir(test_results_dir): os.rmdir(test_results_dir) + def test_validate_and_setup_env(self): + test_cases = [ + # Test case 1: Valid environment, valid target_fps_list + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "target_fps_list": [20.0], + "expect_exception": False, + "expected_target_fps_list": [20.0], + "expected_env_vars": { + RESULTS_DIR_KEY: "/some/path", + INIT_DURATION_KEY: "120" + }, + }, + # Test case 2: Missing RESULTS_DIR_KEY in env_vars + { + "env_vars": {}, + "target_fps_list": [20.0], + "expect_exception": True, + "exception_type": ArgumentError, + }, + # Test case 3: Empty target_fps_list (should set to default) + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "target_fps_list": [], + "expect_exception": False, + "expected_target_fps_list": [DEFAULT_TARGET_FPS], + "expected_env_vars": { + RESULTS_DIR_KEY: "/some/path", + INIT_DURATION_KEY: "120" + }, + }, + # Test case 4: Negative target_fps value in target_fps_list + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "target_fps_list": [-5.0], + "expect_exception": True, + "exception_type": ArgumentError, + }, + # Test case 5: Missing INIT_DURATION_KEY in env_vars- + # should default to "120" + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "target_fps_list": [20.0], + "expect_exception": False, + "expected_target_fps_list": [20.0], + "expected_env_vars": { + RESULTS_DIR_KEY: "/some/path", + INIT_DURATION_KEY: "120" + }, + }, + # Test case 6: PIPELINE_INCR_KEY <= 0 (should raise exception) + { + "env_vars": { + RESULTS_DIR_KEY: "/some/path", + PIPELINE_INCR_KEY: "0" + }, + "target_fps_list": [20.0], + "expect_exception": True, + "exception_type": ArgumentError, + }, + ] + + for i, test_case in enumerate(test_cases): + with self.subTest(f"Test case {i + 1}"): + # Make a copy to avoid mutation + env_vars = test_case["env_vars"].copy() + target_fps_list = test_case["target_fps_list"].copy() + + if test_case["expect_exception"]: + with self.assertRaises(test_case["exception_type"]): + validate_and_setup_env(env_vars, target_fps_list) + else: + try: + validate_and_setup_env(env_vars, target_fps_list) + # Verify the target_fps_list was updated as expected + self.assertEqual( + target_fps_list, + test_case["expected_target_fps_list"]) + # Verify env_vars was updated as expected + expected_env_vars = test_case["expected_env_vars"] + for key, value in expected_env_vars.items(): + self.assertEqual(env_vars.get(key), value) + except Exception as ex: + self.fail(f"Unexpected exception raised: {ex}") + + 
@patch('time.sleep', return_value=None) + @patch('benchmark.docker_compose_containers') + @patch('stream_density.calculate_total_fps') + @patch('stream_density.check_non_empty_result_logs') + @patch('stream_density.clean_up_pipeline_logs') + def test_pipeline_iterations( + self, + mock_clean_logs, + mock_check_logs, + mock_calculate_fps, + mock_docker_compose, + mock_sleep + ): + test_cases = [ + # Test case 1: Succeed on the first iteration + # with target_fps achieved + { + "env_vars": {"INIT_DURATION": "10"}, + "compose_files": ["docker-compose.yml"], + "results_dir": "/path/to/results", + "container_name": "above_fps_target", + "target_fps": 14.0, + "expected_num_pipelines": 10, + "expected_meet_target_fps": True, + # Mock returns FPS below target + "calculate_fps_side_effect": [ + (50, 20.0), (70, 16.0), (100, 12.0), (120, 14.8) + ] + }, + # Test case 2: Reach minimum pipeline count + # without achieving target_fps + { + "env_vars": {"INIT_DURATION": "10"}, + "compose_files": ["docker-compose.yml"], + "results_dir": "/path/to/results", + "container_name": "below_fps_target", + "target_fps": 15.0, + "expected_num_pipelines": 1, + "expected_meet_target_fps": False, + # Mock returns FPS below target + "calculate_fps_side_effect": [(100, 10.0), (50, 5.0)] + }, + # Test case 3: Below target_fps and come back + { + "env_vars": {"INIT_DURATION": "10", "PIPELINE_INC": "1"}, + "compose_files": ["docker-compose.yml"], + "results_dir": "/path/to/results", + "container_name": "below_comeback_target", + "target_fps": 14.0, + "expected_num_pipelines": 1, + "expected_meet_target_fps": True, + # Mock returns FPS below target + "calculate_fps_side_effect": [ + (100, 60.0), (190, 10.0), (320, 14.2) + ] + }, + # Test case 4: Below target_fps and reach minimum + { + "env_vars": {"INIT_DURATION": "10", "PIPELINE_INC": "1"}, + "compose_files": ["docker-compose.yml"], + "results_dir": "/path/to/results", + "container_name": "below_comeback_target", + "target_fps": 14.0, + "expected_num_pipelines": 1, + "expected_meet_target_fps": False, + # Mock returns FPS below target + "calculate_fps_side_effect": [ + (100, 60.0), (190, 10.0), (220, 13.2) + ] + }, + # Test case 5: check_non_empty_result_logs raises ValueError + { + "env_vars": {"INIT_DURATION": "10"}, + "compose_files": ["docker-compose.yml"], + "results_dir": "/path/to/results", + "container_name": "value_error", + "target_fps": 14.0, + "check_logs_side_effect": ValueError( + "Expecting ValueError for check_non_empty_result_logs"), + "expected_num_pipelines": 1, + "expected_meet_target_fps": False + }, + ] + + for i, test_case in enumerate(test_cases): + with self.subTest(f"Test case {i + 1}"): + env_vars = test_case["env_vars"] + compose_files = test_case["compose_files"] + results_dir = test_case["results_dir"] + container_name = test_case["container_name"] + target_fps = test_case["target_fps"] + + # Set up the side effect for + # check_non_empty_result_logs if specified + if "check_logs_side_effect" in test_case: + mock_check_logs.side_effect = test_case[ + "check_logs_side_effect"] + + # Set up the side effect for + # calculate_total_fps using a finite list + if "calculate_fps_side_effect" in test_case: + mock_calculate_fps.side_effect = test_case[ + "calculate_fps_side_effect"] + + # Run the function with the test case inputs + if "check_logs_side_effect" in test_case: + num_pipelines, meet_target_fps = ( + stream_density.run_pipeline_iterations( + env_vars, compose_files, results_dir, + container_name, target_fps) + ) + + # Verify the returned 
values after ValueError is handled + self.assertEqual( + num_pipelines, + test_case["expected_num_pipelines"]) + self.assertEqual( + meet_target_fps, + test_case["expected_meet_target_fps"]) + + # Ensure that check_non_empty_result_logs was + # called and raised ValueError + mock_check_logs.assert_called() + else: + num_pipelines, meet_target_fps = ( + stream_density.run_pipeline_iterations( + env_vars, compose_files, results_dir, + container_name, target_fps) + ) + + # Verify the output + self.assertEqual( + num_pipelines, + test_case["expected_num_pipelines"]) + self.assertEqual( + meet_target_fps, + test_case["expected_meet_target_fps"]) + + @patch('time.sleep', return_value=None) + @patch('stream_density.validate_and_setup_env') + @patch('stream_density.run_pipeline_iterations') + @patch('stream_density.benchmark.docker_compose_containers') + @patch('builtins.open', new_callable=mock_open) + def test_run_stream_density( + self, + mock_open_file, + mock_docker_compose, + mock_run_pipeline_iterations, + mock_validate_env, + mock_sleep + ): + test_cases = [ + # Test case 1: Valid scenario where all parameters are correct + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "compose_files": ["docker-compose.yml"], + "target_fps_list": [15.0, 25.0], + "container_names_list": ["container1", "container2"], + "run_pipeline_side_effect": [ + (5, True), # For container1 + (7, False) # For container2 + ], + "expected_results": [ + (15.0, "container1", 5, True), + (25.0, "container2", 7, False) + ], + # Expected number of compose down calls + "expected_down_call_count": 2 + }, + # Test case 2: Exception occurs during run_pipeline_iterations() + { + "env_vars": {RESULTS_DIR_KEY: "/some/path"}, + "compose_files": ["docker-compose.yml"], + "target_fps_list": [15.0], + "container_names_list": ["container1"], + "run_pipeline_side_effect": Exception("Test exception"), + "expect_exception": True, + # Expected number of compose down calls + # even if an exception occurs, it should call down + "expected_down_call_count": 1 + } + ] + + for i, test_case in enumerate(test_cases): + with self.subTest(f"Test case {i + 1}"): + mock_docker_compose.reset_mock() + env_vars = test_case["env_vars"].copy() + compose_files = test_case["compose_files"] + target_fps_list = test_case["target_fps_list"] + container_names_list = test_case["container_names_list"] + + # Mock the behavior of run_pipeline_iterations + # based on the test case + if isinstance(test_case["run_pipeline_side_effect"], list): + mock_run_pipeline_iterations.side_effect = test_case[ + "run_pipeline_side_effect"] + else: + mock_run_pipeline_iterations.side_effect = test_case[ + "run_pipeline_side_effect"] + + # Run the function and verify results or exceptions + if test_case.get("expect_exception"): + print('expecting exception test case') + with self.assertRaises(Exception) as context: + stream_density.run_stream_density( + env_vars, compose_files, + target_fps_list, container_names_list) + self.assertTrue(isinstance(context.exception, Exception)) + else: + results = stream_density.run_stream_density( + env_vars, compose_files, + target_fps_list, container_names_list) + self.assertEqual(results, test_case["expected_results"]) + + # Verify that validate_and_setup_env was called correctly + mock_validate_env.assert_called_with( + env_vars, target_fps_list + ) + + expected_down_call_count = test_case[ + "expected_down_call_count" + ] + actual_down_calls = [ + call for call in mock_docker_compose.call_args_list + if call[0][0] == 'down' + ] + 
self.assertEqual( + len(actual_down_calls), + expected_down_call_count, + f"Expected {expected_down_call_count} 'down' calls, " + f"but found {len(actual_down_calls)}" + ) + if __name__ == '__main__': unittest.main() diff --git a/docker/intel-top/entrypoint.sh b/docker/intel-top/entrypoint.sh index b368436..ff2b4ae 100755 --- a/docker/intel-top/entrypoint.sh +++ b/docker/intel-top/entrypoint.sh @@ -19,7 +19,8 @@ IFS='@' for device in $devices do # shellcheck disable=SC2086 # Intended work splitting - dev=$(echo $device | awk '{print $1}') + # the real device card number should be from card= instead of the device index string itself + dev=$(echo $device | awk '{print $NF}' | sed -E 's/.*?card=//') echo "$dev" # shellcheck disable=SC2086 # Intended work splitting deviceId=$(echo $device | awk '{print $2}' | sed -E 's/.*?device=//' | cut -f1 -d",") diff --git a/docker/platform/entrypoint.sh b/docker/platform/entrypoint.sh index 81f1a70..8bc3d0e 100755 --- a/docker/platform/entrypoint.sh +++ b/docker/platform/entrypoint.sh @@ -40,4 +40,10 @@ if [ "$is_xeon" == "1" ] echo "Starting general pcm collection" touch /tmp/results/pcm.csv chown 1000:1000 /tmp/results/pcm.csv -/opt/intel/pcm-bin/bin/pcm 1 -silent -nc -nsys -csv=/tmp/results/pcm.csv \ No newline at end of file +/opt/intel/pcm-bin/bin/pcm 1 -silent -nc -nsys -csv=/tmp/results/pcm.csv & + +while true +do + echo "Capturing platform data" + sleep 15 +done \ No newline at end of file diff --git a/sample-media/.golangci.yml b/sample-media/.golangci.yml new file mode 100644 index 0000000..45a11be --- /dev/null +++ b/sample-media/.golangci.yml @@ -0,0 +1,7 @@ +linters: + enable: + - gosec + - revive + - unused + - govet + - gosimple
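The new multi-pipeline stream-density interface in benchmark.py pairs each --target_fps value with one --container_names entry. A minimal, self-contained sketch of that pairing rule and of the result tuples run_stream_density() returns; the FPS values and container names below are made-up CLI inputs, not values from this patch:

# Hypothetical CLI inputs, e.g. --target_fps 14.95 8.5 14.95
#                               --container_names cam1 cam2 cam3
target_fps_list = [14.95, 8.5, 14.95]
container_names_list = ["cam1", "cam2", "cam3"]

# Same 1-to-1 mapping check performed in benchmark.py main():
if (len(target_fps_list) > 1
        and len(target_fps_list) != len(container_names_list)):
    raise ValueError(
        "For stream density, the number of target FPS "
        "values must match the number of "
        "container names provided.")

# stream_density.run_stream_density() walks the pairs in order and returns
# one (target_fps, container_name, num_pipelines, meet_target_fps) tuple
# per pair; this loop only illustrates the pairing.
for target_fps, container_name in zip(target_fps_list, container_names_list):
    print(f"stream density run: container={container_name}, "
          f"target FPS={target_fps}")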
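stream_density.run_pipeline_iterations() searches for the maximum pipeline count by over-shooting with a guessed increment and then walking back down by one. A condensed sketch of that increment heuristic: MAX_GUESS_INCREMENTS, the PIPELINE_INC hint, and the ratio rule come from this patch, while the helper name next_increment is only illustrative:

MAX_GUESS_INCREMENTS = 5  # same constant as in stream_density.py


def next_increment(total_fps_per_stream, target_fps, pipeline_inc=None):
    """Step size used while per-stream FPS is still at or above the target.

    pipeline_inc mirrors the optional PIPELINE_INC environment hint; when it
    is unset the step is guessed from how far above target the streams are.
    """
    if pipeline_inc is not None:
        return int(pipeline_inc)
    increments = int(total_fps_per_stream / target_fps)
    if increments == 1:
        # barely above target: take the bigger guess so the search
        # over-shoots quickly, then walks back down one pipeline at a time
        increments = MAX_GUESS_INCREMENTS
    return increments


print(next_increment(45.0, 14.95))   # 3 -> jump by three pipelines
print(next_increment(15.0, 14.95))   # 5 -> falls back to MAX_GUESS_INCREMENTS
print(next_increment(60.0, 14.95, pipeline_inc=2))  # 2 -> fixed PIPELINE_INC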
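parse_docker_log.py aggregates FPS by scanning each log line for "<name> FPS <value>" word triplets, summing per stream and averaging over the number of FPS lines. A small sketch of that aggregation on a made-up two-line log; the exact log format emitted by a real pipeline container is an assumption here:

# Made-up pipeline log lines; each "<name> FPS <value>" triplet is one
# per-stream reading, mirroring parse_docker_log.parse_fps_from_log().
log_lines = [
    "FpsCounter: stream0 FPS 15.10 stream1 FPS 14.80",
    "FpsCounter: stream0 FPS 14.90 stream1 FPS 15.00",
]

sums = {}
count = 0
for line in log_lines:
    words = line.split()
    if "FPS" not in words:
        continue
    count += 1
    for i, word in enumerate(words):
        if word == "FPS":
            stream = words[i - 1]
            sums[stream] = sums.get(stream, 0.0) + float(words[i + 1])

avg_per_stream = {stream: total / count for stream, total in sums.items()}
avg_fps = sum(sums.values()) / (count * len(sums))
print(avg_per_stream)  # {'stream0': 15.0, 'stream1': 14.9}
print(avg_fps)         # 14.95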
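After a regular run, benchmark.py now shells out to a pluggable results parser built from --parser_script and --parser_args instead of converting CSVs inline. A sketch of the command string it assembles and how shlex splits it; the defaults are taken from the new arguments and the results path is only a placeholder:

import shlex

parser_script = "parse_csv_to_json.py"   # default of --parser_script
parser_args = "-k device -k igt"         # default of --parser_args
results_dir = "/tmp/results"             # illustrative path

parser_string = "python %s -d %s %s" % (parser_script, results_dir, parser_args)
print(shlex.split(parser_string))
# ['python', 'parse_csv_to_json.py', '-d', '/tmp/results',
#  '-k', 'device', '-k', 'igt']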
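parse_csv_to_json.py keeps the same row-to-object conversion that previously lived in benchmark.py: every CSV data row becomes one JSON object keyed by the header row. A tiny illustration with placeholder column names rather than real XPUM or igt metric headers:

import csv
import io
import json

# Placeholder CSV in the shape convert_csv_results_to_json() expects:
# a header row followed by one row per sample.
sample_csv = "timestamp,gpu_util\n0,55\n1,60\n"
rows = [dict(r) for r in csv.DictReader(io.StringIO(sample_csv))]
print(json.dumps(rows))
# [{"timestamp": "0", "gpu_util": "55"}, {"timestamp": "1", "gpu_util": "60"}]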