
Rename field model_config to model_cfg in NIXLKVBench workload #3903
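
The code change itself is not shown on this page, only the CI run that validated it. As a rough, hypothetical illustration of what such a rename can look like, the sketch below assumes the workload's arguments are declared as a Pydantic model; the class names and surrounding fields are invented for the example, and only the model_config -> model_cfg rename comes from the PR title. In Pydantic v2, model_config is reserved for the class-level ConfigDict, which is a common reason to avoid it as a field name.

# Hypothetical sketch only: real field names, types, and defaults in the
# NIXLKVBench workload definition may differ; just the
# model_config -> model_cfg rename is taken from the PR title.
from pydantic import BaseModel, ConfigDict, Field


class NIXLModelSettings(BaseModel):
    """Illustrative stand-in for the per-model settings the field carries."""

    name: str = "example-model"
    tp_size: int = 1


class NIXLKVBenchCmdArgs(BaseModel):
    # Class-level Pydantic v2 configuration: this attribute is what the name
    # model_config is reserved for, so a *field* called model_config clashes
    # with the framework.
    model_config = ConfigDict(protected_namespaces=())

    # Renamed field: was model_config, now model_cfg.
    model_cfg: NIXLModelSettings = Field(default_factory=NIXLModelSettings)

Any TOML test definitions that set this field would presumably need to use the new name as well.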

Workflow file for this run

name: CI
on:
  pull_request:
jobs:
  lint:
    name: Linting
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
      - name: Install dependencies
        run: pip install '.[dev,docs]'
      - name: Run ruff linter
        run: ruff check
      - name: Run ruff formatter
        run: ruff format --check --diff
      - name: Run pyright
        run: pyright
      - name: Run vulture check
        run: vulture src/ tests/
      - name: Import linter
        run: PYTHONPATH=src lint-imports
      - name: Check TOML formatting
        run: |
          set -eE
          set -o pipefail
          taplo fmt --check --diff
      - name: Build documentation
        run: |
          set -eE
          set -o pipefail
          cd doc
          make html
  test:
    name: Run pytest
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.12"]
      fail-fast: false
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: pip install '.[dev]'
      - name: Run pytest
        run: |
          set -eEx
          set -o pipefail
          python -m pytest -vv --cov
          # <100% coverage might indicate that some tests are not doing what they should,
          # but realistically, it might be too hard to reach, so lower it a bit
          # (at the moment of writing, it is 99.95%)
          coverage report --include='tests/*' --precision=2 --fail-under=97.00
          python -m pytest --dead-fixtures
          python -m pytest -vv -m ci_only
      - name: Test local installation
        run: |
          set -eE
          set -o pipefail
          pip install .
          pip uninstall -y cloudai
          pip install -e .
      - name: Test commands
        run: |
          set -eEx
          set -o pipefail
          cloudai --help
          # this checks that all TOMLs are valid; Test Scenarios are checked against _all_ tests
          cloudai verify-configs conf/
          # this checks that all TOMLs are valid; Test Scenarios are checked _only_ against the tests in the specified directory
          cloudai verify-configs --tests-dir conf/common/test conf/common
          cloudai verify-configs --tests-dir conf/release/spcx/l40s/test conf/release/spcx/l40s
          # set monitor_interval to 1 to speed up the test
          sed -i '/scheduler =/a monitor_interval = 1' conf/common/system/example_slurm_cluster.toml
          cloudai dry-run --system-config conf/common/system/example_slurm_cluster.toml \
            --tests-dir conf/common/test/ \
            --test-scenario conf/common/test_scenario/sleep.toml \
            --output-dir ./results