12 changes: 7 additions & 5 deletions .coveragerc
@@ -19,14 +19,16 @@ exclude_lines =
     if False:
     if __name__ == .__main__.:
     pass
+    if TYPE_CHECKING:
+    if typing.TYPE_CHECKING:
 
 omit =
     # Omit files that cannot be tested
     dace/jupyter.py
 
     # Omit deprecated files
-    dace/frontend/tensorflow/__init__.py
-    dace/frontend/tensorflow/tensorflow.py
-    dace/frontend/tensorflow/winograd.py
-    dace/frontend/tensorflow/transformations/__init__.py
-    dace/frontend/tensorflow/transformations/redundant_array.py
+    dace/frontend/ml/tensorflow/__init__.py
+    dace/frontend/ml/tensorflow/tensorflow.py
+    dace/frontend/ml/tensorflow/winograd.py
+    dace/frontend/ml/tensorflow/transformations/__init__.py
+    dace/frontend/ml/tensorflow/transformations/redundant_array.py
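For context, the newly excluded patterns match type-checking-only blocks, which never execute at runtime and would otherwise register as permanently uncovered lines. A minimal sketch of why (the imported name is illustrative):

```python
# TYPE_CHECKING is False at runtime, so this block only runs under a static
# type checker (e.g. mypy) and can never be reached by a test.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from dace.sdfg import SDFG  # illustrative import, used only in annotations
```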
2 changes: 1 addition & 1 deletion .github/workflows/copilot-setup-steps.yml
@@ -35,6 +35,6 @@ jobs:

       - name: Install DaCe in development mode
         run: |
-          python -m pip install --editable ".[testing,linting]"
+          python -m pip install --editable ".[testing,linting,ml]"
           pre-commit install
           pre-commit run
48 changes: 48 additions & 0 deletions .github/workflows/dace-updater.yml
@@ -0,0 +1,48 @@
name: Inform the Python package index about a new DaCe release.

on:
  # Trigger for all pushes to tags matching this pattern
  push:
    tags:
      - __gt4py-next-integration_*

  # To "install" this workflow you must enable this trigger so that the workflow runs at least once.
  # You should also disable any processing so that no commit is made in the index repo.
  # See https://stackoverflow.com/a/71057825
  #pull_request:

  # Allows triggering the update manually.
  # NOTE: This is only possible if the workflow file is present on both the default branch and the branch it should run on.
  workflow_dispatch:

jobs:
  update-dace:
    runs-on: ubuntu-latest
    steps:
      - name: Inform Index
        shell: bash
        run: |
          INDEX_ORGANIZATION="gridtools"
          INDEX_REPO="python-pkg-index"

          # We use `github.sha` to be sure that we transmit an identifier to the index
          # that can be checked out. We previously used `github.ref_name` but got
          # inconsistent results with it.
          DEPENDENCY_REF="${{ github.sha }}"
          SOURCE_REPO="dace"
          SOURCE_OWNER="gridtools"

          curl -L -v --fail-with-body \
            -X POST \
            -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.PKG_UPDATE_TOKEN }}" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            "https://api.github.com/repos/${INDEX_ORGANIZATION}/${INDEX_REPO}/dispatches" \
            -d '{"event_type":"update_package_index","client_payload":{"source_repo":"'"${SOURCE_REPO}"'","source_org":"'"${SOURCE_OWNER}"'","dependency_ref":"'"${DEPENDENCY_REF}"'"}}'

          if [ $? -ne 0 ]
          then
            echo "POST to '${INDEX_ORGANIZATION}:${INDEX_REPO}' failed."
            exit 1
          fi
          exit 0
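For local testing, the same repository_dispatch event can be sent from Python; a hedged sketch, where the token argument stands in for the PKG_UPDATE_TOKEN secret and must be a GitHub token allowed to dispatch events:

```python
# Sketch: send the same repository_dispatch event the workflow sends via curl.
# GitHub responds with 204 No Content on success.
import requests


def notify_index(token: str, dependency_ref: str) -> None:
    resp = requests.post(
        "https://api.github.com/repos/gridtools/python-pkg-index/dispatches",
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
            "X-GitHub-Api-Version": "2022-11-28",
        },
        json={
            "event_type": "update_package_index",
            "client_payload": {
                "source_repo": "dace",
                "source_org": "gridtools",
                "dependency_ref": dependency_ref,
            },
        },
    )
    resp.raise_for_status()
```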
2 changes: 1 addition & 1 deletion .github/workflows/fpga-ci.yml
@@ -32,7 +32,7 @@ jobs:
           python -m pip install --upgrade pip
           pip install pytest-xdist flake8 coverage click
           pip uninstall -y dace
-          pip install -e ".[testing]"
+          pip install -e ".[testing,ml]"
           curl -Os https://uploader.codecov.io/latest/linux/codecov
           chmod +x codecov
 
2 changes: 1 addition & 1 deletion .github/workflows/general-ci.yml
@@ -59,7 +59,7 @@ jobs:
           else
             export DACE_optimizer_automatic_simplification=${{ matrix.simplify }}
           fi
-          pytest -n auto --cov-report=xml --cov=dace --tb=short --timeout_method thread --timeout=300 -m "not gpu and not verilator and not tensorflow and not mkl and not sve and not papi and not mlir and not lapack and not fpga and not mpi and not rtl_hardware and not scalapack and not datainstrument and not long and not sequential"
+          pytest -n auto --cov-report=xml --cov=dace --tb=short --timeout_method thread --timeout=300 -m "not gpu and not autodiff and not torch and not onnx and not verilator and not tensorflow and not mkl and not sve and not papi and not mlir and not lapack and not fpga and not mpi and not rtl_hardware and not scalapack and not datainstrument and not long and not sequential"
           ./codecov
 
       - name: Test OpenBLAS LAPACK
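The extended `-m` expression deselects the new ML test categories from the general run; they are picked up by the new ml-ci.yml workflow instead. A sketch of how such a marker looks on the test side (marker names come from the workflows; the test function is hypothetical):

```python
# Tests tagged this way are excluded here by "not autodiff and not torch and
# not onnx" and selected in ml-ci.yml by "(torch or onnx or autodiff) and not gpu".
import pytest


@pytest.mark.autodiff
def test_backward_pass_smoke():
    ...
```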
2 changes: 1 addition & 1 deletion .github/workflows/gpu-ci.yml
@@ -37,7 +37,7 @@ jobs:
           pip install mpi4py
           pip install cupy
           pip uninstall -y dace
-          pip install -e ".[testing]"
+          pip install -e ".[testing,ml]"
           curl -Os https://uploader.codecov.io/latest/linux/codecov
           chmod +x codecov
 
62 changes: 62 additions & 0 deletions .github/workflows/ml-ci.yml
@@ -0,0 +1,62 @@
name: Machine Learning and Autodiff Tests

on:
  push:
    branches: [ main, ci-fix ]
  pull_request:
    branches: [ main, ci-fix ]
  merge_group:
    branches: [ main, ci-fix ]

concurrency:
  group: ${{github.workflow}}-${{github.ref}}
  cancel-in-progress: true

jobs:
  test:
    if: "!contains(github.event.pull_request.labels.*.name, 'no-ci')"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.13']
        simplify: [0,1,autoopt]

    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libyaml-dev cmake
          sudo apt-get install -y libblas-dev libopenblas-dev liblapacke-dev
          python -m pip install --upgrade pip
          pip install flake8 pytest-xdist coverage
          pip install -e ".[ml-testing,ml]"
          curl -Os https://uploader.codecov.io/latest/linux/codecov
          chmod +x codecov

      - name: Test with pytest
        run: |
          export NOSTATUSBAR=1
          export DACE_testing_serialization=1
          export DACE_testing_deserialize_exception=1
          export DACE_cache=unique
          if [ "${{ matrix.simplify }}" = "autoopt" ]; then
            export DACE_optimizer_automatic_simplification=1
            export DACE_optimizer_autooptimize=1
            echo "Auto-optimization heuristics"
          else
            export DACE_optimizer_automatic_simplification=${{ matrix.simplify }}
          fi
          pytest --cov-report=xml --cov=dace --tb=short --timeout_method thread --timeout=600 -v -m "(torch or onnx or autodiff) and not gpu"
          ./codecov

      - uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          verbose: true
5 changes: 5 additions & 0 deletions .gitignore
@@ -195,3 +195,8 @@ _build/

 # Ignoring the test junk
 _all_tests/
+
+
+# Ignore downloaded ONNX models
+/*.onnx
+/*.bin
6 changes: 4 additions & 2 deletions codecov.yml
@@ -1,6 +1,8 @@
 ignore:
   - "dace/jupyter.py" # Omit files that cannot be tested
-  - "dace/frontend/tensorflow/**/*" # Omit deprecated files
+  - "dace/frontend/ml/tensorflow/**/*" # Omit deprecated files
+  - "samples/**/*"
+  - "tests/**/*"
 
 coverage:
   range: 40..90
@@ -18,6 +20,6 @@ coverage:

 codecov:
   notify:
-    after_n_builds: 18
+    after_n_builds: 23
 
 comment: false
11 changes: 11 additions & 0 deletions dace/__init__.py
@@ -35,6 +35,17 @@
 sys.path.insert(0, __external_transformations_path__)
 
 
+# Lazy loading for ml module to avoid eager TensorFlow/PyTorch imports
+def __getattr__(name):
+    if name == 'ml':
+        import importlib
+        ml_module = importlib.import_module('.ml', package='dace')
+        # Cache the module to avoid re-importing
+        globals()['ml'] = ml_module
+        return ml_module
+    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
+
+
 # Hack that enables using @dace as a decorator
 # See https://stackoverflow.com/a/48100440/6489142
 class DaceModule(sys.modules[__name__].__class__):
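The hook above relies on PEP 562 (module-level `__getattr__`, Python 3.7+): `import dace` stays cheap, and the heavy ml subpackage is imported only on first attribute access. A self-contained sketch of the same pattern, with illustrative module and attribute names:

```python
# lazy_pkg/__init__.py -- PEP 562 lazy submodule loading, as in dace/__init__.py
import importlib


def __getattr__(name):
    if name == 'heavy':
        module = importlib.import_module('.heavy', package=__name__)
        globals()['heavy'] = module  # cache so __getattr__ is not hit again
        return module
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
```

After the first access the module sits in `globals()`, so later lookups bypass `__getattr__` entirely.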
58 changes: 58 additions & 0 deletions dace/autodiff/__init__.py
@@ -0,0 +1,58 @@
# Copyright 2019-2025 ETH Zurich and the DaCe authors. All rights reserved.
"""
DaCe Automatic Differentiation (AD) System.

This module provides reverse-mode automatic differentiation for DaCe programs,
enabling automatic computation of gradients for optimized numerical kernels.

Main Components
---------------
- **add_backward_pass**: Main entry point for adding a backward pass to an SDFG
- **BackwardPassGenerator**: Core algorithm for generating backward passes
- **BackwardImplementation**: ABC for implementing operation-specific backward rules
- **BackwardContext**: Context information for backward pass generation
- **BackwardResult**: Result of backward pass generation with forward/backward SDFGs
- **AutoDiffException**: Base exception for autodiff errors

Key Features
------------
- Support for control flow (loops, conditionals)
- Data forwarding strategies (store vs. recompute tradeoffs)
- Extensible backward implementations for library nodes
- Integration with PyTorch autograd
- Automatic memory management for intermediate values
"""

from .base_abc import BackwardImplementation, BackwardContext, BackwardResult, AutoDiffException
from .backward_pass_generator import BackwardPassGenerator
from .autodiff import add_backward_pass

try:
    from .torch import make_backward_function
    TORCH_INTEGRATION_AVAILABLE = True
except ImportError:
    make_backward_function = None
    TORCH_INTEGRATION_AVAILABLE = False

import sys
from . import library

__all__ = [
    # Main API
    "add_backward_pass",
    # Core classes
    "BackwardPassGenerator",
    "BackwardContext",
    "BackwardResult",
    # Extension points
    "BackwardImplementation",
    # Exceptions
    "AutoDiffException",
    # Submodules
    "library",
]

if TORCH_INTEGRATION_AVAILABLE:
    __all__.append("make_backward_function")
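Because the torch integration is optional, downstream code can feature-gate on the module-level flag this `__init__` exposes. A hedged sketch using only names visible in this diff (the signature of `add_backward_pass` is not shown here, so no call is spelled out):

```python
# Feature-gate on the optional PyTorch integration of dace.autodiff.
import dace.autodiff as autodiff

if autodiff.TORCH_INTEGRATION_AVAILABLE:
    backward_fn_factory = autodiff.make_backward_function
else:
    # torch is not installed; pure-SDFG autodiff via
    # autodiff.add_backward_pass is still available
    backward_fn_factory = None
```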
103 changes: 103 additions & 0 deletions dace/autodiff/analysis.py
@@ -0,0 +1,103 @@
# Copyright 2019-2025 ETH Zurich and the DaCe authors. All rights reserved.
"""
Analysis helpers for autodiff.
"""
from typing import Dict, Set, Tuple, Optional
import collections

import networkx as nx

from dace.sdfg import SDFG, SDFGState, nodes, utils as sdfg_utils
from dace.transformation.passes import analysis
from dace.sdfg.state import FunctionCallRegion

AccessSets = Dict[SDFGState, Tuple[Set[str], Set[str]]]


def dependency_analysis(sdfg: SDFG) -> Dict[str, Set[str]]:
    """
    Analyze read dependencies of arrays in the SDFG.

    :param sdfg: SDFG to analyze.
    :return: A dictionary mapping each array name to the set of arrays it
             (transitively) reads from.
    """

    # FIXME: can be made more efficient
    dependencies = nx.DiGraph()
    for sdfg_node in sdfg.nodes():
        if isinstance(sdfg_node, SDFGState):
            for node in sdfg_node.data_nodes():
                for edge in sdfg_node.edge_bfs(node, reverse=True):
                    dependencies.add_edge(node.data, edge.data.data)
        elif isinstance(sdfg_node, FunctionCallRegion):
            for state in sdfg_node.nodes():
                assert isinstance(state, SDFGState)
                for node in state.data_nodes():
                    for edge in state.edge_bfs(node, reverse=True):
                        dependencies.add_edge(node.data, edge.data.data)

    dependencies = nx.transitive_closure(dependencies)
    result = {}
    for array in dependencies:
        result[array] = {nbr for nbr in dependencies.neighbors(array)}
    return result
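A tiny illustration of the transitive-closure semantics `dependency_analysis` relies on: if `c` is computed from `b` and `b` from `a`, then `c`'s read dependencies are `{a, b}`:

```python
import networkx as nx

g = nx.DiGraph()
g.add_edge("b", "a")  # b reads a
g.add_edge("c", "b")  # c reads b
closure = nx.transitive_closure(g)
assert set(closure.neighbors("c")) == {"a", "b"}  # indirect dependency on a
```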


def inverse_reachability(sdfg: SDFG) -> Dict[SDFGState, Set[SDFGState]]:
    """
    Compute, for each state, the set of states from which it is reachable.
    """
    reachability = analysis.StateReachability().apply_pass(sdfg, {})
    inverse_reachability = collections.defaultdict(set)
    # Iterate over all control-flow graph IDs
    for cfg_id in reachability.keys():
        for pred, successors in reachability[cfg_id].items():
            for successor in successors:
                inverse_reachability[successor].add(pred)

    return inverse_reachability


def is_previously_written(sdfg: SDFG,
                          state: SDFGState,
                          node: nodes.Node,
                          array_name: str,
                          access_sets: Optional[AccessSets] = None) -> bool:
    """
    Determine whether the given array was written before the current node.

    :param sdfg: the SDFG containing the node.
    :param state: the state containing the node.
    :param node: the node to check.
    :param array_name: the array name to check.
    :param access_sets: optionally, precomputed access sets; recomputed if not given.
    :return: True if the array was written before the node, False otherwise.
    """

    if access_sets is None:
        access_sets = analysis.AccessSets().apply_pass(sdfg, {})

    reachable = inverse_reachability(sdfg)

    # Check the current state
    for subgraph in sdfg_utils.concurrent_subgraphs(state):
        if node in subgraph.nodes():
            # Get all access nodes in the subgraph that refer to the same data
            for other_node in subgraph.data_nodes():
                if other_node != node and other_node.data == array_name:
                    # Check if this is a write node
                    for in_edge in subgraph.in_edges(other_node):
                        if in_edge.data.data == array_name:
                            # Check if there is a path to our node, since we only
                            # care about writes that happen before the current node
                            if nx.has_path(state.nx, other_node, node):
                                return True
        else:
            # This is not our current subgraph, check the write sets
            _, write_set = subgraph.read_and_write_sets()
            if array_name in write_set:
                return True

    # Check other states
    for predecessor in reachable[state]:
        _, write_set = access_sets[predecessor]
        if array_name in write_set:
            return True
    return False
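A hedged usage sketch of `dependency_analysis` on a toy program (the program itself is hypothetical, and the exact keys in the result depend on how the frontend names arrays):

```python
import dace
from dace.autodiff.analysis import dependency_analysis


@dace.program
def saxpy(x: dace.float64[64], y: dace.float64[64], z: dace.float64[64]):
    z[:] = 2.0 * x + y


sdfg = saxpy.to_sdfg()
deps = dependency_analysis(sdfg)
# z is computed from x and y, so both appear among its read dependencies
print(deps.get('z', set()))
```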