2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -167,7 +167,7 @@ jobs:
- name: Find example scripts
id: set-matrix
run: |
EXAMPLES=$(find examples -name "*.py" | jq -R -s -c 'split("\n")[:-1]')
EXAMPLES=$(find examples -name "*.py" ! -path "examples/scaling/*" | jq -R -s -c 'split("\n")[:-1]')
echo "examples=$EXAMPLES" >> $GITHUB_OUTPUT

test-examples:
77 changes: 77 additions & 0 deletions examples/scaling/scaling_nve.py
@@ -0,0 +1,77 @@
"""Scaling for TorchSim NVE."""
# %%
# /// script
# dependencies = [
# "torch_sim_atomistic[mace,test]"
# ]
# ///

import time
import typing

import torch
from ase.build import bulk
from mace.calculators.foundations_models import mace_mp
from pymatgen.io.ase import AseAtomsAdaptor

import torch_sim as ts
from torch_sim.models.mace import MaceModel, MaceUrls


N_STRUCTURES = [1, 1, 1, 10, 100, 500, 1000, 1500, 5000, 10000]
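# The repeated n=1 entries above are presumably warm-up runs (model load/compile,
# CUDA initialization) so that the larger sweeps are timed against a warmed-up model.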


MD_STEPS = 10


def run_torchsim_nve(
n_structures_list: list[int],
base_structure: typing.Any,
) -> list[float]:
"""Load model, run NVE MD for MD_STEPS per n; return times."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loaded_model = mace_mp(
model=MaceUrls.mace_mpa_medium,
return_raw_model=True,
default_dtype="float64",
device=str(device),
)
max_memory_scaler = 400_000
memory_scales_with = "n_atoms_x_density"
model = MaceModel(
model=typing.cast("torch.nn.Module", loaded_model),
device=device,
compute_forces=True,
compute_stress=True,
dtype=torch.float64,
enable_cueq=False,
)
times: list[float] = []
for n in n_structures_list:
structures = [base_structure] * n
t0 = time.perf_counter()
ts.integrate(
system=structures,
model=model,
integrator=ts.Integrator.nve,
n_steps=MD_STEPS,
temperature=300.0,
timestep=0.002,
autobatcher=ts.BinningAutoBatcher(
model=model,
max_memory_scaler=max_memory_scaler,
memory_scales_with=memory_scales_with,
),
)
if device.type == "cuda":
torch.cuda.empty_cache()
elapsed = time.perf_counter() - t0
times.append(elapsed)
print(f" n={n} nve_time={elapsed:.6f}s")
return times


if __name__ == "__main__":
mgo_ase = bulk(name="MgO", crystalstructure="rocksalt", a=4.21, cubic=True)
base_structure = AseAtomsAdaptor.get_structure(atoms=mgo_ase)
sweep_totals = run_torchsim_nve(N_STRUCTURES, base_structure=base_structure)
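As a rough sanity check on the autobatcher settings (a sketch, not part of the PR): if the "n_atoms_x_density" memory scaler is n_atoms * (n_atoms / volume_nm³), as asserted in the updated tests/test_autobatching.py below, then the per-structure scaler for this 8-atom MgO cell and the implied batch capacity under max_memory_scaler=400_000 work out as follows.

```python
# Assumes the metric definition asserted in tests/test_autobatching.py below:
#   metric = n_atoms * (n_atoms / volume_nm3)
n_atoms = 8                     # conventional rocksalt MgO cell
volume_nm3 = 4.21**3 / 1000     # a = 4.21 Å, converted from Å³ to nm³
metric_per_structure = n_atoms * (n_atoms / volume_nm3)        # ≈ 858
structures_per_batch = int(400_000 // metric_per_structure)    # ≈ 466
print(metric_per_structure, structures_per_batch)
```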
77 changes: 77 additions & 0 deletions examples/scaling/scaling_nvt.py
@@ -0,0 +1,77 @@
"""Scaling for TorchSim NVT (Nose-Hoover)."""
# %%
# /// script
# dependencies = [
# "torch_sim_atomistic[mace,test]"
# ]
# ///

import time
import typing

import torch
from ase.build import bulk
from mace.calculators.foundations_models import mace_mp
from pymatgen.io.ase import AseAtomsAdaptor

import torch_sim as ts
from torch_sim.models.mace import MaceModel, MaceUrls


N_STRUCTURES = [1, 1, 1, 10, 100, 500, 1000, 1500, 5000, 10000]


MD_STEPS = 10


def run_torchsim_nvt(
n_structures_list: list[int],
base_structure: typing.Any,
) -> list[float]:
"""Load model, run NVT MD for MD_STEPS per n; return times."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loaded_model = mace_mp(
model=MaceUrls.mace_mpa_medium,
return_raw_model=True,
default_dtype="float64",
device=str(device),
)
max_memory_scaler = 400_000
memory_scales_with = "n_atoms_x_density"
model = MaceModel(
model=typing.cast("torch.nn.Module", loaded_model),
device=device,
compute_forces=True,
compute_stress=True,
dtype=torch.float64,
enable_cueq=False,
)
times: list[float] = []
for n in n_structures_list:
structures = [base_structure] * n
t0 = time.perf_counter()
ts.integrate(
system=structures,
model=model,
integrator=ts.Integrator.nvt_nose_hoover,
n_steps=MD_STEPS,
temperature=300.0,
timestep=0.002,
autobatcher=ts.BinningAutoBatcher(
model=model,
max_memory_scaler=max_memory_scaler,
memory_scales_with=memory_scales_with,
),
)
if device.type == "cuda":
torch.cuda.empty_cache()
elapsed = time.perf_counter() - t0
times.append(elapsed)
print(f" n={n} nvt_time={elapsed:.6f}s")
return times


if __name__ == "__main__":
mgo_ase = bulk(name="MgO", crystalstructure="rocksalt", a=4.21, cubic=True)
base_structure = AseAtomsAdaptor.get_structure(atoms=mgo_ase)
sweep_totals = run_torchsim_nvt(N_STRUCTURES, base_structure=base_structure)
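Since scaling_nve.py and scaling_nvt.py differ only in the integrator passed to ts.integrate, a shared helper could drive both sweeps. A minimal sketch (not part of the PR), reusing only the calls already shown above and assuming the ts.integrate signature used in these scripts:

```python
import time

import torch_sim as ts


def time_md_sweep(model, base_structure, n_structures_list, integrator, md_steps=10):
    """Time ts.integrate for each replica count, reusing one loaded model."""
    times = []
    for n in n_structures_list:
        t0 = time.perf_counter()
        ts.integrate(
            system=[base_structure] * n,
            model=model,
            integrator=integrator,  # ts.Integrator.nve or ts.Integrator.nvt_nose_hoover
            n_steps=md_steps,
            temperature=300.0,
            timestep=0.002,
            autobatcher=ts.BinningAutoBatcher(
                model=model,
                max_memory_scaler=400_000,
                memory_scales_with="n_atoms_x_density",
            ),
        )
        times.append(time.perf_counter() - t0)
    return times
```

Both scripts could then reduce to a call such as `time_md_sweep(model, base_structure, N_STRUCTURES, ts.Integrator.nve)`.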
86 changes: 86 additions & 0 deletions examples/scaling/scaling_relax.py
@@ -0,0 +1,86 @@
"""Scaling for TorchSim relax."""
# %%
# /// script
# dependencies = [
# "torch_sim_atomistic[mace,test]"
# ]
# ///

import time
import typing

import torch
from ase.build import bulk
from mace.calculators.foundations_models import mace_mp
from pymatgen.io.ase import AseAtomsAdaptor

import torch_sim as ts
from torch_sim.models.mace import MaceModel, MaceUrls


N_STRUCTURES = [1, 1, 1, 10, 100, 500, 1000, 1500]


RELAX_STEPS = 10


def run_torchsim_relax(
n_structures_list: list[int],
base_structure: typing.Any,
) -> list[float]:
"""Load TorchSim model once, run 10-step relaxation with ts.optimize for each n;
return timings.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loaded_model = mace_mp(
model=MaceUrls.mace_mpa_medium,
return_raw_model=True,
default_dtype="float64",
device=str(device),
)
model = MaceModel(
model=typing.cast("torch.nn.Module", loaded_model),
device=device,
compute_forces=True,
compute_stress=True,
dtype=torch.float64,
enable_cueq=False,
)
autobatcher = ts.InFlightAutoBatcher(
model=model,
max_memory_scaler=400_000,
memory_scales_with="n_atoms_x_density",
)
times: list[float] = []
for n in n_structures_list:
structures = [base_structure] * n
t0 = time.perf_counter()
ts.optimize(
system=structures,
model=model,
optimizer=ts.optimizers.Optimizer.fire,
init_kwargs={
"cell_filter": ts.optimizers.cell_filters.CellFilter.frechet,
"constant_volume": False,
"hydrostatic_strain": True,
},
max_steps=RELAX_STEPS,
convergence_fn=ts.runners.generate_force_convergence_fn(
force_tol=1e-3,
include_cell_forces=True,
),
autobatcher=autobatcher,
)
if device.type == "cuda":
torch.cuda.synchronize()
torch.cuda.empty_cache()
elapsed = time.perf_counter() - t0
times.append(elapsed)
print(f" n={n} relax_{RELAX_STEPS}_time={elapsed:.6f}s")
return times


if __name__ == "__main__":
mgo_ase = bulk(name="MgO", crystalstructure="rocksalt", a=4.21, cubic=True)
base_structure = AseAtomsAdaptor.get_structure(atoms=mgo_ase)
sweep_totals = run_torchsim_relax(N_STRUCTURES, base_structure)
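For context on the convergence_fn: with force_tol=1e-3 and include_cell_forces=True a structure is expected to stop early once its forces drop below the tolerance, although with max_steps=RELAX_STEPS=10 most runs here simply time the first ten FIRE steps. Below is a plain-torch illustration of a max-force criterion; it is an assumption about the kind of check generate_force_convergence_fn performs, not its actual implementation.

```python
import torch


def max_force_below_tol(forces: torch.Tensor, force_tol: float = 1e-3) -> bool:
    """True when every per-atom force norm (eV/Å assumed) is below force_tol."""
    return bool(forces.norm(dim=-1).max() < force_tol)


# Example: (n_atoms, 3) forces for one structure
forces = torch.full((8, 3), 1e-4)
print(max_force_below_tol(forces))  # True
```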
71 changes: 71 additions & 0 deletions examples/scaling/scaling_static.py
@@ -0,0 +1,71 @@
"""Scaling for TorchSim static."""
# %%
# /// script
# dependencies = [
# "torch_sim_atomistic[mace,test]"
# ]
# ///

import time
import typing

import torch
from ase.build import bulk
from mace.calculators.foundations_models import mace_mp
from pymatgen.io.ase import AseAtomsAdaptor

import torch_sim as ts
from torch_sim.models.mace import MaceModel, MaceUrls


N_STRUCTURES = [1, 1, 1, 10, 100, 500, 1000, 1500, 5000, 10000, 50000, 100000]


def run_torchsim_static(
n_structures_list: list[int],
base_structure: typing.Any,
) -> list[float]:
"""Load TorchSim model once, run static for each n using O(1)
batched path, return timings.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
loaded_model = mace_mp(
model=MaceUrls.mace_mpa_medium,
return_raw_model=True,
default_dtype="float64",
device=str(device),
)
model = MaceModel(
model=typing.cast("torch.nn.Module", loaded_model),
device=device,
compute_forces=True,
compute_stress=True,
dtype=torch.float64,
enable_cueq=False,
)
batcher = ts.BinningAutoBatcher(
model=model,
max_memory_scaler=400_000,
memory_scales_with="n_atoms_x_density",
)
times: list[float] = []
for n in n_structures_list:
structures = [base_structure] * n
t0 = time.perf_counter()
state = ts.initialize_state(structures, model.device, model.dtype)
batcher.load_states(state)
for sub_state, _ in batcher:
model(sub_state)
if device.type == "cuda":
torch.cuda.synchronize()
torch.cuda.empty_cache()
elapsed = time.perf_counter() - t0
times.append(elapsed)
print(f" n={n} static_time={elapsed:.6f}s")
return times


if __name__ == "__main__":
mgo_ase = bulk(name="MgO", crystalstructure="rocksalt", a=4.21, cubic=True)
base_structure = AseAtomsAdaptor.get_structure(atoms=mgo_ase)
sweep_totals = run_torchsim_static(N_STRUCTURES, base_structure)
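A small variation on the loop above (a sketch, not part of the PR, assuming the model, batcher, device, and base_structure objects constructed in this script are in scope): timing each sub-batch separately shows how BinningAutoBatcher splits a given n under the 400_000 memory-scaler budget.

```python
state = ts.initialize_state([base_structure] * 1000, model.device, model.dtype)
batcher.load_states(state)
per_batch_times = []
for sub_state, _ in batcher:
    t0 = time.perf_counter()
    model(sub_state)
    if device.type == "cuda":
        torch.cuda.synchronize()
    per_batch_times.append(time.perf_counter() - t0)
print(f"{len(per_batch_times)} sub-batches: {[f'{t:.3f}s' for t in per_batch_times]}")
```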
15 changes: 13 additions & 2 deletions tests/test_autobatching.py
@@ -95,12 +95,14 @@ def test_calculate_scaling_metric(si_sim_state: ts.SimState) -> None:
# Test n_atoms metric
n_atoms_metric = calculate_memory_scaler(si_sim_state, "n_atoms")
assert n_atoms_metric == si_sim_state.n_atoms
assert n_atoms_metric == 8

# Test n_atoms_x_density metric
density_metric = calculate_memory_scaler(si_sim_state, "n_atoms_x_density")
volume = torch.abs(torch.linalg.det(si_sim_state.cell[0])) / 1000
expected = si_sim_state.n_atoms * (si_sim_state.n_atoms / volume.item())
assert pytest.approx(density_metric, rel=1e-5) == expected
assert pytest.approx(density_metric, rel=1e-5) == (8**2) * 1000 / (5.43**3)

# Test invalid metric
with pytest.raises(ValueError, match="Invalid metric"):
@@ -109,15 +111,24 @@ def test_calculate_scaling_metric(si_sim_state: ts.SimState) -> None:

def test_calculate_scaling_metric_non_periodic(benzene_sim_state: ts.SimState) -> None:
"""Test calculation of scaling metrics for a non-periodic state."""
# Test that calculate passes
n_atoms_metric = calculate_memory_scaler(benzene_sim_state, "n_atoms")
assert n_atoms_metric == benzene_sim_state.n_atoms
assert n_atoms_metric == 12

# Test n_atoms_x_density metric works for non-periodic systems
n_atoms_x_density_metric = calculate_memory_scaler(
benzene_sim_state, "n_atoms_x_density"
)
assert n_atoms_x_density_metric > 0
bbox = (
benzene_sim_state.positions.max(dim=0).values
- benzene_sim_state.positions.min(dim=0).values
).clone()
for i, p in enumerate(benzene_sim_state.pbc):
if not p:
bbox[i] += 2.0
assert pytest.approx(n_atoms_x_density_metric, rel=1e-5) == (
benzene_sim_state.n_atoms**2 / (bbox.prod().item() / 1000)
)


def test_split_state(si_double_sim_state: ts.SimState) -> None:
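For reference, the hard-coded expected value asserted for the si_sim_state density metric above follows from an 8-atom cell with a cubic lattice parameter of 5.43 Å (implied by the test): the cell volume in nm³ is 5.43³/1000, and the metric is n_atoms * (n_atoms / volume). A quick check (a sketch, not part of the test suite):

```python
n_atoms = 8
volume_nm3 = 5.43**3 / 1000            # Å³ -> nm³, cubic cell implied by the test
metric = n_atoms * (n_atoms / volume_nm3)
print(metric)                          # ≈ 399.7 == (8**2) * 1000 / (5.43**3)
```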