
Commit 0c8222e (parent: c545aac)

Fix convergence check in optimizers, and skip phono3py tests if not available

File tree: 6 files changed, +21 −8 lines


docs/source/overview.parallelisation.rst

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ The number of threads is controlled by an integer, passed in to the
 function as an optional ``num_python_subprocesses`` argument, or stored
 in the env var ``WFL_NUM_PYTHON_SUBPROCESSES``. The script should be
 started with a normal run of the python executable. Setting
-the ``WFL_TORCH_DEVICE_IS_SUBPROCESS_ID`` env var (to any value)
+the ``WFL_TORCH_N_GPUS`` env var to the number of GPUs
 causes ``wfl`` to call ``torch.cuda.set_device()`` for each subprocess
 ensuring that it gets a unique GPU from the other subprocesses.
 
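The updated docs describe wfl giving each python subprocess its own CUDA device. A minimal sketch of that kind of round-robin assignment, assuming torch is installed; the assign_gpu name and worker_id argument are illustrative, not wfl's actual internals:

import os

import torch

def assign_gpu(worker_id):
    # number of GPUs to spread subprocesses over, as set by the user
    n_gpus = int(os.environ.get("WFL_TORCH_N_GPUS", "0"))
    if n_gpus > 0 and torch.cuda.is_available():
        # each subprocess picks a device from its index, wrapping around
        torch.cuda.set_device(worker_id % n_gpus)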

tests/local_scripts/complete_pytest.tin

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 
 module purge
 # module load compiler/gnu python/system python_extras/quippy lapack/mkl
-module load compiler/gnu python python_extras/quippy lapack/mkl
+module load compiler/gnu python python_extras/structure python_extras/quippy lapack/mkl
 # for wfl dependencies
 module load python_extras/wif
 module load python_extras/torch/cpu
@@ -14,7 +14,7 @@ WFL_PYTEST_EXPYRE_INFO=$(
 cat << EOF | python3
 import json, os
 i = {"pre_cmds": ["module purge",
-                  "module load compiler/gnu lapack/mkl python python_extras/quippy python_extras/wif dft/vasp dft/pwscf",
+                  "module load compiler/gnu lapack/mkl python python_extras/structure python_extras/quippy python_extras/wif dft/vasp dft/pwscf",
                   "module list"]}
 ienv = json.loads(os.environ.get("WFL_PYTEST_EXPYRE_INFO", "{}"))
 i.update(ienv)

tests/test_phonopy.py

Lines changed: 6 additions & 0 deletions
@@ -7,6 +7,10 @@
 
 from wfl.configset import ConfigSet, OutputSpec
 from wfl.generate.phonopy import phonopy
+try:
+    import phono3py
+except ImportError:
+    phono3py = None
 
 
 def test_phonopy(tmp_path):
@@ -33,6 +37,7 @@ def test_phonopy(tmp_path):
     for v in at.positions[1:]:
         assert min(np.linalg.norm(sc.positions[1:] - v, axis=1)) < 1.0e-7
 
+@pytest.mark.skipif(phono3py is None, reason="No phono3py module")
 def test_phono3py(tmp_path):
     at0 = Atoms(numbers=[29], cell = [[0, 2, 2], [2, 0, 2], [2, 2, 0]], positions = [[0, 0, 0]], pbc = [True]*3)
     at1 = Atoms(numbers=[29], cell = [[0, 1.9, 1.9], [1.9, 0, 1.9], [1.9, 1.9, 0]], positions = [[0, 0, 0]], pbc = [True]*3)
@@ -62,6 +67,7 @@ def test_phono3py(tmp_path):
     assert sum([at.info["config_type"] == "phonon_cubic_1" for at in pert]) == 13*2
 
 
+@pytest.mark.skipif(phono3py is None, reason="No phono3py module")
 def test_phono3py_same_supercell(tmp_path):
     at0 = Atoms(numbers=[29], cell = [[0, 2, 2], [2, 0, 2], [2, 2, 0]], positions = [[0, 0, 0]], pbc = [True]*3)
     at1 = Atoms(numbers=[29], cell = [[0, 1.9, 1.9], [1.9, 0, 1.9], [1.9, 1.9, 0]], positions = [[0, 0, 0]], pbc = [True]*3)
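The module-level try/except plus @pytest.mark.skipif guard above is one way to make these tests optional. pytest's built-in importorskip helper gives the same effect from inside a single test; a minimal sketch, with a hypothetical test name:

import pytest

def test_needs_phono3py(tmp_path):
    # skips (rather than fails) this test when phono3py cannot be imported
    phono3py = pytest.importorskip("phono3py", reason="No phono3py module")
    ...

The skipif approach in the commit is preferable when several tests share the guard, since the import is attempted only once at module level.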

tests/test_remote_run.py

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ def test_vasp_fail(tmp_path, expyre_systems, monkeypatch, remoteinfo_env):
 
 def do_vasp_fail(tmp_path, sys_name, monkeypatch, remoteinfo_env):
     ri = {'sys_name': sys_name, 'job_name': 'pytest_vasp_'+sys_name,
-          'env_vars' : ['ASE_VASP_COMMAND=NONE', 'ASE_VASP_COMMAND_GAMMA=NONE'],
+          'env_vars' : ['ASE_VASP_COMMAND=NO_VASP_FAIL', 'ASE_VASP_COMMAND_GAMMA=NO_VASP_FAIL'],
          'input_files' : ['POTCARs'],
          'resources': {'max_time': '5m', 'num_nodes': 1},
          'num_inputs_per_queued_job': 1, 'check_interval': 10}

wfl/generate/neb.py

Lines changed: 6 additions & 2 deletions
@@ -89,9 +89,11 @@ def process_step():
 
     # preliminary value
     final_status = 'unconverged'
+    is_converged = False
 
     try:
-        opt.run(fmax=fmax, steps=steps)
+        for is_converged in opt.irun(fmax=fmax, steps=steps):
+            pass
     except Exception as exc:
         # label actual failed optimizations
         # when this happens, the atomic config somehow ends up with a 6-vector stress, which can't be
@@ -109,7 +111,9 @@ def process_step():
     for at in traj[0]:
         at.info['neb_config_type'] = 'neb_initial'
 
-    if opt.converged():
+    # from value returned by Optimizer.irun(), since Optimizer.converged requires
+    # a gradient as of ASE 3.26
+    if is_converged:
         final_status = 'converged'
 
     for intermed_images in traj[1:-1]:
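The fix works because Optimizer.irun() is a generator that yields the current convergence status after each step, so the last value it yields can stand in for opt.converged(). A minimal standalone sketch, assuming ASE with the EMT calculator; the Cu dimer geometry and fmax value are arbitrary:

from ase import Atoms
from ase.calculators.emt import EMT
from ase.optimize import BFGS

atoms = Atoms("Cu2", positions=[[0.0, 0.0, 0.0], [0.0, 0.0, 2.7]])
atoms.calc = EMT()
opt = BFGS(atoms)

is_converged = False
# exhaust the generator; is_converged keeps the last yielded status
for is_converged in opt.irun(fmax=0.05, steps=100):
    pass

print("converged" if is_converged else "unconverged")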

wfl/generate/optimize.py

Lines changed: 5 additions & 2 deletions
@@ -159,9 +159,11 @@ def process_step():
 
     # preliminary value
     final_status = 'unconverged'
+    is_converged = False
 
     try:
-        opt.run(fmax=fmax, smax=smax, steps=steps)
+        for is_converged in opt.irun(fmax=fmax, smax=smax, steps=steps):
+            pass
     except Exception as exc:
         # label actual failed optimizations
         # when this happens, the atomic config somehow ends up with a 6-vector stress, which can't be
@@ -184,7 +186,8 @@ def process_step():
 
     # as of 3.26 converged() requires a gradient, but for PreconLBFGS it's not used
     # See https://gitlab.com/ase/ase/-/issues/1744
-    if opt.converged(None):
+    # use Optimizer.irun instead, since it returns converged status
+    if is_converged:
         final_status = 'converged'
 
     traj[-1].info['optimize_config_type'] = f'optimize_last_{final_status}'
