Fix the initial guess of tune_ewald (#83)
---------

Co-authored-by: Philip Loche <ploche@physik.fu-berlin.de>
GardevoirX and PicoCentauri authored Oct 21, 2024
1 parent 838e5cc commit 7aea00f
Showing 2 changed files with 53 additions and 13 deletions.
43 changes: 32 additions & 11 deletions src/torchpme/utils/tuning.py
@@ -31,7 +31,12 @@ def _optimize_parameters(
         if loss_value <= accuracy:
             break
 
-    if loss_value > accuracy:
+    if max_steps == 0:
+        warnings.warn(
+            "Skip optimization, return the initial guess.",
+            stacklevel=2,
+        )
+    elif loss_value > accuracy:
         warnings.warn(
             "The search for the parameters has ended, but the error is "
             f"{float(loss_value):.3e}, larger than the given accuracy {accuracy}. "
@@ -53,7 +58,8 @@ def tune_ewald(
     r"""
     Find the optimal parameters for :class:`torchpme.calculators.ewald.EwaldCalculator`.
 
-    The error formulas are given `online <https://www2.icp.uni-stuttgart.de/~icp/mediawiki/images/4/4d/Script_Longrange_Interactions.pdf>`_.
+    The error formulas are given `online
+    <https://www2.icp.uni-stuttgart.de/~icp/mediawiki/images/4/4d/Script_Longrange_Interactions.pdf>`_.
     Note the difference in notation between the parameters in the reference and ours:
 
     .. math::
@@ -64,6 +70,13 @@
         r_c &= \mathrm{cutoff}
 
+    .. hint::
+
+        Tuning uses an initial guess for the optimization, which can be applied by
+        setting ``max_steps = 0``. This can be useful if fast tuning is required. These
+        values typically result in accuracies around :math:`10^{-7}`.
+
     :param sum_squared_charges: accumulated squared charges
     :param cell: single tensor of shape (3, 3), describing the bounding
     :param positions: single tensor of shape (``len(charges), 3``) containing the
@@ -76,7 +89,8 @@
     :param verbose: whether to print the progress of gradient descent
 
     :return: Tuple containing a float of the optimal smearing for the :py:class:
-        `CoulombPotential`, a dictionary with the parameters for :py:class:`EwaldCalculator` and a float of the optimal cutoff value for the
+        `CoulombPotential`, a dictionary with the parameters for
+        :py:class:`EwaldCalculator` and a float of the optimal cutoff value for the
         neighborlist computation.
 
     Example
@@ -95,13 +109,13 @@ def tune_ewald(
     You can check the values of the parameters
 
     >>> print(smearing)
-    0.20517140875115344
+    0.14999979983727296
 
     >>> print(parameter)
-    {'lr_wavelength': 0.2879512643188817}
+    {'lr_wavelength': 0.047677734917968666}
 
     >>> print(cutoff)
-    0.5961240167485603
+    0.5485209762493759
     """
 
     _validate_parameters(cell, positions, exponent)
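
For orientation, a hedged sketch of how the returned triple is meant to be consumed. The import locations and the ``potential=``/``smearing=`` keyword names for ``CoulombPotential`` and ``EwaldCalculator`` are assumptions based on the docstring wording above, not something this diff shows; the toy cell is invented.

    import torch
    from torchpme import CoulombPotential, EwaldCalculator
    from torchpme.utils.tuning import tune_ewald

    cell = 6.0 * torch.eye(3)
    positions = torch.tensor([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]])

    smearing, params, cutoff = tune_ewald(2.0, cell, positions)

    # smearing parametrizes the potential, the dictionary is unpacked into the
    # calculator, and cutoff is the radius the neighbor list should be built with
    calculator = EwaldCalculator(CoulombPotential(smearing=smearing), **params)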
@@ -152,11 +166,12 @@ def loss(smearing, lr_wavelength, cutoff):
         smearing_init, device=device, dtype=dtype, requires_grad=True
     )
     lr_wavelength = torch.tensor(
-        half_cell, device=device, dtype=dtype, requires_grad=True
-    )
-    cutoff = torch.tensor(
-        half_cell / 10, device=device, dtype=dtype, requires_grad=True
-    )
+        -math.log(10 * min_dimension / half_cell - 1),
+        device=device,
+        dtype=dtype,
+        requires_grad=True,
+    )  # sigmoid(lr_wavelength) == half_cell / 10
+    cutoff = torch.tensor(half_cell, device=device, dtype=dtype, requires_grad=True)
 
     _optimize_parameters(
         [smearing, lr_wavelength, cutoff],
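
A quick check of why this particular raw value encodes the intended starting point. The committed comment says ``sigmoid(lr_wavelength) == half_cell / 10``; that only works out if the sigmoid output is additionally scaled by the smallest box length inside the loss, which this hunk does not show, so treat that scaling as an assumption. With it, sigmoid(-log(a - 1)) = 1/a with a = 10 * min_dimension / half_cell lands the effective starting wavelength exactly at half_cell / 10:

    import math

    import torch

    min_dimension = 6.0            # hypothetical smallest box vector length
    half_cell = min_dimension / 2  # 3.0

    raw = -math.log(10 * min_dimension / half_cell - 1)  # the committed initial value
    effective = torch.sigmoid(torch.tensor(raw)) * min_dimension

    print(float(effective), half_cell / 10)  # both ~0.3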
@@ -194,6 +209,12 @@ def tune_pme(
         \alpha = \left(\sqrt{2}\,\mathrm{smearing} \right)^{-1}
 
+    .. hint::
+
+        Tuning uses an initial guess for the optimization, which can be applied by
+        setting ``max_steps = 0``. This can be useful if fast tuning is required. These
+        values typically result in accuracies around :math:`10^{-2}`.
+
     :param sum_squared_charges: accumulated squared charges
     :param cell: single tensor of shape (3, 3), describing the bounding
     :param positions: single tensor of shape (``len(charges), 3``) containing the
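
The same shortcut applies to ``tune_pme``. A hedged sketch, inferred from the test added in the second file below rather than from code visible here: with ``max_steps=0`` the returned smearing should match ``_estimate_smearing(cell)``, the single entry of the parameter dictionary should start at ``smearing / 8``, and the cutoff at a fifth of the half cell.

    import torch
    from torchpme.utils.tuning import _estimate_smearing, tune_pme

    cell = 6.0 * torch.eye(3)  # toy cubic cell
    positions = torch.tensor([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]])

    # this call also emits the "Skip optimization" warning shown above
    smearing, params, cutoff = tune_pme(2.0, cell, positions, max_steps=0)

    half_cell = float(torch.min(torch.linalg.norm(cell, dim=1)) / 2)
    print(smearing - _estimate_smearing(cell))      # ~0
    print(list(params.values())[0] - smearing / 8)  # ~0
    print(cutoff - half_cell / 5)                   # ~0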
23 changes: 21 additions & 2 deletions tests/utils/test_tuning.py
@@ -9,7 +9,7 @@
     EwaldCalculator,
     PMECalculator,
 )
-from torchpme.utils.tuning import tune_ewald, tune_pme
+from torchpme.utils.tuning import _estimate_smearing, tune_ewald, tune_pme
 
 sys.path.append(str(Path(__file__).parents[1]))
 from helpers import define_crystal, neighbor_list_torch
@@ -32,7 +32,7 @@
     ("accuracy", "rtol"),
     [
         (1e-3, 1e-3),
-        (1e-3, 2e-6),
+        (1e-6, 2e-6),
         (1e-1, 1e-1),
     ],
)
@@ -101,6 +101,25 @@ def test_odd_interpolation_nodes():
     torch.testing.assert_close(madelung, madelung_ref, atol=0, rtol=1e-3)
 
 
+@pytest.mark.parametrize("tune", [tune_ewald, tune_pme])
+def test_skip_optimization(tune):
+    pos, charges, cell, _, _ = define_crystal()
+    match = "Skip optimization, return the initial guess."
+    with pytest.warns(UserWarning, match=match):
+        smearing, params, sr_cutoff = tune(
+            float(torch.sum(charges**2)), cell, pos, max_steps=0
+        )
+    cell_dimensions = torch.linalg.norm(cell, dim=1)
+    half_cell = float(torch.min(cell_dimensions) / 2)
+    assert smearing == pytest.approx(_estimate_smearing(cell))
+    if tune is tune_ewald:
+        assert list(params.values())[0] == pytest.approx(half_cell / 10)
+        assert sr_cutoff == pytest.approx(half_cell)
+    elif tune is tune_pme:
+        assert list(params.values())[0] == pytest.approx(smearing / 8)
+        assert sr_cutoff == pytest.approx(half_cell / 5)
+
+
 @pytest.mark.parametrize("tune", [tune_ewald, tune_pme])
 def test_accuracy_error(tune):
     pos, charges, cell, _, _ = define_crystal()
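
A side note on the testing pattern used here: ``pytest.warns`` interprets ``match`` as a regular expression applied with ``re.search``, so matching against the full warning message (literal dot included) behaves as intended. A minimal, self-contained illustration, independent of torchpme:

    import warnings

    import pytest


    def _emit_initial_guess_warning():
        warnings.warn("Skip optimization, return the initial guess.", stacklevel=2)


    def test_warning_message_is_matched():
        with pytest.warns(UserWarning, match="Skip optimization, return the initial guess."):
            _emit_initial_guess_warning()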
