Skip to content

Commit

Permalink
Merge branch 'main' into feature_restart
Browse files Browse the repository at this point in the history
  • Loading branch information
mb2055 committed Nov 6, 2023
2 parents c8e9f52 + a642d47 commit bb9606d
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 25 deletions.
15 changes: 12 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ First create a conda environment using the provided environment file:
mamba create -f environment.yaml
```

(We recommend using [Mambaforge](https://github.com/conda-forge/miniforge#mambaforge).)
(We recommend using [Miniforge](https://github.com/conda-forge/miniforge).)

Now install `somd2` into the environment:

Expand Down Expand Up @@ -67,7 +67,16 @@ processed using [BioSimSpace](https://github.com/OpenBioSim/biosimspace) as foll
```python
import BioSimSpace as BSS

pmf, overlap = BSS.FreeEnergy.Relative.analyse("output")
pmf1, overlap1 = BSS.FreeEnergy.Relative.analyse("output1")
```

(Here we assume that the output directory is called `output`.)
(Here we assume that the output directory is called `output1`.)

To compute the relative free-energy difference between two legs, e.g.
legs 1 and 2, you can use:

```python
pmf2, overlap2 = BSS.FreeEnergy.Relative.analyse("output2")

free_nrg = BSS.FreeEnergy.Relative.difference(pmf1, pmf2)
```
20 changes: 16 additions & 4 deletions src/somd2/runner/_dynamics.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ def increment_filename(base_filename, suffix):

def _setup_dynamics(self, equilibration=False):
"""
Minimise if needed and then setup dynamics object
Setup the dynamics object.
Parameters
----------
Expand Down Expand Up @@ -175,12 +175,13 @@ def _setup_dynamics(self, equilibration=False):
perturbable_constraint="none"
if equilibration
else self._config.perturbable_constraint,
vacuum=not self._has_space,
map=map,
)

def _minimisation(self, lambda_min=None):
"""
Minimisation of self._system
Minimisation of self._system.
Parameters
----------
Expand All @@ -190,25 +191,29 @@ def _minimisation(self, lambda_min=None):
lambda_val.
"""
if lambda_min is None:
_logger.info(f"Minimising at λ = {self._lambda_val}")
try:
m = self._system.minimisation(
cutoff_type=self._config.cutoff_type,
schedule=self._config.lambda_schedule,
lambda_value=self._lambda_val,
platform=self._config.platform,
vacuum=not self._has_space,
map=self._config.extra_args,
)
m.run()
self._system = m.commit()
except:
raise
else:
_logger.info(f"Minimising at λ = {lambda_min}")
try:
m = self._system.minimisation(
cutoff_type=self._config.cutoff_type,
schedule=self._config.lambda_schedule,
lambda_value=lambda_min,
platform=self._config.platform,
vacuum=not self._has_space,
map=self._config.extra_args,
)
m.run()
Expand All @@ -223,6 +228,8 @@ def _equilibration(self):
Per-window equilibration.
Currently just runs dynamics without any saving
"""

_logger.info(f"Equilibrating at λ = {self._lambda_val}")
self._setup_dynamics(equilibration=True)
self._dyn.run(
self._config.equilibration_time,
Expand Down Expand Up @@ -275,6 +282,8 @@ def generate_lam_vals(lambda_base, increment):
else:
lam_arr = self._lambda_array + self._lambda_grad

_logger.info(f"Running dynamics at λ = {self._lambda_val}")

if self._config.checkpoint_frequency.value() > 0.0:
### Calc number of blocks and remainder (surely there's a better way?)###
num_blocks = 0
Expand All @@ -293,7 +302,7 @@ def generate_lam_vals(lambda_base, increment):
_Path(self._config.output_directory) / self._filenames["checkpoint"]
)
# Run num_blocks dynamics and then run a final block if rem > 0
for _ in range(int(num_blocks)):
for x in range(int(num_blocks)):
try:
self._dyn.run(
self._config.checkpoint_frequency,
Expand All @@ -309,7 +318,7 @@ def generate_lam_vals(lambda_base, increment):
self._system = self._dyn.commit()
_stream.save(self._system, str(sire_checkpoint_name))
df = self._system.energy_trajectory(to_alchemlyb=True)
if _ == 0:
if x == 0:
# Not including speed in checkpoints for now.
f = _dataframe_to_parquet(
df,
Expand All @@ -328,6 +337,9 @@ def generate_lam_vals(lambda_base, increment):
f,
df.iloc[-int(energy_per_block) :],
)
_logger.info(
f"Finished block {x+1} of {num_blocks} for λ = {self._lambda_val}"
)
except:
raise
# No need to checkpoint here as it is the final block.
Expand Down
43 changes: 25 additions & 18 deletions src/somd2/runner/_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,14 +85,14 @@ def __init__(self, system, config):
for mol in self._system.molecules("molecule property is_perturbable"):
self._system.update(mol.perturbation().link_to_reference().commit())

# Check for a periodic space.
self._check_space()

# Validate the configuration.
if not isinstance(config, _Config):
raise TypeError("'config' must be of type 'somd2.config.Config'")
self._config = config

# Check for a periodic space.
self._check_space()

# Set the lambda values.
self._lambda_values = [
round(i / (self._config.num_lambda - 1), 5)
Expand Down Expand Up @@ -183,6 +183,12 @@ def _check_space(self):
self._has_space = True
else:
self._has_space = False
_logger.info("No periodic space detected. Assuming vacuum simulation.")
if self._config.cutoff_type == "pme":
_logger.info(
"Cannot use PME for non-periodic simulations. Using RF cutoff instead."
)
self._config.cutoff_type = "rf"

def _check_directory(self):
"""
Expand Down Expand Up @@ -283,7 +289,7 @@ def _set_lambda_schedule(self, schedule):
self._config.lambda_schedule = schedule

@staticmethod
def get_gpu_devices(platform, _log_level="INFO"):
def get_gpu_devices(platform):
"""
Get list of available GPUs from CUDA_VISIBLE_DEVICES,
OPENCL_VISIBLE_DEVICES, or HIP_VISIBLE_DEVICES.
Expand Down Expand Up @@ -358,6 +364,10 @@ def _repartition_h_mass(self):
repartition_hydrogen_masses as _repartition_hydrogen_masses,
)

_logger.info(
f"Repartitioning hydrogen masses with factor {self._config.h_mass_factor}"
)

self._system = _repartition_hydrogen_masses(
self._system, mass_factor=self._config.h_mass_factor
)
Expand All @@ -380,7 +390,6 @@ def _initialise_simulation(self, system, lambda_value, device=None):
"""
from ._dynamics import Dynamics

_logger.debug(f"Initialising simulation at lambda = {lambda_value}")
try:
self._sim = Dynamics(
system,
Expand All @@ -391,7 +400,7 @@ def _initialise_simulation(self, system, lambda_value, device=None):
has_space=self._has_space,
)
except:
_logger.warning(f"System creation at {lambda_value} failed")
_logger.warning(f"System creation at λ = {lambda_value} failed")
raise

def _cleanup_simulation(self):
Expand Down Expand Up @@ -457,7 +466,7 @@ def run(self):
except Exception as e:
result = False
_logger.error(
f"Exception raised for lambda = {lambda_value}: {e}"
f"Exception raised for λ = {lambda_value}: {e}"
)
with self._lock:
results.append(result)
Expand Down Expand Up @@ -518,8 +527,8 @@ def _run(sim):
return df, lambda_grad, speed
except Exception as e:
_logger.warning(
f"Minimisation/dynamics at lambda = {lambda_value} failed with the "
f"following exception {e}, trying again with minimsation at lambda = 0."
f"Minimisation/dynamics at λ = {lambda_value} failed with the "
                        f"following exception {e}, trying again with minimisation at λ = 0."
)
try:
df = sim._run(lambda_minimisation=0.0)
Expand All @@ -528,8 +537,8 @@ def _run(sim):
return df, lambda_grad, speed
except Exception as e:
_logger.error(
f"Minimisation/dynamics at lambda = {lambda_value} failed, even after "
f"minimisation at lambda = 0. The following warning was raised: {e}."
f"Minimisation/dynamics at λ = {lambda_value} failed, even after "
f"minimisation at λ = 0. The following warning was raised: {e}."
)
raise
else:
Expand All @@ -540,7 +549,7 @@ def _run(sim):
return df, lambda_grad, speed
except Exception as e:
_logger.error(
f"Dynamics at lambda = {lambda_value} failed. The following warning was "
f"Dynamics at λ = {lambda_value} failed. The following warning was "
f"raised: {e}. This may be due to a lack of minimisation."
)

Expand Down Expand Up @@ -569,13 +578,11 @@ def _run(sim):
gpu_num = self._gpu_pool[0]
self._remove_gpu_from_pool(gpu_num)
if lambda_value is not None:
_logger.info(
f"Running lambda = {lambda_value} on GPU {gpu_num}"
)
_logger.info(f"Running λ = {lambda_value} on GPU {gpu_num}")
# Assumes that device for non-parallel GPU jobs is 0
else:
gpu_num = 0
_logger.info("Running lambda = {lambda_value} on GPU 0")
                        _logger.info(f"Running λ = {lambda_value} on GPU 0")
self._initialise_simulation(system, lambda_value, device=gpu_num)
try:
df, lambda_grad, speed = _run(self._sim)
Expand All @@ -592,7 +599,7 @@ def _run(sim):

# All other platforms.
else:
_logger.info(f"Running lambda = {lambda_value}")
_logger.info(f"Running λ = {lambda_value}")

self._initialise_simulation(system, lambda_value)
try:
Expand All @@ -617,5 +624,5 @@ def _run(sim):
filename=self._fnames[lambda_value]["energy_traj"],
)
del system
_logger.success("Lambda = {} complete".format(lambda_value))
_logger.success(f"λ = {lambda_value} complete")
return True

0 comments on commit bb9606d

Please sign in to comment.