diff --git a/src/somd2/runner/_dynamics.py b/src/somd2/runner/_dynamics.py
index 5e9b6fb..8385a70 100644
--- a/src/somd2/runner/_dynamics.py
+++ b/src/somd2/runner/_dynamics.py
@@ -141,7 +141,7 @@ def _setup_dynamics(self, equilibration=False):
 
     def _minimisation(self, lambda_min=None):
         """
-        Minimisation of self._system
+        Minimisation of self._system.
 
         Parameters
         ----------
@@ -151,6 +151,7 @@ def _minimisation(self, lambda_min=None):
             lambda_val.
         """
         if lambda_min is None:
+            _logger.info(f"Minimising at λ = {self._lambda_val}")
             try:
                 m = self._system.minimisation(
                     cutoff_type=self._config.cutoff_type,
@@ -165,6 +166,7 @@ def _minimisation(self, lambda_min=None):
             except:
                 raise
         else:
+            _logger.info(f"Minimising at λ = {lambda_min}")
             try:
                 m = self._system.minimisation(
                     cutoff_type=self._config.cutoff_type,
@@ -186,6 +188,8 @@ def _equilibration(self):
         Per-window equilibration.
         Currently just runs dynamics without any saving
         """
+
+        _logger.info(f"Equilibrating at λ = {self._lambda_val}")
         self._setup_dynamics(equilibration=True)
         self._dyn.run(
             self._config.equilibration_time,
@@ -238,6 +242,8 @@ def generate_lam_vals(lambda_base, increment):
         else:
             lam_arr = self._lambda_array + self._lambda_grad
 
+        _logger.info(f"Running dynamics at λ = {self._lambda_val}")
+
         if self._config.checkpoint_frequency.value() > 0.0:
             ### Calc number of blocks and remainder (surely there's a better way?)###
             num_blocks = 0
@@ -257,7 +263,7 @@ def generate_lam_vals(lambda_base, increment):
                 / f"checkpoint_{self._lambda_array.index(self._lambda_val)}.s3"
             )
             # Run num_blocks dynamics and then run a final block if rem > 0
-            for _ in range(int(num_blocks)):
+            for x in range(int(num_blocks)):
                 try:
                     self._dyn.run(
                         self._config.checkpoint_frequency,
@@ -273,7 +279,7 @@ def generate_lam_vals(lambda_base, increment):
                     self._system = self._dyn.commit()
                     _stream.save(self._system, str(sire_checkpoint_name))
                     df = self._system.energy_trajectory(to_alchemlyb=True)
-                    if _ == 0:
+                    if x == 0:
                         # Not including speed in checkpoints for now.
                         f = _dataframe_to_parquet(
                             df,
@@ -292,6 +298,9 @@ def generate_lam_vals(lambda_base, increment):
                             f,
                             df.iloc[-int(energy_per_block) :],
                         )
+                    _logger.info(
+                        f"Finished block {x+1} of {num_blocks} for λ = {self._lambda_val}"
+                    )
                 except:
                     raise
             # No need to checkpoint here as it is the final block.
diff --git a/src/somd2/runner/_runner.py b/src/somd2/runner/_runner.py
index af34a32..d5e7fa0 100644
--- a/src/somd2/runner/_runner.py
+++ b/src/somd2/runner/_runner.py
@@ -358,7 +358,7 @@ def _initialise_simulation(self, system, lambda_value, device=None):
                 has_space=self._has_space,
             )
         except:
-            _logger.warning(f"System creation at {lambda_value} failed")
+            _logger.warning(f"System creation at λ = {lambda_value} failed")
             raise
 
     def _cleanup_simulation(self):
@@ -424,7 +424,7 @@ def run(self):
                 except Exception as e:
                     result = False
                     _logger.error(
-                        f"Exception raised for lambda = {lambda_value}: {e}"
+                        f"Exception raised for λ = {lambda_value}: {e}"
                     )
                 with self._lock:
                     results.append(result)
@@ -485,8 +485,8 @@ def _run(sim):
                     return df, lambda_grad, speed
                 except Exception as e:
                     _logger.warning(
-                        f"Minimisation/dynamics at lambda = {lambda_value} failed with the "
-                        f"following exception {e}, trying again with minimsation at lambda = 0."
+                        f"Minimisation/dynamics at λ = {lambda_value} failed with the "
+                        f"following exception {e}, trying again with minimisation at λ = 0."
                     )
                     try:
                         df = sim._run(lambda_minimisation=0.0)
@@ -495,8 +495,8 @@ def _run(sim):
                         return df, lambda_grad, speed
                     except Exception as e:
                         _logger.error(
-                            f"Minimisation/dynamics at lambda = {lambda_value} failed, even after "
-                            f"minimisation at lambda = 0. The following warning was raised: {e}."
+                            f"Minimisation/dynamics at λ = {lambda_value} failed, even after "
+                            f"minimisation at λ = 0. The following warning was raised: {e}."
                         )
                         raise
             else:
@@ -507,7 +507,7 @@ def _run(sim):
                     return df, lambda_grad, speed
                 except Exception as e:
                     _logger.error(
-                        f"Dynamics at lambda = {lambda_value} failed. The following warning was "
+                        f"Dynamics at λ = {lambda_value} failed. The following warning was "
                         f"raised: {e}. This may be due to a lack of minimisation."
                     )
 
@@ -520,13 +520,11 @@ def _run(sim):
                 gpu_num = self._gpu_pool[0]
                 self._remove_gpu_from_pool(gpu_num)
             if lambda_value is not None:
-                    _logger.info(
-                        f"Running lambda = {lambda_value} on GPU {gpu_num}"
-                    )
+                _logger.info(f"Running λ = {lambda_value} on GPU {gpu_num}")
            # Assumes that device for non-parallel GPU jobs is 0
            else:
                gpu_num = 0
-                _logger.info("Running lambda = {lambda_value} on GPU 0")
+                _logger.info(f"Running λ = {lambda_value} on GPU 0")
            self._initialise_simulation(system, lambda_value, device=gpu_num)
            try:
                df, lambda_grad, speed = _run(self._sim)
@@ -543,7 +541,7 @@ def _run(sim):
 
        # All other platforms.
        else:
-            _logger.info(f"Running lambda = {lambda_value}")
+            _logger.info(f"Running λ = {lambda_value}")
            self._initialise_simulation(system, lambda_value)
 
            try:
@@ -566,5 +564,5 @@ def _run(sim):
                filename=f"energy_traj_{self._lambda_values.index(lambda_value)}.parquet",
            )
            del system
-            _logger.success("Lambda = {} complete".format(lambda_value))
+            _logger.success(f"λ = {lambda_value} complete")
        return True
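Aside on the checkpointing hunk in _dynamics.py: the "### Calc number of blocks and remainder (surely there's a better way?)###" comment splits the total runtime into num_blocks checkpoint-sized blocks plus a final shorter block when rem > 0. A minimal sketch of that arithmetic using divmod, assuming plain floats in a consistent unit rather than the Sire time objects the real code works with (names here are illustrative, not the somd2 API):

    def split_runtime(runtime, checkpoint_frequency):
        """Return (num_blocks, remainder) for checkpointed dynamics."""
        # divmod yields the whole number of checkpoint-sized blocks and
        # the leftover time to run as a final, shorter block (if > 0).
        num_blocks, remainder = divmod(runtime, checkpoint_frequency)
        return int(num_blocks), remainder

    # A 2.5 ns run with 1 ns checkpoints gives two full blocks plus a
    # final 0.5 ns block.
    print(split_runtime(2.5, 1.0))  # -> (2, 0.5)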