From 8e7060325db8a158075f1cdc78f1f3fbea53bd94 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 29 Jun 2024 15:17:56 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/aiida_sssp_workflow/cli/run.py            |  23 ++-
 src/aiida_sssp_workflow/protocol/criteria.yml |   1 -
 src/aiida_sssp_workflow/utils/__init__.py     |   1 +
 src/aiida_sssp_workflow/utils/protocol.py     |  23 ++-
 src/aiida_sssp_workflow/utils/pseudo.py       |  15 +-
 .../workflows/convergence/_base.py            |   2 -
 .../workflows/transferability/eos.py          |  15 +-
 .../workflows/verification.py                 | 189 +++++++++++-------
 tests/utils/test_pseudo.py                    |  14 +-
 tests/workflows/test_verification.py          |  46 ++---
 10 files changed, 194 insertions(+), 135 deletions(-)

diff --git a/src/aiida_sssp_workflow/cli/run.py b/src/aiida_sssp_workflow/cli/run.py
index 3bec47a..0ed38c1 100644
--- a/src/aiida_sssp_workflow/cli/run.py
+++ b/src/aiida_sssp_workflow/cli/run.py
@@ -13,7 +13,7 @@
 from aiida.cmdline.params import options, types
 from aiida.cmdline.utils import echo
 from aiida.engine import ProcessBuilder, run_get_node, submit
-from aiida.plugins import DataFactory, WorkflowFactory
+from aiida.plugins import WorkflowFactory
 from aiida_pseudo.data.pseudo.upf import UpfData
 
 from aiida_sssp_workflow.cli import cmd_root
@@ -25,6 +25,7 @@
 VerificationWorkChain = WorkflowFactory("sssp_workflow.verification")
 
 
+
 def guess_properties_list(property: list) -> Tuple[List[str], str]:
     # if the property is not specified, use the default list with all properties calculated.
     # otherwise, use the specified properties.
@@ -43,21 +44,27 @@ def guess_properties_list(property: list) -> Tuple[List[str], str]:
 
     return properties_list, extra_desc
 
+
 def guess_is_convergence(properties_list: list) -> bool:
     """Check if it is a convergence test"""
     return any([c for c in properties_list if c.startswith("convergence")])
 
+
 def guess_is_full_convergence(properties_list: list) -> bool:
     """Check if all properties are run for convergence test"""
-    return len([c for c in properties_list if c.startswith("convergence")]) == len(DEFAULT_CONVERGENCE_PROPERTIES_LIST)
+    return len([c for c in properties_list if c.startswith("convergence")]) == len(
+        DEFAULT_CONVERGENCE_PROPERTIES_LIST
+    )
+
 
 def guess_is_measure(properties_list: list) -> bool:
     """Check if it is a measure test"""
     return any([c for c in properties_list if c.startswith("measure")])
 
+
 def guess_is_ph(properties_list: list) -> bool:
     """Check if it has a measure test"""
@@ -175,7 +182,9 @@ def launch(
     is_ph = guess_is_ph(properties_list)
 
     if is_ph and not ph_code:
-        echo.echo_critical("ph_code must be provided since we run on it for phonon frequencies.")
+        echo.echo_critical(
+            "ph_code must be provided since we run on it for phonon frequencies."
+        )
 
     if is_convergence and len(configuration) > 1:
         echo.echo_critical(
         )
 
     if is_measure and not is_full_convergence:
-        echo.echo_warning("Full convergence tests are not run, so we use maximum cutoffs for transferability verification.")
+        echo.echo_warning(
+            "Full convergence tests are not run, so we use maximum cutoffs for transferability verification."
+        )
 
     # Load the curent AiiDA profile and log to user
     _profile = aiida.load_profile()
@@ -211,7 +222,9 @@ def launch(
         clean_workdir=clean_workdir,
     )
 
-    builder.metadata.label = f"({protocol} at {pw_code.computer.label} - {conf_label}) {pseudo.stem}"
+    builder.metadata.label = (
+        f"({protocol} at {pw_code.computer.label} - {conf_label}) {pseudo.stem}"
+    )
     builder.metadata.description = f"""Calculation is run on protocol: {protocol};
     on {pw_code.computer.label}; on configuration {conf_label}; on pseudo {pseudo.stem}."""
     builder.pw_code = pw_code
diff --git a/src/aiida_sssp_workflow/protocol/criteria.yml b/src/aiida_sssp_workflow/protocol/criteria.yml
index 7ef3b29..56806a8 100644
--- a/src/aiida_sssp_workflow/protocol/criteria.yml
+++ b/src/aiida_sssp_workflow/protocol/criteria.yml
@@ -66,4 +66,3 @@ v2024.1001:
       bounds: [0.0, 10] # when error eta_c < 20 meV
       eps: 1.0e-3
       unit: meV/atom
-
diff --git a/src/aiida_sssp_workflow/utils/__init__.py b/src/aiida_sssp_workflow/utils/__init__.py
index 5d01846..6ed7588 100644
--- a/src/aiida_sssp_workflow/utils/__init__.py
+++ b/src/aiida_sssp_workflow/utils/__init__.py
@@ -37,6 +37,7 @@ def get_default_mpi_options(
         "withmpi": with_mpi,
     }
 
+
 def serialize_data(data):
     from aiida.orm import (
         AbstractCode,
diff --git a/src/aiida_sssp_workflow/utils/protocol.py b/src/aiida_sssp_workflow/utils/protocol.py
index 955d96f..4fe91d0 100644
--- a/src/aiida_sssp_workflow/utils/protocol.py
+++ b/src/aiida_sssp_workflow/utils/protocol.py
@@ -5,7 +5,7 @@
 from aiida_sssp_workflow.utils.pseudo import DualType, get_dual_type
 
 
-def get_protocol(category: str, name: str | None=None):
+def get_protocol(category: str, name: str | None = None):
     """Load and read protocol from faml file to a verbose dict
     if name not set, return whole protocol."""
     import_path = resources.path("aiida_sssp_workflow.protocol", f"{category}.yml")
@@ -17,24 +17,25 @@ def get_protocol(category: str, name: str | None=None):
     else:
         return protocol_dict
 
-def generate_cutoff_list(protocol_name: str, element: str, pp_type: str) -> List[Tuple[int, int]]:
-    """From the control protocol name, get the cutoff list
-    """
+
+def generate_cutoff_list(
+    protocol_name: str, element: str, pp_type: str
+) -> List[Tuple[int, int]]:
+    """From the control protocol name, get the cutoff list"""
     match get_dual_type(pp_type, element):
         case DualType.NC:
-            dual_type = 'nc_dual_scan'
+            dual_type = "nc_dual_scan"
         case DualType.AUGLOW:
-            dual_type = 'nonnc_dual_scan'
+            dual_type = "nonnc_dual_scan"
         case DualType.AUGHIGH:
-            dual_type = 'nonnc_high_dual_scan'
+            dual_type = "nonnc_high_dual_scan"
 
-    dual_scan_list = get_protocol('control', protocol_name)[dual_type]
+    dual_scan_list = get_protocol("control", protocol_name)[dual_type]
     if len(dual_scan_list) > 0:
         max_dual = int(max(dual_scan_list))
     else:
         max_dual = 8
 
-    ecutwfc_list = get_protocol('control', protocol_name)['wfc_scan']
-
-    return [(e, e*max_dual) for e in ecutwfc_list]
+    ecutwfc_list = get_protocol("control", protocol_name)["wfc_scan"]
 
+    return [(e, e * max_dual) for e in ecutwfc_list]
diff --git a/src/aiida_sssp_workflow/utils/pseudo.py b/src/aiida_sssp_workflow/utils/pseudo.py
index f8040aa..3bd8b72 100644
--- a/src/aiida_sssp_workflow/utils/pseudo.py
+++ b/src/aiida_sssp_workflow/utils/pseudo.py
@@ -119,18 +119,21 @@ class PseudoInfo(BaseModel):
     # source_lib: str
     # ...
 
+
 class DualType(Enum):
     NC = "nc"
     AUGLOW = "charge augmentation low"
     AUGHIGH = "charge augmentation high"
 
+
 def get_dual_type(pp_type: str, element: str) -> DualType:
-    if element in HIGH_DUAL_ELEMENTS and pp_type != 'nc':
-        return DualType.AUGHIGH
-    elif pp_type == 'nc':
-        return DualType.NC
-    else:
-        return DualType.AUGLOW
+    if element in HIGH_DUAL_ELEMENTS and pp_type != "nc":
+        return DualType.AUGHIGH
+    elif pp_type == "nc":
+        return DualType.NC
+    else:
+        return DualType.AUGLOW
+
 
 def extract_pseudo_info(pseudo_text: str) -> PseudoInfo:
     """Giving a pseudo, extract the pseudo info and return as a `PseudoInfo` object"""
diff --git a/src/aiida_sssp_workflow/workflows/convergence/_base.py b/src/aiida_sssp_workflow/workflows/convergence/_base.py
index dfddfe6..c179ea3 100644
--- a/src/aiida_sssp_workflow/workflows/convergence/_base.py
+++ b/src/aiida_sssp_workflow/workflows/convergence/_base.py
@@ -196,7 +196,6 @@ def configuration(self):
 
         return self.ctx.configuration
 
-
     @property
     def pseudos(self):
         """Syntax sugar for self.ctx.pseudos"""
@@ -279,7 +278,6 @@ def get_builder(
         if ret := is_valid_cutoff_list(cutoff_list):
             raise ValueError(ret)
 
-
         builder.cutoff_list = orm.List(list=cutoff_list)
         builder.clean_workdir = orm.Bool(clean_workdir)
diff --git a/src/aiida_sssp_workflow/workflows/transferability/eos.py b/src/aiida_sssp_workflow/workflows/transferability/eos.py
index aa5b311..da16d02 100644
--- a/src/aiida_sssp_workflow/workflows/transferability/eos.py
+++ b/src/aiida_sssp_workflow/workflows/transferability/eos.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 """Workchain to calculate delta factor of specific psp"""
+
 from typing import Tuple
 from pathlib import Path
 
@@ -172,9 +173,7 @@ def get_pseudos(self, configuration) -> dict:
 
     def _setup_protocol(self):
         """unzip and parse protocol parameters to context"""
-        protocol = get_protocol(
-            category="eos", name=self.inputs.protocol.value
-        )
+        protocol = get_protocol(category="eos", name=self.inputs.protocol.value)
         self.ctx.protocol = protocol
 
     @property
@@ -205,12 +204,12 @@ def get_builder(
     ) -> ProcessBuilder:
         """Return a builder to run this EOS convergence workchain"""
         builder = super().get_builder(
-            code, 
+            code,
             pseudo,
-            protocol, 
-            cutoffs, 
-            parallelization, 
-            mpi_options, 
+            protocol,
+            cutoffs,
+            parallelization,
+            mpi_options,
             clean_workdir,
         )
diff --git a/src/aiida_sssp_workflow/workflows/verification.py b/src/aiida_sssp_workflow/workflows/verification.py
index 04a503a..712c436 100644
--- a/src/aiida_sssp_workflow/workflows/verification.py
+++ b/src/aiida_sssp_workflow/workflows/verification.py
@@ -2,6 +2,7 @@
 """
 All in one verification workchain
 """
+
 from typing import Tuple
 from pathlib import Path
 
@@ -14,10 +15,15 @@
 from aiida_sssp_workflow.utils.protocol import generate_cutoff_list, get_protocol
 from aiida_sssp_workflow.utils import get_default_mpi_options, parse, serialize_data
-from aiida_sssp_workflow.utils.pseudo import PseudoInfo, extract_pseudo_info, get_default_dual
+from aiida_sssp_workflow.utils.pseudo import (
+    PseudoInfo,
+    extract_pseudo_info,
+    get_default_dual,
+)
 from aiida_sssp_workflow.workflows import SelfCleanWorkChain
 from aiida_sssp_workflow.workflows.convergence.report import ConvergenceReport
 
+
 # XXX: remove me if I am not used
 @calcfunction
 def parse_pseudo_info(pseudo):
@@ -47,11 +53,14 @@ def parse_pseudo_info(pseudo):
     DEFAULT_MEASURE_PROPERTIES_LIST + DEFAULT_CONVERGENCE_PROPERTIES_LIST
 )
 
-def compute_recommended_cutoffs(workchains: dict, pseudo: UpfData, criteria_name: str='standard'):
+
+def compute_recommended_cutoffs(
+    workchains: dict, pseudo: UpfData, criteria_name: str = "standard"
+):
     """Input is a dict with workchain name and values are the workchain node,
     loop over the workchain and apply the criteria to get the recommended cutoffs.
     """
-    criteria = get_protocol(category='criteria', name=criteria_name)
+    criteria = get_protocol(category="criteria", name=criteria_name)
     success_workchains = {k: w for k, w in workchains.items() if w.is_finished_ok}
 
     assert len(success_workchains) <= len(DEFAULT_CONVERGENCE_PROPERTIES_LIST)
@@ -62,13 +71,14 @@ def compute_recommended_cutoffs(workchains: dict, pseudo: UpfData, criteria_name
     ecutrho = -1
     for k, w in success_workchains.items():
         k: str
-        property_name = k.split('.')[-1]
+        property_name = k.split(".")[-1]
 
-        recommended_ecutwfc, recommended_ecutrho = converge_check(w.outputs.report, criteria[property_name])
+        recommended_ecutwfc, recommended_ecutrho = converge_check(
+            w.outputs.report, criteria[property_name]
+        )
         ecutwfc = max(ecutwfc, recommended_ecutwfc)
         ecutrho = max(ecutrho, recommended_ecutrho)
 
-
     return ecutwfc, ecutrho
 
@@ -85,6 +95,7 @@ def compute_recommended_cutoffs(workchains: dict, pseudo: UpfData, criteria_name
 
     return ecutwfc, ecutwfc * dual
 
+
 def converge_check(report: ConvergenceReport, criteria: dict) -> Tuple[int, int]:
     """From the report, go through evaluation node of reference and convergence test points,
     compute the convergence behavior of the convergence run and based on the criteria,
@@ -103,22 +114,38 @@ class FullVerificationWorkChain(SelfCleanWorkChain):
     # run and results write to outputs ports.
     _VALID_CONGENCENCE_WF = DEFAULT_CONVERGENCE_PROPERTIES_LIST
     _VALID_MEASURE_WF = DEFAULT_MEASURE_PROPERTIES_LIST
-    _CRITERIA = 'v2024.1001'
+    _CRITERIA = "v2024.1001"
 
     @classmethod
     def define(cls, spec):
         super().define(spec)
-        spec.input('pw_code', valid_type=orm.AbstractCode,
-                    help='The `pw.x` code use for the `PwCalculation`.')
-        spec.input('ph_code', valid_type=orm.AbstractCode, required=True,
-                    help='The `ph.x` code use for the `PhCalculation`.')
-        spec.input('pseudo', valid_type=UpfData, required=True,
-                    help='Pseudopotential to be verified')
-        spec.input('protocol', valid_type=orm.Str,
-                    help='Verification protocol') # XXX: validate, can only be standard, quick, test
-        spec.input('curate_type', valid_type=orm.Str, required=True,
-                    help='sssp or nc, which oxygen to use') # XXX: validation
-        spec.input('dry_run', valid_type=orm.Bool, default=lambda: orm.Bool(False))
+        spec.input(
+            "pw_code",
+            valid_type=orm.AbstractCode,
+            help="The `pw.x` code use for the `PwCalculation`.",
+        )
+        spec.input(
+            "ph_code",
+            valid_type=orm.AbstractCode,
+            required=True,
+            help="The `ph.x` code use for the `PhCalculation`.",
+        )
+        spec.input(
+            "pseudo",
+            valid_type=UpfData,
+            required=True,
+            help="Pseudopotential to be verified",
+        )
+        spec.input(
+            "protocol", valid_type=orm.Str, help="Verification protocol"
+        )  # XXX: validate, can only be standard, quick, test
+        spec.input(
+            "curate_type",
+            valid_type=orm.Str,
+            required=True,
+            help="sssp or nc, which oxygen to use",
+        )  # XXX: validation
+        spec.input("dry_run", valid_type=orm.Bool, default=lambda: orm.Bool(False))
         spec.input(
             "parallelization",
             valid_type=orm.Dict,
@@ -144,21 +171,36 @@ def define(cls, spec):
             ),
         )
 
-        spec.output('pseudo_info', valid_type=orm.Dict, required=True,
-                    help='pseudopotential info')
-        spec.output_namespace('builders', dynamic=True,
-                    help='Flat out subworkchain builders info, only output this port when it is in dry run.')
+        spec.output(
+            "pseudo_info",
+            valid_type=orm.Dict,
+            required=True,
+            help="pseudopotential info",
+        )
+        spec.output_namespace(
+            "builders",
+            dynamic=True,
+            help="Flat out subworkchain builders info, only output this port when it is in dry run.",
+        )
 
         for wfname in cls._VALID_MEASURE_WF:
-            spec.output_namespace(wfname, dynamic=True,
-                    help=f'results of {wfname} calculation.')
+            spec.output_namespace(
+                wfname, dynamic=True, help=f"results of {wfname} calculation."
+            )
 
         for wfname in cls._VALID_CONGENCENCE_WF:
-            spec.output_namespace(wfname, dynamic=True,
-                    help=f'results of {wfname} calculation.')
+            spec.output_namespace(
+                wfname, dynamic=True, help=f"results of {wfname} calculation."
+            )
 
-        spec.exit_code(401, 'ERROR_CACHING_ON_BUT_FAILED',
-                    message='The caching is triggered but failed.')
-        spec.exit_code(811, 'WARNING_NOT_ALL_SUB_WORKFLOW_OK',
-                    message='The sub-workflows {processes} is not finished ok.')
+        spec.exit_code(
+            401,
+            "ERROR_CACHING_ON_BUT_FAILED",
+            message="The caching is triggered but failed.",
+        )
+        spec.exit_code(
+            811,
+            "WARNING_NOT_ALL_SUB_WORKFLOW_OK",
+            message="The sub-workflows {processes} is not finished ok.",
+        )
 
     @classmethod
     def get_builder(
@@ -182,7 +224,6 @@ def get_builder(
         builder.curate_type = orm.Str(curate_type)
         builder.dry_run = orm.Bool(dry_run)
 
-
         if parallelization:
             builder.parallelization = orm.Dict(parallelization)
         else:
@@ -193,7 +234,6 @@ def get_builder(
         else:
             builder.mpi_options = orm.Dict(get_default_mpi_options())
 
-
        return builder
 
     def _setup(self):
@@ -208,17 +248,19 @@ def _prepare_subworkchain_builders(self):
         """
         protocol = self.inputs.protocol.value
         mapping_to_convergence = {
-            'standard': 'balanced',
-            'quick': 'balanced',
-            'test': 'test',
+            "standard": "balanced",
+            "quick": "balanced",
+            "test": "test",
         }
 
         mapping_to_control = {
-            'standard': 'standard',
-            'quick': 'quick',
-            'test': 'test',
+            "standard": "standard",
+            "quick": "quick",
+            "test": "test",
         }
 
-        cutoff_list = generate_cutoff_list(mapping_to_control[protocol], self.ctx.element, self.ctx.pp_type)
+        cutoff_list = generate_cutoff_list(
+            mapping_to_control[protocol], self.ctx.element, self.ctx.pp_type
+        )
 
         builders = {}
         for property in self._VALID_CONGENCENCE_WF:
@@ -230,47 +272,47 @@ def _prepare_subworkchain_builders(self):
                 "clean_workdir": self.inputs.clean_workdir.value,
             }
             if "phonon_frequencies" in property:
-                builder_inputs['pw_code'] = self.inputs.pw_code
-                builder_inputs['ph_code'] = self.inputs.ph_code
+                builder_inputs["pw_code"] = self.inputs.pw_code
+                builder_inputs["ph_code"] = self.inputs.ph_code
             else:
-                builder_inputs['code'] = self.inputs.pw_code
+                builder_inputs["code"] = self.inputs.pw_code
 
-            # The problem with this setting is nothing is optimized for the atom 
+            # The problem with this setting is nothing is optimized for the atom
             # and npool is always set to 1.
-            # Meanwhile, I don't want to add support to all types of scheduler 
+            # Meanwhile, I don't want to add support to all types of scheduler
             # (Especially, I am using hyperqueue at the moment which has diffrent mpi_options inputs as slurm)
             # The ultimate solution would be to have a single interface to set for all kinds of schedule.
             if "cohesive_energy" in property:
-                builder_inputs['bulk_parallelization'] = self.inputs.parallelization
-                builder_inputs['bulk_mpi_options'] = self.inputs.mpi_options
-                builder_inputs['atom_parallelization'] = self.inputs.parallelization
-                builder_inputs['atom_mpi_options'] = self.inputs.mpi_options
+                builder_inputs["bulk_parallelization"] = self.inputs.parallelization
+                builder_inputs["bulk_mpi_options"] = self.inputs.mpi_options
+                builder_inputs["atom_parallelization"] = self.inputs.parallelization
+                builder_inputs["atom_mpi_options"] = self.inputs.mpi_options
             elif "phonon_frequencies" in property:
-                npool = 1 # XXX: Need to be optimized
-                builder_inputs['pw_parallelization'] = self.inputs.parallelization
-                builder_inputs['pw_mpi_options'] = self.inputs.mpi_options
-                builder_inputs['ph_mpi_options'] = self.inputs.mpi_options
-                builder_inputs['ph_settings'] = {"CMDLINE": ["-npool", f"{npool}"]}
+                npool = 1  # XXX: Need to be optimized
+                builder_inputs["pw_parallelization"] = self.inputs.parallelization
+                builder_inputs["pw_mpi_options"] = self.inputs.mpi_options
+                builder_inputs["ph_mpi_options"] = self.inputs.mpi_options
+                builder_inputs["ph_settings"] = {"CMDLINE": ["-npool", f"{npool}"]}
             else:
-                builder_inputs['parallelization'] = self.inputs.parallelization
-                builder_inputs['mpi_options'] = self.inputs.mpi_options
+                builder_inputs["parallelization"] = self.inputs.parallelization
+                builder_inputs["mpi_options"] = self.inputs.mpi_options
 
             builder: ProcessBuilder = _WorkChain.get_builder(
-                **builder_inputs, 
+                **builder_inputs,
             )
 
             builders[property] = builder
 
         mapping_to_eos = {
-            'standard': 'standard',
-            'quick': 'standard',
-            'test': 'test',
+            "standard": "standard",
+            "quick": "standard",
+            "test": "test",
         }
 
         mapping_to_bands = {
-            'standard': 'balanced',
-            'quick': 'balanced',
-            'test': 'test',
+            "standard": "balanced",
+            "quick": "balanced",
+            "test": "test",
         }
 
         _WorkChain = WorkflowFactory("sssp_workflow.transferability.eos")
@@ -284,7 +326,7 @@ def _prepare_subworkchain_builders(self):
         builder.parallelization = self.inputs.parallelization
         builder.mpi_options = self.inputs.mpi_options
 
-        builders['transferability.eos'] = builder
+        builders["transferability.eos"] = builder
 
         _WorkChain = WorkflowFactory("sssp_workflow.transferability.bands")
         builder: ProcessBuilder = _WorkChain.get_builder(
@@ -296,7 +338,7 @@ def _prepare_subworkchain_builders(self):
         builder.parallelization = self.inputs.parallelization
         builder.mpi_options = self.inputs.mpi_options
 
-        builders['transferability.bands'] = builder
+        builders["transferability.bands"] = builder
 
         self.ctx.builders = builders
 
@@ -306,7 +348,10 @@ def _not_dry_run(self):
         # Write to the output of all builder for check if it is dry run
         # which is helpful for test and sanity check.
         if dry_run:
-            serialized_builders = {k: serialize_data(builder._inputs(prune=True)) for k, builder in self.ctx.builders.items()}
+            serialized_builders = {
+                k: serialize_data(builder._inputs(prune=True))
+                for k, builder in self.ctx.builders.items()
+            }
 
             self.out("builders", serialized_builders)
 
@@ -316,9 +361,7 @@ def _run_convergence_test(self):
         workchains = {}
         for property in self._VALID_CONGENCENCE_WF:
             running = self.submit(self.ctx.builders.get(property))
-            self.report(
-                f"Submit {property} convergence workchain pk={running.pk}"
-            )
+            self.report(f"Submit {property} convergence workchain pk={running.pk}")
 
             self.to_context(_=running)
 
@@ -334,11 +377,15 @@ def _set_cutoffs(self):
         test are run, then use the maximum cutoff for the transferability run.
         """
         for property in self._VALID_MEASURE_WF:
-            wavefunction_cutoff, charge_density_cutoff = compute_recommended_cutoffs(self.ctx.convergence_workchains, self.inputs.pseudo, criteria_name=self._CRITERIA)
+            wavefunction_cutoff, charge_density_cutoff = compute_recommended_cutoffs(
+                self.ctx.convergence_workchains,
+                self.inputs.pseudo,
+                criteria_name=self._CRITERIA,
+            )
 
             builder = self.ctx.builders.get(property)
-            builder['wavefunction_cutoff'] = wavefunction_cutoff
-            builder['charge_density_cutoff'] = charge_density_cutoff
+            builder["wavefunction_cutoff"] = wavefunction_cutoff
+            builder["charge_density_cutoff"] = charge_density_cutoff
 
     def _run_transferability_verification(self):
         """Run delta measure sub-workflow"""
diff --git a/tests/utils/test_pseudo.py b/tests/utils/test_pseudo.py
index 94a8e09..f97b1cd 100644
--- a/tests/utils/test_pseudo.py
+++ b/tests/utils/test_pseudo.py
@@ -1,6 +1,5 @@
 """Test ``utils.pseudo`` module."""
 
-from _pytest.pytester import pytester
 import pytest
 from pathlib import Path
 
@@ -71,14 +70,15 @@ def test_compute_total_nelectrons():
     ]:
         assert compute_total_nelectrons(configuration, pseudos) == n_total
 
+
 @pytest.mark.parametrize(
-    "element, pp_type, expected_dual_type", 
+    "element, pp_type, expected_dual_type",
     [
-        ('He', 'nc', DualType.NC),
-        ('Fe', 'nc', DualType.NC),
-        ('Fe', 'paw', DualType.AUGHIGH),
-        ('H', 'us', DualType.AUGLOW),
-    ]
+        ("He", "nc", DualType.NC),
+        ("Fe", "nc", DualType.NC),
+        ("Fe", "paw", DualType.AUGHIGH),
+        ("H", "us", DualType.AUGLOW),
+    ],
 )
 def test_get_dual_type(element, pp_type, expected_dual_type):
     assert get_dual_type(pp_type, element) == expected_dual_type
diff --git a/tests/workflows/test_verification.py b/tests/workflows/test_verification.py
index ee846ce..d899197 100644
--- a/tests/workflows/test_verification.py
+++ b/tests/workflows/test_verification.py
@@ -5,48 +5,46 @@
 
 
 @pytest.mark.parametrize(
-    'pseudo_', [
-        'Al.paw',
-        'O.nc',
-        'O.paw',
-    ]
+    "pseudo_",
+    [
+        "Al.paw",
+        "O.nc",
+        "O.paw",
+    ],
 )
 def test_default_builder(pseudo_, code_generator, pseudo_path, data_regression):
     """Check the builder is created from inputs"""
-    _WorkChain = WorkflowFactory('sssp_workflow.verification')
-    
+    _WorkChain = WorkflowFactory("sssp_workflow.verification")
+
     builder: ProcessBuilder = _WorkChain.get_builder(
-        pw_code=code_generator('pw'),
-        ph_code=code_generator('ph'),
+        pw_code=code_generator("pw"),
+        ph_code=code_generator("ph"),
         pseudo=pseudo_path(pseudo_),
-        protocol='test',
-        curate_type='sssp',
+        protocol="test",
+        curate_type="sssp",
         dry_run=True,
     )
 
     result, _ = run_get_node(builder)
 
-    data_regression.check(result['builders'])
+    data_regression.check(result["builders"])
+
 
 # TODO: test using nc Oxygen when curate_type is 'nc'
 
+
 @pytest.mark.slow
 def test_default_verification(code_generator, pseudo_path, data_regression):
     """Check the builder is created from inputs"""
-    _WorkChain = WorkflowFactory('sssp_workflow.verification')
-    
+    _WorkChain = WorkflowFactory("sssp_workflow.verification")
+
     builder: ProcessBuilder = _WorkChain.get_builder(
-        pw_code=code_generator('pw'),
-        ph_code=code_generator('ph'),
-        pseudo=pseudo_path('Al.paw'),
-        protocol='test',
-        curate_type='sssp',
+        pw_code=code_generator("pw"),
+        ph_code=code_generator("ph"),
+        pseudo=pseudo_path("Al.paw"),
+        protocol="test",
+        curate_type="sssp",
         dry_run=False,
     )
 
     result, node = run_get_node(builder)
-
-
-
-