lint with latest black
leepc12 committed Nov 4, 2022
1 parent a9cbbaa commit 19829c4
Showing 32 changed files with 207 additions and 248 deletions.
13 changes: 7 additions & 6 deletions .pre-commit-config.yaml
@@ -1,6 +1,7 @@
---
repos:
- repo: https://github.com/psf/black
rev: 19.3b0
rev: 22.3.0
hooks:
- id: black
args: [--skip-string-normalization]
@@ -33,8 +34,8 @@
- id: debug-statements
- id: check-yaml

- repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt
rev: 0.0.10
hooks:
- id: yamlfmt
args: [--mapping, '2', --sequence, '4', --offset, '2']
# - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt
# rev: 0.0.10
# hooks:
# - id: yamlfmt
# args: [--mapping, '2', --sequence, '4', --offset, '2']
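
For reference, with the black hook pinned to 22.3.0 the updated hooks would typically be re-applied across the repository before committing; a minimal sketch (not part of this commit), assuming pre-commit is installed locally:

$ pre-commit run --all-files   # runs black 22.3.0 and the other configured hooks on every file
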
2 changes: 1 addition & 1 deletion README.md
@@ -98,7 +98,7 @@ $ caper hpc submit [WDL] -i [INPUT_JSON] --singularity --leader-job-name GOOD_NA

# Example with Conda and using call-caching (restarting a workflow from where it left off)
# Use the same --file-db PATH for next re-run then Caper will collect and softlink previous outputs.
$ caper hpc submit [WDL] -i [INPUT_JSON] --conda --leader-job-name GOOD_NAME2 --db file --file-db [METADATA_DB_PATH]
$ caper hpc submit [WDL] -i [INPUT_JSON] --conda --leader-job-name GOOD_NAME2 --db file --file-db [METADATA_DB_PATH]

# List all leader jobs.
$ caper hpc list
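
For reference, the submit/list commands above pair with the caper hpc abort subcommand shown later in this commit's caper_args.py changes; a minimal sketch (not part of this commit), where [JOB_ID] stands for a leader job ID reported by caper hpc list:

# Abort a submitted leader job by its HPC job ID.
$ caper hpc abort [JOB_ID]
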
1 change: 1 addition & 0 deletions caper/arg_tool.py
@@ -1,6 +1,7 @@
import os
from argparse import ArgumentParser
from configparser import ConfigParser, MissingSectionHeaderError

from distutils.util import strtobool


27 changes: 11 additions & 16 deletions caper/caper_args.py
@@ -23,14 +23,9 @@
CromwellBackendSlurm,
)
from .cromwell_rest_api import CromwellRestAPI
from .hpc import LsfWrapper, PbsWrapper, SgeWrapper, SlurmWrapper
from .resource_analysis import ResourceAnalysis
from .server_heartbeat import ServerHeartbeat
from .hpc import (
SlurmWrapper,
SgeWrapper,
PbsWrapper,
LsfWrapper,
)

DEFAULT_CAPER_CONF = '~/.caper/default.conf'
DEFAULT_LIST_FORMAT = 'id,status,name,str_label,user,parent,submission'
@@ -163,7 +158,7 @@ def get_parser_and_defaults(conf_file=None):
)
group_db.add_argument(
'--db',
default=CromwellBackendDatabase.DEFAULT_DB,
default=CromwellBackendDatabase.DB_FILE,
help='Cromwell metadata database type',
)
group_db.add_argument(
@@ -534,31 +529,31 @@ def get_parser_and_defaults(conf_file=None):
'--leader-job-name',
help='Leader job name for a submitted workflow.'
'This name will be appended to the prefix "CAPER_LEADER_" and then '
'submitted to HPC. Such prefix is used to identify Caper leader jobs.'
'submitted to HPC. Such prefix is used to identify Caper leader jobs.',
)
group_hpc_submit.add_argument(
'--slurm-leader-job-resource-param',
help='Resource parameters to submit a Caper leader job to SLURM. '
'Make sure to quote if you use it in the command line arguments.',
default=' '.join(SlurmWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM)
default=' '.join(SlurmWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM),
)
group_hpc_submit.add_argument(
'--sge-leader-job-resource-param',
help='Resource parameters to submit a Caper leader job to SGE'
'Make sure to quote if you use it in the command line arguments.',
default=' '.join(SgeWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM)
default=' '.join(SgeWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM),
)
group_hpc_submit.add_argument(
'--pbs-leader-job-resource-param',
help='Resource parameters to submit a Caper leader job to PBS'
'Make sure to quote if you use it in the command line arguments.',
default=' '.join(PbsWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM)
default=' '.join(PbsWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM),
)
group_hpc_submit.add_argument(
'--lsf-leader-job-resource-param',
help='Resource parameters to submit a Caper leader job to LSF'
'Make sure to quote if you use it in the command line arguments.',
default=' '.join(LsfWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM)
default=' '.join(LsfWrapper.DEFAULT_LEADER_JOB_RESOURCE_PARAM),
)

group_slurm = parent_submit.add_argument_group('SLURM arguments')
@@ -771,7 +766,7 @@ def get_parser_and_defaults(conf_file=None):
parent_hpc_abort.add_argument(
'job_ids',
nargs='+',
help='Job ID or list of job IDs to abort matching Caper leader jobs.'
help='Job ID or list of job IDs to abort matching Caper leader jobs.',
)

# all subcommands
@@ -864,18 +859,18 @@ def get_parser_and_defaults(conf_file=None):
parents=[parent_all],
)
subparser_hpc = p_hpc.add_subparsers(dest='hpc_action')
p_hpc_submit = subparser_hpc.add_parser(
subparser_hpc.add_parser(
'submit',
help='Submit a single workflow to HPC.',
parents=[parent_all, parent_submit, parent_run, parent_runner, parent_backend],
)

p_hpc_list = subparser_hpc.add_parser(
subparser_hpc.add_parser(
'list',
help='List all workflows submitted to HPC.',
parents=[parent_all, parent_backend],
)
p_hpc_abort = subparser_hpc.add_parser(
subparser_hpc.add_parser(
'abort',
help='Abort a workflow submitted to HPC.',
parents=[parent_all, parent_backend, parent_hpc_abort],
3 changes: 1 addition & 2 deletions caper/caper_base.py
@@ -185,8 +185,7 @@ def create_timestamped_work_dir(self, prefix=''):
return work_dir

def get_loc_dir(self, backend):
"""Get localization directory for a backend.
"""
"""Get localization directory for a backend."""
if backend == BACKEND_GCP:
return self._gcp_loc_dir
elif backend == BACKEND_AWS:
12 changes: 0 additions & 12 deletions caper/caper_init.py
@@ -10,20 +10,8 @@
BACKEND_PBS,
BACKEND_SGE,
BACKEND_SLURM,
CromwellBackendLsf,
CromwellBackendPbs,
CromwellBackendSge,
CromwellBackendSlurm,
)

from .hpc import (
SlurmWrapper,
SgeWrapper,
PbsWrapper,
LsfWrapper,
)


CONF_CONTENTS_TMP_DIR = """
# Local directory for localized files and Cromwell's intermediate files.
# If not defined then Caper will make .caper_tmp/ on CWD or `local-out-dir`.
82 changes: 41 additions & 41 deletions caper/caper_runner.py
@@ -495,47 +495,47 @@ def server(
dry_run=False,
):
"""Run a Cromwell server.
default_backend:
Default backend. If backend is not specified for a submitted workflow
then default backend will be used.
Choose among Caper's built-in backends.
(aws, gcp, Local, slurm, sge, pbs, lsf).
Or use a backend defined in your custom backend config file
(above "backend_conf" file).
server_heartbeat:
Server heartbeat to write hostname/port of a server.
server_port:
Server port to run Cromwell server.
Make sure to use different port for multiple Cromwell servers on the same
machine.
server_hostname:
Server hostname. If not defined then socket.gethostname() will be used.
If server_heartbeat is given, then this hostname will be written to
the server heartbeat file defined in server_heartbeat.
custom_backend_conf:
Backend config file (HOCON) to override Caper's auto-generated backend config.
fileobj_stdout:
File-like object to write Cromwell's STDOUT.
embed_subworkflow:
Caper stores/updates metadata.JSON file on
each workflow's root directory whenever there is status change
of workflow (or its tasks).
This flag ensures that any subworkflow's metadata JSON will be
embedded in main (this) workflow's metadata JSON.
This is to mimic behavior of Cromwell run mode's -m parameter.
java_heap_server:
Java heap (java -Xmx) for Cromwell server mode.
auto_write_metadata:
Automatic retrieval/writing of metadata.json upon workflow/task's status change.
work_dir:
Local temporary directory to store all temporary files.
Temporary files mean intermediate files used for running Cromwell.
For example, auto-generated backend config file and workflow options file.
If this is not defined, then cache directory self._local_loc_dir with a timestamp
will be used.
However, Cromwell Java process itself will run on CWD instead of this directory.
dry_run:
Stop before running Java command line for Cromwell.
default_backend:
Default backend. If backend is not specified for a submitted workflow
then default backend will be used.
Choose among Caper's built-in backends.
(aws, gcp, Local, slurm, sge, pbs, lsf).
Or use a backend defined in your custom backend config file
(above "backend_conf" file).
server_heartbeat:
Server heartbeat to write hostname/port of a server.
server_port:
Server port to run Cromwell server.
Make sure to use different port for multiple Cromwell servers on the same
machine.
server_hostname:
Server hostname. If not defined then socket.gethostname() will be used.
If server_heartbeat is given, then this hostname will be written to
the server heartbeat file defined in server_heartbeat.
custom_backend_conf:
Backend config file (HOCON) to override Caper's auto-generated backend config.
fileobj_stdout:
File-like object to write Cromwell's STDOUT.
embed_subworkflow:
Caper stores/updates metadata.JSON file on
each workflow's root directory whenever there is status change
of workflow (or its tasks).
This flag ensures that any subworkflow's metadata JSON will be
embedded in main (this) workflow's metadata JSON.
This is to mimic behavior of Cromwell run mode's -m parameter.
java_heap_server:
Java heap (java -Xmx) for Cromwell server mode.
auto_write_metadata:
Automatic retrieval/writing of metadata.json upon workflow/task's status change.
work_dir:
Local temporary directory to store all temporary files.
Temporary files mean intermediate files used for running Cromwell.
For example, auto-generated backend config file and workflow options file.
If this is not defined, then cache directory self._local_loc_dir with a timestamp
will be used.
However, Cromwell Java process itself will run on CWD instead of this directory.
dry_run:
Stop before running Java command line for Cromwell.
"""
if work_dir is None:
work_dir = self.create_timestamped_work_dir(
12 changes: 4 additions & 8 deletions caper/caper_wdl_parser.py
@@ -6,8 +6,7 @@


class CaperWDLParser(WDLParser):
"""WDL parser for Caper.
"""
"""WDL parser for Caper."""

RE_WDL_COMMENT_DOCKER = r'^\s*\#\s*CAPER\s+docker\s(.+)'
RE_WDL_COMMENT_SINGULARITY = r'^\s*\#\s*CAPER\s+singularity\s(.+)'
@@ -25,8 +24,7 @@ def __init__(self, wdl):

@property
def caper_docker(self):
"""Backward compatibility for property name. See property default_docker.
"""
"""Backward compatibility for property name. See property default_docker."""
return self.default_docker

@property
@@ -48,8 +46,7 @@ def default_docker(self):

@property
def caper_singularity(self):
"""Backward compatibility for property name. See property default_singularity.
"""
"""Backward compatibility for property name. See property default_singularity."""
return self.default_singularity

@property
@@ -71,8 +68,7 @@ def default_singularity(self):

@property
def default_conda(self):
"""Find a default Conda environment name in WDL for Caper.
"""
"""Find a default Conda environment name in WDL for Caper."""
if self.workflow_meta:
for conda_key in CaperWDLParser.WDL_WORKFLOW_META_CONDA_KEYS:
if conda_key in self.workflow_meta:
11 changes: 3 additions & 8 deletions caper/cli.py
@@ -5,7 +5,6 @@
import logging
import os
import re
import subprocess
import sys

from autouri import GCSURI, AutoURI
@@ -16,6 +15,7 @@
from .caper_init import init_caper_conf
from .caper_labels import CaperLabels
from .caper_runner import CaperRunner
from .cli_hpc import subcmd_hpc
from .cromwell_backend import (
BACKEND_ALIAS_LOCAL,
BACKEND_LOCAL,
@@ -25,8 +25,6 @@
from .dict_tool import flatten_dict
from .resource_analysis import LinearResourceAnalysis
from .server_heartbeat import ServerHeartbeat
from .cli_hpc import subcmd_hpc


logger = logging.getLogger(__name__)

@@ -326,7 +324,6 @@ def client(args):
raise ValueError('Unsupported client action {act}'.format(act=args.action))



def subcmd_server(caper_runner, args, nonblocking=False):
"""
Args:
@@ -541,8 +538,7 @@ def get_single_cromwell_metadata_obj(caper_client, args, subcmd):


def split_list_into_file_and_non_file(lst):
"""Returns tuple of (list of existing files, list of non-file strings)
"""
"""Returns tuple of (list of existing files, list of non-file strings)"""
files = []
non_files = []

@@ -666,8 +662,7 @@ def subcmd_gcp_res_analysis(caper_client, args):


def subcmd_cleanup(caper_client, args):
"""Cleanup outputs of a workflow.
"""
"""Cleanup outputs of a workflow."""
cm = get_single_cromwell_metadata_obj(caper_client, args, 'cleanup')
cm.cleanup(dry_run=not args.delete, num_threads=args.num_threads, no_lock=True)
if not args.delete:
15 changes: 6 additions & 9 deletions caper/cli_hpc.py
@@ -1,7 +1,7 @@
import logging
import sys

from .hpc import (SlurmWrapper, SgeWrapper, PbsWrapper, LsfWrapper)
from .hpc import LsfWrapper, PbsWrapper, SgeWrapper, SlurmWrapper

logger = logging.getLogger(__name__)

@@ -34,25 +34,22 @@ def subcmd_hpc(args):
stdout = SlurmWrapper(
args.slurm_leader_job_resource_param.split(),
args.slurm_partition,
args.slurm_account
args.slurm_account,
).submit(args.leader_job_name, caper_run_command)

elif args.backend == 'sge':
stdout = SgeWrapper(
args.sge_leader_job_resource_param.split(),
args.sge_queue
args.sge_leader_job_resource_param.split(), args.sge_queue
).submit(args.leader_job_name, caper_run_command)

elif args.backend == 'pbs':
stdout = PbsWrapper(
args.pbs_leader_job_resource_param.split(),
args.pbs_queue
args.pbs_leader_job_resource_param.split(), args.pbs_queue
).submit(args.leader_job_name, caper_run_command)

elif args.backend == 'lsf':
stdout = LsfWrapper(
args.lsf_leader_job_resource_param.split(),
args.lsf_queue
args.lsf_leader_job_resource_param.split(), args.lsf_queue
).submit(args.leader_job_name, caper_run_command)

else: