Skip to content

Commit

Permalink
Switch from flake8 + isort + black to ruff
Browse files Browse the repository at this point in the history
This will hopefully avoid the endless formatting swings.
  • Loading branch information
Kobzol committed Jan 2, 2024
1 parent fdc8772 commit 49e3f76
Show file tree
Hide file tree
Showing 21 changed files with 197 additions and 119 deletions.
4 changes: 0 additions & 4 deletions .flake8

This file was deleted.

10 changes: 5 additions & 5 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ jobs:
name: pytest artifacts
path: artifacts.tar

- name: Lint
- name: Lint Rust
uses: actions-rs/cargo@v1
with:
command: clippy
Expand All @@ -83,11 +83,11 @@ jobs:
command: fmt
args: --all -- --check

- name: Lint Python
run: python -m ruff check

- name: Check Python formatting
run: |
export DIRECTORIES="scripts tests benchmarks crates/pyhq/python"
python -m black --check $DIRECTORIES
python -m flake8 $DIRECTORIES
run: python -m ruff format --check

- name: Build docs
run: |
Expand Down
6 changes: 4 additions & 2 deletions benchmarks/src/benchmark/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,10 @@ def _handle_result(self, identifier: BenchmarkIdentifier, result: BenchmarkResul
if isinstance(result, Failure):
logging.error(f"Benchmark {key} has failed: {result.traceback}")
if self.exit_on_error:
raise Exception(f"""Benchmark {identifier} has failed: {result}
You can find details in {identifier.workdir}""")
raise Exception(
f"""Benchmark {identifier} has failed: {result}
You can find details in {identifier.workdir}"""
)
elif isinstance(result, Timeout):
logging.info(f"Benchmark {key} has timeouted after {result.timeout}s")
elif isinstance(result, Success):
Expand Down
6 changes: 4 additions & 2 deletions benchmarks/src/postprocessing/monitor.py
Original file line number Diff line number Diff line change
Expand Up @@ -623,12 +623,14 @@ def render_process(pid: int) -> Optional[Model]:
]
cpu_time_figures = [render_process_cpu_time(process_data, key, time, label) for (label, key) in cpu_times]

summary = PreText(text=f"""
summary = PreText(
text=f"""
PID: {pid}
Key: {process.key}
Max. RSS: {humanize.naturalsize(max_rss, binary=True)}
Avg. CPU: {avg_cpu:.02f} %
""".strip())
""".strip()
)

right_col = Column(children=cpu_time_figures)
left_col = Column(children=[summary, mem_figure, cpu_figure])
Expand Down
6 changes: 4 additions & 2 deletions benchmarks/src/utils/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,10 @@ def execute_process(
if result.returncode != 0:
with open(stdout) as stdout_file:
with open(stderr) as stderr_file:
raise Exception(f"""The process {args} has exited with error code {result.returncode}
raise Exception(
f"""The process {args} has exited with error code {result.returncode}
Stdout: {stdout_file.read()}
Stderr: {stderr_file.read()}
""".strip())
""".strip()
)
return result
8 changes: 2 additions & 6 deletions crates/pyhq/python/hyperqueue/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,7 @@ def submit(self, job: Job) -> SubmittedJob:
raise Exception("Submitted job must have at least a single task")

job_id = self.connection.submit_job(job_desc)
logging.info(
f"Submitted job {job_id} with {task_count} {pluralize('task', task_count)}"
)
logging.info(f"Submitted job {job_id} with {task_count} {pluralize('task', task_count)}")
return SubmittedJob(job=job, id=job_id)

def wait_for_jobs(self, jobs: Sequence[SubmittedJob], raise_on_error=True) -> bool:
Expand All @@ -95,9 +93,7 @@ def wait_for_jobs(self, jobs: Sequence[SubmittedJob], raise_on_error=True) -> bo
job_ids_str = ",".join(str(id) for id in job_ids)
if len(jobs) > 1:
job_ids_str = "{" + job_ids_str + "}"
logging.info(
f"Waiting for {pluralize('job', len(jobs))} {job_ids_str} to finish"
)
logging.info(f"Waiting for {pluralize('job', len(jobs))} {job_ids_str} to finish")

callback = create_progress_callback()

Expand Down
4 changes: 1 addition & 3 deletions crates/pyhq/python/hyperqueue/ffi/protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,7 @@

class ResourceRequest:
n_nodes: int = 0
resources: Dict[str, Union[int, float, str]] = dataclasses.field(
default_factory=dict
)
resources: Dict[str, Union[int, float, str]] = dataclasses.field(default_factory=dict)
min_time: Optional[float] = None

def __init__(
Expand Down
6 changes: 1 addition & 5 deletions crates/pyhq/python/hyperqueue/job.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,7 @@ def __init__(
self.tasks: List[Task] = []
self.task_map: Dict[TaskId, Task] = {}
self.max_fails = max_fails
self.default_workdir = (
Path(default_workdir).resolve()
if default_workdir is not None
else default_workdir
)
self.default_workdir = Path(default_workdir).resolve() if default_workdir is not None else default_workdir
self.default_env = default_env or {}

def task_by_id(self, id: TaskId) -> Optional[Task]:
Expand Down
8 changes: 2 additions & 6 deletions crates/pyhq/python/hyperqueue/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,13 +40,9 @@ def default_stderr() -> str:

# TODO: how to resolve TASK_ID in the context of some other task?
class Output:
def __init__(
self, name: str, filepath: Optional[str] = None, extension: Optional[str] = None
):
def __init__(self, name: str, filepath: Optional[str] = None, extension: Optional[str] = None):
if filepath and extension:
raise ValidationException(
"Parameters `filepath` and `extension` are mutually exclusive"
)
raise ValidationException("Parameters `filepath` and `extension` are mutually exclusive")

self.name = name
self.filepath = filepath
Expand Down
12 changes: 3 additions & 9 deletions crates/pyhq/python/hyperqueue/task/function/wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@ class CloudWrapper:
Wraps a callable so that cloudpickle is used to pickle it, caching the pickle.
"""

def __init__(
self, fn, pickled_fn=None, cache=True, protocol=cloudpickle.DEFAULT_PROTOCOL
):
def __init__(self, fn, pickled_fn=None, cache=True, protocol=cloudpickle.DEFAULT_PROTOCOL):
if fn is None:
if pickled_fn is None:
raise ValueError("Pass at least one of `fn` and `pickled_fn`")
Expand All @@ -27,9 +25,7 @@ def __init__(
self.pickled_fn = pickled_fn
self.cache = cache
self.protocol = protocol
self.__doc__ = "CloudWrapper for {!r}. Original doc:\n\n{}".format(
self.fn, self.fn.__doc__
)
self.__doc__ = "CloudWrapper for {!r}. Original doc:\n\n{}".format(self.fn, self.fn.__doc__)
if hasattr(self.fn, "__name__"):
self.__name__ = self.fn.__name__

Expand All @@ -56,9 +52,7 @@ def _get_pickled_fn(self):
return pfn

def __call__(self, *args, **kwargs):
logging.debug(
f"Running function {self.fn} using args {args} and kwargs {kwargs}"
)
logging.debug(f"Running function {self.fn} using args {args} and kwargs {kwargs}")
return self.fn(*args, **kwargs)

def __reduce__(self):
Expand Down
4 changes: 1 addition & 3 deletions crates/pyhq/python/hyperqueue/task/program.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,6 @@ def get_task_outputs(task: ExternalProgram) -> Dict[str, Output]:
outputs = gather_outputs(task.args) + gather_outputs(task.env)
for output in outputs:
if output.name in output_map:
raise ValidationException(
f"Output `{output.name}` has been defined multiple times"
)
raise ValidationException(f"Output `{output.name}` has been defined multiple times")
output_map[output.name] = output
return output_map
3 changes: 0 additions & 3 deletions pyproject.toml

This file was deleted.

32 changes: 32 additions & 0 deletions ruff.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
exclude = [
".git",
".git-rewrite",
".ipynb_checkpoints",
".pytest_cache",
".ruff_cache",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
"target",
]
include = [
"benchmarks/**/*.py",
"crates/pyhq/python/**/*.py",
"crates/pyhq/pyproject.toml",
"scripts/**/*.py",
"tests/**/*.py",
]

line-length = 120
indent-width = 4

target-version = "py37"

[lint]
select = ["E4", "E7", "E9", "F"]
ignore = ["E203"]
5 changes: 2 additions & 3 deletions scripts/check.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,10 @@ cd `dirname $0`/..
cargo fmt --all

# Format Python code
isort --profile black scripts tests benchmarks crates/pyhq/python
black scripts tests benchmarks crates/pyhq/python
ruff format

# Lint Python code
flake8 scripts tests benchmarks crates/pyhq/python
ruff check

# Test Rust code
cargo test
Expand Down
4 changes: 1 addition & 3 deletions tests/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,8 +1,5 @@
pytest==7.1.2
pytest-xdist==2.5.0
flake8==4.0.1
black==23.7.0
isort==5.10.1
iso8601==1.0.2
schema==0.7.5
maturin==1.3.0
Expand All @@ -11,3 +8,4 @@ jinja2==3.0.3
requests==2.31.0
aiohttp==3.9.0
inline-snapshot==0.2.1
ruff==0.1.9
Loading

0 comments on commit 49e3f76

Please sign in to comment.