From 4fd61ffc72ef9808315903a2e78a96cc60045563 Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Thu, 8 Dec 2022 16:07:39 +0000
Subject: [PATCH] 'Refactored by Sourcery'
---
asv/benchmark.py | 114 +++++++-------
asv/benchmarks.py | 17 +--
asv/build_cache.py | 2 +-
asv/commands/__init__.py | 20 ++-
asv/commands/common_args.py | 37 +++--
asv/commands/compare.py | 67 +++-----
asv/commands/find.py | 11 +-
asv/commands/machine.py | 18 +--
asv/commands/preview.py | 2 +-
asv/commands/profiling.py | 48 +++---
asv/commands/publish.py | 44 +++---
asv/commands/rm.py | 20 ++-
asv/commands/run.py | 5 +-
asv/commands/show.py | 12 +-
asv/commands/update.py | 14 +-
asv/console.py | 45 ++----
asv/environment.py | 65 ++++----
asv/extern/asizeof.py | 212 ++++++++++----------------
asv/extern/minify_json.py | 4 +-
asv/feed.py | 44 +++---
asv/graph.py | 28 ++--
asv/machine.py | 32 +---
asv/plugin_manager.py | 2 +-
asv/plugins/conda.py | 57 ++++---
asv/plugins/git.py | 50 +++---
asv/plugins/mercurial.py | 18 +--
asv/plugins/regressions.py | 25 ++-
asv/plugins/summarylist.py | 8 +-
asv/plugins/virtualenv.py | 11 +-
asv/repo.py | 12 +-
asv/results.py | 113 +++++++-------
asv/runner.py | 46 ++----
asv/statistics.py | 69 ++-------
asv/step_detect.py | 25 +--
asv/template/benchmarks/benchmarks.py | 8 +-
asv/util.py | 151 +++++++-----------
test/benchmark/params_examples.py | 5 +-
test/benchmark/peakmem_examples.py | 2 -
test/benchmark/subdir/time_subdir.py | 2 -
test/benchmark/time_examples.py | 8 +-
test/conftest.py | 30 ++--
test/test_benchmarks.py | 10 +-
test/test_compare.py | 2 +-
test/test_continuous.py | 2 +-
test/test_environment.py | 38 +++--
test/test_feed.py | 2 +-
test/test_gh_pages.py | 12 +-
test/test_publish.py | 48 +++---
test/test_repo.py | 3 -
test/test_results.py | 4 +-
test/test_run.py | 42 +++--
test/test_runner.py | 6 +-
test/test_statistics.py | 36 ++---
test/test_step_detect.py | 5 +-
test/test_subprocess.py | 14 +-
test/test_update.py | 10 +-
test/test_workflow.py | 29 ++--
test/tools.py | 42 +++--
58 files changed, 752 insertions(+), 1056 deletions(-)
diff --git a/asv/benchmark.py b/asv/benchmark.py
index 53438f300..bf231c73c 100644
--- a/asv/benchmark.py
+++ b/asv/benchmark.py
@@ -22,6 +22,7 @@
Run a Unix socket forkserver.
"""
+
# !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!
# This file, unlike most others, must be compatible with as many
# versions of Python as possible and have no dependencies outside of
@@ -33,11 +34,7 @@
# sys.path[0] on start which can shadow other modules
import sys
-if __name__ == "__main__":
- _old_sys_path_head = sys.path.pop(0)
-else:
- _old_sys_path_head = None
-
+_old_sys_path_head = sys.path.pop(0) if __name__ == "__main__" else None
import copy
import cProfile as profile
import ctypes
@@ -170,18 +167,17 @@ def recvall(sock, size):
def _get_attr(source, name, ignore_case=False):
- if ignore_case:
- attrs = [getattr(source, key) for key in dir(source)
- if key.lower() == name.lower()]
-
- if len(attrs) > 1:
- raise ValueError(f"{source.__name__} contains multiple {name} functions.")
- elif len(attrs) == 1:
- return attrs[0]
- else:
- return None
- else:
+ if not ignore_case:
return getattr(source, name, None)
+ attrs = [getattr(source, key) for key in dir(source)
+ if key.lower() == name.lower()]
+
+ if len(attrs) > 1:
+ raise ValueError(f"{source.__name__} contains multiple {name} functions.")
+ elif len(attrs) == 1:
+ return attrs[0]
+ else:
+ return None
def _get_all_attrs(sources, name, ignore_case=False):
@@ -282,20 +278,13 @@ def check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None):
if inspect.ismethod(func):
max_args -= 1
- if info.defaults is not None:
- min_args = max_args - len(info.defaults)
- else:
- min_args = max_args
-
+ min_args = max_args if info.defaults is None else max_args - len(info.defaults)
if info.varargs is not None:
max_args = math.inf
ok = (min_args <= max_num_args) and (min_num_args <= max_args)
if not ok:
- if min_args == max_args:
- args_str = min_args
- else:
- args_str = f"{min_args}-{max_args}"
+ args_str = min_args if min_args == max_args else f"{min_args}-{max_args}"
if min_num_args == max_num_args:
num_args_str = min_num_args
else:
@@ -310,16 +299,14 @@ def check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None):
def _repr_no_address(obj):
result = repr(obj)
address_regex = re.compile(r'^(<.*) at (0x[\da-fA-F]*)(>)$')
- match = address_regex.match(result)
- if match:
- suspected_address = match.group(2)
+ if match := address_regex.match(result):
+ suspected_address = match[2]
# Double check this is the actual address
default_result = object.__repr__(obj)
- match2 = address_regex.match(default_result)
- if match2:
- known_address = match2.group(2)
+ if match2 := address_regex.match(default_result):
+ known_address = match2[2]
if known_address == suspected_address:
- result = match.group(1) + match.group(3)
+ result = match[1] + match[3]
return result
@@ -361,18 +348,18 @@ def __init__(self, name, func, attr_sources):
try:
self.param_names = [str(x) for x in list(self.param_names)]
except ValueError:
- raise ValueError("%s.param_names is not a list of strings" % (name,))
+ raise ValueError(f"{name}.param_names is not a list of strings")
try:
self._params = list(self._params)
except ValueError:
- raise ValueError("%s.params is not a list" % (name,))
+ raise ValueError(f"{name}.params is not a list")
if self._params and not isinstance(self._params[0], (tuple, list)):
# Accept a single list for one parameter only
self._params = [self._params]
else:
- self._params = [[item for item in entry] for entry in self._params]
+ self._params = [list(entry) for entry in self._params]
if len(self.param_names) != len(self._params):
self.param_names = self.param_names[:len(self._params)]
@@ -388,7 +375,7 @@ def __init__(self, name, func, attr_sources):
for j in range(len(param)):
name = param[j]
if name in dupe_dict:
- param[j] = name + f' ({dupe_dict[name]})'
+ param[j] = f'{name} ({dupe_dict[name]})'
dupe_dict[name] += 1
self.params[i] = param
@@ -421,20 +408,28 @@ def check(self, root):
max_num_args = min_num_args
if self.setup_cache_key is not None:
- ok = ok and check_num_args(root, self.name + ": setup_cache",
- self._setup_cache, 0)
+ ok = ok and check_num_args(
+ root, f"{self.name}: setup_cache", self._setup_cache, 0
+ )
max_num_args += 1
for setup in self._setups:
- ok = ok and check_num_args(root, self.name + ": setup",
- setup, min_num_args, max_num_args)
+ ok = ok and check_num_args(
+ root, f"{self.name}: setup", setup, min_num_args, max_num_args
+ )
- ok = ok and check_num_args(root, self.name + ": call",
- self.func, min_num_args, max_num_args)
+ ok = ok and check_num_args(
+ root, f"{self.name}: call", self.func, min_num_args, max_num_args
+ )
for teardown in self._teardowns:
- ok = ok and check_num_args(root, self.name + ": teardown",
- teardown, min_num_args, max_num_args)
+ ok = ok and check_num_args(
+ root,
+ f"{self.name}: teardown",
+ teardown,
+ min_num_args,
+ max_num_args,
+ )
return ok
@@ -533,13 +528,7 @@ def func():
def run(self, *param):
warmup_time = self.warmup_time
if warmup_time < 0:
- if '__pypy__' in sys.modules:
- warmup_time = 1.0
- else:
- # Transient effects exist also on CPython, e.g. from
- # OS scheduling
- warmup_time = 0.1
-
+ warmup_time = 1.0 if '__pypy__' in sys.modules else 0.1
timer = self._get_timer(*param)
try:
@@ -840,9 +829,8 @@ def disc_modules(module_name, ignore_import_errors=False):
yield module
if getattr(module, '__path__', None):
- for _, name, _ in pkgutil.iter_modules(module.__path__, module_name + '.'):
- for item in disc_modules(name, ignore_import_errors=ignore_import_errors):
- yield item
+ for _, name, _ in pkgutil.iter_modules(module.__path__, f'{module_name}.'):
+ yield from disc_modules(name, ignore_import_errors=ignore_import_errors)
def disc_benchmarks(root, ignore_import_errors=False):
@@ -908,7 +896,7 @@ def get_benchmark_from_name(root, name, extra_params=None):
# name
parts = name.split('.')
for i in [1, 2]:
- path = os.path.join(root, *parts[:-i]) + '.py'
+ path = f'{os.path.join(root, *parts[:-i])}.py'
if not os.path.isfile(path):
continue
modname = '.'.join([os.path.basename(root)] + parts[:-i])
@@ -965,10 +953,12 @@ def list_benchmarks(root, fp):
for benchmark in disc_benchmarks(root):
if not first:
fp.write(', ')
- clean = dict(
- (k, v) for (k, v) in benchmark.__dict__.items()
- if isinstance(v, (str, int, float, list, dict, bool)) and not
- k.startswith('_'))
+ clean = {
+ k: v
+ for (k, v) in benchmark.__dict__.items()
+ if isinstance(v, (str, int, float, list, dict, bool))
+ and not k.startswith('_')
+ }
json.dump(clean, fp, skipkeys=True)
first = False
fp.write(']')
@@ -1131,7 +1121,7 @@ def main_run_server(args):
# Import benchmark suite before forking.
# Capture I/O to a file during import.
with posix_redirect_output(stdout_file, permanent=False):
- for benchmark in disc_benchmarks(benchmark_dir, ignore_import_errors=True):
+ for _ in disc_benchmarks(benchmark_dir, ignore_import_errors=True):
pass
# Report result
@@ -1297,9 +1287,9 @@ def main():
sys.exit(1)
mode = sys.argv[1]
- args = sys.argv[2:]
-
if mode in commands:
+ args = sys.argv[2:]
+
commands[mode](args)
sys.exit(0)
else:
diff --git a/asv/benchmarks.py b/asv/benchmarks.py
index 81969dae0..d3ed04f19 100644
--- a/asv/benchmarks.py
+++ b/asv/benchmarks.py
@@ -55,9 +55,7 @@ def __init__(self, conf, benchmarks, regex=None):
self._benchmark_selection[benchmark['name']] = []
for idx, param_set in enumerate(
itertools.product(*benchmark['params'])):
- name = '%s(%s)' % (
- benchmark['name'],
- ', '.join(param_set))
+ name = f"{benchmark['name']}({', '.join(param_set)})"
if not regex or any(re.search(reg, name) for reg in regex):
self[benchmark['name']] = benchmark
self._benchmark_selection[benchmark['name']].append(idx)
@@ -153,15 +151,13 @@ def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):
#
def iter_hashes():
- for h in commit_hashes[:1]:
- yield h
+ yield from commit_hashes[:1]
for branch in conf.branches:
try:
yield repo.get_hash_from_name(branch)
except NoSuchNameError:
continue
- for h in commit_hashes[1:]:
- yield h
+ yield from commit_hashes[1:]
def iter_unique(iter):
seen = set()
@@ -326,7 +322,7 @@ def load(cls, conf, regex=None):
try:
path = cls.get_benchmark_file_path(conf.results_dir)
if not os.path.isfile(path):
- raise util.UserError("Benchmark list file {} missing!".format(path))
+ raise util.UserError(f"Benchmark list file {path} missing!")
d = util.load_json(path, api_version=cls.api_version)
benchmarks = d.values()
return cls(conf, benchmarks, regex=regex)
@@ -334,5 +330,6 @@ def load(cls, conf, regex=None):
if "asv update" in str(err):
# Don't give conflicting instructions
raise
- raise util.UserError("{}\nUse `asv run --bench just-discover` to "
- "regenerate benchmarks.json".format(str(err)))
+ raise util.UserError(
+ f"{str(err)}\nUse `asv run --bench just-discover` to regenerate benchmarks.json"
+ )
diff --git a/asv/build_cache.py b/asv/build_cache.py
index 9ca89b245..625e0472c 100644
--- a/asv/build_cache.py
+++ b/asv/build_cache.py
@@ -39,7 +39,7 @@ def _get_cache_dir(self, commit_hash):
Get the cache dir and timestamp file corresponding to a given commit hash.
"""
path = os.path.join(self._path, commit_hash)
- stamp = path + ".timestamp"
+ stamp = f"{path}.timestamp"
return path, stamp
def _remove_cache_dir(self, commit_hash):
diff --git a/asv/commands/__init__.py b/asv/commands/__init__.py
index 13fa4c9ed..6e221961a 100644
--- a/asv/commands/__init__.py
+++ b/asv/commands/__init__.py
@@ -82,7 +82,7 @@ def help(args):
"help", help="Display usage information")
help_parser.set_defaults(func=help)
- commands = dict((x.__name__, x) for x in util.iter_subclasses(Command))
+ commands = {x.__name__: x for x in util.iter_subclasses(Command)}
for command in command_order:
subparser = commands[str(command)].setup_arguments(subparsers)
@@ -102,13 +102,17 @@ def _make_docstring():
lines = []
for p in subparsers.choices.values():
- lines.append('.. _cmd-{0}:'.format(p.prog.replace(' ', '-')))
- lines.append('')
- lines.append(p.prog)
- lines.append('-' * len(p.prog))
- lines.append('::')
- lines.append('')
- lines.extend(' ' + x for x in p.format_help().splitlines())
+ lines.extend(
+ (
+ '.. _cmd-{0}:'.format(p.prog.replace(' ', '-')),
+ '',
+ p.prog,
+ '-' * len(p.prog),
+ '::',
+ '',
+ )
+ )
+ lines.extend(f' {x}' for x in p.format_help().splitlines())
lines.append('')
return '\n'.join(lines)
diff --git a/asv/commands/common_args.py b/asv/commands/common_args.py
index 05fcf1518..01946612e 100644
--- a/asv/commands/common_args.py
+++ b/asv/commands/common_args.py
@@ -13,11 +13,7 @@ def add_global_arguments(parser, suppress_defaults=True):
# arguments both before and after subcommand. Only the top-level
# parser should have suppress_defaults=False
- if suppress_defaults:
- suppressor = dict(default=argparse.SUPPRESS)
- else:
- suppressor = dict()
-
+ suppressor = dict(default=argparse.SUPPRESS) if suppress_defaults else {}
parser.add_argument(
"--verbose", "-v", action="store_true",
help="Increase verbosity",
@@ -29,9 +25,12 @@ def add_global_arguments(parser, suppress_defaults=True):
default=(argparse.SUPPRESS if suppress_defaults else 'asv.conf.json'))
parser.add_argument(
- "--version", action="version", version="%(prog)s " + __version__,
+ "--version",
+ action="version",
+ version=f"%(prog)s {__version__}",
help="Print program version",
- **suppressor)
+ **suppressor,
+ )
def add_compare(parser, only_changed_default=False, sort_default='name'):
@@ -135,11 +134,7 @@ def parse_repeat(value):
return value
def parse_affinity(value):
- if "," in value:
- value = value.split(",")
- else:
- value = [value]
-
+ value = value.split(",") if "," in value else [value]
affinity_list = []
for v in value:
if "-" in v:
@@ -206,7 +201,7 @@ def __call__(self, parser, namespace, values, option_string=None):
if values == "same":
items.extend(["existing:same"])
else:
- items.extend([":" + value for value in values])
+ items.extend([f":{value}" for value in values])
setattr(namespace, "env_spec", items)
@@ -267,10 +262,14 @@ def add_record_samples(parser, record_default=False):
"""Store raw measurement samples, not only statistics"""),
default=record_default)
grp.add_argument(
- "--no-record-samples", action="store_false", dest="record_samples",
- help=(argparse.SUPPRESS if not record_default else
- """Do not store raw measurement samples, but only statistics"""),
- default=record_default)
+ "--no-record-samples",
+ action="store_false",
+ dest="record_samples",
+ help="""Do not store raw measurement samples, but only statistics"""
+ if record_default
+ else argparse.SUPPRESS,
+ default=record_default,
+ )
parser.add_argument(
"--append-samples", action="store_true",
help="""Combine new measurement samples with previous results,
@@ -284,7 +283,7 @@ def positive_int(string):
"""
try:
value = int(string)
- if not value > 0:
+ if value <= 0:
raise ValueError()
return value
except ValueError:
@@ -299,7 +298,7 @@ def positive_int_or_inf(string):
if string == 'all':
return math.inf
value = int(string)
- if not value > 0:
+ if value <= 0:
raise ValueError()
return value
except ValueError:
diff --git a/asv/commands/compare.py b/asv/commands/compare.py
index 26a61c20f..e62ac3a86 100644
--- a/asv/commands/compare.py
+++ b/asv/commands/compare.py
@@ -15,11 +15,10 @@
def mean(values):
- if all([value is None for value in values]):
+ if all(value is None for value in values):
return None
- else:
- values = [value for value in values if value is not None]
- return sum(values) / float(len(values))
+ values = [value for value in values if value is not None]
+ return sum(values) / float(len(values))
def unroll_result(benchmark_name, params, *values):
@@ -49,7 +48,7 @@ def unroll_result(benchmark_name, params, *values):
if params == ():
name = benchmark_name
else:
- name = "%s(%s)" % (benchmark_name, ", ".join(params))
+ name = f'{benchmark_name}({", ".join(params)})'
yield (name,) + value
@@ -65,16 +64,16 @@ def _is_result_better(a, b, a_ss, b_ss, factor, use_stats=True):
"""
- if use_stats and a_ss and b_ss and a_ss[0] and b_ss[0] and (
- a_ss[0].get('repeat', 0) != 1 and b_ss[0].get('repeat', 0) != 1):
- # Return False if estimates don't differ.
- #
- # Special-case the situation with only one sample, in which
- # case we do the comparison only based on `factor` as there's
- # not enough data to do statistics.
- if not statistics.is_different(a_ss[1], b_ss[1],
- a_ss[0], b_ss[0]):
- return False
+ if (
+ use_stats
+ and a_ss
+ and b_ss
+ and a_ss[0]
+ and b_ss[0]
+ and (a_ss[0].get('repeat', 0) != 1 and b_ss[0].get('repeat', 0) != 1)
+ and not statistics.is_different(a_ss[1], b_ss[1], a_ss[0], b_ss[0])
+ ):
+ return False
return a < b / factor
@@ -146,7 +145,7 @@ def run(cls, conf, hash_1, hash_2, factor=None, split=False, only_changed=False,
d = load_json(path)
machines.append(d['machine'])
- if len(machines) == 0:
+ if not machines:
raise util.UserError("No results found")
elif machine is None:
if len(machines) > 1:
@@ -225,11 +224,11 @@ def results_default_iter(commit_hash):
ss_2[(name, machine_env_name)] = (stats, samples)
versions_2[(name, machine_env_name)] = version
- if len(results_1) == 0:
+ if not results_1:
raise util.UserError(
"Did not find results for commit {0}".format(hash_1))
- if len(results_2) == 0:
+ if not results_2:
raise util.UserError(
"Did not find results for commit {0}".format(hash_2))
@@ -252,16 +251,8 @@ def results_default_iter(commit_hash):
improved = False
for benchmark in joint_benchmarks:
- if benchmark in results_1:
- time_1 = results_1[benchmark]
- else:
- time_1 = math.nan
-
- if benchmark in results_2:
- time_2 = results_2[benchmark]
- else:
- time_2 = math.nan
-
+ time_1 = results_1[benchmark] if benchmark in results_1 else math.nan
+ time_2 = results_2[benchmark] if benchmark in results_2 else math.nan
if benchmark in ss_1 and ss_1[benchmark][0]:
err_1 = statistics.get_err(time_1, ss_1[benchmark][0])
else:
@@ -328,7 +319,7 @@ def results_default_iter(commit_hash):
# Mark statistically insignificant results
if (_is_result_better(time_1, time_2, None, None, factor) or
_is_result_better(time_2, time_1, None, None, factor)):
- ratio = "~" + ratio.strip()
+ ratio = f"~{ratio.strip()}"
if only_changed and mark in (' ', 'x'):
continue
@@ -346,11 +337,7 @@ def results_default_iter(commit_hash):
else:
bench['all'].append((color, details, benchmark, ratio_num))
- if split:
- keys = ['green', 'default', 'red', 'lightgrey']
- else:
- keys = ['all']
-
+ keys = ['green', 'default', 'red', 'lightgrey'] if split else ['all']
titles = {}
titles['green'] = "Benchmarks that have improved:"
titles['default'] = "Benchmarks that have stayed the same:"
@@ -373,17 +360,9 @@ def results_default_iter(commit_hash):
color_print(" [{0:8s}] [{1:8s}]".format(hash_1[:8], hash_2[:8]))
name_1 = commit_names.get(hash_1)
- if name_1:
- name_1 = '<{0}>'.format(name_1)
- else:
- name_1 = ''
-
+ name_1 = '<{0}>'.format(name_1) if name_1 else ''
name_2 = commit_names.get(hash_2)
- if name_2:
- name_2 = '<{0}>'.format(name_2)
- else:
- name_2 = ''
-
+ name_2 = '<{0}>'.format(name_2) if name_2 else ''
if name_1 or name_2:
color_print(" {0:10s} {1:10s}".format(name_1, name_2))
diff --git a/asv/commands/find.py b/asv/commands/find.py
index 308e8c9ad..ca69abc6e 100644
--- a/asv/commands/find.py
+++ b/asv/commands/find.py
@@ -217,20 +217,13 @@ def do_search(lo, hi):
diff_b, diff_a = difference_3way(hi_result, mid_result, lo_result)
- if diff_a >= diff_b:
- return do_search(lo, mid)
- else:
- return do_search(mid, hi)
+ return do_search(lo, mid) if diff_a >= diff_b else do_search(mid, hi)
result = do_search(0, len(commit_hashes) - 1)
commit_name = repo.get_decorated_hash(commit_hashes[result], 8)
- if invert:
- direction = "improvement"
- else:
- direction = "regression"
-
+ direction = "improvement" if invert else "regression"
log.info("Greatest {0} found: {1}".format(direction, commit_name))
return 0
diff --git a/asv/commands/machine.py b/asv/commands/machine.py
index 18f5cba89..a26b66168 100644
--- a/asv/commands/machine.py
+++ b/asv/commands/machine.py
@@ -17,9 +17,7 @@ def setup_arguments(cls, subparsers):
defaults = machine.Machine.get_defaults()
for name, description in machine.Machine.fields:
- parser.add_argument(
- '--' + name, default=defaults[name],
- help=description)
+ parser.add_argument(f'--{name}', default=defaults[name], help=description)
parser.add_argument('--yes', default=False, action='store_true',
help="Accept all questions")
@@ -34,14 +32,14 @@ def run_from_conf_args(cls, conf, args):
@classmethod
def run(cls, **kwargs):
- different = {}
defaults = machine.Machine.get_defaults()
- for key, val in defaults.items():
- if kwargs.get(key) != val:
- different[key] = kwargs.get(key)
-
+ different = {
+ key: kwargs.get(key)
+ for key, val in defaults.items()
+ if kwargs.get(key) != val
+ }
use_defaults = kwargs['yes']
machine.Machine.load(
- force_interactive=(len(different) == 0),
- use_defaults=use_defaults, **different)
+ force_interactive=not different, use_defaults=use_defaults, **different
+ )
diff --git a/asv/commands/preview.py b/asv/commands/preview.py
index 1af16bd78..bac492206 100644
--- a/asv/commands/preview.py
+++ b/asv/commands/preview.py
@@ -24,7 +24,7 @@ def random_ports(port, n):
port = 8080
for i in range(min(5, n)):
yield port + i
- for i in range(n - 5):
+ for _ in range(n - 5):
yield max(1, port + random.randint(-2 * n, 2 * n))
diff --git a/asv/commands/profiling.py b/asv/commands/profiling.py
index 6006abbf8..092293414 100644
--- a/asv/commands/profiling.py
+++ b/asv/commands/profiling.py
@@ -118,40 +118,37 @@ def run(cls, conf, benchmark, revision=None, gui=None, output=None,
repo.pull()
machine_name = Machine.get_unique_machine_name()
- if revision is None:
- rev = conf.branches[0]
- else:
- rev = revision
-
+ rev = conf.branches[0] if revision is None else revision
try:
commit_hash = repo.get_hash_from_name(rev)
except NoSuchNameError as exc:
raise util.UserError("Unknown commit {0}".format(exc))
profile_data = None
- checked_out = set()
-
# First, we see if we already have the profile in the results
# database
if not force and commit_hash:
+ checked_out = set()
+
for result in iter_results_for_machine(
conf.results_dir, machine_name):
- if hash_equal(commit_hash, result.commit_hash):
- if result.has_profile(benchmark):
- env_matched = any(result.env.name == env.name
- for env in environments)
- if env_matched:
- if result.env.name not in checked_out:
- # We need to checkout the correct commit so that
- # the line numbers in the profile data match up with
- # what's in the source tree.
- result.env.checkout_project(repo, commit_hash)
- checked_out.add(result.env.name)
- profile_data = result.get_profile(benchmark)
- break
+ if hash_equal(
+ commit_hash, result.commit_hash
+ ) and result.has_profile(benchmark):
+ env_matched = any(result.env.name == env.name
+ for env in environments)
+ if env_matched:
+ if result.env.name not in checked_out:
+ # We need to checkout the correct commit so that
+ # the line numbers in the profile data match up with
+ # what's in the source tree.
+ result.env.checkout_project(repo, commit_hash)
+ checked_out.add(result.env.name)
+ profile_data = result.get_profile(benchmark)
+ break
if profile_data is None:
- if len(environments) == 0:
+ if not environments:
log.error("No environments selected")
return
@@ -177,13 +174,12 @@ def run(cls, conf, benchmark, revision=None, gui=None, output=None,
raise util.UserError("'{0}' benchmark not found".format(benchmark))
elif len(benchmarks) > 1:
exact_matches = benchmarks.filter_out([x for x in benchmarks if x != benchmark])
- if len(exact_matches) == 1:
- log.warning("'{0}' matches more than one benchmark, "
- "using exact match".format(benchmark))
- benchmarks = exact_matches
- else:
+ if len(exact_matches) != 1:
raise util.UserError("'{0}' matches more than one benchmark".format(benchmark))
+ log.warning("'{0}' matches more than one benchmark, "
+ "using exact match".format(benchmark))
+ benchmarks = exact_matches
benchmark_name, = benchmarks.keys()
if not force:
diff --git a/asv/commands/publish.py b/asv/commands/publish.py
index e5eb0bce5..6417d17e8 100644
--- a/asv/commands/publish.py
+++ b/asv/commands/publish.py
@@ -27,8 +27,7 @@ def check_benchmark_params(name, benchmark):
benchmark['params'] = []
benchmark['param_names'] = []
- msg = "Information in benchmarks.json for benchmark %s is malformed" % (
- name)
+ msg = f"Information in benchmarks.json for benchmark {name} is malformed"
if (not isinstance(benchmark['params'], list) or
not isinstance(benchmark['param_names'], list)):
raise ValueError(msg)
@@ -71,13 +70,12 @@ def run_from_conf_args(cls, conf, args):
@staticmethod
def iter_results(conf, repo, range_spec=None):
- if range_spec is not None:
- if isinstance(range_spec, list):
- hashes = range_spec
- else:
- hashes = repo.get_hashes_from_range(range_spec)
- else:
+ if range_spec is None:
hashes = None
+ elif isinstance(range_spec, list):
+ hashes = range_spec
+ else:
+ hashes = repo.get_hashes_from_range(range_spec)
for result in iter_results(conf.results_dir):
if hashes is None or result.commit_hash in hashes:
yield result
@@ -156,11 +154,11 @@ def copy_ignore(src, names):
tags[tag] = revisions[tags[tag]]
hash_to_date[commit_hash] = repo.get_date_from_name(commit_hash)
- revision_to_date = dict((r, hash_to_date[h]) for h, r in revisions.items())
+ revision_to_date = {r: hash_to_date[h] for h, r in revisions.items()}
- branches = dict(
- (branch, repo.get_branch_commits(branch))
- for branch in conf.branches)
+ branches = {
+ branch: repo.get_branch_commits(branch) for branch in conf.branches
+ }
log.step()
log.info("Loading results")
@@ -175,8 +173,12 @@ def copy_ignore(src, names):
# Print a warning message if we couldn't find the branch of a commit
if not len(branches_for_commit):
msg = "Couldn't find {} in branches ({})"
- log.warning(msg.format(results.commit_hash[:conf.hash_length],
- ", ".join(str(branch) for branch in branches.keys())))
+ log.warning(
+ msg.format(
+ results.commit_hash[: conf.hash_length],
+ ", ".join(str(branch) for branch in branches),
+ )
+ )
for key in results.get_result_keys(benchmarks):
b = benchmarks[key]
@@ -204,7 +206,7 @@ def copy_ignore(src, names):
cur_params[param_key] = ''
# Fill in missing params
- for param_key in params.keys():
+ for param_key in params:
if param_key not in cur_params:
cur_params[param_key] = None
params[param_key].add(None)
@@ -216,9 +218,11 @@ def copy_ignore(src, names):
# Get the parameter sets for all graphs
graph_param_list = []
for path, graph in graphs:
- if 'summary' not in graph.params:
- if graph.params not in graph_param_list:
- graph_param_list.append(graph.params)
+ if (
+ 'summary' not in graph.params
+ and graph.params not in graph_param_list
+ ):
+ graph_param_list.append(graph.params)
log.step()
log.info("Detecting steps")
@@ -251,14 +255,14 @@ def copy_ignore(src, names):
log.step()
log.info("Writing index")
benchmark_map = dict(benchmarks)
- for key in benchmark_map.keys():
+ for key in benchmark_map:
check_benchmark_params(key, benchmark_map[key])
for key, val in params.items():
val = list(val)
val.sort(key=lambda x: '[none]' if x is None else str(x))
params[key] = val
params['branch'] = [repo.get_branch_name(branch) for branch in conf.branches]
- revision_to_hash = dict((r, h) for h, r in revisions.items())
+ revision_to_hash = {r: h for h, r in revisions.items()}
util.write_json(os.path.join(conf.html_dir, "index.json"), {
'project': conf.project,
'project_url': conf.project_url,
diff --git a/asv/commands/rm.py b/asv/commands/rm.py
index b0dd7caa0..dc6d917fb 100644
--- a/asv/commands/rm.py
+++ b/asv/commands/rm.py
@@ -51,10 +51,10 @@ def run(cls, conf, patterns, y=True):
if single_benchmark is not None:
raise util.UserError("'benchmark' appears more than once")
single_benchmark = parts[1]
+ elif parts[0] in global_patterns:
+ raise util.UserError(
+ f"'{parts[0]}' appears more than once")
else:
- if parts[0] in global_patterns:
- raise util.UserError(
- f"'{parts[0]}' appears more than once")
global_patterns[parts[0]] = parts[1]
for result in iter_results(conf.results_dir):
@@ -68,10 +68,9 @@ def run(cls, conf, patterns, y=True):
if not fnmatchcase(result.env.python, val):
found = False
break
- else:
- if not fnmatchcase(result.params.get(key), val):
- found = False
- break
+ elif not fnmatchcase(result.params.get(key), val):
+ found = False
+ break
if not found:
continue
@@ -96,9 +95,8 @@ def run(cls, conf, patterns, y=True):
if len(do) and do.lower()[0] != 'y':
sys.exit(0)
- if single_benchmark is not None:
- for result in files_to_remove:
+ for result in files_to_remove:
+ if single_benchmark is not None:
result.save(conf.results_dir)
- else:
- for result in files_to_remove:
+ else:
result.rm(conf.results_dir)
diff --git a/asv/commands/run.py b/asv/commands/run.py
index 457c127fb..0125132a5 100644
--- a/asv/commands/run.py
+++ b/asv/commands/run.py
@@ -40,10 +40,7 @@ def _do_build_multiprocess(args_sets):
environment.
"""
try:
- res = []
- for args in args_sets:
- res.append(_do_build(args))
- return res
+ return [_do_build(args) for args in args_sets]
except BaseException as exc:
raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc())
diff --git a/asv/commands/show.py b/asv/commands/show.py
index d8f837e96..20a72ff73 100644
--- a/asv/commands/show.py
+++ b/asv/commands/show.py
@@ -61,7 +61,7 @@ def run(cls, conf, commit=None, bench=None, machine=None, env_spec=None,
d = load_json(path)
machines.append(d['machine'])
- if len(machines) == 0:
+ if not machines:
raise util.UserError("No results found")
elif machine is None:
pass
@@ -180,11 +180,7 @@ def _print_benchmark(cls, machine, result, benchmark, show_details=False):
started_at = "n/a"
duration = result.duration.get(benchmark['name'])
- if duration is not None:
- duration = util.human_time(duration)
- else:
- duration = "n/a"
-
+ duration = util.human_time(duration) if duration is not None else "n/a"
if started_at != "n/a" or duration != "n/a":
color_print(f' started: {started_at}, duration: {duration}')
@@ -211,11 +207,11 @@ def get_stat_info(key):
values = [util.human_value(x, benchmark['unit']) if x is not None else None
for x in values]
- if not all(x is None for x in values):
+ if any(x is not None for x in values):
color_print(f' {key}: {", ".join(map(str, values))}')
samples = result.get_result_samples(benchmark['name'], benchmark['params'])
- if not all(x is None for x in samples):
+ if any(x is not None for x in samples):
color_print(f" samples: {samples}")
color_print("")
diff --git a/asv/commands/update.py b/asv/commands/update.py
index d40db60e7..c665e635e 100644
--- a/asv/commands/update.py
+++ b/asv/commands/update.py
@@ -47,22 +47,22 @@ def run(cls, conf, _machine_file=None):
Results.update(path)
except util.UserError as err:
# Conversion failed: just skip the file
- log.warning("{}: {}".format(path, err))
+ log.warning(f"{path}: {err}")
continue
- # Rename files if necessary
- m = re.match(r'^([0-9a-f]+)-(.*)\.json$', os.path.basename(path), re.I)
- if m:
- new_path = get_filename(root, m.group(1), m.group(2))
+ if m := re.match(
+ r'^([0-9a-f]+)-(.*)\.json$', os.path.basename(path), re.I
+ ):
+ new_path = get_filename(root, m[1], m[2])
if new_path != path:
try:
if os.path.exists(new_path):
raise OSError()
os.rename(path, new_path)
except OSError:
- log.warning("{}: should be renamed to {}".format(path, new_path))
+ log.warning(f"{path}: should be renamed to {new_path}")
else:
- log.warning("{}: unrecognized file name".format(path))
+ log.warning(f"{path}: unrecognized file name")
# Check benchmarks.json
log.info("Updating benchmarks.json...")
diff --git a/asv/console.py b/asv/console.py
index 4fc653902..0974fa2b1 100644
--- a/asv/console.py
+++ b/asv/console.py
@@ -25,9 +25,7 @@ def isatty(file):
but some user-defined types may not, so this assumes those are not
ttys.
"""
- if hasattr(file, 'isatty'):
- return file.isatty()
- return False
+ return file.isatty() if hasattr(file, 'isatty') else False
def _color_text(text, color):
@@ -101,12 +99,9 @@ def _write_with_fallback(s, fileobj):
if not isinstance(s, str):
raise ValueError("Input string is not a Unicode string")
- try:
+ with contextlib.suppress(UnicodeError):
fileobj.write(s)
return
- except UnicodeError:
- pass
-
# Fall back to writing bytes
enc = locale.getpreferredencoding()
try:
@@ -156,21 +151,17 @@ def color_print(*args, **kwargs):
if isatty(file) and not WIN:
for i in range(0, len(args), 2):
msg = args[i]
- if i + 1 == len(args):
- color = ''
- else:
- color = args[i + 1]
-
+ color = '' if i + 1 == len(args) else args[i + 1]
if color:
msg = _color_text(msg, color)
_write_with_fallback(msg, file)
- _write_with_fallback(end, file)
else:
for i in range(0, len(args), 2):
msg = args[i]
_write_with_fallback(msg, file)
- _write_with_fallback(end, file)
+
+ _write_with_fallback(end, file)
def get_answer_default(prompt, default, use_defaults=False):
@@ -180,16 +171,11 @@ def get_answer_default(prompt, default, use_defaults=False):
return default
x = input()
- if x.strip() == '':
- return default
- return x
+ return default if x.strip() == '' else x
def truncate_left(s, l):
- if len(s) > l:
- return '...' + s[-(l - 3):]
- else:
- return s
+ return f'...{s[-(l - 3):]}' if len(s) > l else s
class Log:
@@ -209,11 +195,7 @@ def _stream_formatter(self, record):
color_print('')
parts = record.msg.split('\n', 1)
first_line = parts[0]
- if len(parts) == 1:
- rest = None
- else:
- rest = parts[1]
-
+ rest = None if len(parts) == 1 else parts[1]
indent = self._indent + 1
continued = getattr(record, 'continued', False)
@@ -247,11 +229,11 @@ def _stream_formatter(self, record):
else:
color = 'red'
- spaces = ' ' * indent
color_print(first_line, color, end='')
if rest is not None:
color_print('')
detail = textwrap.dedent(rest)
+ spaces = ' ' * indent
for line in detail.split('\n'):
color_print(spaces, end='')
color_print(line)
@@ -269,11 +251,10 @@ def indent(self):
self._indent -= 1
def dot(self):
- if isatty(sys.stdout):
- if time.time() > self._last_dot + 1.0:
- color_print('.', 'darkgrey', end='')
- sys.stdout.flush()
- self._last_dot = time.time()
+ if isatty(sys.stdout) and time.time() > self._last_dot + 1.0:
+ color_print('.', 'darkgrey', end='')
+ sys.stdout.flush()
+ self._last_dot = time.time()
def set_nitems(self, n):
"""
diff --git a/asv/environment.py b/asv/environment.py
index 2cb7ba684..daa95c5a6 100644
--- a/asv/environment.py
+++ b/asv/environment.py
@@ -110,7 +110,7 @@ def get_env_type(python):
rule = {}
- for key in platform_keys.keys():
+ for key in platform_keys:
if key in include:
rule[key] = include.pop(key)
@@ -170,11 +170,10 @@ def _parse_matrix(matrix, bare_keys=()):
submatrix = matrix.pop(t, {})
matrices.append((t, submatrix))
- # Check if spurious keys left
- remaining_keys = tuple(matrix.keys())
- if remaining_keys:
- raise util.UserError('Unknown keys in "matrix" configuration: {}, expected: {}'.format(
- remaining_keys, matrix_types + tuple(bare_keys)))
+ if remaining_keys := tuple(matrix.keys()):
+ raise util.UserError(
+ f'Unknown keys in "matrix" configuration: {remaining_keys}, expected: {matrix_types + tuple(bare_keys)}'
+ )
else:
# Backward-compatibility for old-style config
matrices = [('req', matrix)]
@@ -259,15 +258,9 @@ def get_env_name(tool_name, python, requirements, tagged_env_vars, build=False):
Whether to omit non-build environment variables.
The canonical name of the environment is the name with build=False.
"""
- if tool_name:
- name = [tool_name]
- else:
- # Backward compatibility vs. result file names
- name = []
-
+ name = [tool_name] if tool_name else []
name.append("py{0}".format(python))
- reqs = list(requirements.items())
- reqs.sort()
+ reqs = sorted(requirements.items())
for key, val in reqs:
if val:
name.append(''.join([key, val]))
@@ -276,20 +269,19 @@ def get_env_name(tool_name, python, requirements, tagged_env_vars, build=False):
env_vars = _untag_env_vars(tagged_env_vars, build=build)
- for env_var, value in sorted(env_vars.items()):
- name.append(''.join([env_var, value]))
-
+ name.extend(
+ ''.join([env_var, value])
+ for env_var, value in sorted(env_vars.items())
+ )
return util.sanitize_filename('-'.join(name))
def _untag_env_vars(tagged_env_vars, build=False):
- vars = {}
-
- for (tag, key), value in tagged_env_vars.items():
- if not build or tag == 'build':
- vars[key] = value
-
- return vars
+ return {
+ key: value
+ for (tag, key), value in tagged_env_vars.items()
+ if not build or tag == 'build'
+ }
def get_environments(conf, env_specifiers, verbose=True):
@@ -506,10 +498,11 @@ def __init__(self, conf, python, requirements, tagged_env_vars):
self._install_command = conf.install_command
self._uninstall_command = conf.uninstall_command
- self._global_env_vars = {}
- self._global_env_vars['ASV'] = 'true'
- self._global_env_vars['ASV_PROJECT'] = conf.project
- self._global_env_vars['ASV_CONF_DIR'] = os.path.abspath(os.getcwd())
+ self._global_env_vars = {
+ 'ASV': 'true',
+ 'ASV_PROJECT': conf.project,
+ 'ASV_CONF_DIR': os.path.abspath(os.getcwd()),
+ }
self._global_env_vars['ASV_ENV_NAME'] = self.name
self._global_env_vars['ASV_ENV_DIR'] = self._path
self._global_env_vars['ASV_ENV_TYPE'] = self.tool_name
@@ -571,7 +564,7 @@ def installed_commit_hash(self):
return self._get_installed_commit_hash()
@classmethod
- def matches(self, python):
+ def matches(cls, python):
"""
Returns `True` if this environment subclass can handle the
given Python specifier.
@@ -739,7 +732,7 @@ def _interpolate_commands(self, commands):
# All environment variables are available as interpolation variables,
# lowercased without the prefix.
- kwargs = dict()
+ kwargs = {}
for key, value in self._global_env_vars.items():
if key == 'ASV':
continue
@@ -766,7 +759,7 @@ def _interpolate_and_run_commands(self, commands, default_cwd, extra_env=None):
for cmd, env, return_codes, cwd in interpolated:
environ = dict(os.environ)
if extra_env is not None:
- environ.update(extra_env)
+ environ |= extra_env
environ.update(env)
if cwd is None:
cwd = default_cwd
@@ -906,11 +899,7 @@ def run_executable(self, executable, args, **kwargs):
env.update(self._global_env_vars)
# Insert bin dirs to PATH
- if "PATH" in env:
- paths = env["PATH"].split(os.pathsep)
- else:
- paths = []
-
+ paths = env["PATH"].split(os.pathsep) if "PATH" in env else []
if WIN:
subpaths = ['Library\\mingw-w64\\bin',
'Library\\bin',
@@ -933,9 +922,7 @@ def run_executable(self, executable, args, **kwargs):
# When running pip, we need to set PIP_USER to false, as --user (which
# may have been set from a pip config file) is incompatible with
# virtualenvs.
- kwargs["env"] = dict(env,
- PIP_USER=str("false"),
- PATH=str(os.pathsep.join(paths)))
+ kwargs["env"] = dict(env, PIP_USER="false", PATH=str(os.pathsep.join(paths)))
exe = self.find_executable(executable)
return util.check_output([exe] + args, **kwargs)
diff --git a/asv/extern/asizeof.py b/asv/extern/asizeof.py
index 473149ae4..36c1fdba3 100644
--- a/asv/extern/asizeof.py
+++ b/asv/extern/asizeof.py
@@ -185,11 +185,12 @@ class and the ``... def`` suffix marks the *definition object*.
.. [#bi] ``Type``s and ``class``es are considered built-in if the
``__module__`` of the type or class is listed in the private
``_builtin_modules``.
-''' # PYCHOK escape
-import sys
+'''
+
+# PYCHOK escape
+import sys
if sys.version_info < (2, 6, 0):
- raise NotImplementedError('%s requires Python 2.6 or newer' % ('asizeof',))
+ raise NotImplementedError('asizeof requires Python 2.6 or newer')
import types as Types
import warnings
@@ -218,10 +219,7 @@ class and the ``... def`` suffix marks the *definition object*.
_sizeof_Cvoidp = calcsize('P') # sizeof(void*)
# sizeof(long) != sizeof(ssize_t) on LLP64
-if _sizeof_Clong < _sizeof_Cvoidp: # pragma: no coverage
- _z_P_L = 'P'
-else:
- _z_P_L = 'L'
+_z_P_L = 'P' if _sizeof_Clong < _sizeof_Cvoidp else 'L'
def _calcsize(fmt):
@@ -292,34 +290,25 @@ class ABCMeta(type):
# Compatibility functions for more uniform
# behavior across Python version 2.2 thu 3+
-def _items(obj): # dict only
+def _items(obj): # dict only
'''Return iter-/generator, preferably.
'''
o = getattr(obj, 'iteritems', obj.items)
- if _callable(o):
- return o()
- else:
- return o or ()
+ return o() if _callable(o) else o or ()
-def _keys(obj): # dict only
+def _keys(obj): # dict only
'''Return iter-/generator, preferably.
'''
o = getattr(obj, 'iterkeys', obj.keys)
- if _callable(o):
- return o()
- else:
- return o or ()
+ return o() if _callable(o) else o or ()
-def _values(obj): # dict only
+def _values(obj): # dict only
'''Return iter-/generator, preferably.
'''
o = getattr(obj, 'itervalues', obj.values)
- if _callable(o):
- return o()
- else:
- return o or ()
+ return o() if _callable(o) else o or ()
try: # callable() builtin
@@ -402,9 +391,7 @@ def _derive_typedef(typ):
'''Return single, existing super type typedef or None.
'''
v = [v for v in _values(_typedefs) if _issubclass(typ, v.type)]
- if len(v) == 1:
- return v[0]
- return None
+ return v[0] if len(v) == 1 else None
def _dir2(obj, pref='', excl=(), slots=None, itor=''):
@@ -423,14 +410,13 @@ def _dir2(obj, pref='', excl=(), slots=None, itor=''):
for c in type(obj).mro():
for a in getattr(c, slots, ()):
if a.startswith('__'):
- a = '_' + c.__name__ + a
+ a = f'_{c.__name__}{a}'
if hasattr(obj, a):
s.setdefault(a, getattr(obj, a))
# assume __slots__ tuple-like is holding the values
# yield slots, _Slots(s) # _keys(s) ... REMOVED,
# see _Slots.__doc__ further below
- for t in _items(s):
- yield t # attr name, value
+ yield from _items(s)
elif itor: # iterator referents
for o in obj: # iter(obj)
yield itor, o
@@ -536,12 +522,11 @@ def _lengstr(obj):
'''
n = leng(obj)
if n is None: # no len
- r = ''
+ return ''
elif n > _len(obj): # extended
- r = ' leng %d!' % n
+ return ' leng %d!' % n
else:
- r = ' leng %d' % n
- return r
+ return ' leng %d' % n
def _moduleof(obj, dflt=''):
@@ -577,8 +562,7 @@ def _objs_opts_x(objs, all=None, **opts):
def _p100(part, total, prec=1):
'''Return percentage as string.
'''
- r = float(total)
- if r:
+ if r := float(total):
r = part * 100.0 / r
return '%.*f%%' % (prec, r)
return 'n/a'
@@ -587,11 +571,7 @@ def _p100(part, total, prec=1):
def _plural(num):
'''Return 's' if plural.
'''
- if num == 1:
- s = ''
- else:
- s = 's'
- return s
+ return '' if num == 1 else 's'
def _power2(n):
@@ -656,7 +636,7 @@ def _repr(obj, clip=80):
if len(r) > clip > 0:
h = (clip // 2) - 2
if h > 0:
- r = r[:h] + '....' + r[-h:]
+ r = f'{r[:h]}....{r[-h:]}'
return r
@@ -711,8 +691,8 @@ def _dict_refs(obj, named):
if named:
for k, v in _items(obj):
s = str(k)
- yield _NamedRef('[K] ' + s, k)
- yield _NamedRef('[V] ' + s + ': ' + _repr(v), v)
+ yield _NamedRef(f'[K] {s}', k)
+ yield _NamedRef(f'[V] {s}: {_repr(v)}', v)
else:
for k, v in _items(obj):
yield k
@@ -789,19 +769,14 @@ def _module_refs(obj, named):
'''Return specific referents of a module object.
'''
# ignore this very module
- if obj.__name__ == __name__:
- return ()
- # module is essentially a dict
- return _dict_refs(obj.__dict__, named)
+ return () if obj.__name__ == __name__ else _dict_refs(obj.__dict__, named)
def _namedtuple_refs(obj, named):
'''Return specific referents of obj-as-sequence and slots but exclude dict.
'''
- for r in _refs(obj, named, '__class__', slots='__slots__'):
- yield r
- for r in obj:
- yield r
+ yield from _refs(obj, named, '__class__', slots='__slots__')
+ yield from obj
def _prop_refs(obj, named):
@@ -885,22 +860,13 @@ def _len_dict(obj):
'''Dict length in items (estimate).
'''
n = len(obj) # active items
- if n < 6: # ma_smalltable ...
- n = 0 # ... in basicsize
- else: # at least one unused
- n = _power2(n + 1)
- return n
+ return 0 if n < 6 else _power2(n + 1)
def _len_frame(obj):
'''Length of a frame object.
'''
- c = getattr(obj, 'f_code', None)
- if c:
- n = _len_code(c)
- else:
- n = 0
- return n
+ return _len_code(c) if (c := getattr(obj, 'f_code', None)) else 0
_digit2p2 = 1 << (_sizeof_Cdigit << 3)
@@ -925,10 +891,7 @@ def _len_iter(obj):
'''Length (hint) of an iterator.
'''
n = getattr(obj, '__length_hint__', None)
- if n:
- n = n()
- else: # try len()
- n = _len(obj)
+ n = n() if n else _len(obj)
return n
@@ -1016,12 +979,11 @@ def __init__(self, obj, style):
def __str__(self):
r = str(self._obj)
if r.endswith('>'):
- r = '%s%s def>' % (r[:-1], self._sty)
+ return f'{r[:-1]}{self._sty} def>'
elif self._sty is _old_style and not r.startswith('class '):
- r = 'class %s%s def' % (r, self._sty)
+ return f'class {r}{self._sty} def'
else:
- r = '%s%s def' % (r, self._sty)
- return r
+ return f'{r}{self._sty} def'
__repr__ = __str__
@@ -1193,10 +1155,7 @@ def args(self): # as args tuple
def dup(self, other=None, **kwds):
'''Duplicate attributes of dict or other typedef.
'''
- if other is None:
- d = _dict_typedef.kwds()
- else:
- d = other.kwds()
+ d = _dict_typedef.kwds() if other is None else other.kwds()
d.update(kwds)
self.reset(**d)
@@ -1218,14 +1177,12 @@ def flat(self, obj, mask=0):
def format(self):
'''Return format dict.
'''
- i = self.item
- if self.vari:
- i = 'var'
+ i = 'var' if self.vari else self.item
c = n = ''
if not self.both:
c = ' (code only)'
if self.leng:
- n = ' (%s)' % _nameof(self.leng)
+ n = f' ({_nameof(self.leng)})'
return dict(base=self.base, item=i, leng=n, code=c,
kind=self.kind)
@@ -1244,10 +1201,7 @@ def save(self, t, base=0, heap=False):
if k and k not in _typedefs: # instance key
_typedefs[k] = self
if c and c not in _typedefs: # class key
- if t.__module__ in _builtin_modules:
- k = _kind_ignored # default
- else:
- k = self.kind
+ k = _kind_ignored if t.__module__ in _builtin_modules else self.kind
_typedefs[c] = _Typedef(base=_basicsize(type(t), base=base, heap=heap),
refs=_type_refs,
both=False, kind=k, type=t)
@@ -1366,10 +1320,7 @@ def _typedef_code(t, base=0, refs=None, kind=_kind_static, heap=False):
from array import array # array type
def _array_kwds(obj):
- if hasattr(obj, 'itemsize'):
- v = 'itemsize'
- else:
- v = _Not_vari
+ v = 'itemsize' if hasattr(obj, 'itemsize') else _Not_vari
# since item size varies by the array data type, set
# itemsize to 1 byte and use _len_array in bytes; note,
# function itemsize returns the actual size in bytes
@@ -1654,7 +1605,7 @@ def _isnumpy(unused): # PYCHOK expected
del i, s, t
-def _typedef(obj, derive=False, frames=False, infer=False): # MCCABE 25
+def _typedef(obj, derive=False, frames=False, infer=False): # MCCABE 25
'''Create a new typedef for an object.
'''
t = type(obj)
@@ -1719,8 +1670,7 @@ def _typedef(obj, derive=False, frames=False, infer=False): # MCCABE 25
v.set(kind=_kind_ignored)
else: # assume an instance of some class
if derive:
- p = _derive_typedef(t)
- if p: # duplicate parent
+ if p := _derive_typedef(t):
v.dup(other=p, kind=_kind_derived)
return v
if _issubclass(t, Exception):
@@ -1746,13 +1696,13 @@ class _Prof:
weak = False # objref is weakref(obj)
def __cmp__(self, other):
- if self.total < other.total:
- return -1
- elif self.total > other.total:
- return +1
- elif self.number < other.number:
+ if (
+ self.total < other.total
+ or self.total <= other.total
+ and self.number < other.number
+ ):
return -1
- elif self.number > other.number:
+ elif self.total > other.total or self.number > other.number:
return +1
return 0
@@ -1818,14 +1768,8 @@ def format(self, clip=0, id2x={}):
o = self.objref
if self.weak:
o = o()
- if self.deep > 0:
- d = ' (at %s)' % (self.deep,)
- else:
- d = ''
- if self.pid:
- p = ', pix %s' % (id2x.get(self.pid, '?'),)
- else:
- p = ''
+ d = f' (at {self.deep})' if self.deep > 0 else ''
+ p = f", pix {id2x.get(self.pid, '?')}" if self.pid else ''
return '%s: %s%s, ix %d%s%s' % (_prepr(self.key, clip=clip),
_repr(o, clip=clip), _lengstr(o), id2x[self.id], d, p)
@@ -1886,18 +1830,22 @@ def format(self, format='%(name)s size=%(size)d flat=%(flat)d',
if depth and self.refs:
rs = sorted(self.refs, key=lambda x: getattr(x, order_by),
reverse=order_by in ('size', 'flat'))
- rs = [r.format(format=format, depth=depth-1, order_by=order_by,
- indent=indent+' ') for r in rs]
+ rs = [
+ r.format(
+ format=format,
+ depth=depth - 1,
+ order_by=order_by,
+ indent=f'{indent} ',
+ )
+ for r in rs
+ ]
t = '\n'.join([t] + rs)
return t
def get(self, name, dflt=None):
'''Return the named referent (or *dflt* if not found).
'''
- for ref in self.refs:
- if name == ref.name:
- return ref
- return dflt
+ return next((ref for ref in self.refs if name == ref.name), dflt)
class Asizer:
@@ -2180,7 +2128,7 @@ def detail(self):
def duplicate(self):
'''Get the number of duplicate objects seen so far (int).
'''
- return sum(1 for v in _values(self._seen) if v > 1) # == len
+ return sum(v > 1 for v in _values(self._seen))
def exclude_objs(self, *objs):
'''Exclude the specified objects from sizing, profiling and ranking.
@@ -2226,7 +2174,7 @@ def frames(self):
def ignored(self):
'''Ignore certain types (bool).
'''
- return True if self._ign_d else False
+ return bool(self._ign_d)
@property
def infer(self):
@@ -2263,7 +2211,7 @@ def print_largest(self, w=0, cutoff=0, **print3options):
if n > 0 and s > 0:
self._printf('%s%*d largest object%s (of %d over %d bytes%s)', linesep,
w, n, _plural(n), self._ranked, s, _SI(s), **print3options)
- id2x = dict((r.id, i) for i, r in enumerate(self._ranks))
+ id2x = {r.id: i for i, r in enumerate(self._ranks)}
for r in self._ranks[:n]:
s, t = r.size, r.format(self._clip_, id2x)
self._printf('%*d bytes%s: %s', w, s, _SI(s), t, **print3options)
@@ -2402,17 +2350,16 @@ def print_typedefs(self, w=0, **print3options):
*print3options* -- some keyword arguments, like Python 3+ print
'''
for k in _all_kinds:
- # XXX Python 3+ doesn't sort type objects
- t = [(self._prepr(a), v) for a, v in _items(_typedefs)
- if v.kind == k and (v.both or self._code_)]
- if t:
+ if t := [
+ (self._prepr(a), v)
+ for a, v in _items(_typedefs)
+ if v.kind == k and (v.both or self._code_)
+ ]:
self._printf('%s%*d %s type%s: basicsize, itemsize, _len_(), _refs()',
linesep, w, len(t), k, _plural(len(t)), **print3options)
for a, v in sorted(t):
self._printf('%*s %s: %s', w, '', a, v, **print3options)
- # dict and dict-like classes
- t = sum(len(v) for v in _values(_dict_classes))
- if t:
+ if t := sum(len(v) for v in _values(_dict_classes)):
self._printf('%s%*d dict/-like classes:', linesep, w, t, **print3options)
for m, v in _items(_dict_classes):
self._printf('%*s %s: %s', w, '', m, self._prepr(v), **print3options)
@@ -2472,10 +2419,7 @@ def reset(self, above=1024, align=8, clip=80, code=False, # PYCHOK too many arg
self._limit_ = limit
self._stats_ = stats
self._stream = stream
- if ignored:
- self._ign_d = _kind_ignored
- else:
- self._ign_d = None
+ self._ign_d = _kind_ignored if ignored else None
# clear state
self._clear()
self.set(align=align, code=code, cutoff=cutoff, stats=stats)
@@ -2544,7 +2488,7 @@ def set(self, above=None, align=None, code=None, cutoff=None,
def sized(self):
'''Get the number objects sized so far (int).
'''
- return sum(1 for v in _values(self._seen) if v > 0)
+ return sum(v > 0 for v in _values(self._seen))
@property
def stats(self):
@@ -2853,12 +2797,12 @@ def named_refs(obj, **opts):
v = _typedefof(obj, **opts)
if v:
v = v.refs
- if v and _callable(v):
- for r in v(obj, True):
- try:
- rs.append((r.name, r.ref))
- except AttributeError:
- pass
+ if v and _callable(v):
+ for r in v(obj, True):
+ try:
+ rs.append((r.name, r.ref))
+ except AttributeError:
+ pass
return rs
@@ -2870,8 +2814,8 @@ def refs(obj, **opts):
v = _typedefof(obj, **opts)
if v:
v = v.refs
- if v and _callable(v):
- v = v(obj, False)
+ if v and _callable(v):
+ v = v(obj, False)
return v
@@ -2879,9 +2823,9 @@ def refs(obj, **opts):
if '-v' in sys.argv:
import platform
- print('%s %s (Python %s %s)' % (__file__, __version__,
- sys.version.split()[0],
- platform.architecture()[0]))
+ print(
+ f'{__file__} {__version__} (Python {sys.version.split()[0]} {platform.architecture()[0]})'
+ )
elif '-types' in sys.argv: # print static _typedefs
n = len(_typedefs)
diff --git a/asv/extern/minify_json.py b/asv/extern/minify_json.py
index 675660935..0813e426b 100644
--- a/asv/extern/minify_json.py
+++ b/asv/extern/minify_json.py
@@ -48,9 +48,9 @@ def json_minify(string, strip_space=True):
in_multi = True
elif val == '//':
in_single = True
- elif val == '*/' and in_multi and not (in_string or in_single):
+ elif val == '*/' and in_multi and not in_string and not in_single:
in_multi = False
- elif val in '\r\n' and not (in_multi or in_string) and in_single:
+ elif val in '\r\n' and not (in_multi or in_string):
in_single = False
elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):
new_str.append(val)
diff --git a/asv/feed.py b/asv/feed.py
index c9de8ef1a..4eef755fa 100644
--- a/asv/feed.py
+++ b/asv/feed.py
@@ -48,7 +48,7 @@ def __init__(self, title, updated, link=None, content=None, id_context=None, id_
self.id_date = id_date
def get_atom(self, id_prefix, language):
- item = etree.Element(ATOM_NS + 'entry')
+ item = etree.Element(f'{ATOM_NS}entry')
id_context = ["entry"]
if self.id_context is None:
@@ -56,34 +56,30 @@ def get_atom(self, id_prefix, language):
else:
id_context += list(self.id_context)
- if self.id_date is None:
- id_date = self.updated
- else:
- id_date = self.id_date
-
- el = etree.Element(ATOM_NS + 'id')
+ id_date = self.updated if self.id_date is None else self.id_date
+ el = etree.Element(f'{ATOM_NS}id')
el.text = _get_id(id_prefix, id_date, id_context)
item.append(el)
- el = etree.Element(ATOM_NS + 'title')
- el.attrib[XML_NS + 'lang'] = language
+ el = etree.Element(f'{ATOM_NS}title')
+ el.attrib[f'{XML_NS}lang'] = language
el.text = self.title
item.append(el)
- el = etree.Element(ATOM_NS + 'updated')
+ el = etree.Element(f'{ATOM_NS}updated')
el.text = self.updated.strftime('%Y-%m-%dT%H:%M:%SZ')
item.append(el)
if self.link:
- el = etree.Element(ATOM_NS + 'link')
- el.attrib[ATOM_NS + 'href'] = self.link
+ el = etree.Element(f'{ATOM_NS}link')
+ el.attrib[f'{ATOM_NS}href'] = self.link
item.append(el)
- el = etree.Element(ATOM_NS + 'content')
- el.attrib[XML_NS + 'lang'] = language
+ el = etree.Element(f'{ATOM_NS}content')
+ el.attrib[f'{XML_NS}lang'] = language
if self.content:
el.text = self.content
- el.attrib[ATOM_NS + 'type'] = 'html'
+ el.attrib[f'{ATOM_NS}type'] = 'html'
else:
el.text = ' '
item.append(el)
@@ -123,35 +119,35 @@ def write_atom(dest, entries, author, title, address, updated=None, link=None,
else:
updated = datetime.datetime.utcnow()
- root = etree.Element(ATOM_NS + 'feed')
+ root = etree.Element(f'{ATOM_NS}feed')
# id (obligatory)
- el = etree.Element(ATOM_NS + 'id')
+ el = etree.Element(f'{ATOM_NS}id')
el.text = _get_id(address, None, ["feed", author, title])
root.append(el)
# author (obligatory)
- el = etree.Element(ATOM_NS + 'author')
- el2 = etree.Element(ATOM_NS + 'name')
+ el = etree.Element(f'{ATOM_NS}author')
+ el2 = etree.Element(f'{ATOM_NS}name')
el2.text = author
el.append(el2)
root.append(el)
# title (obligatory)
- el = etree.Element(ATOM_NS + 'title')
- el.attrib[XML_NS + 'lang'] = language
+ el = etree.Element(f'{ATOM_NS}title')
+ el.attrib[f'{XML_NS}lang'] = language
el.text = title
root.append(el)
# updated (obligatory)
- el = etree.Element(ATOM_NS + 'updated')
+ el = etree.Element(f'{ATOM_NS}updated')
el.text = updated.strftime('%Y-%m-%dT%H:%M:%SZ')
root.append(el)
# link
if link is not None:
- el = etree.Element(ATOM_NS + 'link')
- el.attrib[ATOM_NS + 'href'] = link
+ el = etree.Element(f'{ATOM_NS}link')
+ el.attrib[f'{ATOM_NS}href'] = link
root.append(el)
# entries
diff --git a/asv/graph.py b/asv/graph.py
index a204cf2eb..113250c12 100644
--- a/asv/graph.py
+++ b/asv/graph.py
@@ -176,10 +176,11 @@ def get_data(self):
self.n_series = 1
def mean_axis0(v):
- if not v:
- return [None] * self.n_series
- return [mean_na(x[j] for x in v)
- for j in range(self.n_series)]
+ return (
+ [mean_na(x[j] for x in v) for j in range(self.n_series)]
+ if v
+ else [None] * self.n_series
+ )
# Average data over commit log
val = []
@@ -220,7 +221,7 @@ def save(self, html_dir):
html_dir : str
The root of the HTML tree.
"""
- filename = os.path.join(html_dir, self.path + ".json")
+ filename = os.path.join(html_dir, f"{self.path}.json")
# Drop weights
val = [v[:2] for v in self.get_data()]
@@ -283,10 +284,7 @@ def get_steps(self):
if not isinstance(item, list):
self._steps[j] = item.get()
- if self.scalar_series:
- return self._steps[0]
- else:
- return self._steps
+ return self._steps[0] if self.scalar_series else self._steps
def _compute_graph_steps(data, reraise=True):
@@ -296,12 +294,10 @@ def _compute_graph_steps(data, reraise=True):
w = [d[2] for d in data]
steps = step_detect.detect_steps(y, w)
- new_steps = []
-
- for left, right, cur_val, cur_min, cur_err in steps:
- new_steps.append((x[left], x[right - 1] + 1, cur_val, cur_min, cur_err))
-
- return new_steps
+ return [
+ (x[left], x[right - 1] + 1, cur_val, cur_min, cur_err)
+ for left, right, cur_val, cur_min, cur_err in steps
+ ]
except BaseException as exc:
if reraise:
raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc())
@@ -422,7 +418,7 @@ def _combine_graph_data(graphs):
x_idx = dict(zip(x, range(len(x))))
# Get y-values
- ys = [[None] * len(x_idx) for j in range(n_series)]
+ ys = [[None] * len(x_idx) for _ in range(n_series)]
pos = 0
for dataset, graph in zip(datasets, graphs):
for k, v, dv in dataset:
diff --git a/asv/machine.py b/asv/machine.py
index f18812ef5..1d25e9ee0 100644
--- a/asv/machine.py
+++ b/asv/machine.py
@@ -17,8 +17,7 @@ def iter_machine_files(results_dir):
for root, dirs, files in os.walk(results_dir):
for filename in files:
if filename == 'machine.json':
- path = os.path.join(root, filename)
- yield path
+ yield os.path.join(root, filename)
def _get_unique_machine_name():
@@ -39,11 +38,7 @@ def get_machine_file_path():
@classmethod
def load(cls, machine_name, _path=None):
- if _path is None:
- path = cls.get_machine_file_path()
- else:
- path = _path
-
+ path = cls.get_machine_file_path() if _path is None else _path
d = {}
if os.path.isfile(path):
d = util.load_json(path, cls.api_version)
@@ -58,23 +53,14 @@ def load(cls, machine_name, _path=None):
@classmethod
def save(cls, machine_name, machine_info, _path=None):
- if _path is None:
- path = cls.get_machine_file_path()
- else:
- path = _path
- if os.path.isfile(path):
- d = util.load_json(path)
- else:
- d = {}
+ path = cls.get_machine_file_path() if _path is None else _path
+ d = util.load_json(path) if os.path.isfile(path) else {}
d[machine_name] = machine_info
util.write_json(path, d, cls.api_version)
@classmethod
def update(cls, _path=None):
- if _path is None:
- path = cls.get_machine_file_path()
- else:
- path = _path
+ path = cls.get_machine_file_path() if _path is None else _path
if os.path.isfile(path):
util.update_json(cls, path, cls.api_version)
@@ -128,9 +114,7 @@ class Machine:
@classmethod
def get_unique_machine_name(cls):
- if cls.hardcoded_machine_name:
- return cls.hardcoded_machine_name
- return _get_unique_machine_name()
+ return cls.hardcoded_machine_name or _get_unique_machine_name()
@staticmethod
def get_defaults():
@@ -189,9 +173,9 @@ def load(cls, interactive=False, force_interactive=False, _path=None,
except util.UserError as e:
console.log.error(str(e) + '\n')
d = {}
- d.update(kwargs)
+ d |= kwargs
if (not len(d) and interactive) or force_interactive:
- d.update(self.generate_machine_file(use_defaults=use_defaults))
+ d |= self.generate_machine_file(use_defaults=use_defaults)
machine_name = d['machine']
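
Note: the hunks above replace `d.update(kwargs)` with the in-place dict union `d |= kwargs`. A minimal sketch of the difference, with illustrative names that are not asv code: the `|=` operator was added in Python 3.9 (PEP 584), whereas `.update()` works on every Python 3 version; both mutate the left-hand dict and let the right-hand keys win.

    # illustrative sketch, not asv code
    defaults = {"machine": "cheetah", "arch": "x86_64"}
    overrides = {"arch": "aarch64"}

    defaults |= overrides           # requires Python >= 3.9
    # defaults.update(overrides)    # equivalent spelling on older Pythons

    assert defaults == {"machine": "cheetah", "arch": "aarch64"}
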
diff --git a/asv/plugin_manager.py b/asv/plugin_manager.py
index a375e71e1..927f72d23 100644
--- a/asv/plugin_manager.py
+++ b/asv/plugin_manager.py
@@ -20,7 +20,7 @@ def __init__(self):
self._plugins = []
def load_plugins(self, package):
- prefix = package.__name__ + '.'
+ prefix = f'{package.__name__}.'
for module_finder, name, ispkg in pkgutil.iter_modules(package.__path__, prefix):
__import__(name)
mod = sys.modules[name]
diff --git a/asv/plugins/conda.py b/asv/plugins/conda.py
index 9bec76f68..fd1faf28b 100644
--- a/asv/plugins/conda.py
+++ b/asv/plugins/conda.py
@@ -52,11 +52,11 @@ def _find_conda():
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
"""
- if 'CONDA_EXE' in os.environ:
- conda = os.environ['CONDA_EXE']
- else:
- conda = util.which('conda')
- return conda
+ return (
+ os.environ['CONDA_EXE']
+ if 'CONDA_EXE' in os.environ
+ else util.which('conda')
+ )
class Conda(environment.Environment):
@@ -136,7 +136,7 @@ def _setup(self):
conda_args, pip_args = self._get_requirements()
env = dict(os.environ)
- env.update(self.build_env_vars)
+ env |= self.build_env_vars
if not self._conda_environment_file:
# The user-provided env file is assumed to set the python version
@@ -175,38 +175,36 @@ def _setup(self):
env=env)
except Exception:
if env_file_name != env_file.name:
- log.info("conda env create/update failed: "
- "in {} with file {}".format(self._path, env_file_name))
+ log.info(
+ f"conda env create/update failed: in {self._path} with file {env_file_name}"
+ )
elif os.path.isfile(env_file_name):
with open(env_file_name, 'r') as f:
text = f.read()
- log.info("conda env create/update failed: "
- "in {} with:\n{}".format(self._path, text))
+ log.info(f"conda env create/update failed: in {self._path} with:\n{text}")
raise
finally:
os.unlink(env_file.name)
def _get_requirements(self):
- if self._requirements:
- # retrieve and return all conda / pip dependencies
- conda_args = []
- pip_args = []
-
- for key, val in self._requirements.items():
- if key.startswith('pip+'):
- if val:
- pip_args.append("{0}=={1}".format(key[4:], val))
- else:
- pip_args.append(key[4:])
+ if not self._requirements:
+ return [], []
+ # retrieve and return all conda / pip dependencies
+ conda_args = []
+ pip_args = []
+
+ for key, val in self._requirements.items():
+ if key.startswith('pip+'):
+ if val:
+ pip_args.append("{0}=={1}".format(key[4:], val))
else:
- if val:
- conda_args.append("{0}={1}".format(key, val))
- else:
- conda_args.append(key)
+ pip_args.append(key[4:])
+ elif val:
+ conda_args.append("{0}={1}".format(key, val))
+ else:
+ conda_args.append(key)
- return conda_args, pip_args
- else:
- return [], []
+ return conda_args, pip_args
def _run_conda(self, args, env=None):
"""
@@ -233,8 +231,7 @@ def run_executable(self, executable, args, **kwargs):
lock = _dummy_lock
# Conda doesn't guarantee that user site directories are excluded
- kwargs["env"] = dict(kwargs.pop("env", os.environ),
- PYTHONNOUSERSITE=str("True"))
+ kwargs["env"] = dict(kwargs.pop("env", os.environ), PYTHONNOUSERSITE="True")
with lock():
return super(Conda, self).run_executable(executable, args, **kwargs)
diff --git a/asv/plugins/git.py b/asv/plugins/git.py
index 944b56093..9834abf75 100644
--- a/asv/plugins/git.py
+++ b/asv/plugins/git.py
@@ -51,10 +51,7 @@ def url_match(cls, url):
return True
# Check for a local path
- if cls.is_local_repo(url):
- return True
-
- return False
+ return bool(cls.is_local_repo(url))
def _run_git(self, args, cwd=True, **kwargs):
if cwd is True:
@@ -138,7 +135,7 @@ def get_hash_from_name(self, name):
raise
def get_hash_from_parent(self, name):
- return self.get_hash_from_name(name + '^')
+ return self.get_hash_from_name(f'{name}^')
def get_name_from_hash(self, commit):
try:
@@ -152,18 +149,20 @@ def get_name_from_hash(self, commit):
# Failed to obtain.
return None
- # Return tags without prefix
- for prefix in ['tags/']:
- if name.startswith(prefix):
- return name[len(prefix):]
-
- return name
+ return next(
+ (
+ name[len(prefix) :]
+ for prefix in ['tags/']
+ if name.startswith(prefix)
+ ),
+ name,
+ )
def get_tags(self):
- tags = {}
- for tag in self._run_git(["tag", "-l"]).splitlines():
- tags[tag] = self._run_git(["rev-list", "-n", "1", tag]).strip()
- return tags
+ return {
+ tag: self._run_git(["rev-list", "-n", "1", tag]).strip()
+ for tag in self._run_git(["tag", "-l"]).splitlines()
+ }
def get_date_from_name(self, name):
return self.get_date(name + "^{commit}")
@@ -172,10 +171,17 @@ def get_branch_commits(self, branch):
return self.get_hashes_from_range(self.get_branch_name(branch))
def get_revisions(self, commits):
- revisions = {}
- for i, commit in enumerate(self._run_git([
- "rev-list", "--all", "--date-order", "--reverse",
- ]).splitlines()):
- if commit in commits:
- revisions[commit] = i
- return revisions
+ return {
+ commit: i
+ for i, commit in enumerate(
+ self._run_git(
+ [
+ "rev-list",
+ "--all",
+ "--date-order",
+ "--reverse",
+ ]
+ ).splitlines()
+ )
+ if commit in commits
+ }
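
The `get_name_from_hash` hunk above relies on `next()` with a default to strip a known prefix. A minimal sketch of that idiom, using illustrative values rather than asv code: the generator yields the stripped name only when a prefix matches, and `next()` falls back to the original string otherwise.

    # illustrative sketch, not asv code
    def strip_known_prefix(name, prefixes=("tags/", "heads/")):
        return next(
            (name[len(prefix):] for prefix in prefixes if name.startswith(prefix)),
            name,  # default returned when no prefix matches
        )

    assert strip_known_prefix("tags/v1.2") == "v1.2"
    assert strip_known_prefix("main") == "main"
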
diff --git a/asv/plugins/mercurial.py b/asv/plugins/mercurial.py
index b2ccc15d5..b4e1e1c47 100644
--- a/asv/plugins/mercurial.py
+++ b/asv/plugins/mercurial.py
@@ -86,10 +86,7 @@ def url_match(cls, url):
return True
# Check for a local path
- if cls.is_local_repo(url):
- return True
-
- return False
+ return bool(cls.is_local_repo(url))
def get_range_spec(self, commit_a, commit_b):
return '{0}::{1} and not {0}'.format(commit_a, commit_b)
@@ -159,19 +156,16 @@ def get_name_from_hash(self, commit):
return None
def get_tags(self):
- tags = {}
- for item in self._repo.log(b"tag()"):
- tags[self._decode(item.tags)] = self._decode(item.node)
- return tags
+ return {
+ self._decode(item.tags): self._decode(item.node)
+ for item in self._repo.log(b"tag()")
+ }
def get_date_from_name(self, name):
return self.get_date(name)
def get_branch_commits(self, branch):
- if self._repo.version >= (4, 5):
- query = "branch({0})"
- else:
- query = "ancestors({0})"
+ query = "branch({0})" if self._repo.version >= (4, 5) else "ancestors({0})"
return self.get_hashes_from_range(query.format(self.get_branch_name(branch)),
followfirst=True)
diff --git a/asv/plugins/regressions.py b/asv/plugins/regressions.py
index 00ac61fd9..e631b4691 100644
--- a/asv/plugins/regressions.py
+++ b/asv/plugins/regressions.py
@@ -24,7 +24,7 @@ def publish(cls, conf, repo, benchmarks, graphs, revisions):
# it's easier to work with than the results directly
regressions = []
- revision_to_hash = dict((r, h) for h, r in revisions.items())
+ revision_to_hash = {r: h for h, r in revisions.items()}
data_filter = _GraphDataFilter(conf, repo, revisions)
@@ -59,12 +59,12 @@ def _process_regression(cls, regressions, revision_to_hash, repo,
return
# Select unique graph params
- graph_params = {}
- for name, value in graph.params.items():
- if len(all_params[name]) > 1:
- graph_params[name] = value
-
- graph_path = graph.path + '.json'
+ graph_params = {
+ name: value
+ for name, value in graph.params.items()
+ if len(all_params[name]) > 1
+ }
+ graph_path = f'{graph.path}.json'
# Check which ranges are a single commit
for k, jump in enumerate(jumps):
@@ -120,11 +120,7 @@ def _save_feed(cls, conf, benchmarks, data, revisions, revision_to_hash):
entries = []
for name, graph_path, graph_params, idx, last_value, best_value, jumps in data:
- if '(' in name:
- benchmark_name = name[:name.index('(')]
- else:
- benchmark_name = name
-
+ benchmark_name = name[:name.index('(')] if '(' in name else name
benchmark = benchmarks[benchmark_name]
if idx is not None:
@@ -134,7 +130,7 @@ def _save_feed(cls, conf, benchmarks, data, revisions, revision_to_hash):
param_values, = itertools.islice(itertools.product(*benchmark['params']),
idx, idx + 1)
for k, v in zip(benchmark['param_names'], param_values):
- graph_params['p-' + k] = v
+ graph_params[f'p-{k}'] = v
for rev1, rev2, value1, value2 in jumps:
timestamps = (run_timestamps[benchmark_name, t]
@@ -171,8 +167,7 @@ def _save_feed(cls, conf, benchmarks, data, revisions, revision_to_hash):
commit_a = revision_to_hash[rev1]
commit_b = revision_to_hash[rev2]
if 'github.com' in conf.show_commit_url:
- commit_url = (conf.show_commit_url + '../compare/' +
- commit_a + "..." + commit_b)
+ commit_url = f'{conf.show_commit_url}../compare/{commit_a}...{commit_b}'
else:
commit_url = conf.show_commit_url + commit_a
commit_ref = 'in commits <a href="{0}">{1}...{2}</a>'.format(commit_url,
diff --git a/asv/plugins/summarylist.py b/asv/plugins/summarylist.py
index ad2b33bf6..61bcbada9 100644
--- a/asv/plugins/summarylist.py
+++ b/asv/plugins/summarylist.py
@@ -23,8 +23,7 @@ def benchmark_param_iter(benchmark):
if not benchmark['params']:
yield None, ()
else:
- for item in enumerate(itertools.product(*benchmark['params'])):
- yield item
+ yield from enumerate(itertools.product(*benchmark['params']))
class SummaryList(OutputPublisher):
@@ -70,10 +69,7 @@ def publish(cls, conf, repo, benchmarks, graphs, revisions):
last_rev = None
prev_value = None
- if not steps:
- # No data
- pass
- else:
+ if steps:
last_piece = steps[-1]
last_value = last_piece[2]
last_err = last_piece[4]
diff --git a/asv/plugins/virtualenv.py b/asv/plugins/virtualenv.py
index ca15ae591..7d061225f 100644
--- a/asv/plugins/virtualenv.py
+++ b/asv/plugins/virtualenv.py
@@ -59,10 +59,7 @@ def _find_python(python):
# Parse python specifier
if is_pypy:
executable = python
- if python == 'pypy':
- python_version = '2'
- else:
- python_version = python[4:]
+ python_version = '2' if python == 'pypy' else python[4:]
else:
python_version = python
executable = f"python{python_version}"
@@ -98,7 +95,7 @@ def name(self):
self._tagged_env_vars)
@classmethod
- def matches(self, python):
+ def matches(cls, python):
if not (re.match(r'^[0-9].*$', python) or re.match(r'^pypy[0-9.]*$', python)):
# The python name should be a version number, or pypy+number
return False
@@ -126,7 +123,7 @@ def _setup(self):
it using `pip install`.
"""
env = dict(os.environ)
- env.update(self.build_env_vars)
+ env |= self.build_env_vars
log.info(f"Creating virtualenv for {self.name}")
util.check_call([
@@ -143,7 +140,7 @@ def _install_requirements(self):
pip_args = ['install', '-v', 'wheel', 'pip>=8']
env = dict(os.environ)
- env.update(self.build_env_vars)
+ env |= self.build_env_vars
self._run_pip(pip_args, env=env)
diff --git a/asv/repo.py b/asv/repo.py
index 73a0d61c7..be7f02613 100644
--- a/asv/repo.py
+++ b/asv/repo.py
@@ -79,10 +79,7 @@ def get_branch_name(self, branch=None):
Returns the given branch name or the default branch name if branch is
None or not specified.
"""
- if branch is None:
- return self._default_branch
- else:
- return branch
+ return self._default_branch if branch is None else branch
def get_range_spec(self, commit_a, commit_b):
"""
@@ -203,9 +200,10 @@ def filter_date_period(self, commits, period, old_commits=None):
old_commits = set(old_commits)
- items = []
- for commit in set(commits).union(old_commits):
- items.append((self.get_date(commit), commit))
+ items = [
+ (self.get_date(commit), commit)
+ for commit in set(commits).union(old_commits)
+ ]
items.sort()
# JS date
diff --git a/asv/results.py b/asv/results.py
index bab36c610..c2a3f1980 100644
--- a/asv/results.py
+++ b/asv/results.py
@@ -17,9 +17,7 @@ def iter_results_paths(results):
"""
Iterate over all of the result file paths.
"""
- skip_files = set([
- 'machine.json', 'benchmarks.json'
- ])
+ skip_files = {'machine.json', 'benchmarks.json'}
for root, dirs, files in os.walk(results):
# Iterate over files only if machine.json is valid json
machine_json = os.path.join(root, "machine.json")
@@ -97,8 +95,7 @@ def get_existing_hashes(results):
Get a list of the commit hashes that have already been tested.
"""
log.info("Getting existing hashes")
- hashes = list(set(iter_existing_hashes(results)))
- return hashes
+ return list(set(iter_existing_hashes(results)))
def get_result_hash_from_prefix(results, machine_name, commit_prefix):
@@ -157,19 +154,12 @@ def _compatible_results(result, result_params, params):
"""
if result is None:
# All results missing, eg. build failure
- return [None for param in itertools.product(*params)]
+ return [None for _ in itertools.product(*params)]
# Pick results for those parameters that also appear in the
# current benchmark
- old_results = {}
- for param, value in zip(itertools.product(*result_params), result):
- old_results[param] = value
-
- new_results = []
- for param in itertools.product(*params):
- new_results.append(old_results.get(param))
-
- return new_results
+ old_results = dict(zip(itertools.product(*result_params), result))
+ return [old_results.get(param) for param in itertools.product(*params)]
class Results:
@@ -478,10 +468,11 @@ def add_result(self, benchmark, result,
if old_samples[j] is not None and new_samples[j] is not None:
new_samples[j] = old_samples[j] + new_samples[j]
- # Retain old result where requested
- merge_idx = [j for j in range(len(new_result))
- if selected_idx is not None and j not in selected_idx]
- if merge_idx:
+ if merge_idx := [
+ j
+ for j in range(len(new_result))
+ if selected_idx is not None and j not in selected_idx
+ ]:
old_result = self.get_result_value(benchmark_name, benchmark['params'])
old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
old_stats = self.get_result_stats(benchmark_name, benchmark['params'])
@@ -517,7 +508,7 @@ def add_result(self, benchmark, result,
self._stats[benchmark_name] = new_stats
self._samples[benchmark_name] = new_samples
- self._benchmark_params[benchmark_name] = benchmark['params'] if benchmark['params'] else []
+ self._benchmark_params[benchmark_name] = benchmark['params'] or []
self._started_at[benchmark_name] = util.datetime_to_js_timestamp(started_at)
if duration is None:
self._duration.pop(benchmark_name, None)
@@ -602,9 +593,12 @@ def save(self, result_dir):
value = [x.get(key[6:]) if x is not None else None
for x in z]
- if key != 'params':
- if isinstance(value, list) and all(x is None for x in value):
- value = None
+ if (
+ key != 'params'
+ and isinstance(value, list)
+ and all(x is None for x in value)
+ ):
+ value = None
if key.startswith('stats_') or key == 'duration':
value = util.truncate_float_list(value)
@@ -616,11 +610,11 @@ def save(self, result_dir):
results[name] = row
- other_durations = {}
- for key, value in self._duration.items():
- if key.startswith('<'):
- other_durations[key] = value
-
+ other_durations = {
+ key: value
+ for key, value in self._duration.items()
+ if key.startswith('<')
+ }
data = {
'commit_hash': self._commit_hash,
'env_name': self._env_name,
@@ -756,15 +750,17 @@ def update_to_2(cls, d):
Reformat data in api_version 1 format to version 2.
"""
try:
- d2 = {}
-
- d2['commit_hash'] = d['commit_hash']
- d2['date'] = d['date']
- d2['env_name'] = d.get('env_name',
- environment.get_env_name('',
- d['python'],
- d['requirements'],
- {}))
+ d2 = {
+ 'commit_hash': d['commit_hash'],
+ 'date': d['date'],
+ 'env_name': d.get(
+ 'env_name',
+ environment.get_env_name(
+ '', d['python'], d['requirements'], {}
+ ),
+ ),
+ }
+
d2['params'] = d['params']
d2['python'] = d['python']
d2['requirements'] = d['requirements']
@@ -799,11 +795,7 @@ def update_to_2(cls, d):
stats[key] = value['stats']
benchmark_params[key] = value['params']
- if 'profiles' in d:
- profiles = d['profiles']
- else:
- profiles = {}
-
+ profiles = d['profiles'] if 'profiles' in d else {}
started_at = d.get('started_at', {})
duration = d.get('duration', {})
benchmark_version = d.get('benchmark_version', {})
@@ -850,9 +842,12 @@ def update_to_2(cls, d):
if key_name == 'params' and value is None:
value = []
- if key_name != 'params' and isinstance(value, list):
- if all(x is None for x in value):
- value = None
+ if (
+ key_name != 'params'
+ and isinstance(value, list)
+ and all(x is None for x in value)
+ ):
+ value = None
r.append(value)
@@ -896,7 +891,6 @@ def format_benchmark_result(results, benchmark):
result = results.get_result_value(name, benchmark['params'])
stats = results.get_result_stats(name, benchmark['params'])
- total_count = len(result)
failure_count = sum(r is None for r in result)
info = None
@@ -904,6 +898,7 @@ def format_benchmark_result(results, benchmark):
# Display status
if failure_count > 0:
+ total_count = len(result)
if failure_count == total_count:
info = "failed"
else:
@@ -920,20 +915,16 @@ def format_benchmark_result(results, benchmark):
display = _format_benchmark_result(display_result, benchmark)
display = "\n".join(display).strip()
details = display
- else:
- if failure_count == 0:
+ elif failure_count == 0:
# Failure already shown above
- if not result:
- display = "[]"
- else:
- if stats[0]:
- err = statistics.get_err(result[0], stats[0])
- else:
- err = None
- display = util.human_value(result[0], benchmark['unit'], err=err)
- if len(result) > 1:
- display += ";..."
- info = display
+ if result:
+ err = statistics.get_err(result[0], stats[0]) if stats[0] else None
+ display = util.human_value(result[0], benchmark['unit'], err=err)
+ if len(result) > 1:
+ display += ";..."
+ else:
+ display = "[]"
+ info = display
return info, details
@@ -1012,7 +1003,7 @@ def _format_param_value(value_repr):
for regex in regexs:
m = re.match(regex, value_repr)
- if m and m.group(1).strip():
- return m.group(1)
+ if m and m[1].strip():
+ return m[1]
return value_repr
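
Several hunks above (for example `if merge_idx := [...]` in `add_result`) use assignment expressions. A minimal sketch with illustrative data, not asv code: the walrus operator (PEP 572, Python >= 3.8) binds the filtered list and tests its truthiness in a single step, replacing the build-then-check pattern.

    # illustrative sketch, not asv code
    selected_idx = {0, 2}
    new_result = [1.0, 2.0, 3.0, 4.0]

    if merge_idx := [j for j in range(len(new_result)) if j not in selected_idx]:
        # only runs when the list is non-empty; merge_idx stays bound afterwards
        print("indices to merge from the old result:", merge_idx)  # [1, 3]
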
diff --git a/asv/runner.py b/asv/runner.py
index b5c6a7b29..94f3674af 100644
--- a/asv/runner.py
+++ b/asv/runner.py
@@ -146,11 +146,7 @@ def run_benchmarks(benchmarks, env, results=None,
"""
- if extra_params is None:
- extra_params = {}
- else:
- extra_params = dict(extra_params)
-
+ extra_params = {} if extra_params is None else dict(extra_params)
if quick:
extra_params['number'] = 1
extra_params['repeat'] = 1
@@ -216,11 +212,7 @@ def iter_run_items():
failed_benchmarks = set()
failed_setup_cache = {}
- if append_samples:
- previous_result_keys = existing_results
- else:
- previous_result_keys = set()
-
+ previous_result_keys = existing_results if append_samples else set()
benchmark_durations = {}
log.info(f"Benchmarking {env.name}")
@@ -444,7 +436,7 @@ def fail_benchmark(benchmark, stderr='', errcode=1):
if benchmark['params']:
# Mark only selected parameter combinations skipped
params = itertools.product(*benchmark['params'])
- result = [None for idx in params]
+ result = [None for _ in params]
samples = [None] * len(result)
number = [None] * len(result)
else:
@@ -589,11 +581,7 @@ def _run_benchmark_single_param(benchmark, spawner, param_idx,
params_str = json.dumps(extra_params)
- if cwd is None:
- real_cwd = tempfile.mkdtemp()
- else:
- real_cwd = cwd
-
+ real_cwd = tempfile.mkdtemp() if cwd is None else cwd
result_file = tempfile.NamedTemporaryFile(delete=False)
try:
result_file.close()
@@ -640,7 +628,7 @@ def _run_benchmark_single_param(benchmark, spawner, param_idx,
if profile:
with io.open(profile_path, 'rb') as profile_fd:
profile_data = profile_fd.read()
- profile_data = profile_data if profile_data else None
+ profile_data = profile_data or None
else:
profile_data = None
@@ -680,7 +668,7 @@ def create_setup_cache(self, benchmark_id, timeout, params_str):
cache_dir = tempfile.mkdtemp()
env_vars = dict(os.environ)
- env_vars.update(self.env.env_vars)
+ env_vars |= self.env.env_vars
out, _, errcode = self.env.run(
[BENCHMARK_RUN_SCRIPT, 'setup_cache',
@@ -695,14 +683,13 @@ def create_setup_cache(self, benchmark_id, timeout, params_str):
if errcode == 0:
return cache_dir, None
- else:
- util.long_path_rmtree(cache_dir, True)
- out += f'\nasv: setup_cache failed (exit status {errcode})'
- return None, out.strip()
+ util.long_path_rmtree(cache_dir, True)
+ out += f'\nasv: setup_cache failed (exit status {errcode})'
+ return None, out.strip()
def run(self, name, params_str, profile_path, result_file_name, timeout, cwd):
env_vars = dict(os.environ)
- env_vars.update(self.env.env_vars)
+ env_vars |= self.env.env_vars
out, _, errcode = self.env.run(
[BENCHMARK_RUN_SCRIPT, 'run', os.path.abspath(self.benchmark_dir),
@@ -731,7 +718,7 @@ def __init__(self, env, root):
self.socket_name = os.path.join(self.tmp_dir, 'socket')
env_vars = dict(os.environ)
- env_vars.update(env.env_vars)
+ env_vars |= env.env_vars
self.server_proc = env.run(
[BENCHMARK_RUN_SCRIPT, 'run_server', self.benchmark_dir, self.socket_name],
@@ -744,9 +731,9 @@ def __init__(self, env, root):
self.stdout_reader_thread.start()
# Wait for the socket to appear
- while self.stdout_reader_thread.is_alive():
- if os.path.exists(self.socket_name):
- break
+ while self.stdout_reader_thread.is_alive() and not os.path.exists(
+ self.socket_name
+ ):
time.sleep(0.05)
if not os.path.exists(self.socket_name):
@@ -783,10 +770,7 @@ def preimport(self):
except Exception as exc:
success = False
out = "asv: benchmark runner crashed\n"
- if isinstance(exc, util.UserError):
- out += str(exc)
- else:
- out += traceback.format_exc()
+ out += str(exc) if isinstance(exc, util.UserError) else traceback.format_exc()
out = out.rstrip()
return success, out
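
The `profile_data = profile_data or None` change above is safe here because empty bytes are falsy, but the idiom is not a drop-in replacement for an explicit `is not None` check. A minimal illustrative sketch, not asv code:

    # illustrative sketch, not asv code
    for data in (b"profile-bytes", b"", None):
        # `data or None` maps every falsy value (b"", "", 0, None) to None,
        # matching `data if data else None`, NOT `data if data is not None else None`
        print(repr(data), "->", repr(data or None))
    # b'profile-bytes' -> b'profile-bytes'
    # b'' -> None
    # None -> None
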
diff --git a/asv/statistics.py b/asv/statistics.py
index 4cca0c460..a25637a3a 100644
--- a/asv/statistics.py
+++ b/asv/statistics.py
@@ -86,12 +86,7 @@ def get_weight(stats):
a = stats['ci_99_a']
b = stats['ci_99_b']
- if math.isinf(a) or math.isinf(b):
- # Infinite interval is due to too few samples --- consider
- # weight as missing
- return None
-
- return 2 / abs(b - a)
+ return None if math.isinf(a) or math.isinf(b) else 2 / abs(b - a)
except ZeroDivisionError:
return None
@@ -132,10 +127,7 @@ def is_different(samples_a, samples_b, stats_a, stats_b, p_threshold=0.002):
ci_a = (stats_a['ci_99_a'], stats_a['ci_99_b'])
ci_b = (stats_b['ci_99_a'], stats_b['ci_99_b'])
- if ci_a[1] >= ci_b[0] and ci_a[0] <= ci_b[1]:
- return False
-
- return True
+ return ci_a[1] < ci_b[0] or ci_a[0] > ci_b[1]
def quantile_ci(x, q, alpha_min=0.01):
@@ -227,12 +219,7 @@ def quantile(x, q):
j = int(math.floor(z))
z -= j
- if j == n - 1:
- m = y[-1]
- else:
- m = (1 - z) * y[j] + z * y[j + 1]
-
- return m
+ return y[-1] if j == n - 1 else (1 - z) * y[j] + z * y[j + 1]
_mann_whitney_u_memo = {}
@@ -274,11 +261,7 @@ def mann_whitney_u(x, y, method='auto'):
n = len(y)
if method == 'auto':
- if max(m, n) > 20:
- method = 'normal'
- else:
- method = 'exact'
-
+ method = 'normal' if max(m, n) > 20 else 'exact'
u, ties = mann_whitney_u_u(x, y)
# Conservative tie breaking
@@ -326,10 +309,7 @@ def mann_whitney_u_u(x, y):
def mann_whitney_u_cdf(m, n, u, memo=None):
if memo is None:
memo = {}
- cdf = 0
- for uu in range(u + 1):
- cdf += mann_whitney_u_pmf(m, n, uu, memo)
- return cdf
+ return sum(mann_whitney_u_pmf(m, n, uu, memo) for uu in range(u + 1))
def mann_whitney_u_pmf(m, n, u, memo=None):
@@ -449,11 +429,7 @@ def __init__(self, y, nu=None):
if len(y) == 0:
raise ValueError("empty input")
- if nu is None:
- self.nu = len(y) - 1
- else:
- self.nu = nu
-
+ self.nu = len(y) - 1 if nu is None else nu
# Sort input
y = sorted(y)
@@ -470,7 +446,7 @@ def __init__(self, y, nu=None):
if self._y_scale != 0:
self.y = [(yp - self.mle) / self._y_scale for yp in y]
else:
- self.y = [0 for yp in y]
+ self.y = [0 for _ in y]
self._cdf_norm = None
self._cdf_memo = {}
@@ -485,13 +461,7 @@ def _cdf_unnorm(self, beta):
if beta != beta:
return beta
- for k, y in enumerate(self.y):
- if y > beta:
- k0 = k
- break
- else:
- k0 = len(self.y)
-
+ k0 = next((k for k, y in enumerate(self.y) if y > beta), len(self.y))
cdf = 0
nu = self.nu
@@ -509,16 +479,8 @@ def _cdf_unnorm(self, beta):
c = 2 * k - len(self.y)
y = sum(self.y[k:]) - sum(self.y[:k])
- if k == 0:
- a = -math.inf
- else:
- a = self.y[k - 1]
-
- if k == k0:
- b = beta
- else:
- b = self.y[k]
-
+ a = -math.inf if k == 0 else self.y[k - 1]
+ b = beta if k == k0 else self.y[k]
if c == 0:
term = (b - a) / y**(nu + 1)
else:
@@ -555,19 +517,12 @@ def _ppf_unnorm(self, cdfx):
if k == 0:
z = -nu * c * term
- if z > 0:
- beta = (z**(-1 / nu) - y) / c
- else:
- beta = -math.inf
+ beta = (z**(-1 / nu) - y) / c if z > 0 else -math.inf
elif c == 0:
beta = a + term * y**(nu + 1)
else:
z = (a * c + y)**(-nu) - nu * c * term
- if z > 0:
- beta = (z**(-1 / nu) - y) / c
- else:
- beta = math.inf
-
+ beta = (z**(-1 / nu) - y) / c if z > 0 else math.inf
if k < len(self.y):
beta = min(beta, self.y[k])
diff --git a/asv/step_detect.py b/asv/step_detect.py
index 833606572..b30014302 100644
--- a/asv/step_detect.py
+++ b/asv/step_detect.py
@@ -414,9 +414,7 @@ def detect_steps(y, w=None):
if w is None:
w_filtered = [1] * len(y_filtered)
else:
- # Fill-in and normalize weights
- w_valid = [ww for ww in w if ww is not None and ww == ww]
- if w_valid:
+ if w_valid := [ww for ww in w if ww is not None and ww == ww]:
w_median = median(w_valid)
if w_median == 0:
w_median = 1.0
@@ -774,11 +772,7 @@ def solve_potts_approx(y, w, gamma=None, min_size=1, **kw):
dist = mu_dist.dist
gamma = 3 * dist(0, n - 1) * math.log(n) / n
- if min_size < 10:
- max_size = 20
- else:
- max_size = min_size + 50
-
+ max_size = 20 if min_size < 10 else min_size + 50
right, values, dists = solve_potts(y, w, gamma, min_size=min_size, max_size=max_size, **kw)
return merge_pieces(gamma, right, values, dists, mu_dist, max_size=max_size)
@@ -905,10 +899,7 @@ def cleanup_cache(self):
def get_mu_dist(y, w):
- if _rangemedian is not None:
- return _rangemedian.RangeMedian(y, w)
- else:
- return L1Dist(y, w)
+ return L1Dist(y, w) if _rangemedian is None else _rangemedian.RangeMedian(y, w)
def rolling_median_dev(items):
@@ -998,10 +989,7 @@ def golden_search(f, a, b, xatol=1e-6, ftol=1e-8, expand_bounds=False):
f0 = max(abs(f1), abs(f2))
- while True:
- if abs(x0 - x3) < xatol or abs(f1 - f2) < ftol * f0:
- break
-
+ while abs(x0 - x3) >= xatol and abs(f1 - f2) >= ftol * f0:
if f2 < f1:
x0 = x1
x1 = x2
@@ -1015,10 +1003,7 @@ def golden_search(f, a, b, xatol=1e-6, ftol=1e-8, expand_bounds=False):
f2 = f1
f1 = f(x1)
- if f2 < f1:
- return x2
- else:
- return x1
+ return x2 if f2 < f1 else x1
def _plot_potts(x, sol):
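
The `golden_search` hunk above folds a `while True: if <stop>: break` loop into a guarded `while`. A minimal sketch of the transformation with an illustrative function, not asv code: by De Morgan, the negation of the stop test `a < xatol or b < ftol` is `a >= xatol and b >= ftol`, which becomes the loop condition.

    # illustrative sketch, not asv code
    def shrink(x, xatol=1e-6):
        # loop continues while the original stop test would have been false
        while abs(x) >= xatol:
            x /= 2.0
        return x

    assert abs(shrink(1.0)) < 1e-6
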
diff --git a/asv/template/benchmarks/benchmarks.py b/asv/template/benchmarks/benchmarks.py
index 6248a9342..01bdaa454 100644
--- a/asv/template/benchmarks/benchmarks.py
+++ b/asv/template/benchmarks/benchmarks.py
@@ -8,16 +8,14 @@ class TimeSuite:
of iterating over dictionaries in Python.
"""
def setup(self):
- self.d = {}
- for x in range(500):
- self.d[x] = None
+ self.d = {x: None for x in range(500)}
def time_keys(self):
- for key in self.d.keys():
+ for _ in self.d.keys():
pass
def time_values(self):
- for value in self.d.values():
+ for _ in self.d.values():
pass
def time_range(self):
diff --git a/asv/util.py b/asv/util.py
index 5b980f9ce..57424f93c 100644
--- a/asv/util.py
+++ b/asv/util.py
@@ -73,7 +73,7 @@ def human_list(input_list):
"""
input_list = ["'{0}'".format(x) for x in input_list]
- if len(input_list) == 0:
+ if not input_list:
return 'nothing'
elif len(input_list) == 1:
return input_list[0]
@@ -94,7 +94,7 @@ def human_float(value, significant=3, truncate_small=None, significant_zeros=Fal
if value == 0:
return "0"
elif math.isinf(value) or math.isnan(value):
- return "{}".format(value)
+ return f"{value}"
elif value < 0:
sign = "-"
value = -value
@@ -112,10 +112,7 @@ def human_float(value, significant=3, truncate_small=None, significant_zeros=Fal
if magnitude <= -5 or magnitude >= 9:
# Too many digits, use scientific notation
fmt = "{{0:.{0}e}}".format(significant)
- elif value == int(value):
- value = int(round(value, num_digits))
- fmt = "{0:d}"
- elif num_digits <= 0:
+ elif value == int(value) or num_digits <= 0:
value = int(round(value, num_digits))
fmt = "{0:d}"
else:
@@ -128,9 +125,12 @@ def human_float(value, significant=3, truncate_small=None, significant_zeros=Fal
if formatted[-1] == '.':
formatted = formatted[:-1]
- if significant_zeros and '.' not in formatted:
- if len(formatted) < significant:
- formatted += "." + "0" * (significant - len(formatted))
+ if (
+ significant_zeros
+ and '.' not in formatted
+ and len(formatted) < significant
+ ):
+ formatted += "." + "0" * (significant - len(formatted))
return formatted
@@ -167,10 +167,7 @@ def human_file_size(size, err=None):
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
- if num_scale > 7:
- suffix = '?'
- else:
- suffix = suffixes[num_scale].strip()
+ suffix = '?' if num_scale > 7 else suffixes[num_scale].strip()
scale = int(math.pow(1000, num_scale))
value = size / scale
@@ -178,9 +175,8 @@ def human_file_size(size, err=None):
if err is None:
return "{0:s}{1}".format(str_value, suffix)
- else:
- str_err = human_float(err / scale, 1, truncate_small=2)
- return "{0:s}±{1:s}{2}".format(str_value, str_err, suffix)
+ str_err = human_float(err / scale, 1, truncate_small=2)
+ return "{0:s}±{1:s}{2}".format(str_value, str_err, suffix)
_human_time_units = (
@@ -244,9 +240,8 @@ def human_time(seconds, err=None):
str_time = human_float(seconds / units[i][1], 3, significant_zeros=True)
if err is None:
return "{0:s}{1}".format(str_time, units[i][0])
- else:
- str_err = human_float(err / units[i][1], 1, truncate_small=2)
- return "{0:s}±{1:s}{2}".format(str_time, str_err, units[i][0])
+ str_err = human_float(err / units[i][1], 1, truncate_small=2)
+ return "{0:s}±{1:s}{2}".format(str_time, str_err, units[i][0])
return '~0'
@@ -298,10 +293,10 @@ def parse_human_time(string, base_period='d'):
suffixes = '|'.join(units.keys())
try:
- m = re.match(r'^\s*([0-9.]+)\s*({})\s*$'.format(suffixes), string)
+ m = re.match(rf'^\s*([0-9.]+)\s*({suffixes})\s*$', string)
if m is None:
raise ValueError()
- return float(m.group(1)) * units[m.group(2)]
+ return float(m[1]) * units[m[2]]
except ValueError:
raise ValueError("%r is not a valid time period (valid units: %s)"
% (string, suffixes))
@@ -339,11 +334,8 @@ def which(filename, paths=None):
if os.path.isfile(candidate) or os.path.islink(candidate):
candidates.append(candidate)
- if len(candidates) == 0:
- if paths is None:
- loc_info = 'PATH'
- else:
- loc_info = os.pathsep.join(locations)
+ if not candidates:
+ loc_info = 'PATH' if paths is None else os.pathsep.join(locations)
raise IOError("Could not find '{0}' in {1}".format(filename, loc_info))
return candidates[0]
@@ -720,11 +712,7 @@ def debug_log(c):
stdout = stdout.decode('utf-8', 'replace')
stderr = stderr.decode('utf-8', 'replace')
- if is_timeout:
- retcode = TIMEOUT_RETCODE
- else:
- retcode = proc.returncode
-
+ retcode = TIMEOUT_RETCODE if is_timeout else proc.returncode
if valid_return_codes is not None and retcode not in valid_return_codes:
header = 'Error running {0} (exit status {1})'.format(' '.join(args), retcode)
if display_error:
@@ -735,10 +723,7 @@ def debug_log(c):
log.error(get_content(header))
raise ProcessError(args, retcode, stdout, stderr)
- if return_stderr:
- return (stdout, stderr, retcode)
- else:
- return stdout
+ return (stdout, stderr, retcode) if return_stderr else stdout
def _killpg_safe(pgid, signo):
@@ -748,11 +733,7 @@ def _killpg_safe(pgid, signo):
try:
os.killpg(pgid, signo)
except OSError as exc:
- if exc.errno == errno.EPERM:
- # OSX/BSD may raise EPERM on killpg if the process group
- # already terminated
- pass
- else:
+ if exc.errno != errno.EPERM:
raise
@@ -789,8 +770,7 @@ def write_json(path, data, api_version=None, compact=False):
data = dict(data)
data['version'] = api_version
- open_kwargs = {}
- open_kwargs['encoding'] = 'utf-8'
+ open_kwargs = {'encoding': 'utf-8'}
with long_path_open(path, 'w', **open_kwargs) as fd:
if not compact:
json.dump(data, fd, indent=4, sort_keys=True)
@@ -818,8 +798,7 @@ def load_json(path, api_version=None, js_comments=False):
path = os.path.abspath(path)
- open_kwargs = {}
- open_kwargs['encoding'] = 'utf-8'
+ open_kwargs = {'encoding': 'utf-8'}
with long_path_open(path, 'r', **open_kwargs) as fd:
content = fd.read()
@@ -836,22 +815,21 @@ def load_json(path, api_version=None, js_comments=False):
path, str(e)))
if api_version is not None:
- if 'version' in d:
- if d['version'] < api_version:
- raise UserError(
- "{0} is stored in an old file format. Run "
- "`asv update` to update it.".format(path))
- elif d['version'] > api_version:
- raise UserError(
- "{0} is stored in a format that is newer than "
- "what this version of asv understands. Update "
- "asv to use this file.".format(path))
-
- del d['version']
- else:
+ if 'version' not in d:
raise UserError(
"No version specified in {0}.".format(path))
+ if d['version'] < api_version:
+ raise UserError(
+ "{0} is stored in an old file format. Run "
+ "`asv update` to update it.".format(path))
+ elif d['version'] > api_version:
+ raise UserError(
+ "{0} is stored in a format that is newer than "
+ "what this version of asv understands. Update "
+ "asv to use this file.".format(path))
+
+ del d['version']
return d
@@ -908,13 +886,13 @@ def iter_chunks(s, n):
def pick_n(items, n):
"""Pick n items, attempting to get equal index spacing.
"""
- if not (n > 0):
+ if n <= 0:
raise ValueError("Invalid number of items to pick")
spacing = max(float(len(items)) / n, 1)
spaced = []
i = 0
while int(i) < len(items) and len(spaced) < n:
    spaced.append(items[int(i)])
i += spacing
return spaced
@@ -939,8 +917,7 @@ def iter_subclasses(cls):
"""
for x in cls.__subclasses__():
yield x
- for y in iter_subclasses(x):
- yield y
+ yield from iter_subclasses(x)
def hash_equal(a, b):
@@ -1096,9 +1073,7 @@ def is_nan(x):
"""
Returns `True` if x is a NaN value.
"""
- if isinstance(x, float):
- return x != x
- return False
+ return x != x if isinstance(x, float) else False
def is_na(value):
@@ -1113,8 +1088,7 @@ def mean_na(values):
Take a mean, with the understanding that None and NaN stand for
missing data.
"""
- values = [x for x in values if not is_na(x)]
- if values:
+ if values := [x for x in values if not is_na(x)]:
return sum(values) / len(values)
else:
return None
@@ -1125,17 +1099,15 @@ def geom_mean_na(values):
Compute geometric mean, with the understanding that None and NaN
stand for missing data.
"""
- values = [x for x in values if not is_na(x)]
- if values:
- exponent = 1 / len(values)
- prod = 1.0
- acc = 0
- for x in values:
- prod *= abs(x)**exponent
- acc += x
- return prod if acc >= 0 else -prod
- else:
+ if not (values := [x for x in values if not is_na(x)]):
return None
+ exponent = 1 / len(values)
+ prod = 1.0
+ acc = 0
+ for x in values:
+ prod *= abs(x)**exponent
+ acc += x
+ return prod if acc >= 0 else -prod
def ceildiv(numerator, denominator):
@@ -1151,9 +1123,7 @@ def long_path(path):
return path
else:
def long_path(path):
- if path.startswith("\\\\"):
- return path
- return "\\\\?\\" + os.path.abspath(path)
+ return path if path.startswith("\\\\") else "\\\\?\\" + os.path.abspath(path)
def _remove_readonly(func, path, exc_info):
"""Try harder to remove files on Windows"""
@@ -1174,10 +1144,7 @@ def long_path_open(filename, *a, **kw):
return open(long_path(filename), *a, **kw)
def long_path_rmtree(path, ignore_errors=False):
- if ignore_errors:
- onerror = None
- else:
- onerror = _remove_readonly
+ onerror = None if ignore_errors else _remove_readonly
shutil.rmtree(long_path(path),
ignore_errors=ignore_errors,
onerror=onerror)
@@ -1203,7 +1170,7 @@ def sanitize_filename(filename):
"LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
"LPT9"]
if filename.upper() in forbidden:
- filename = filename + "_"
+ filename = f"{filename}_"
return filename
@@ -1277,9 +1244,8 @@ def interpolate_command(command, variables):
cwd = None
while result:
- m = re.match('^([A-Za-z_][A-Za-z0-9_]*)=(.*)$', result[0])
- if m:
- env[m.group(1)] = m.group(2)
+ if m := re.match('^([A-Za-z_][A-Za-z0-9_]*)=(.*)$', result[0]):
+ env[m[1]] = m[2]
del result[0]
continue
@@ -1288,18 +1254,15 @@ def interpolate_command(command, variables):
raise UserError("Configuration error: multiple return-code specifications "
"in command {0!r} "
"".format(command))
- break
-
if result[0] == 'return-code=any':
return_codes = None
return_codes_set = True
del result[0]
continue
- m = re.match('^return-code=([0-9,]+)$', result[0])
- if m:
+ if m := re.match('^return-code=([0-9,]+)$', result[0]):
try:
- return_codes = set(int(x) for x in m.group(1).split(","))
+ return_codes = {int(x) for x in m[1].split(",")}
return_codes_set = True
del result[0]
continue
@@ -1315,8 +1278,6 @@ def interpolate_command(command, variables):
raise UserError("Configuration error: multiple in-dir specifications "
"in command {0!r} "
"".format(command))
- break
-
cwd = result[0][7:]
del result[0]
continue
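
When a regex pattern is moved into an f-string (as in `parse_human_time` above), it should stay a raw string, i.e. `rf"..."`. A minimal sketch with illustrative values, not asv code: without the `r` prefix, `"\s"` is an invalid string escape that newer Pythons warn about and may eventually reject, even though the regex still happens to work.

    # illustrative sketch, not asv code
    import re

    suffixes = "w|d|h|m|s"
    pattern = rf"^\s*([0-9.]+)\s*({suffixes})\s*$"  # raw f-string keeps \s literal

    m = re.match(pattern, " 2.5 h ")
    assert m is not None and m[1] == "2.5" and m[2] == "h"
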
diff --git a/test/benchmark/params_examples.py b/test/benchmark/params_examples.py
index 65442bf1b..6e8ea0adf 100644
--- a/test/benchmark/params_examples.py
+++ b/test/benchmark/params_examples.py
@@ -67,9 +67,8 @@ def time_it(self, n):
def teardown(self, n):
# The time benchmark may call it one additional time
- if not (self.counter[0] <= n + 1 and self.counter[1] == 1):
- raise RuntimeError("Number and repeat didn't have effect: {} {}".format(
- self.counter, n))
+ if self.counter[0] > n + 1 or self.counter[1] != 1:
+ raise RuntimeError(f"Number and repeat didn't have effect: {self.counter} {n}")
def setup_skip(n):
diff --git a/test/benchmark/peakmem_examples.py b/test/benchmark/peakmem_examples.py
index 2c834a1ed..81c62c757 100644
--- a/test/benchmark/peakmem_examples.py
+++ b/test/benchmark/peakmem_examples.py
@@ -4,5 +4,3 @@ def peakmem_list():
# One element takes sizeof(void*) bytes; the code below uses up
# 4MB (32-bit) or 8MB (64-bit)
obj = [0] * 2**20
- for x in obj:
- pass
diff --git a/test/benchmark/subdir/time_subdir.py b/test/benchmark/subdir/time_subdir.py
index 92fd388bd..80facdb7c 100644
--- a/test/benchmark/subdir/time_subdir.py
+++ b/test/benchmark/subdir/time_subdir.py
@@ -4,8 +4,6 @@
def time_foo():
if x != 42:
raise RuntimeError()
- for y in range(1000):
- pass
def setup_foo():
diff --git a/test/benchmark/time_examples.py b/test/benchmark/time_examples.py
index 36d34d6ad..40a0066d1 100644
--- a/test/benchmark/time_examples.py
+++ b/test/benchmark/time_examples.py
@@ -16,13 +16,11 @@ def setup(self):
def time_example_benchmark_1(self):
s = ''
- for i in xrange(self.n):
- s = s + 'x'
+ for _ in xrange(self.n):
+ s = f'{s}x'
def time_example_benchmark_2(self):
- s = []
- for i in xrange(self.n):
- s.append('x')
+ s = ['x' for _ in xrange(self.n)]
''.join(s)
diff --git a/test/conftest.py b/test/conftest.py
index fa85c97ac..7ce03c08a 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -267,7 +267,7 @@ def ChromeHeadless():
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
- create_driver = ns.get(driver_str, None)
+ create_driver = ns.get(driver_str)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
@@ -284,6 +284,7 @@ def ChromeHeadless():
# Clean up on fixture finalization
def fin():
browser.quit()
+
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
@@ -305,10 +306,10 @@ def dummy_packages(request, monkeypatch):
with locked_cache_dir(request.config, "asv-wheels", timeout=900, tag=tag) as cache_dir:
wheel_dir = os.path.abspath(join(str(cache_dir), 'wheels'))
- monkeypatch.setenv(str('PIP_FIND_LINKS'), str('file://' + wheel_dir))
+ monkeypatch.setenv('PIP_FIND_LINKS', f'file://{wheel_dir}')
condarc = join(wheel_dir, 'condarc')
- monkeypatch.setenv(str('CONDARC'), str(condarc))
+ monkeypatch.setenv('CONDARC', str(condarc))
if os.path.isdir(wheel_dir):
return
@@ -326,11 +327,7 @@ def dummy_packages(request, monkeypatch):
raise
# Conda packages were installed in a local channel
- if not WIN:
- wheel_dir_str = "file://{0}".format(wheel_dir)
- else:
- wheel_dir_str = wheel_dir
-
+ wheel_dir_str = wheel_dir if WIN else "file://{0}".format(wheel_dir)
with open(condarc, 'w') as f:
f.write("channels:\n"
"- defaults\n"
@@ -353,7 +350,7 @@ def benchmarks_fixture(tmpdir):
shutil.copytree(BENCHMARK_DIR, 'benchmark')
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['env_dir'] = "env"
d['benchmark_dir'] = 'benchmark'
d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
@@ -395,13 +392,14 @@ def show_fixture(tmpdir, example_results):
tmpdir = str(tmpdir)
os.chdir(tmpdir)
- conf = config.Config.from_json(
- {'results_dir': example_results,
- 'repo': tools.generate_test_repo(tmpdir).path,
- 'project': 'asv',
- 'environment_type': "shouldn't matter what"})
-
- return conf
+ return config.Config.from_json(
+ {
+ 'results_dir': example_results,
+ 'repo': tools.generate_test_repo(tmpdir).path,
+ 'project': 'asv',
+ 'environment_type': "shouldn't matter what",
+ }
+ )
@pytest.fixture(params=[
diff --git a/test/test_benchmarks.py b/test/test_benchmarks.py
index 633cc0160..d14914d15 100644
--- a/test/test_benchmarks.py
+++ b/test/test_benchmarks.py
@@ -97,7 +97,7 @@ def test_invalid_benchmark_tree(tmpdir):
os.chdir(tmpdir)
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['benchmark_dir'] = INVALID_BENCHMARK_DIR
d['env_dir'] = "env"
d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
@@ -138,7 +138,7 @@ def track_this():
f.write("raise AssertionError('Should not be imported!')")
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['env_dir'] = "env"
d['benchmark_dir'] = 'benchmark'
d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
@@ -176,7 +176,7 @@ def time_foo():
dvcs = tools.generate_test_repo(tmpdir, [2, 1, 0])
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['env_dir'] = "env"
d['benchmark_dir'] = 'benchmark'
d['repo'] = dvcs.path
@@ -206,7 +206,7 @@ def test_conf_inside_benchmarks_dir(tmpdir):
f.write("def track_this(): pass")
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['env_dir'] = "env"
d['benchmark_dir'] = '.'
d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
@@ -231,7 +231,7 @@ def test_code_extraction(tmpdir):
shutil.copytree(BENCHMARK_DIR, 'benchmark')
d = {}
- d.update(ASV_CONF_JSON)
+ d |= ASV_CONF_JSON
d['env_dir'] = "env"
d['benchmark_dir'] = 'benchmark'
d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
diff --git a/test/test_compare.py b/test/test_compare.py
index 5269b8491..90dff0b44 100644
--- a/test/test_compare.py
+++ b/test/test_compare.py
@@ -225,7 +225,7 @@ def test_compare_name_lookup(dvcs_type, capsys, tmpdir, example_results):
# Copy to different commit
fn_1 = os.path.join(dst, 'feea15ca-py2.7-Cython-numpy1.8.json')
- fn_2 = os.path.join(dst, commit_hash[:8] + '-py2.7-Cython-numpy1.8.json')
+ fn_2 = os.path.join(dst, f'{commit_hash[:8]}-py2.7-Cython-numpy1.8.json')
data = util.load_json(fn_1)
data['commit_hash'] = commit_hash
util.write_json(fn_2, data)
diff --git a/test/test_continuous.py b/test/test_continuous.py
index f1f0a1f24..389d03fb0 100644
--- a/test/test_continuous.py
+++ b/test/test_continuous.py
@@ -14,7 +14,7 @@ def test_continuous(capfd, basic_conf_2):
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = get_default_environment_type(conf, python)
- env_spec = ("-E", env_type + ":" + python)
+ env_spec = ("-E", f"{env_type}:{python}")
# Check that asv continuous runs
tools.run_asv_with_conf(conf, 'continuous', "master^", '--show-stderr',
diff --git a/test/test_environment.py b/test/test_environment.py
index 80863836c..1dc23754e 100644
--- a/test/test_environment.py
+++ b/test/test_environment.py
@@ -78,7 +78,7 @@ def test_presence_checks(tmpdir, monkeypatch):
# Tell conda to not use hardlinks: on Windows it's not possible
# to delete hard links to files in use, which causes problem when
# trying to cleanup environments during this test
- monkeypatch.setenv(str('CONDA_ALWAYS_COPY'), str('True'))
+ monkeypatch.setenv('CONDA_ALWAYS_COPY', 'True')
conf.env_dir = str(tmpdir.join("env"))
@@ -164,7 +164,7 @@ def test_matrix_expand_include():
conf.include = [
{'python': '3.5', 'b': '2'},
{'sys_platform': sys.platform, 'python': '2.7', 'b': '3'},
- {'sys_platform': sys.platform + 'nope', 'python': '2.7', 'b': '3'},
+ {'sys_platform': f'{sys.platform}nope', 'python': '2.7', 'b': '3'},
{'environment_type': 'nope', 'python': '2.7', 'b': '4'},
{'environment_type': 'something', 'python': '2.7', 'b': '5'},
]
@@ -415,9 +415,11 @@ def test_environment_select():
assert items == [('conda', '1.9'), ('conda', PYTHON_VER1), ('virtualenv', PYTHON_VER1)]
# Check specific python specifiers
- environments = list(environment.get_environments(conf,
- ["conda:3.5",
- "virtualenv:" + PYTHON_VER1]))
+ environments = list(
+ environment.get_environments(
+ conf, ["conda:3.5", f"virtualenv:{PYTHON_VER1}"]
+ )
+ )
items = sorted((env.tool_name, env.python) for env in environments)
assert items == [('conda', '3.5'), ('virtualenv', PYTHON_VER1)]
@@ -428,9 +430,11 @@ def test_environment_select():
# Check autodetect existing
executable = os.path.relpath(os.path.abspath(sys.executable))
- environments = list(environment.get_environments(conf, ["existing",
- ":same",
- ":" + executable]))
+ environments = list(
+ environment.get_environments(
+ conf, ["existing", ":same", f":{executable}"]
+ )
+ )
assert len(environments) == 3
for env in environments:
assert env.tool_name == "existing"
@@ -450,7 +454,7 @@ def test_environment_select():
# Check interaction with exclude
conf.exclude = [{'environment_type': "conda"}]
environments = list(environment.get_environments(conf, ["conda-py2.7-six1.10"]))
- assert len(environments) == 0
+ assert not environments
conf.exclude = [{'environment_type': 'matches nothing'}]
environments = list(environment.get_environments(conf, ["conda-py2.7-six1.10"]))
@@ -466,22 +470,24 @@ def test_environment_select_autodetect():
}
# Check autodetect
- environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
+ environments = list(environment.get_environments(conf, [f":{PYTHON_VER1}"]))
assert len(environments) == 1
assert environments[0].python == PYTHON_VER1
assert environments[0].tool_name in ("virtualenv", "conda")
# Check interaction with exclude
conf.exclude = [{'environment_type': 'matches nothing'}]
- environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
+ environments = list(environment.get_environments(conf, [f":{PYTHON_VER1}"]))
assert len(environments) == 1
conf.exclude = [{'environment_type': 'virtualenv|conda'}]
- environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
+ environments = list(environment.get_environments(conf, [f":{PYTHON_VER1}"]))
assert len(environments) == 1
conf.exclude = [{'environment_type': 'conda'}]
- environments = list(environment.get_environments(conf, ["conda:" + PYTHON_VER1]))
+ environments = list(
+ environment.get_environments(conf, [f"conda:{PYTHON_VER1}"])
+ )
assert len(environments) == 1
@@ -627,11 +633,11 @@ def test_environment_environ_path(environment_type, tmpdir, monkeypatch):
assert usersite_in_syspath == "False"
# Check PYTHONPATH is ignored
- monkeypatch.setenv(str('PYTHONPATH'), str(tmpdir))
+ monkeypatch.setenv('PYTHONPATH', str(tmpdir))
output = env.run(['-c', 'import os; print(os.environ.get("PYTHONPATH", ""))'])
assert output.strip() == ""
- monkeypatch.setenv(str('ASV_PYTHONPATH'), str("Hello python path"))
+ monkeypatch.setenv('ASV_PYTHONPATH', "Hello python path")
output = env.run(['-c', 'import os; print(os.environ["PYTHONPATH"])'])
assert output.strip() == "Hello python path"
@@ -879,7 +885,7 @@ def test_environment_env_matrix():
environments = list(environment.get_environments(conf, None))
assert len(environments) == environ_count
- assert len(set(e.dir_name for e in environments)) == build_count
+ assert len({e.dir_name for e in environments}) == build_count
def test__parse_matrix():
diff --git a/test/test_feed.py b/test/test_feed.py
index c57644d04..30b43ebed 100644
--- a/test/test_feed.py
+++ b/test/test_feed.py
@@ -81,7 +81,7 @@ def test_dummy_xml():
"""
expected2 = expected.replace('type="html" xml:lang="en"', 'xml:lang="en" type="html"')
- assert text == expected or text == expected2
+ assert text in [expected, expected2]
@pytest.mark.skipif(not HAVE_FEEDPARSER, reason="test requires feedparser module")
diff --git a/test/test_gh_pages.py b/test/test_gh_pages.py
index 81c64c4c1..77d157211 100644
--- a/test/test_gh_pages.py
+++ b/test/test_gh_pages.py
@@ -12,9 +12,9 @@
def test_gh_pages(rewrite, tmpdir, generate_result_dir, monkeypatch):
tmpdir = os.path.abspath(str(tmpdir))
- monkeypatch.setenv(str('EMAIL'), str('test@asv'))
- monkeypatch.setenv(str('GIT_COMMITTER_NAME'), str('asv test'))
- monkeypatch.setenv(str('GIT_AUTHOR_NAME'), str('asv test'))
+ monkeypatch.setenv('EMAIL', 'test@asv')
+ monkeypatch.setenv('GIT_COMMITTER_NAME', 'asv test')
+ monkeypatch.setenv('GIT_AUTHOR_NAME', 'asv test')
conf, repo, commits = generate_result_dir([1, 2, 3, 4])
@@ -33,11 +33,7 @@ def test_gh_pages(rewrite, tmpdir, generate_result_dir, monkeypatch):
dvcs.add('dummy')
dvcs.commit('Initial commit')
- if rewrite:
- rewrite_args = ("--rewrite",)
- else:
- rewrite_args = ()
-
+ rewrite_args = ("--rewrite",) if rewrite else ()
# Check with no existing gh-pages branch, no push
tools.run_asv_with_conf(conf, "gh-pages", "--no-push", *rewrite_args)
dvcs.checkout('gh-pages')
diff --git a/test/test_publish.py b/test/test_publish.py
index d2113fc1b..878dd75d4 100644
--- a/test/test_publish.py
+++ b/test/test_publish.py
@@ -85,13 +85,24 @@ def test_publish(tmpdir, example_results):
assert index['params']['branch'] == ['master']
repo = get_repo(conf)
- revision_to_hash = dict((r, h) for h, r in repo.get_revisions(commits).items())
+ revision_to_hash = {r: h for h, r in repo.get_revisions(commits).items()}
def check_file(branch, cython):
- fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
- 'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
- 'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
- 'time_coordinates.time_latitude.json')
+ fn = join(
+ tmpdir,
+ 'html',
+ 'graphs',
+ cython,
+ 'arch-x86_64',
+ f'branch-{branch}',
+ 'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
+ 'machine-cheetah',
+ 'numpy-1.8',
+ 'os-Linux (Fedora 20)',
+ 'python-2.7',
+ 'ram-8.2G',
+ 'time_coordinates.time_latitude.json',
+ )
data = util.load_json(fn)
data_commits = [revision_to_hash[x[0]] for x in data]
if branch == "master":
@@ -151,11 +162,8 @@ def _graph_path(dvcs_type):
def test_publish_range_spec(generate_result_dir):
conf, repo, commits = generate_result_dir(5 * [1])
- for range_spec, expected in (
- ([commits[0], commits[-1]], set([commits[0], commits[-1]])),
- ('HEAD~2..HEAD' if repo.dvcs == 'git' else '.~1:',
- set(commits[-2:])),
- ):
+ for range_spec, expected in (
+     ([commits[0], commits[-1]], {commits[0], commits[-1]}),
+     ('HEAD~2..HEAD' if repo.dvcs == 'git' else '.~1:', set(commits[-2:])),
+ ):
tools.run_asv_with_conf(conf, "publish", range_spec)
data = util.load_json(join(conf.html_dir, 'index.json'))
assert set(data['revision_to_hash'].values()) == expected
@@ -274,10 +282,10 @@ def test_regression_multiple_branches(dvcs_type, tmpdir):
],
)
commit_values = {}
- branches = dict(
- (branch, list(reversed(dvcs.get_branch_hashes(branch))))
+ branches = {
+ branch: list(reversed(dvcs.get_branch_hashes(branch)))
for branch in (master, "stable")
- )
+ }
for branch, values in (
(master, 10 * [1]),
("stable", 5 * [1] + 5 * [2]),
@@ -313,9 +321,7 @@ def test_regression_non_monotonic(dvcs_type, tmpdir):
dvcs = tools.generate_repo_from_ops(tmpdir, dvcs_type,
[("commit", i, d) for i, d in enumerate(dates)])
commits = list(reversed(dvcs.get_branch_hashes()))
- commit_values = {}
- for commit, value in zip(commits, 5 * [1] + 5 * [2]):
- commit_values[commit] = value
+ commit_values = dict(zip(commits, 5 * [1] + 5 * [2]))
conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
tools.run_asv_with_conf(conf, "publish")
regressions = util.load_json(join(conf.html_dir, "regressions.json"))
@@ -365,9 +371,9 @@ def test_regression_atom_feed(generate_result_dir):
# Check there's a link of some sort to the website in the content
content = entries[0].find('{http://www.w3.org/2005/Atom}content')
- assert ('
diff --git a/test/tools.py b/test/tools.py
--- a/test/tools.py
+++ b/test/tools.py
@@ ... @@ def run_asv_with_conf(conf, *argv, **kwargs):
- if sys.version_info[0] >= 3:
- cls = args.func.__self__
- else:
- cls = args.func.im_self
-
+ cls = args.func.__self__ if sys.version_info[0] >= 3 else args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
@@ -162,10 +158,7 @@ def __init__(self, path):
self._fake_date = datetime.datetime.now()
def run_git(self, args, chdir=True, **kwargs):
- if chdir:
- cwd = self.path
- else:
- cwd = None
+ cwd = self.path if chdir else None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
@@ -281,8 +274,7 @@ def merge(self, branch_name, commit_message=None):
self.commit(commit_message)
def get_hash(self, name):
- log = self._repo.log(name.encode(self.encoding), limit=1)
- if log:
+ if log := self._repo.log(name.encode(self.encoding), limit=1):
return log[0][1].decode(self.encoding)
return None
@@ -462,7 +454,6 @@ def generate_result_dir(tmpdir, dvcs, values, branches=None, updated=None):
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = []
- param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
@@ -482,9 +473,7 @@ def generate_result_dir(tmpdir, dvcs, values, branches=None, updated=None):
value, started_at=updated, duration=1.0)
result.save(result_dir)
- if params:
- param_names = ["param{}".format(k) for k in range(len(params))]
-
+ param_names = [f"param{k}" for k in range(len(params))] if params else None
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
@@ -543,7 +532,7 @@ def run():
def get_with_retry(browser, url):
- for j in range(2):
+ for _ in range(2):
try:
return browser.get(url)
except TimeoutException:
@@ -556,7 +545,7 @@ def _build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=False):
# Build fake wheels for testing
for name, version in to_build:
- build_dir = join(tmpdir, name + '-' + version)
+ build_dir = join(tmpdir, f'{name}-{version}')
os.makedirs(build_dir)
with open(join(build_dir, 'setup.py'), 'w') as f:
@@ -610,9 +599,14 @@ def _build_dummy_conda_pkg(name, version, build_dir, dst):
for pyver in [PYTHON_VER1, PYTHON_VER2]:
with _conda_lock():
- subprocess.check_call([conda, 'build',
- '--output-folder=' + dst,
- '--no-anaconda-upload',
- '--python=' + pyver,
- '.'],
- cwd=build_dir)
+ subprocess.check_call(
+ [
+ conda,
+ 'build',
+ f'--output-folder={dst}',
+ '--no-anaconda-upload',
+ f'--python={pyver}',
+ '.',
+ ],
+ cwd=build_dir,
+ )