From a171590c0d728b057d77fb17150239a872602136 Mon Sep 17 00:00:00 2001
From: Keith Wiley
Date: Mon, 16 Dec 2024 11:28:41 -0800
Subject: [PATCH] ruff cleanup

---
 caveclient/skeletonservice.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/caveclient/skeletonservice.py b/caveclient/skeletonservice.py
index f80d5251..6dfa6fd7 100644
--- a/caveclient/skeletonservice.py
+++ b/caveclient/skeletonservice.py
@@ -674,7 +674,7 @@ def generate_bulk_skeletons_async(
             The name of the datastack to check
         skeleton_version : int
             The skeleton version to generate. Use 0 for Neuroglancer-compatibility. Use -1 for latest.
-        
+
         Returns
         -------
         float
@@ -701,7 +701,7 @@ def generate_bulk_skeletons_async(
                 f"The number of root_ids exceeds the current limit of {MAX_BULK_ASYNCHRONOUS_SKELETONS}. Only the first {MAX_BULK_ASYNCHRONOUS_SKELETONS} will be processed."
             )
             root_ids = root_ids[:MAX_BULK_ASYNCHRONOUS_SKELETONS]
-        
+
         # TODO: I recently converted this function to a batched approach to alleviate sending a long URL of root_ids via GET,
         # but has since converted the call to POST, which probably obviates the need for the considerably more complex batch handling.
         # So consider reverting to the unbatched approach in the future.
@@ -750,5 +750,7 @@ def generate_bulk_skeletons_async(
         # else:
         #     estimate_time_str = f"{(estimated_async_time_secs_upper_bound_sum / 86400):.2f} days"
 
-        logging.info(f"Upper estimate to generate all {len(root_ids)} skeletons: {estimate_time_str}")
+        logging.info(
+            f"Upper estimate to generate all {len(root_ids)} skeletons: {estimate_time_str}"
+        )
         return estimated_async_time_secs_upper_bound_sum