Skip to content

Commit

Permalink
Skeleton dev3 (#289)
Browse files Browse the repository at this point in the history
* generate_bulk_skeletons_async now returns a float instead of a string.

* Bulk async skeletons now use POST instead of GET.

* ruff cleanup

* Added deprecation TODO comments to bulk async skeleton GET endpoints.

* Added server version check.
  • Loading branch information
kebwi authored Dec 17, 2024
1 parent 5c51e18 commit 037fb81
Show file tree
Hide file tree
Showing 2 changed files with 56 additions and 11 deletions.
8 changes: 8 additions & 0 deletions caveclient/endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -320,9 +320,17 @@
+ "/{datastack_name}/bulk/get_skeletons/{output_format}/{gen_missing_sks}/{root_ids}",
"get_bulk_skeletons_via_skvn_rids": skeleton_v1
+ "/{datastack_name}/bulk/get_skeletons/{skeleton_version}/{output_format}/{gen_missing_sks}/{root_ids}",
# TODO: DEPRECATED: This endpoint is deprecated and will be removed in the future.
# Please use the POST endpoint in the future.
"gen_bulk_skeletons_via_rids": skeleton_v1
+ "/{datastack_name}/bulk/gen_skeletons/{root_ids}",
# TODO: DEPRECATED: This endpoint is deprecated and will be removed in the future.
# Please use the POST endpoint in the future.
"gen_bulk_skeletons_via_skvn_rids": skeleton_v1
+ "/{datastack_name}/bulk/gen_skeletons/{skeleton_version}/{root_ids}",
"gen_bulk_skeletons_via_rids_as_post": skeleton_v1
+ "/{datastack_name}/bulk/gen_skeletons",
"gen_bulk_skeletons_via_skvn_rids_as_post": skeleton_v1
+ "/{datastack_name}/bulk/gen_skeletons",
}
skeletonservice_api_versions = {1: skeletonservice_endpoints_v1}
59 changes: 48 additions & 11 deletions caveclient/skeletonservice.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
ClientBase,
_api_endpoints,
_check_version_compatibility,
handle_response,
)
from .endpoints import skeletonservice_api_versions, skeletonservice_common

Expand Down Expand Up @@ -295,6 +296,7 @@ def _build_bulk_async_endpoint(
root_ids: List,
datastack_name: str,
skeleton_version: int,
post: bool = False,
):
"""
Building the URL in a separate function facilitates testing
Expand All @@ -305,13 +307,21 @@ def _build_bulk_async_endpoint(

endpoint_mapping = self.default_url_mapping
endpoint_mapping["datastack_name"] = datastack_name
endpoint_mapping["root_ids"] = ",".join([str(v) for v in root_ids])
if not post:
# TODO: DEPRECATED: This endpoint is deprecated and will be removed in the future.
# Please use the POST endpoint in the future.
endpoint_mapping["root_ids"] = ",".join([str(v) for v in root_ids])

if not skeleton_version:
endpoint = "gen_bulk_skeletons_via_rids"
if not skeleton_version:
endpoint = "gen_bulk_skeletons_via_rids"
else:
endpoint_mapping["skeleton_version"] = skeleton_version
endpoint = "gen_bulk_skeletons_via_skvn_rids"
else:
endpoint_mapping["skeleton_version"] = skeleton_version
endpoint = "gen_bulk_skeletons_via_skvn_rids"
if not skeleton_version:
endpoint = "gen_bulk_skeletons_via_rids_as_post"
else:
endpoint = "gen_bulk_skeletons_via_skvn_rids_as_post"

url = self._endpoints[endpoint].format_map(endpoint_mapping)
return url
Expand Down Expand Up @@ -666,6 +676,11 @@ def generate_bulk_skeletons_async(
The name of the datastack to check
skeleton_version : int
The skeleton version to generate. Use 0 for Neuroglancer-compatibility. Use -1 for latest.
Returns
-------
float
The estimated time in seconds to generate all skeletons (a comparable message will be output to the console prior to return).
"""
if not self.fc.l2cache.has_cache():
raise NoL2CacheException("SkeletonClient requires an L2Cache.")
Expand All @@ -682,22 +697,41 @@ def generate_bulk_skeletons_async(
raise ValueError(
f"root_ids must be a list or numpy array of root_ids, not a {type(root_ids)}"
)

if self._server_version < Version("0.8.0"):
logging.warning(
"Server version is old and only supports GET interactions for bulk async skeletons. Consider upgrading to a newer server version to enable POST interactions."
)

if len(root_ids) > MAX_BULK_ASYNCHRONOUS_SKELETONS:
logging.warning(
f"The number of root_ids exceeds the current limit of {MAX_BULK_ASYNCHRONOUS_SKELETONS}. Only the first {MAX_BULK_ASYNCHRONOUS_SKELETONS} will be processed."
)
root_ids = root_ids[:MAX_BULK_ASYNCHRONOUS_SKELETONS]

# TODO: I recently converted this function to a batched approach to alleviate sending a long URL of root_ids via GET,
# but have since converted the call to POST, which probably obviates the need for the considerably more complex batch handling.
# So consider reverting to the unbatched approach in the future.

estimated_async_time_secs_upper_bound_sum = 0
for batch in range(0, len(root_ids), BULK_ASYNC_SKELETONS_BATCH_SIZE):
rids_one_batch = root_ids[batch : batch + BULK_ASYNC_SKELETONS_BATCH_SIZE]

url = self._build_bulk_async_endpoint(
rids_one_batch, datastack_name, skeleton_version
)
response = self.session.get(url)
self.raise_for_status(response, log_warning=log_warning)
if self._server_version < Version("0.8.0"):
url = self._build_bulk_async_endpoint(
rids_one_batch, datastack_name, skeleton_version
)
response = self.session.get(url)
else:
url = self._build_bulk_async_endpoint(
rids_one_batch, datastack_name, skeleton_version, post=True
)
data = {
"root_ids": rids_one_batch,
"skeleton_version": skeleton_version,
}
response = self.session.post(url, json=data)
response = handle_response(response, as_json=False)

estimated_async_time_secs_upper_bound = float(response.text)
estimated_async_time_secs_upper_bound_sum += (
Expand Down Expand Up @@ -729,4 +763,7 @@ def generate_bulk_skeletons_async(
# else:
# estimate_time_str = f"{(estimated_async_time_secs_upper_bound_sum / 86400):.2f} days"

return f"Upper estimate to generate all {len(root_ids)} skeletons: {estimate_time_str}"
logging.info(
f"Upper estimate to generate all {len(root_ids)} skeletons: {estimate_time_str}"
)
return estimated_async_time_secs_upper_bound_sum

0 comments on commit 037fb81

Please sign in to comment.