From 4e5f8cdd86431a4a8739deed07429c5ecfc3a388 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 7 Jul 2024 10:39:39 -0700 Subject: [PATCH 01/34] use navis-fastcore when available: - _generate_segments() and _break_segments() - _connected_components() if input is skeleton - skeletons.cable_length (via new cable_length() function) - geodesic_matrix() - prune_twigs() when precise=False - segments_to_coords - synapse_flow_centrality() --- navis/core/skeleton.py | 13 +- navis/graph/graph_utils.py | 182 ++++++++++++++++++-------- navis/morpho/__init__.py | 2 +- navis/morpho/manipulation.py | 71 ++++++---- navis/morpho/mmetrics.py | 193 +++++++++++++++++++++------- navis/morpho/subset.py | 2 +- navis/plotting/dd.py | 6 +- navis/plotting/k3d/k3d_objects.py | 2 +- navis/plotting/plot_utils.py | 86 ++++++++----- navis/plotting/plotly/graph_objs.py | 2 +- navis/plotting/vispy/visuals.py | 3 +- navis/utils/__init__.py | 7 + 12 files changed, 389 insertions(+), 180 deletions(-) diff --git a/navis/core/skeleton.py b/navis/core/skeleton.py index d8ca879d..da411fb1 100644 --- a/navis/core/skeleton.py +++ b/navis/core/skeleton.py @@ -606,17 +606,8 @@ def n_leafs(self) -> Optional[int]: @temp_property def cable_length(self) -> Union[int, float]: """Cable length.""" - if hasattr(self, '_cable_length'): - return self._cable_length - - # The by far fastest way to get the cable length is to work on the node table - # Using the igraph representation is about the same speed - if it is already calculated! - # However, one problem with the graph representation is that with large neuronlists - # it adds a lot to the memory footprint. - not_root = (self.nodes.parent_id >= 0).values - xyz = self.nodes[['x', 'y', 'z']].values[not_root] - xyz_parent = self.nodes.set_index('node_id').loc[self.nodes.parent_id.values[not_root], ['x', 'y', 'z']].values - self._cable_length = np.sum(np.linalg.norm(xyz - xyz_parent, axis=1)) + if not hasattr(self, '_cable_length'): + self._cable_length = morpho.cable_length(self) return self._cable_length @property diff --git a/navis/graph/graph_utils.py b/navis/graph/graph_utils.py index ef099468..935aed75 100644 --- a/navis/graph/graph_utils.py +++ b/navis/graph/graph_utils.py @@ -29,19 +29,32 @@ # Set up logging logger = config.get_logger(__name__) -__all__ = sorted(['classify_nodes', 'cut_skeleton', 'longest_neurite', - 'split_into_fragments', 'reroot_skeleton', 'distal_to', - 'dist_between', 'find_main_branchpoint', - 'generate_list_of_childs', 'geodesic_matrix', - 'node_label_sorting', - 'segment_length', 'rewire_skeleton', 'insert_nodes', - 'remove_nodes', 'dist_to_root']) - - -@utils.map_neuronlist(desc='Gen. segments', allow_parallel=True) -def _generate_segments(x: 'core.NeuronObject', - weight: Optional[str] = None, - return_lengths: bool = False) -> Union[list, Tuple[list, list]]: +__all__ = sorted( + [ + "classify_nodes", + "cut_skeleton", + "longest_neurite", + "split_into_fragments", + "reroot_skeleton", + "distal_to", + "dist_between", + "find_main_branchpoint", + "generate_list_of_childs", + "geodesic_matrix", + "node_label_sorting", + "segment_length", + "rewire_skeleton", + "insert_nodes", + "remove_nodes", + "dist_to_root", + ] +) + + +@utils.map_neuronlist(desc="Gen. segments", allow_parallel=True) +def _generate_segments( + x: "core.NeuronObject", weight: Optional[str] = None, return_lengths: bool = False +) -> Union[list, Tuple[list, list]]: """Generate segments maximizing segment lengths. 
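     Will use `navis-fastcore` if available (and `return_lengths=False`).
 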
Parameters @@ -65,7 +78,7 @@ def _generate_segments(x: 'core.NeuronObject', Examples -------- - This is for doctests mostly + This is primarily for doctests: >>> import navis >>> n = navis.example_neurons(1) @@ -79,15 +92,29 @@ def _generate_segments(x: 'core.NeuronObject', # At this point x is TreeNeuron x: core.TreeNeuron - assert weight in ('weight', None), f'Unable to use weight "{weight}"' + assert weight in ("weight", None), f'Unable to use weight "{weight}"' + + if utils.fastcore and not return_lengths: + if weight == "weight": + weight = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ) + + return utils.fastcore.generate_segments( + x.nodes.node_id.values, x.nodes.parent_id.values, weights=weight + ) + d = dist_to_root(x, igraph_indices=False, weight=weight) - endNodeIDs = x.nodes[x.nodes.type == 'end'].node_id.values + endNodeIDs = x.nodes[x.nodes.type == "end"].node_id.values endNodeIDs = sorted(endNodeIDs, key=lambda x: d.get(x, 0), reverse=True) if config.use_igraph and x.igraph: g: igraph.Graph = x.igraph # noqa # Convert endNodeIDs to indices - id2ix = dict(zip(x.igraph.vs['node_id'], range(len(x.igraph.vs)))) + id2ix = dict(zip(x.igraph.vs["node_id"], range(len(x.igraph.vs)))) endNodeIDs = [id2ix[n] for n in endNodeIDs] else: g: nx.DiGraph = x.graph @@ -125,9 +152,13 @@ def _generate_segments(x: 'core.NeuronObject', return sequences -def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> List[Set[int]]: +def _connected_components( + x: Union["core.TreeNeuron", "core.MeshNeuron"], +) -> List[Set[int]]: """Extract the connected components within a neuron. + Will use `navis-fastcore` for skeletons, if available. + Parameters ---------- x : TreeNeuron | MeshNeuron @@ -150,15 +181,22 @@ def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> Lis """ assert isinstance(x, (core.TreeNeuron, core.MeshNeuron)) - if config.use_igraph and x.igraph: + if isinstance(x, core.TreeNeuron) and utils.fastcore: + # This returns for each node the ID of its root + ms = utils.fastcore.connected_components( + x.nodes.node_id.values, x.nodes.parent_id.values + ) + # Translate into list of sets + cc = [x.nodes.node_id.values[ms == i] for i in np.unique(ms)] + elif config.use_igraph and x.igraph: G: igraph.Graph = x.igraph # noqa # Get the vertex clustering - vc = G.components(mode='WEAK') + vc = G.components(mode="WEAK") # Membership maps indices to connected components ms = np.array(vc.membership) if isinstance(x, core.TreeNeuron): # For skeletons we need node IDs - ids = np.array(G.vs['node_id']) + ids = np.array(G.vs["node_id"]) else: # For MeshNeurons we can use the indices directly ids = np.array(G.vs.indices) @@ -173,7 +211,7 @@ def _connected_components(x: Union['core.TreeNeuron', 'core.MeshNeuron']) -> Lis return cc -def _break_segments(x: 'core.NeuronObject') -> list: +def _break_segments(x: "core.NeuronObject") -> list: """Break neuron into small segments connecting ends, branches and root. 
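     Will use `navis-fastcore` if available.
 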
Parameters @@ -200,14 +238,18 @@ def _break_segments(x: 'core.NeuronObject') -> list: elif isinstance(x, core.TreeNeuron): pass else: - logger.error('Unexpected datatype: %s' % str(type(x))) + logger.error("Unexpected datatype: %s" % str(type(x))) raise ValueError # At this point x is TreeNeuron x: core.TreeNeuron - if x.igraph and config.use_igraph: - g: Union['igraph.Graph', 'nx.DiGraph'] = x.igraph # noqa + if utils.fastcore: + seg_list = utils.fastcore.break_segments( + x.nodes.node_id.values, x.nodes.parent_id.values + ) + elif x.igraph and config.use_igraph: + g: Union["igraph.Graph", "nx.DiGraph"] = x.igraph # noqa end = g.vs.select(_indegree=0).indices branch = g.vs.select(_indegree_gt=1, _outdegree=1).indices root = g.vs.select(_outdegree=0).indices @@ -228,12 +270,13 @@ def _break_segments(x: 'core.NeuronObject') -> list: seg.append(parent) seg_list.append(seg) # Translate indices to node IDs - ix_id = {v: n for v, n in zip(g.vs.indices, - g.vs.get_attribute_values('node_id'))} + ix_id = { + v: n for v, n in zip(g.vs.indices, g.vs.get_attribute_values("node_id")) + } seg_list = [[ix_id[n] for n in s] for s in seg_list] else: - seeds = x.nodes[x.nodes.type.isin(['branch', 'end'])].node_id.values - stops = x.nodes[x.nodes.type.isin(['branch', 'root'])].node_id.values + seeds = x.nodes[x.nodes.type.isin(["branch", "end"])].node_id.values + stops = x.nodes[x.nodes.type.isin(["branch", "root"])].node_id.values # Converting to set speeds up the "parent in stops" check stops = set(stops) g = x.graph @@ -648,12 +691,13 @@ def distal_to(x: 'core.TreeNeuron', return df -def geodesic_matrix(x: 'core.NeuronObject', - from_: Optional[Iterable[int]] = None, - directed: bool = False, - weight: Optional[str] = 'weight', - limit: Union[float, int] = np.inf - ) -> pd.DataFrame: +def geodesic_matrix( + x: "core.NeuronObject", + from_: Optional[Iterable[int]] = None, + directed: bool = False, + weight: Optional[str] = "weight", + limit: Union[float, int] = np.inf, +) -> pd.DataFrame: """Generate geodesic ("along-the-arbor") distance matrix between nodes/vertices. 
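     Will use `navis-fastcore` if available (TreeNeurons only).
 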
Parameters @@ -709,11 +753,51 @@ def geodesic_matrix(x: 'core.NeuronObject', if len(x) == 1: x = x[0] else: - raise ValueError('Cannot process more than a single neuron.') + raise ValueError("Cannot process more than a single neuron.") elif not isinstance(x, (core.TreeNeuron, core.MeshNeuron)): raise ValueError(f'Unable to process data of type "{type(x)}"') - limit = x.map_units(limit, on_error='raise') + limit = x.map_units(limit, on_error="raise") + + # Use fastcore if available + if utils.fastcore and isinstance(x, core.TreeNeuron): + # Calculate node distances + if weight == "weight": + weight = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ) + + # Check for missing sources + if not isinstance(from_, type(None)): + from_ = np.unique(utils.make_iterable(from_)) + + miss = from_[~np.isin(from_, x.nodes.node_id.values)] + if len(miss): + raise ValueError( + f'Node/vertex IDs not present: {", ".join(miss.astype(str))}' + ) + ix = from_ + else: + ix = x.nodes.node_id.values + + dmat = utils.fastcore.geodesic_matrix( + x.nodes.node_id.values, + x.nodes.parent_id.values, + weights=weight, + directed=directed, + sources=from_, + ) + + # Fastcore returns -1 for non-connected nodes + dmat[dmat < 0] = np.inf + + if limit is not None and limit is not np.inf: + dmat[dmat > limit] = np.inf + + return pd.DataFrame(dmat, index=ix, columns=x.nodes.node_id.values) # Makes no sense to use directed for MeshNeurons if isinstance(x, core.MeshNeuron): @@ -721,7 +805,7 @@ def geodesic_matrix(x: 'core.NeuronObject', if x.igraph and config.use_igraph: if isinstance(x, core.TreeNeuron): - nodeList = np.array(x.igraph.vs.get_attribute_values('node_id')) + nodeList = np.array(x.igraph.vs.get_attribute_values("node_id")) else: nodeList = np.arange(len(x.igraph.vs)) @@ -730,18 +814,16 @@ def geodesic_matrix(x: 'core.NeuronObject', else: nodeList = np.array(x.graph.nodes()) - if hasattr(nx, 'to_scipy_sparse_matrix'): - m = nx.to_scipy_sparse_matrix(x.graph, nodeList, - weight=weight) + if hasattr(nx, "to_scipy_sparse_matrix"): + m = nx.to_scipy_sparse_matrix(x.graph, nodeList, weight=weight) else: - m = nx.to_scipy_sparse_array(x.graph, nodeList, - weight=weight) + m = nx.to_scipy_sparse_array(x.graph, nodeList, weight=weight) if not isinstance(from_, type(None)): from_ = np.unique(utils.make_iterable(from_)) miss = from_[~np.isin(from_, nodeList)].astype(str) - if any(miss): + if len(miss): raise ValueError(f'Node/vertex IDs not present: {", ".join(miss)}') indices = np.where(np.isin(nodeList, from_))[0] @@ -752,19 +834,15 @@ def geodesic_matrix(x: 'core.NeuronObject', # For some reason csgrpah.dijkstra expects indices/indptr as int32 # igraph seems to do that by default but networkx uses int64 for indices - m.indptr = m.indptr.astype('int32', copy=False) - m.indices = m.indices.astype('int32', copy=False) - dmat = csgraph.dijkstra(m, - directed=directed, - indices=indices, - limit=limit) + m.indptr = m.indptr.astype("int32", copy=False) + m.indices = m.indices.astype("int32", copy=False) + dmat = csgraph.dijkstra(m, directed=directed, indices=indices, limit=limit) return pd.DataFrame(dmat, columns=nodeList, index=ix) # type: ignore # no stubs @utils.lock_neuron -def segment_length(x: 'core.TreeNeuron', - segment: List[int]) -> float: +def segment_length(x: "core.TreeNeuron", segment: List[int]) -> float: """Get length of a linear segment. 
This function is superfast but has no checks - you must provide a diff --git a/navis/morpho/__init__.py b/navis/morpho/__init__.py index 421332fb..f7d4f919 100644 --- a/navis/morpho/__init__.py +++ b/navis/morpho/__init__.py @@ -14,7 +14,7 @@ from .mmetrics import (strahler_index, bending_flow, flow_centrality, synapse_flow_centrality, sholl_analysis, segregation_index, arbor_segregation_index, tortuosity, - betweeness_centrality, segment_analysis) + betweeness_centrality, segment_analysis, cable_length) from .manipulation import (prune_by_strahler, stitch_skeletons, split_axon_dendrite, average_skeletons, despike_skeleton, guess_radius, smooth_skeleton, diff --git a/navis/morpho/manipulation.py b/navis/morpho/manipulation.py index ea453ef9..650b6e50 100644 --- a/navis/morpho/manipulation.py +++ b/navis/morpho/manipulation.py @@ -392,33 +392,53 @@ def _prune_twigs_simple(neuron: 'core.TreeNeuron', if not inplace: neuron = neuron.copy() - # Find terminal nodes - leafs = neuron.nodes[neuron.nodes.type == 'end'].node_id.values + if utils.fastcore: + nodes_to_keep = utils.fastcore.prune_twigs( + neuron.nodes.node_id.values, + neuron.nodes.parent_id.values, + threshold=size, + weights=utils.fastcore.dag.parent_dist( + neuron.nodes.node_id.values, + neuron.nodes.parent_id.values, + neuron.nodes[['x', 'y', 'z']].values, + ) + ) + + if len(nodes_to_keep) < neuron.n_nodes: + subset.subset_neuron(neuron, + nodes_to_keep, + inplace=True) + + if recursive: + recursive -= 1 + prune_twigs(neuron, size=size, inplace=True, recursive=recursive) + else: + # Find terminal nodes + leafs = neuron.nodes[neuron.nodes.type == 'end'].node_id.values - # Find terminal segments - segs = graph._break_segments(neuron) - segs = np.array([s for s in segs if s[0] in leafs], dtype=object) + # Find terminal segments + segs = graph._break_segments(neuron) + segs = np.array([s for s in segs if s[0] in leafs], dtype=object) - # Get segment lengths - seg_lengths = np.array([graph.segment_length(neuron, s) for s in segs]) + # Get segment lengths + seg_lengths = np.array([graph.segment_length(neuron, s) for s in segs]) - # Find out which to delete - segs_to_delete = segs[seg_lengths <= size] + # Find out which to delete + segs_to_delete = segs[seg_lengths <= size] + if len(segs_to_delete): + # Unravel the into list of node IDs -> skip the last parent + nodes_to_delete = [n for s in segs_to_delete for n in s[:-1]] - if segs_to_delete.any(): - # Unravel the into list of node IDs -> skip the last parent - nodes_to_delete = [n for s in segs_to_delete for n in s[:-1]] + # Subset neuron + nodes_to_keep = neuron.nodes[~neuron.nodes.node_id.isin(nodes_to_delete)].node_id.values + subset.subset_neuron(neuron, + nodes_to_keep, + inplace=True) - # Subset neuron - nodes_to_keep = neuron.nodes[~neuron.nodes.node_id.isin(nodes_to_delete)].node_id.values - subset.subset_neuron(neuron, - nodes_to_keep, - inplace=True) - - # Go recursive - if recursive: - recursive -= 1 - prune_twigs(neuron, size=size, inplace=True, recursive=recursive) + # Go recursive + if recursive: + recursive -= 1 + prune_twigs(neuron, size=size, inplace=True, recursive=recursive) return neuron @@ -973,7 +993,7 @@ def stitch_skeletons(*x: Union[Sequence[NeuronObject], 'core.NeuronList'], """Stitch multiple skeletons together. Uses minimum spanning tree to determine a way to connect all fragments - while minimizing length (Euclidian distance) of the new edges. Nodes + while minimizing length (Euclidean distance) of the new edges. 
Nodes that have been stitched will get a "stitched" tag. Important @@ -1912,10 +1932,8 @@ def _stitch_mst(x: 'core.TreeNeuron', "nodes in the neuron") mask = x.nodes.node_id.values[mask] - g = x.graph.to_undirected() - # Get connected components - cc = list(nx.connected_components(g)) + cc = graph._connected_components(x) if len(cc) == 1: # There's only one component -- no healing necessary return x @@ -1998,6 +2016,7 @@ def _stitch_mst(x: 'core.TreeNeuron', # For each inter-fragment edge, add the corresponding # fine-grained edge between skeleton nodes in the original graph. + g = x.graph.to_undirected() to_add = [[e[2]['node_a'], e[2]['node_b']] for e in frag_edges] g.add_edges_from(to_add) diff --git a/navis/morpho/mmetrics.py b/navis/morpho/mmetrics.py index 7d0e2c76..10331686 100644 --- a/navis/morpho/mmetrics.py +++ b/navis/morpho/mmetrics.py @@ -23,7 +23,7 @@ import pandas as pd import numpy as np -from typing import Union, Optional, Sequence, List, Dict, overload +from typing import Union, Optional, Sequence, Dict from typing_extensions import Literal from .. import config, graph, sampling, core, utils @@ -31,14 +31,25 @@ # Set up logging logger = config.get_logger(__name__) -__all__ = sorted(['strahler_index', 'bending_flow', 'sholl_analysis', - 'flow_centrality', 'synapse_flow_centrality', - 'segregation_index', 'tortuosity', - 'betweeness_centrality', 'segment_analysis']) - - -def parent_dist(x: Union['core.TreeNeuron', pd.DataFrame], - root_dist: Optional[int] = None) -> None: +__all__ = sorted( + [ + "strahler_index", + "bending_flow", + "sholl_analysis", + "flow_centrality", + "synapse_flow_centrality", + "segregation_index", + "tortuosity", + "betweeness_centrality", + "segment_analysis", + "cable_length", + ] +) + + +def parent_dist( + x: Union["core.TreeNeuron", pd.DataFrame], root_dist: Optional[int] = None +) -> None: """Get child->parent distances for skeleton nodes. Parameters @@ -61,31 +72,43 @@ def parent_dist(x: Union['core.TreeNeuron', pd.DataFrame], else: raise TypeError(f'Need TreeNeuron or DataFrame, got "{type(x)}"') - # Extract node coordinates - tn_coords = nodes[['x', 'y', 'z']].values + if not utils.fastcore: + # Extract node coordinates + tn_coords = nodes[["x", "y", "z"]].values - # Get parent coordinates - parent_coords = nodes.set_index('node_id').reindex(nodes.parent_id.values)[['x', 'y', 'z']].values + # Get parent coordinates + parent_coords = ( + nodes.set_index("node_id") + .reindex(nodes.parent_id.values)[["x", "y", "z"]] + .values + ) - # Calculate distances between nodes and their parents - w = np.sqrt(np.sum((tn_coords - parent_coords) ** 2, axis=1)) + # Calculate distances between nodes and their parents + w = np.sqrt(np.sum((tn_coords - parent_coords) ** 2, axis=1)) - # Replace root dist (nan by default) - w[np.isnan(w)] = root_dist + # Replace root dist (nan by default) + w[np.isnan(w)] = root_dist + else: + w = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=root_dist, + ) return w -@utils.map_neuronlist(desc='Calc. SI', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_properties', - reroot_soma=True, - node_props=['strahler_index']) -def strahler_index(x: 'core.NeuronObject', - method: Union[Literal['standard'], - Literal['greedy']] = 'standard', - to_ignore: list = [], - min_twig_size: Optional[int] = None - ) -> 'core.NeuronObject': +@utils.map_neuronlist(desc="Calc. 
SI", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", reroot_soma=True, node_props=["strahler_index"] +) +def strahler_index( + x: "core.NeuronObject", + method: Union[Literal["standard"], Literal["greedy"]] = "standard", + to_ignore: list = [], + min_twig_size: Optional[int] = None, +) -> "core.NeuronObject": """Calculate Strahler Index (SI). Starts with SI of 1 at each leaf and walks to root. At forks with different @@ -873,16 +896,17 @@ def _flow_centrality_igraph(x: 'core.NeuronObject', return x -@utils.map_neuronlist(desc='Calc. flow', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_properties', - include_connectors=True, - heal=True, - node_props=['synapse_flow_centrality']) -def synapse_flow_centrality(x: 'core.NeuronObject', - mode: Union[Literal['centrifugal'], - Literal['centripetal'], - Literal['sum']] = 'sum' - ) -> 'core.NeuronObject': +@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", + include_connectors=True, + heal=True, + node_props=["synapse_flow_centrality"], +) +def synapse_flow_centrality( + x: "core.NeuronObject", + mode: Union[Literal["centrifugal"], Literal["centripetal"], Literal["sum"]] = "sum", +) -> "core.NeuronObject": """Calculate synapse flow centrality (SFC). From Schneider-Mizell et al. (2016): "We use flow centrality for @@ -899,8 +923,7 @@ def synapse_flow_centrality(x: 'core.NeuronObject', of each skeleton node in a 3d view, providing a characteristic signature of the arbor that enables subjective evaluation of its identity." - Losely based on Alex Bates' implemention in `catnat - `_. + Uses navis-fastcore if available. Parameters ---------- @@ -949,26 +972,62 @@ def synapse_flow_centrality(x: 'core.NeuronObject', # causes less headaches. It is, however, about >10X slower than this version! # Note to self: do not go down that rabbit hole again! 
- if mode not in ['centrifugal', 'centripetal', 'sum']: + if mode not in ["centrifugal", "centripetal", "sum"]: raise ValueError(f'Unknown "mode" parameter: {mode}') if not isinstance(x, core.TreeNeuron): raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"') if not x.has_connectors: - raise ValueError('Neuron must have connectors.') + raise ValueError("Neuron must have connectors.") if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)): - logger.warning(f'Neuron {x.id} is not rooted to its soma!') + logger.warning(f"Neuron {x.id} is not rooted to its soma!") # Figure out how connector types are labeled cn_types = x.connectors.type.unique() - if any(np.isin(['pre', 'post'], cn_types)): - pre, post = 'pre', 'post' + if any(np.isin(["pre", "post"], cn_types)): + pre, post = "pre", "post" elif any(np.isin([0, 1], cn_types)): pre, post = 0, 1 else: - raise ValueError(f'Unable to parse connector types "{cn_types}" for neuron {x.id}') + raise ValueError( + f'Unable to parse connector types "{cn_types}" for neuron {x.id}' + ) + + if utils.fastcore: + x.nodes["synapse_flow_centrality"] = utils.fastcore.synapse_flow_centrality( + node_ids=x.nodes.node_id.values, + parent_ids=x.nodes.parent_id.values, + presynapses=x.nodes.node_id.map( + x.connectors[(x.connectors.type == pre)].node_id.value_counts() + ) + .fillna(0) + .astype(int) + .values, + postsynapses=x.nodes.node_id.map( + x.connectors[(x.connectors.type == post)].node_id.value_counts() + ) + .fillna(0) + .astype(int) + .values, + mode=mode, + ) + # Add info on method/mode used for flow centrality + x.centrality_method = mode # type: ignore + + # Need to add a restriction, that a branchpoint cannot have a lower + # flow than its highest child -> this happens at the main branch point to + # the cell body fiber because the flow doesn't go "through" it in + # child -> parent direction but rather "across" it from one child to the + # other + is_bp = x.nodes["type"] == "branch" + bp = x.nodes.loc[is_bp, "node_id"].values + bp_childs = x.nodes[x.nodes.parent_id.isin(bp)] + max_flow = bp_childs.groupby("parent_id").synapse_flow_centrality.max() + x.nodes.loc[is_bp, "synapse_flow_centrality"] = max_flow.loc[bp].values + x.nodes["synapse_flow_centrality"] = x.nodes.synapse_flow_centrality.astype(int) + return x # Get list of nodes with pre/postsynapses pre_node_ids = x.connectors[x.connectors.type == pre].node_id.values @@ -1547,3 +1606,47 @@ def betweeness_centrality(x: 'core.NeuronObject', x.nodes['betweenness'] = x.nodes.node_id.map(bc).astype(int) return x + + +@utils.map_neuronlist(desc="Cable length", allow_parallel=True) +@utils.meshneuron_skeleton(method="pass_through") +def cable_length(x) -> Union[int, float]: + """Calculate cable length. + + Parameters + ---------- + x : TreeNeuron | MeshNeuron | NeuronList + Neuron(s) for which to calculate cable length. + + Returns + ------- + cable_length : float | array of float + Cable length of the neuron(s). + + """ + utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,)) + + # See if we can use fastcore + if not utils.fastcore: + # The by far fastest way to get the cable length is to work on the node table + # Using the igraph representation is about the same speed... if it is already calculated! + # However, one problem with the graph representation is that with large neuronlists + # it adds a lot to the memory footprint. 
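+        #
+        # Sketch of the idea: mask out root nodes (parent_id < 0), look up
+        # each remaining node's parent coordinates, then sum the
+        # child->parent Euclidean distances.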
+ not_root = (x.nodes.parent_id >= 0).values + xyz = x.nodes[["x", "y", "z"]].values[not_root] + xyz_parent = ( + x.nodes.set_index("node_id") + .loc[x.nodes.parent_id.values[not_root], ["x", "y", "z"]] + .values + ) + cable_length = np.sum(np.linalg.norm(xyz - xyz_parent, axis=1)) + else: + cable_length = utils.fastcore.dag.parent_dist( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + root_dist=0, + ).sum() + + return cable_length + diff --git a/navis/morpho/subset.py b/navis/morpho/subset.py index e7b5d2f9..0bd876dd 100644 --- a/navis/morpho/subset.py +++ b/navis/morpho/subset.py @@ -291,7 +291,7 @@ def _subset_treeneuron(x, subset, keep_disc_cn, prevent_fragments): # Remove empty tags x.tags = {t: x.tags[t] for t in x.tags if x.tags[t]} # type: ignore # TreeNeuron has no tags - # Fix graph representations + # Fix graph representations (avoids having to recompute them) if '_graph_nx' in x.__dict__: x._graph_nx = x.graph.subgraph(x.nodes.node_id.values) if '_igraph' in x.__dict__: diff --git a/navis/plotting/dd.py b/navis/plotting/dd.py index dbebe0ce..44760a16 100644 --- a/navis/plotting/dd.py +++ b/navis/plotting/dd.py @@ -845,9 +845,7 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): if method == '2d': if not depth_coloring and not (isinstance(color, np.ndarray) and color.ndim == 2): # Generate by-segment coordinates - coords = segments_to_coords(neuron, - neuron.segments, - modifier=(1, 1, 1)) + coords = segments_to_coords(neuron, modifier=(1, 1, 1)) # We have to add (None, None, None) to the end of each # slab to make that line discontinuous there @@ -929,7 +927,6 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): else: # Generate by-segment coordinates coords = segments_to_coords(neuron, - neuron.segments, modifier=(1, 1, 1)) line_color = color @@ -953,7 +950,6 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): elif method == '3d_complex': # Generate by-segment coordinates coords = segments_to_coords(neuron, - neuron.segments, modifier=(1, 1, 1)) for c in coords: lc = Line3DCollection([c], diff --git a/navis/plotting/k3d/k3d_objects.py b/navis/plotting/k3d/k3d_objects.py index bd54eeea..13aaaf2a 100644 --- a/navis/plotting/k3d/k3d_objects.py +++ b/navis/plotting/k3d/k3d_objects.py @@ -239,7 +239,7 @@ def skeleton2k3d(neuron, legendgroup, showlegend, label, color, **kwargs): logger.warning(f'Skipping single-node skeleton: {neuron.label}') return [] - coords = segments_to_coords(neuron, neuron.segments) + coords = segments_to_coords(neuron) linewidth = kwargs.get('linewidth', kwargs.get('lw', 1)) # We have to add (None, None, None) to the end of each segment to diff --git a/navis/plotting/plot_utils.py b/navis/plotting/plot_utils.py index ce49eb8f..3b302431 100644 --- a/navis/plotting/plot_utils.py +++ b/navis/plotting/plot_utils.py @@ -69,20 +69,19 @@ def tn_pairs_to_coords(x: core.TreeNeuron, return coords.reshape((coords.shape[0], 2, 3)) -def segments_to_coords(x: core.TreeNeuron, - segments: List[List[int]], - modifier: Optional[Tuple[float, - float, - float]] = (1, 1, 1), - node_colors: Optional[np.ndarray] = None, - ) -> List[np.ndarray]: +def segments_to_coords( + x: core.TreeNeuron, + modifier: Optional[Tuple[float, float, float]] = (1, 1, 1), + node_colors: Optional[np.ndarray] = None, +) -> List[np.ndarray]: """Turn lists of node IDs into coordinates. + Will use navis-fastcore if available. 
+ Parameters ---------- x : TreeNeuron Must contain the nodes - segments : list of lists node IDs node_colors : numpy.ndarray, optional A color for each node in ``x.nodes``. If provided, will also return a list of colors sorted to match coordinates. @@ -98,43 +97,60 @@ def segments_to_coords(x: core.TreeNeuron, to match ``coords``. """ - if not isinstance(modifier, np.ndarray): - modifier = np.array(modifier) - - # Using a dictionary here is orders of manitude faster than .loc[]! - locs: Dict[int, Tuple[float, float, float]] - # Oddly, this is also the fastest way to generate the dictionary - nodes = x.nodes - locs = {i: (x, y, z) for i, x, y, z in zip(nodes.node_id.values, - nodes.x.values, - nodes.y.values, - nodes.z.values)} # type: ignore - # locs = {r.node_id: (r.x, r.y, r.z) for r in x.nodes.itertuples()} # type: ignore - coords = [[locs[tn] for tn in s] for s in segments] - - if any(modifier != 1): - coords = [(np.array(c) * modifier).tolist() for c in coords] - - if not isinstance(node_colors, type(None)): - ilocs = dict(zip(x.nodes.node_id.values, - np.arange(x.nodes.shape[0]))) - colors = [node_colors[[ilocs[tn] for tn in s]] for s in segments] - + colors = None + + if utils.fastcore is not None: + if node_colors is None: + coords = utils.fastcore.segment_coords( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + ) + else: + coords, colors = utils.fastcore.segment_coords( + x.nodes.node_id.values, + x.nodes.parent_id.values, + x.nodes[["x", "y", "z"]].values, + node_colors=node_colors, + ) + else: + # Using a dictionary here is orders of manitude faster than .loc[]! + locs: Dict[int, Tuple[float, float, float]] + # Oddly, this is also the fastest way to generate the dictionary + nodes = x.nodes + locs = { + i: (x, y, z) + for i, x, y, z in zip( + nodes.node_id.values, nodes.x.values, nodes.y.values, nodes.z.values + ) + } # type: ignore + # locs = {r.node_id: (r.x, r.y, r.z) for r in x.nodes.itertuples()} # type: ignore + coords = [np.array([locs[tn] for tn in s]) for s in x.segments] + + if not isinstance(node_colors, type(None)): + ilocs = dict(zip(x.nodes.node_id.values, np.arange(x.nodes.shape[0]))) + colors = [node_colors[[ilocs[tn] for tn in s]] for s in x.segments] + + modifier = np.asarray(modifier) + if (modifier != 1).any(): + for seg in coords: + np.multiply(seg, modifier, out=seg) + + if colors is not None: return coords, colors return coords -def fibonacci_sphere(samples: int = 1, - randomize: bool = True) -> list: +def fibonacci_sphere(samples: int = 1, randomize: bool = True) -> list: """Generate points on a sphere.""" - rnd = 1. + rnd = 1.0 if randomize: rnd = random.random() * samples points = [] - offset = 2. / samples - increment = math.pi * (3. 
- math.sqrt(5.)) + offset = 2.0 / samples + increment = math.pi * (3.0 - math.sqrt(5.0)) for i in range(samples): y = ((i * offset) - 1) + (offset / 2) diff --git a/navis/plotting/plotly/graph_objs.py b/navis/plotting/plotly/graph_objs.py index c927d2d5..8e7536cc 100644 --- a/navis/plotting/plotly/graph_objs.py +++ b/navis/plotting/plotly/graph_objs.py @@ -357,7 +357,7 @@ def skeleton2plotly(neuron, legendgroup, showlegend, label, color, **kwargs): logger.warning(f'Skipping single-node TreeNeuron: {neuron.label}') return [] - coords = segments_to_coords(neuron, neuron.segments) + coords = segments_to_coords(neuron) linewidth = kwargs.get('linewidth', kwargs.get('lw', 2)) # We have to add (None, None, None) to the end of each segment to diff --git a/navis/plotting/vispy/visuals.py b/navis/plotting/vispy/visuals.py index d52dca28..e34a6f51 100644 --- a/navis/plotting/vispy/visuals.py +++ b/navis/plotting/vispy/visuals.py @@ -23,9 +23,8 @@ import matplotlib.colors as mcl -from ... import core, config, utils, morpho, conversion +from ... import core, config, utils, conversion from ..colors import * -from ..plot_utils import segments_to_coords, make_tube with warnings.catch_warnings(): warnings.simplefilter("ignore") diff --git a/navis/utils/__init__.py b/navis/utils/__init__.py index 542d9c98..cd5fbcf5 100644 --- a/navis/utils/__init__.py +++ b/navis/utils/__init__.py @@ -24,5 +24,12 @@ from .decorators import (meshneuron_skeleton, map_neuronlist_df, map_neuronlist, lock_neuron) +try: + import navis_fastcore as fastcore +except ModuleNotFoundError: + fastcore = None +except ImportError: + raise + __all__ = ['set_loggers', 'set_pbars', 'set_default_connector_colors', 'patch_cloudvolume'] From fc73768ec4ec5f5b53a532caba47fa011d7d7242 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 7 Jul 2024 10:50:24 -0700 Subject: [PATCH 02/34] add missing import --- navis/plotting/plot_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/navis/plotting/plot_utils.py b/navis/plotting/plot_utils.py index 3b302431..11b28294 100644 --- a/navis/plotting/plot_utils.py +++ b/navis/plotting/plot_utils.py @@ -14,9 +14,9 @@ # You should have received a copy of the GNU General Public License # along -""" Module contains functions to plot neurons in 2D and 3D. -""" -from .. import config, core +"""Module contains functions to plot neurons in 2D and 3D.""" + +from .. 
import config, core, utils import math import random From 5f2242cf2612a09aa275a98d086eff3a0f7b29f7 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 7 Jul 2024 10:51:03 -0700 Subject: [PATCH 03/34] add fastcore tests --- tests/test_fastcore.py | 116 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 tests/test_fastcore.py diff --git a/tests/test_fastcore.py b/tests/test_fastcore.py new file mode 100644 index 00000000..7bb126c2 --- /dev/null +++ b/tests/test_fastcore.py @@ -0,0 +1,116 @@ +import navis +import pytest + +import numpy as np + +from pathlib import Path + + +@pytest.mark.parametrize("directed", [True, False]) +@pytest.mark.parametrize("weight", ["weight", None]) +@pytest.mark.parametrize("limit", [np.inf, 100]) +@pytest.mark.parametrize("from_", [None, np.arange(1, 100)]) +def test_geodesic_matrix(directed, weight, limit, from_): + n = navis.example_neurons(1, kind="skeleton") + + # Make sure that the fastcore package is installed (otherwise this test is useless) + if navis.utils.fastcore is None: + return + + # Save fastcore + fastcore = navis.utils.fastcore + + # Compute the geodesic matrix with fastcore + m_with = navis.geodesic_matrix( + n, directed=directed, weight=weight, limit=limit, from_=from_ + ) + + # Compute without + try: + navis.utils.fastcore = None + m_without = navis.geodesic_matrix( + n, directed=directed, weight=weight, limit=limit, from_=from_ + ) + except: + raise + finally: + navis.utils.fastcore = fastcore + + assert np.allclose(m_with, m_without) + + +@pytest.mark.parametrize("recursive", [True, False]) +def test_prune_twigs(recursive): + n = navis.example_neurons(1, kind="skeleton") + + # Make sure that the fastcore package is installed (otherwise this test is useless) + if navis.utils.fastcore is None: + return + + # Save fastcore + fastcore = navis.utils.fastcore + + # Prune with fastcore + n_with = navis.prune_twigs(n, size=5000 / 8, recursive=recursive) + + # Prune without fastcore + try: + navis.utils.fastcore = None + n_without = navis.prune_twigs(n, size=5000 / 8, recursive=recursive) + except: + raise + finally: + navis.utils.fastcore = fastcore + + assert n_with.n_nodes == n_without.n_nodes + + +@pytest.mark.parametrize("mode", ["sum", "centrifugal", "centripetal"]) +def test_synapse_flow_centrality(mode): + n = navis.example_neurons(1, kind="skeleton") + + # Make sure that the fastcore package is installed (otherwise this test is useless) + if navis.utils.fastcore is None: + return + + # Save fastcore + fastcore = navis.utils.fastcore + + # Compute flow with fastcore + sfc_with = navis.synapse_flow_centrality(n, mode=mode).nodes.synapse_flow_centrality.values + + # Compute flow without fastcore + try: + navis.utils.fastcore = None + sfc_without = navis.synapse_flow_centrality(n, mode=mode).nodes.synapse_flow_centrality.values + except: + raise + finally: + navis.utils.fastcore = fastcore + + assert np.allclose(sfc_with, sfc_without) + + +def test_parent_dist(): + n = navis.example_neurons(1, kind="skeleton") + + # Make sure that the fastcore package is installed (otherwise this test is useless) + if navis.utils.fastcore is None: + return + + # Save fastcore + fastcore = navis.utils.fastcore + + # Compute parent dist with fastcore + pd_with = navis.morpho.mmetrics.parent_dist(n, root_dist=0) + + # Compute parent dist without fastcore + try: + navis.utils.fastcore = None + pd_without = navis.morpho.mmetrics.parent_dist(n, root_dist=0) + except: + raise + finally: + navis.utils.fastcore = 
fastcore + + assert np.allclose(pd_with, pd_without) \ No newline at end of file From 14c034c6b7161bcd1b82d7001270ecfce703c458 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 7 Jul 2024 10:59:34 -0700 Subject: [PATCH 04/34] add navis-fastcore as extra requirement --- .github/workflows/test-package.yml | 2 +- requirements.txt | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-package.yml b/.github/workflows/test-package.yml index e6c2d99a..560012ce 100644 --- a/.github/workflows/test-package.yml +++ b/.github/workflows/test-package.yml @@ -39,7 +39,7 @@ jobs: pip install k3d pip install pyarrow - name: Install navis - run: pip install -e .[dev,vispy-pyqt5,pathos,cloudvolume] + run: pip install -e .[dev,vispy-pyqt5,pathos,cloudvolume,fastcore] - run: pip install python-igraph if: ${{ matrix.igraph == 'igraph' }} - name: Report dependency versions diff --git a/requirements.txt b/requirements.txt index 40774d48..f636c37a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,6 +37,8 @@ flybrains #extra: flybrains cloud-volume>=5.2.0 #extra: cloudvolume +navis-fastcore #extra: fastcore + # this should inherit the vispy version constraint from above vispy[pyside6]>=0.6.4 #extra: vispy-default From 64c8464d92a57b82afe146dddc9231126631bf94 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Mon, 8 Jul 2024 14:03:51 -0700 Subject: [PATCH 05/34] plot2d: avoid issue with numpy 1.26.4 and matplotlib 3.9.1 --- navis/plotting/dd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/navis/plotting/dd.py b/navis/plotting/dd.py index 44760a16..b505b5df 100644 --- a/navis/plotting/dd.py +++ b/navis/plotting/dd.py @@ -619,7 +619,7 @@ def _add_scalebar(scalebar, neurons, method, ax): lc = Line3DCollection(sbar, color='black', lw=1) lc.set_gid(f'{scalebar}_scalebar') - ax.add_collection3d(lc) + ax.add_collection3d(lc, autolim=False) def _plot_scatter(points, method, ax, kwargs, **scatter_kws): @@ -943,7 +943,7 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): lc.set_gid(neuron.id) # Need to get this before adding data line3D_collection = lc - ax.add_collection3d(lc) + ax.add_collection3d(lc, autolim=False) # For complex scenes, add each segment as a single collection # -> helps reducing Z-order errors @@ -960,7 +960,7 @@ def _plot_skeleton(neuron, color, method, ax, **kwargs): linestyle=linestyle) if group_neurons: lc.set_gid(neuron.id) - ax.add_collection3d(lc) + ax.add_collection3d(lc, autolim=False) line3D_collection = None surf3D_collections = [] From bd7f6578d598f32ec595b0d88f717652c67a35f5 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Mon, 8 Jul 2024 14:05:22 -0700 Subject: [PATCH 06/34] use fastcore in navis.strahler_index(); important note: - this also changes how the "greedy" method works --- navis/morpho/mmetrics.py | 79 +++++++++++++++++++++++++++------------- tests/test_fastcore.py | 42 +++++++++++++++++++-- 2 files changed, 93 insertions(+), 28 deletions(-) diff --git a/navis/morpho/mmetrics.py b/navis/morpho/mmetrics.py index 10331686..3164adb8 100644 --- a/navis/morpho/mmetrics.py +++ b/navis/morpho/mmetrics.py @@ -12,8 +12,7 @@ # GNU General Public License for more details. -""" This module contains functions to analyse and manipulate neuron morphology. 
-""" +"""This module contains functions to analyse and manipulate neuron morphology.""" import math import itertools @@ -160,30 +159,49 @@ def strahler_index( 5 """ - utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, )) + utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,)) + + if method not in ["standard", "greedy"]: + raise ValueError(f'`method` must be "standard" or "greedy", got "{method}"') + + if utils.fastcore: + x.nodes['strahler_index'] = utils.fastcore.strahler_index( + x.nodes.node_id.values, + x.nodes.parent_id.values, + method=method, + to_ignore=to_ignore, + min_twig_size=min_twig_size, + ).astype(np.int16) + x.nodes['strahler_index'] = x.nodes.strahler_index.fillna(1) + return x # Find branch, root and end nodes - if 'type' not in x.nodes: + if "type" not in x.nodes: graph.classify_nodes(x) - end_nodes = x.nodes[x.nodes.type == 'end'].node_id.values - branch_nodes = x.nodes[x.nodes.type == 'branch'].node_id.values - root = x.nodes[x.nodes.type == 'root'].node_id.values + end_nodes = x.nodes[x.nodes.type == "end"].node_id.values + branch_nodes = x.nodes[x.nodes.type == "branch"].node_id.values + root = x.nodes[x.nodes.type == "root"].node_id.values end_nodes = set(end_nodes) branch_nodes = set(branch_nodes) root = set(root) if min_twig_size: - to_ignore = np.append(to_ignore, - [seg[0] for seg in x.small_segments if seg[0] - in end_nodes and len(seg) < min_twig_size]) + to_ignore = np.append( + to_ignore, + [ + seg[0] + for seg in x.small_segments + if seg[0] in end_nodes and len(seg) < min_twig_size + ], + ).astype(int) # Generate dicts for childs and parents list_of_childs = graph.generate_list_of_childs(x) - # Get a node ID -> parent ID dictionary for FAST lookups - parents = x.nodes.set_index('node_id').parent_id.to_dict() + # Get a node ID -> parent ID dictionary for fast lookups + parents = x.nodes.set_index("node_id").parent_id.to_dict() # Do NOT name any parameter `strahler_index` - this overwrites the function! SI: Dict[int, int] = {} @@ -191,23 +209,30 @@ def strahler_index( starting_points = end_nodes seen = set() while starting_points: - logger.debug(f'New starting point. Remaining: {len(starting_points)}') + logger.debug(f"New starting point. 
Remaining: {len(starting_points)}") this_node = starting_points.pop() + print(this_node) + # Get upstream indices for this branch - previous_indices = [SI.get(c, None) for c in list_of_childs[this_node]] + previous_indices = [SI[c] for c in list_of_childs[this_node]] + + print(" ", previous_indices) # If this is a not-a-branch branch if this_node in to_ignore: - this_branch_index = None + this_branch_index = 0 # If this is an end node: start at 1 - elif len(previous_indices) == 0: + elif not len(previous_indices): this_branch_index = 1 # If this is a slab: assign SI of predecessor elif len(previous_indices) == 1: this_branch_index = previous_indices[0] + # If this is a branch point and we're using the greedy method + elif method == "greedy": + this_branch_index = sum(previous_indices) # If this is a branch point at which similar indices collide: +1 - elif method == 'greedy' or previous_indices.count(max(previous_indices)) >= 2: + elif previous_indices.count(max(previous_indices)) >= 2: this_branch_index = max(previous_indices) + 1 # If just a branch point: continue max SI else: @@ -226,6 +251,9 @@ def strahler_index( segment.append(this_node) seen.add(this_node) + print(" ", this_branch_index) + print(" ", segment) + # Update indices for the entire segment SI.update({n: this_branch_index for n in segment}) @@ -243,9 +271,11 @@ def strahler_index( starting_points.add(parent_node) # Fix branches that were ignored - if to_ignore: + if len(to_ignore): # Go over all terminal branches with the tag - for tn in x.nodes[(x.nodes.type == 'end') & x.nodes.node_id.isin(to_ignore)].node_id.values: + for tn in x.nodes[ + (x.nodes.type == "end") & x.nodes.node_id.isin(to_ignore) + ].node_id.values: # Get this terminal's segment this_seg = [s for s in x.small_segments if s[0] == tn][0] # Get strahler index of parent branch @@ -254,18 +284,17 @@ def strahler_index( # Disconnected single nodes (e.g. after pruning) will end up w/o an entry # --> we will give them an SI of 1 - x.nodes['strahler_index'] = x.nodes.node_id.map(lambda x: SI.get(x, 1)) + x.nodes["strahler_index"] = x.nodes.node_id.map(lambda x: SI.get(x, 1)) # Set correct data type - x.nodes['strahler_index'] = x.nodes.strahler_index.astype(np.int16) + x.nodes["strahler_index"] = x.nodes.strahler_index.astype(np.int16) return x -@utils.map_neuronlist_df(desc='Analyzing', allow_parallel=True, reset_index=True) -@utils.meshneuron_skeleton(method='pass_through', - reroot_soma=True) -def segment_analysis(x: 'core.NeuronObject') -> 'core.NeuronObject': +@utils.map_neuronlist_df(desc="Analyzing", allow_parallel=True, reset_index=True) +@utils.meshneuron_skeleton(method="pass_through", reroot_soma=True) +def segment_analysis(x: "core.NeuronObject") -> "core.NeuronObject": """Calculate morphometric properties a neuron's segments. 
This currently includes Strahler index, length, distance to root and diff --git a/tests/test_fastcore.py b/tests/test_fastcore.py index 7bb126c2..7509f692 100644 --- a/tests/test_fastcore.py +++ b/tests/test_fastcore.py @@ -77,12 +77,16 @@ def test_synapse_flow_centrality(mode): fastcore = navis.utils.fastcore # Compute flow with fastcore - sfc_with = navis.synapse_flow_centrality(n, mode=mode).nodes.synapse_flow_centrality.values + sfc_with = navis.synapse_flow_centrality( + n, mode=mode + ).nodes.synapse_flow_centrality.values # Compute flow without fastcore try: navis.utils.fastcore = None - sfc_without = navis.synapse_flow_centrality(n, mode=mode).nodes.synapse_flow_centrality.values + sfc_without = navis.synapse_flow_centrality( + n, mode=mode + ).nodes.synapse_flow_centrality.values except: raise finally: @@ -113,4 +117,36 @@ def test_parent_dist(): finally: navis.utils.fastcore = fastcore - assert np.allclose(pd_with, pd_without) \ No newline at end of file + assert np.allclose(pd_with, pd_without) + + +@pytest.mark.parametrize("min_twig_size", [None, 2]) +@pytest.mark.parametrize("to_ignore", [[], np.array([465, 548], dtype=int)]) +@pytest.mark.parametrize("method", ["standard", "greedy"]) +def test_strahler_index(method, to_ignore, min_twig_size): + n = navis.example_neurons(1, kind="skeleton") + + # Make sure that the fastcore package is installed (otherwise this test is useless) + if navis.utils.fastcore is None: + return + + # Save fastcore + fastcore = navis.utils.fastcore + + # Compute strahler index with fastcore + si_with = navis.strahler_index( + n, to_ignore=to_ignore, method=method, min_twig_size=min_twig_size + ).nodes.strahler_index.values + + # Compute strahler index without fastcore + try: + navis.utils.fastcore = None + si_without = navis.strahler_index( + n, to_ignore=to_ignore, method=method, min_twig_size=min_twig_size + ).nodes.strahler_index.values + except: + raise + finally: + navis.utils.fastcore = fastcore + + assert np.allclose(si_with, si_without) From 4c79f31c3f79ba1ee40b55a4dd674889bd3fb970 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Mon, 8 Jul 2024 14:13:20 -0700 Subject: [PATCH 07/34] docs: add fastcore descriptions --- README.md | 1 + docs/source/install.rst | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index fc2898d6..26f71c96 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ NAVis is on [ReadTheDocs](http://navis.readthedocs.io/ "NAVis ReadTheDocs"). * **render**: use Blender 3D for high quality [visualizations](https://youtu.be/wl3sFG7WQJc) * **R** neuron libraries: interfaces with [nat](https://github.com/jefferis/nat), [rcatmaid](https://github.com/jefferis/rcatmaid), [elmr](https://github.com/jefferis/elmr) and more * **import-export**: read/write SWCs, neuroglancer's ["*precomputed*"](https://github.com/google/neuroglancer/tree/master/src/datasource/precomputed) format, NMX/NML, NRRD, mesh-files and more +* **fast**: uses functions compiled in Rust under-the-hood (see [fastcore](https://github.com/schlegelp/fastcore-rs)) * **scalable**: out-of-the-box support for multiprocessing * **extensible**: build your own package on top of navis - see [pymaid](https://pymaid.readthedocs.io/en/latest/) for example diff --git a/docs/source/install.rst b/docs/source/install.rst index 08141bc1..309dcf34 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -3,7 +3,7 @@ Install ======= -NAVis currently requires Python 3.8 or later. 
Below instructions assume that +NAVis currently requires Python 3.9 or later. Below instructions assume that you have already installed Python and its package manager ``pip``. .. topic:: By the way @@ -67,6 +67,19 @@ These extras can be installed directly, or along with navis with The user-facing extras, the dependencies they install, and how to install those dependencies directly, are below. +.. _fastcore: + +``fastcore``: `navis-fastcore `_ + ``navis-fastcore`` re-implements a bunch of low-level functions in Rust + and wraps them in Python. ``navis`` will use ``fastcore`` under-the-hood + if it is available. This is a highly recommended extra as it will + speed up certain operations such as geodesic distances, Strahler Index + or pruning by several orders of magnitude. + + :: + + pip3 install navis-fastcore + .. _pykd: From 006dd59ff727c52072fcbc8f917aac23a599ad5f Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Mon, 8 Jul 2024 15:35:44 -0700 Subject: [PATCH 08/34] strahler_index: remove superfluous print statements --- navis/morpho/mmetrics.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/navis/morpho/mmetrics.py b/navis/morpho/mmetrics.py index 3164adb8..6475e806 100644 --- a/navis/morpho/mmetrics.py +++ b/navis/morpho/mmetrics.py @@ -212,13 +212,9 @@ def strahler_index( logger.debug(f"New starting point. Remaining: {len(starting_points)}") this_node = starting_points.pop() - print(this_node) - # Get upstream indices for this branch previous_indices = [SI[c] for c in list_of_childs[this_node]] - print(" ", previous_indices) - # If this is a not-a-branch branch if this_node in to_ignore: this_branch_index = 0 @@ -251,9 +247,6 @@ def strahler_index( segment.append(this_node) seen.add(this_node) - print(" ", this_branch_index) - print(" ", segment) - # Update indices for the entire segment SI.update({n: this_branch_index for n in segment}) From bc1dcb9c29d698a81c54f98e1987bd3475fc0092 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Thu, 25 Jul 2024 15:22:59 +0100 Subject: [PATCH 09/34] docs: fix typo in `mirror_brain` docstring --- navis/transforms/templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/navis/transforms/templates.py b/navis/transforms/templates.py index 99c120d7..d0310cde 100644 --- a/navis/transforms/templates.py +++ b/navis/transforms/templates.py @@ -1200,7 +1200,7 @@ def mirror_brain(x: Union['core.NeuronObject', 'pd.DataFrame', 'np.ndarray'], mirror_axis : 'x' | 'y' | 'z', optional Axis to mirror. Defaults to `x`. warp : bool | "auto" | Transform, optional - If 'auto', will check if a non-rigi mirror transformation + If 'auto', will check if a non-rigid mirror transformation exists for the given ``template`` and apply it after the flipping. Alternatively, you can also pass a Transform or TransformSequence directly. 
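
A quick way to verify that navis actually picked up the optional dependency
after `pip3 install navis-fastcore` (a sketch: per the try/except added to
`navis/utils/__init__.py` in the first patch, `navis.utils.fastcore` is the
imported `navis_fastcore` module, or `None` if it is not installed):

    import navis

    # fastcore is either the imported navis_fastcore module or None
    if navis.utils.fastcore is not None:
        print("navis-fastcore is available and will be used")
    else:
        print("navis-fastcore not installed - falling back to igraph/networkx")
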
From d799006e1c8fde5ed7e23873225cb0ffaaed932d Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Thu, 25 Jul 2024 15:23:21 +0100 Subject: [PATCH 10/34] requirements: pin navis-fastcore==0.0.3 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f636c37a..d4f4a89f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,7 +37,7 @@ flybrains #extra: flybrains cloud-volume>=5.2.0 #extra: cloudvolume -navis-fastcore #extra: fastcore +navis-fastcore==0.0.3 #extra: fastcore # this should inherit the vispy version constraint from above vispy[pyside6]>=0.6.4 #extra: vispy-default From a9794ccb7ef1d5ca9cb44a1f5412573e466e19a7 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Thu, 25 Jul 2024 15:25:14 +0100 Subject: [PATCH 11/34] remove a few unnecessary imports --- navis/core/dotprop.py | 1 - navis/morpho/persistence.py | 4 +--- navis/utils/misc.py | 5 +---- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/navis/core/dotprop.py b/navis/core/dotprop.py index a182e5f2..98bad3be 100644 --- a/navis/core/dotprop.py +++ b/navis/core/dotprop.py @@ -15,7 +15,6 @@ import numbers import pint import types -import uuid import warnings import numpy as np diff --git a/navis/morpho/persistence.py b/navis/morpho/persistence.py index 9000d19d..0b9a4d3b 100644 --- a/navis/morpho/persistence.py +++ b/navis/morpho/persistence.py @@ -13,8 +13,6 @@ """Module to generate and analyze persistence diagrams.""" -import os - import numpy as np import pandas as pd @@ -23,7 +21,7 @@ from scipy.spatial.distance import pdist, cdist, squareform from scipy.stats import gaussian_kde -from typing import Union, Optional, Sequence, List, Dict, overload +from typing import Union, Optional from typing_extensions import Literal from .. import utils, config, core, graph diff --git a/navis/utils/misc.py b/navis/utils/misc.py index b293a3ba..3e75aabb 100644 --- a/navis/utils/misc.py +++ b/navis/utils/misc.py @@ -11,9 +11,7 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -import inspect import math -import os import requests import sys import urllib @@ -22,13 +20,12 @@ import pandas as pd from typing import Optional, Union, List, Iterable, Dict, Tuple, Any -from typing_extensions import Literal from .. 
import config, core from .eval import is_mesh -from .iterables import is_iterable, make_iterable from ..transforms.templates import TemplateBrain + # Set up logging logger = config.get_logger(__name__) From 3a18bc685e67d800080b4b904795f69cfc3362f0 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Thu, 25 Jul 2024 17:33:23 +0100 Subject: [PATCH 12/34] requirements: bump minimum navis-fastcore version to 0.0.4 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d4f4a89f..acc2ebba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,7 +37,7 @@ flybrains #extra: flybrains cloud-volume>=5.2.0 #extra: cloudvolume -navis-fastcore==0.0.3 #extra: fastcore +navis-fastcore>=0.0.4 #extra: fastcore # this should inherit the vispy version constraint from above vispy[pyside6]>=0.6.4 #extra: vispy-default From 5c9de875ed17090c9712edf3194dc430bc55e1eb Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 28 Jul 2024 09:46:41 +0100 Subject: [PATCH 13/34] doctests: exclude '/scripts' path --- conftest.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/conftest.py b/conftest.py index 81c49295..6780f2b1 100644 --- a/conftest.py +++ b/conftest.py @@ -4,15 +4,15 @@ def pytest_ignore_collect(path, config): more specific hooks. """ path = str(path) - if 'interfaces' in path: - return True - if '/docs' in path: - return True - if '/stubs' in path: - return True - if '/examples' in path: - return True - if '/dist/' in path: - return True - if '/binder' in path: - return True + for pattern in ( + "interfaces", + "/docs", + "/stubs", + "/examples", + "/dist/", + "/binder", + "/site", + "/scripts", + ): + if pattern in path: + return True From 579f72a3f2ad1449878b4273434a949597485583 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 28 Jul 2024 09:47:03 +0100 Subject: [PATCH 14/34] to_adjacency: avoid setting on copy warning --- navis/connectivity/adjacency.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/navis/connectivity/adjacency.py b/navis/connectivity/adjacency.py index f22edc92..d7029395 100644 --- a/navis/connectivity/adjacency.py +++ b/navis/connectivity/adjacency.py @@ -152,7 +152,7 @@ def to_adjacency(self, include_other=True) -> pd.DataFrame: data = np.zeros((len(index), len(index)), np.uint64) df = pd.DataFrame(data, index, index) for _, src, tgt, _, _ in self.edges(include_other): - df[tgt][src] += 1 + df.loc[src, tgt] += 1 return df From 36cef59e3a1eba4cca1384308d27f9a75a0cb46a Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Sun, 28 Jul 2024 09:49:12 +0100 Subject: [PATCH 15/34] pandas: avoid future issues with mismatching dtypes when updating values --- navis/core/skeleton.py | 8 ++++---- navis/morpho/manipulation.py | 19 +++++++++++++------ 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/navis/core/skeleton.py b/navis/core/skeleton.py index da411fb1..1099a5a4 100644 --- a/navis/core/skeleton.py +++ b/navis/core/skeleton.py @@ -217,13 +217,13 @@ def __truediv__(self, other, copy=True): # If a number, consider this an offset for coordinates n = self.copy() if copy else self - n.nodes.loc[:, ['x', 'y', 'z', 'radius']] /= other + n.nodes[['x', 'y', 'z', 'radius']] /= other # At this point we can ditch any 4th unit if utils.is_iterable(other): other = other[:3] if n.has_connectors: - n.connectors.loc[:, ['x', 'y', 'z']] /= other + n.connectors[['x', 'y', 'z']] /= other if hasattr(n, 'soma_radius'): if 
From 36cef59e3a1eba4cca1384308d27f9a75a0cb46a Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:49:12 +0100
Subject: [PATCH 15/34] pandas: avoid future issues with mismatching dtypes when updating values

---
 navis/core/skeleton.py       |  8 ++++----
 navis/morpho/manipulation.py | 19 +++++++++++++------
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/navis/core/skeleton.py b/navis/core/skeleton.py
index da411fb1..1099a5a4 100644
--- a/navis/core/skeleton.py
+++ b/navis/core/skeleton.py
@@ -217,13 +217,13 @@ def __truediv__(self, other, copy=True):
             # If a number, consider this an offset for coordinates
             n = self.copy() if copy else self
-            n.nodes.loc[:, ['x', 'y', 'z', 'radius']] /= other
+            n.nodes[['x', 'y', 'z', 'radius']] /= other

             # At this point we can ditch any 4th unit
             if utils.is_iterable(other):
                 other = other[:3]

             if n.has_connectors:
-                n.connectors.loc[:, ['x', 'y', 'z']] /= other
+                n.connectors[['x', 'y', 'z']] /= other

             if hasattr(n, 'soma_radius'):
                 if isinstance(n.soma_radius, numbers.Number):
@@ -254,13 +254,13 @@ def __mul__(self, other, copy=True):
             # If a number, consider this an offset for coordinates
             n = self.copy() if copy else self
-            n.nodes.loc[:, ['x', 'y', 'z', 'radius']] *= other
+            n.nodes[['x', 'y', 'z', 'radius']] *= other

             # At this point we can ditch any 4th unit
             if utils.is_iterable(other):
                 other = other[:3]

             if n.has_connectors:
-                n.connectors.loc[:, ['x', 'y', 'z']] *= other
+                n.connectors[['x', 'y', 'z']] *= other

             if hasattr(n, 'soma_radius'):
                 if isinstance(n.soma_radius, numbers.Number):
diff --git a/navis/morpho/manipulation.py b/navis/morpho/manipulation.py
index 650b6e50..4a2266ff 100644
--- a/navis/morpho/manipulation.py
+++ b/navis/morpho/manipulation.py
@@ -527,7 +527,9 @@ def _prune_twigs_precise(neuron: 'core.TreeNeuron',
     # will be deleted anyway
     if not all(to_remove):
         new_loc = loc1 - vec_norm * len_to_prune.reshape(-1, 1)
-        neuron.nodes.loc[is_new_leaf, ['x', 'y', 'z']] = new_loc
+        neuron.nodes.loc[is_new_leaf, ['x', 'y', 'z']] = new_loc.astype(
+            neuron.nodes.x.dtype, copy=False
+        )

     if any(to_remove):
         leafs_to_remove = new_leafs[to_remove]
@@ -1347,9 +1349,9 @@ def average_skeletons(x: 'core.NeuronList',
     mean_z[np.isnan(mean_z)] = base_nodes[np.isnan(mean_z), 2]

     # Change coordinates accordingly
-    bn.nodes.loc[:, 'x'] = mean_x
-    bn.nodes.loc[:, 'y'] = mean_y
-    bn.nodes.loc[:, 'z'] = mean_z
+    bn.nodes['x'] = mean_x
+    bn.nodes['y'] = mean_y
+    bn.nodes['z'] = mean_z

     return bn
@@ -1533,7 +1535,9 @@ def guess_radius(x: NeuronObject,
     nodes.loc[nodes.radius <= 0, 'radius'] = None

     # Assign radii to nodes
-    nodes.loc[cn_grouped.index, 'radius'] = cn_grouped.values
+    nodes.loc[cn_grouped.index, 'radius'] = cn_grouped.values.astype(
+        nodes.radius.dtype, copy=False
+    )

     # Go over each segment and interpolate radii
     for s in config.tqdm(x.segments, desc='Interp.',
                          disable=config.pbar_hide,
@@ -1639,7 +1643,10 @@ def smooth_skeleton(x: NeuronObject,
     interp = this_co.rolling(window, min_periods=1).mean()

-    nodes.loc[s, to_smooth] = interp.values
+    for i, c in enumerate(to_smooth):
+        nodes.loc[s, c] = interp.iloc[:, i].values.astype(
+            nodes[c].dtype, copy=False
+        )

     # Reassign nodes
     x.nodes = nodes.reset_index(drop=False, inplace=False)

From a3a7b5dfd6cfcc02474aa35d7a0a61868a21f683 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:49:43 +0100
Subject: [PATCH 16/34] resample_skeleton: fix issue when segments is an array, not a list (fastcore)

---
 navis/sampling/resampling.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/navis/sampling/resampling.py b/navis/sampling/resampling.py
index 84066f63..18ee6b55 100644
--- a/navis/sampling/resampling.py
+++ b/navis/sampling/resampling.py
@@ -196,8 +196,7 @@ def resample_skeleton(x: 'core.NeuronObject',
         new_coords = np.array([xnew, ynew, znew]).T

         # Generate new ids (start and end node IDs of this segment are kept)
-        new_ids = seg[:1] + [max_tn_id +
-                             i for i in range(len(new_coords) - 2)] + seg[-1:]
+        new_ids = np.concatenate((seg[:1], [max_tn_id + i for i in range(len(new_coords) - 2)], seg[-1:]))

         # Increase max index
         max_tn_id += len(new_ids)
@@ -291,8 +290,8 @@ def resample_skeleton(x: 'core.NeuronObject',
         node_map = dict(zip(nodes_to_remap, new_nodes.node_id.values[ix]))
         x.tags = {k: [node_map[n] for n in v] for k, v in x.tags.items()}

-    # Set nodes
-    x.nodes = new_nodes
+    # Set nodes (avoid setting on copy warning)
+    x.nodes = new_nodes.copy()

     # Clear and regenerate temporary attributes
     x._clear_temp_attr()
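For context on the resampling fix above: with `navis-fastcore` available, segments come back as numpy arrays rather than plain lists, and for arrays `+` means broadcast element-wise addition instead of concatenation. A small illustration with made-up node IDs:

    import numpy as np

    seg = np.array([10, 11, 12, 13])  # segment as returned with fastcore
    new = [100, 101]                  # freshly generated node IDs

    seg[:1] + new + seg[-1:]                  # broadcasts to array([123, 124]) - wrong!
    np.concatenate((seg[:1], new, seg[-1:]))  # array([ 10, 100, 101,  13])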
From 143d74b7f1caf520eb9b4cbe230bb48d90f0fca3 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:50:10 +0100
Subject: [PATCH 17/34] plot2d: replace deprecated mpl proj_points function

---
 navis/plotting/dd.py | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/navis/plotting/dd.py b/navis/plotting/dd.py
index b505b5df..07efa69b 100644
--- a/navis/plotting/dd.py
+++ b/navis/plotting/dd.py
@@ -492,7 +492,7 @@ def plot2d(x: Union[core.NeuronObject,
     def set_depth():
         """Set depth information for neurons according to camera position."""
         # Get projected coordinates
-        proj_co = mpl_toolkits.mplot3d.proj3d.proj_points(all_co, ax.get_proj())
+        proj_co = proj_points(all_co, ax.get_proj())

         # Get min and max of z coordinates
         z_min, z_max = min(proj_co[:, 2]), max(proj_co[:, 2])
@@ -515,8 +515,7 @@ def set_depth():
                 raise ValueError(f'Neither mesh nor skeleton found for neuron {neuron.id}')

             # Get projected coordinates
-            this_proj = mpl_toolkits.mplot3d.proj3d.proj_points(this_co,
-                                                                ax.get_proj())
+            this_proj = proj_points(this_co, ax.get_proj())

             # Normalise z coordinates
             ns = norm(this_proj[:, 2]).data
@@ -532,8 +531,7 @@ def set_depth():
             # Get depth of soma(s)
             soma = utils.make_iterable(neuron.soma)
             soma_co = neuron.nodes.set_index('node_id').loc[soma][['x', 'y', 'z']].values
-            soma_proj = mpl_toolkits.mplot3d.proj3d.proj_points(soma_co,
-                                                                ax.get_proj())
+            soma_proj = proj_points(soma_co, ax.get_proj())
             soma_cs = norm(soma_proj[:, 2]).data

             # Set soma color
@@ -1206,3 +1204,20 @@ def _orthogonal_proj(zfront, zback, focal_length=None):
                      [0, 1, 0, 0],
                      [0, 0, a, b],
                      [0, 0, -0.0001, zback]])
+
+
+def proj_points(points, M):
+    """Project points using a projection matrix.
+
+    This was previously done using the analogous function
+    mpl_toolkits.mplot3d.proj3d.proj_points but that is deprecated.
+    """
+    xs, ys, zs = zip(*points)
+    vec = np.array([xs, ys, zs, np.ones_like(xs)])
+
+    vecw = np.dot(M, vec)
+    w = vecw[3]
+    # clip here..
+    txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
+
+    return np.column_stack((txs, tys, tzs))
\ No newline at end of file
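A quick way to convince yourself that the new helper behaves like the removed matplotlib function: with the identity matrix as projection, w is 1 everywhere and the points must come back unchanged. (Illustrative check only; it assumes the module-level `proj_points` added above is importable.)

    import numpy as np
    from navis.plotting.dd import proj_points

    pts = np.array([[1., 2., 3.],
                    [4., 5., 6.]])
    assert np.allclose(proj_points(pts, np.eye(4)), pts)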
From 46e99776e59d2e0e26eb59290cc3f0d8c14f4a74 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:51:41 +0100
Subject: [PATCH 18/34] synblast: avoid issue with pykdtree's KDTree being unpicklable

---
 navis/nbl/synblast_funcs.py | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/navis/nbl/synblast_funcs.py b/navis/nbl/synblast_funcs.py
index cd41022d..1b33beaa 100644
--- a/navis/nbl/synblast_funcs.py
+++ b/navis/nbl/synblast_funcs.py
@@ -139,7 +139,7 @@ def _append_connectors(self, connectors: pd.DataFrame, id, self_hit=None) -> int
         if not self.by_type:
             data = connectors[['x', 'y', 'z']].values
             # Generate the KDTree
-            self.neurons[-1]['all'] = KDTree(data)
+            self.neurons[-1]['all'] = data  # KDTree(data)
         else:
             if 'type' not in connectors.columns:
                 raise ValueError('Connector tables must have a "type" column '
@@ -147,7 +147,7 @@ def _append_connectors(self, connectors: pd.DataFrame, id, self_hit=None) -> int
             for ty in connectors['type'].unique():
                 data = connectors.loc[connectors['type'] == ty, ['x', 'y', 'z']].values
                 # Generate the KDTree
-                self.neurons[-1][ty] = KDTree(data)
+                self.neurons[-1][ty] = data  # KDTree(data)

         # Calculate score for self hit if required
         if not self_hit:
@@ -156,6 +156,12 @@ def _append_connectors(self, connectors: pd.DataFrame, id, self_hit=None) -> int

         return next_idx

+    def _build_trees(self):
+        """Build KDTree for all neurons."""
+        for i, n in enumerate(self.neurons):
+            for ty, data in n.items():
+                n[ty] = KDTree(data)
+
     def calc_self_hit(self, cn):
         """Non-normalized value for self hit."""
         return cn.shape[0] * self.score_fn(0, 1)
@@ -183,7 +189,14 @@ def single_query_target(self, q_idx: int, t_idx: int, scores='forward'):
                 # score possible in the scoring function
                 dists = np.append(dists, [self.score_fn.max_dist] * qt.data.shape[0])
             else:
+                # Note: we're building the trees lazily, i.e. only once we actually need them.
+                # The main reason is that pykdtree is not picklable and hence
+                # we can't pass it to the worker processes.
+                if not isinstance(t_trees[ty], KDTree):
+                    t_trees[ty] = KDTree(t_trees[ty])
+
                 tt = t_trees[ty]
+
                 data = qt.data
                 # pykdtree tracks data as flat array
                 if data.ndim == 1:
                     data = data.reshape((qt.n, qt.ndim))
@@ -399,6 +412,8 @@ def get_connectors(n):
                                          t_idx=this.targets,
                                          scores=scores)] = this

+            return futures
+
     # Collect results
     if futures and len(futures) > 1:
         # Prepare empty score matrix
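The underlying problem, in isolation: pykdtree's `KDTree` is a compiled extension type without pickle support, so it must not end up in anything shipped to worker processes; the raw coordinate arrays pickle fine and the trees can be rebuilt lazily. A rough sketch (assuming `pykdtree` is installed):

    import pickle
    import numpy as np
    from pykdtree.kdtree import KDTree

    pts = np.random.rand(10, 3)
    pickle.dumps(pts)  # raw coordinates: no problem
    try:
        pickle.dumps(KDTree(pts))
    except Exception as e:
        print(e)  # pickling the tree itself fails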
From 20e365238079a6bb2cfe7b2603cd393042072226 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:52:35 +0100
Subject: [PATCH 19/34] MovingLeastSquaresTransform: fix doctests; important
 note: no clue why that passed previously - I did investigate but did not
 find a reason

---
 navis/transforms/moving_least_squares.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/navis/transforms/moving_least_squares.py b/navis/transforms/moving_least_squares.py
index 04cbbef9..315e869b 100644
--- a/navis/transforms/moving_least_squares.py
+++ b/navis/transforms/moving_least_squares.py
@@ -29,11 +29,9 @@ def __init__(
     ) -> None:
         """Moving Least Squares transforms of 3D spatial data.

-        Uses the
-        `molesq `_
-        library, which packages the
-        `implementation by Casey Schneider-Mizell `_
-        of the affine algorithm published in
+        Uses the `molesq `_ library, which packages the
+        `implementation`_
+        by Casey Schneider-Mizell of the affine algorithm published in
         `Schaefer et al. 2006 `_.

         Parameters
         ----------
@@ -55,8 +53,8 @@ def __init__(
         >>> tr = transforms.MovingLeastSquaresTransform(src, trg)
         >>> points = np.array([[0, 0, 0], [50, 50, 50]])
         >>> tr.xform(points)
-        array([[ 1.        , 15.        ,  5.        ],
-               [17.56361725, 43.32071504, 59.3147564 ]])
+        array([[  1.        ,  15.        ,   5.        ],
+               [ 81.56361725, 155.32071504, 187.3147564 ]])
         """
         assert direction in ('forward', 'inverse')

From df709b4cc3186ddb9841f8ec5ca6adde337aad50 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:52:56 +0100
Subject: [PATCH 20/34] replace deprecated alias: numpy.product -> numpy.prod

---
 tests/test_nbl/test_smat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_nbl/test_smat.py b/tests/test_nbl/test_smat.py
index 04377f83..f884ddad 100644
--- a/tests/test_nbl/test_smat.py
+++ b/tests/test_nbl/test_smat.py
@@ -38,7 +38,7 @@ def lookup_args(ndim):
     )
     """
     shape = tuple(range(SMALLEST_DIM_SIZE, SMALLEST_DIM_SIZE + ndim))
-    cells = np.arange(np.product(shape)).reshape(shape)
+    cells = np.arange(np.prod(shape)).reshape(shape)
     digitizers = [Digitizer(np.arange(s + 1, dtype=float)) for s in shape]
     return digitizers, cells

From c1612d14acc8aa36c83b74a99f29df2779b487e4 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 28 Jul 2024 09:53:31 +0100
Subject: [PATCH 21/34] pd.read_json: avoid deprecation warning by wrapping
 string in StringIO

---
 navis/io/json_io.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/navis/io/json_io.py b/navis/io/json_io.py
index f520dc8c..4b262654 100644
--- a/navis/io/json_io.py
+++ b/navis/io/json_io.py
@@ -11,6 +11,7 @@
 #    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 #    GNU General Public License for more details.

+import io
 import json

 import pandas as pd
@@ -140,13 +141,13 @@ def read_json(s: str, **kwargs) -> 'core.NeuronList':

         if '_nodes' in n:
             try:
-                cn._nodes = pd.read_json(n['_nodes'])
+                cn._nodes = pd.read_json(io.StringIO(n['_nodes']))
             except ValueError:
                 cn._nodes = None

         if '_connectors' in n:
             try:
-                cn._connectors = pd.read_json(n['_connectors'])
+                cn._connectors = pd.read_json(io.StringIO(n['_connectors']))
             except ValueError:
                 cn._connectors = None
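For context: recent pandas (2.1+) emits a FutureWarning when `read_json` is handed a literal JSON string and asks for a file-like object instead, hence the `StringIO` wrapper above. A minimal before/after:

    import io
    import pandas as pd

    s = '{"x": {"0": 1}, "y": {"0": 2}}'
    # pd.read_json(s)                  # FutureWarning on pandas >= 2.1
    df = pd.read_json(io.StringIO(s))  # forward-compatible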
From 01f42c796141be120c80bc3dab4b960eb79675a5 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Mon, 29 Jul 2024 11:24:23 +0100
Subject: [PATCH 22/34] update transforms README

---
 navis/transforms/README.md | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/navis/transforms/README.md b/navis/transforms/README.md
index ec51a5cb..eb040e8e 100644
--- a/navis/transforms/README.md
+++ b/navis/transforms/README.md
@@ -26,17 +26,23 @@ Transforms come in two flavours:
    flipped obviously).

 In addition to these flavours, transforms can be off different types. Off the
-bat, navis supports 4 types of transforms:
- 1. `navis.transforms.affine.AffineTransform` implements simple rigid affine
-    transforms (scale, shear, offset, etc.).
- 2. `navis.transforms.thinplate.TPStransform` implements thin-plate spine
-    transforms: given the same set of landmarks in two different template
-    spaces it will calculate and apply a thin-plate spine transform.
- 3. `navis.transforms.cmtk.CMTKtransform` provides an interface with
+bat, navis supports 6 types of transforms:
+ 1. `navis.transforms.affine.AffineTransform` is a simple rigid affine
+    transform (scale, shear, translation, etc.).
+ 2. `navis.transforms.thinplate.TPStransform` uses thin-plate spline
+    transforms (via the `morphops` library) to transform points based
+    on landmarks.
+ 3. `navis.transforms.moving_least_squares.MovingLeastSquaresTransform` uses
+    the Moving Least Squares algorithm (via the `molesq` library) to
+    transform points based on landmarks.
+ 4. `navis.transforms.cmtk.CMTKtransform` provides an interface with
    [CMTK](https://www.nitrc.org/projects/cmtk/) to use CMTK's `.list`
    transforms. CMTK needs to be installed separately.
- 4. `navis.transforms.h5reg.H5transform` provides an interface to use the
-    Saalfeld lab's h5 deformation field-based transforms.
+ 5. `navis.transforms.h5reg.H5transform` provides an interface to use the
+    Saalfeld lab's h5 deformation field-based transforms.
+ 6. `navis.transforms.elastix.ElastixTransform` provides an interface with
+    [Elastix](https://github.com/SuperElastix/elastix/).
+    Elastix needs to be installed separately.

 You can subclass ``navis.transforms.base.BaseTransform`` to implement other
 types of transforms for navis to use.

From 5b9ea640e264371add95c0374fc9d196932c28cd Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Mon, 29 Jul 2024 11:28:14 +0100
Subject: [PATCH 23/34] replace deprecated igraph shortest_paths() -> distances()

---
 navis/graph/graph_utils.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/navis/graph/graph_utils.py b/navis/graph/graph_utils.py
index 935aed75..82970fc2 100644
--- a/navis/graph/graph_utils.py
+++ b/navis/graph/graph_utils.py
@@ -655,7 +655,7 @@ def distal_to(x: 'core.TreeNeuron',
         tnB = [id2ix[n] for n in tnB]  # type: ignore

         # Get path lengths
-        le = x.igraph.shortest_paths(tnA, tnB, mode='OUT')
+        le = x.igraph.distances(tnA, tnB, mode="OUT")

         # Converting to numpy array first is ~2X as fast
         le = np.asarray(le)
@@ -962,9 +962,7 @@ def dist_between(x: 'core.NeuronObject',
             b = G.vs.find(node_id=b)

         # If not, we're assuming g is an iGraph object
-        return G.shortest_paths(a, b,
-                                weights='weight',
-                                mode='ALL')[0][0]
+        return G.distances(a, b, weights="weight", mode="ALL")[0][0]


@utils.map_neuronlist(desc='Searching', allow_parallel=True)
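The igraph change above is purely a rename: `Graph.shortest_paths()` was deprecated in favour of the identically-behaved `Graph.distances()`. A quick illustration (any small graph will do):

    import igraph

    g = igraph.Graph.Ring(5)
    # g.shortest_paths(0, 3, mode='all')  # deprecated spelling
    g.distances(0, 3, mode='all')         # [[2]] - same result, no warning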
From 5471610b791aae3b0590f5f6d46986f65412284f Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Mon, 29 Jul 2024 11:30:28 +0100
Subject: [PATCH 24/34] node_label_sorting: avoid issues if segment list is an
 array, not a list

---
 navis/graph/graph_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/navis/graph/graph_utils.py b/navis/graph/graph_utils.py
index 82970fc2..da2e5c68 100644
--- a/navis/graph/graph_utils.py
+++ b/navis/graph/graph_utils.py
@@ -1835,7 +1835,7 @@ def node_label_sorting(x: 'core.TreeNeuron',
             curr_points = new_points + curr_points

     # Translate into segments
-    node_list = [x.root[0]]
+    node_list = [x.root[0:]]
     # Note that we're inverting here so that the segments are ordered
     # proximal -> distal (i.e. root to tips)
     seg_dict = {s[0]: s[::-1] for s in _break_segments(x)}
@@ -1843,9 +1843,9 @@ def node_label_sorting(x: 'core.TreeNeuron',
     for n in nodes_walked:
         # Note that we're skipping the first (proximal) node to avoid double
         # counting nodes
-        node_list += seg_dict[n][1:]
+        node_list.append(seg_dict[n][1:])

-    return np.array(node_list)
+    return np.concatenate(node_list, dtype=int)


def _igraph_to_sparse(graph, weight_attr=None):

From 7bde4f7dd0f4b3de0c9badc72060eb894beddad1 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Tue, 30 Jul 2024 13:20:46 +0100
Subject: [PATCH 25/34] fix issue with parsing vertex colors for mesh neurons:

- eval_color: make sure numpy arrays are not converted to lists
- prepare_colormap: not sure why this was wrong but it was
- mesh2plotly: deal with rgb vs rgba colors

---
 navis/plotting/colors.py            | 25 ++++++++++++++++++++-----
 navis/plotting/plotly/graph_objs.py |  4 ++--
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/navis/plotting/colors.py b/navis/plotting/colors.py
index 8b8bdea4..775d9587 100644
--- a/navis/plotting/colors.py
+++ b/navis/plotting/colors.py
@@ -431,8 +431,8 @@ def prepare_colormap(colors,
     if not isinstance(volumes, np.ndarray):
         volumes = np.array(volumes)

-    # Only neurons REQUIRE a color
-    # Volumes are second class citiziens here :(
+    # Only neurons absolutely REQUIRE a color
+    # (Volumes are second class citizens here)
     colors_required = neurons.shape[0]

     if not colors_required and not len(volumes):
@@ -449,11 +449,20 @@ def prepare_colormap(colors,
             if len(color_by) != len(neurons):
                 raise ValueError('Must provide a label for all neurons: got '
                                  f'{len(color_by)} groups for {len(neurons)} neurons')
+
+        # Turn e.g. labels into colors
         cmap = {g: c for g, c in zip(np.unique(color_by),
                                      generate_colors(len(np.unique(color_by)),
                                                      palette=palette,
                                                      color_range=color_range))}
-        colors = [cmap[g] for g in color_by]
+
+        colors = []
+        for cb in color_by:
+            if utils.is_iterable(cb):
+                colors.append(np.array([cmap[g] for g in cb]))
+            else:
+                colors += [cmap[cb]]
+
         colors += [getattr(v, 'color', (1, 1, 1)) for v in volumes]

     # If no colors, generate random colors
@@ -553,7 +562,7 @@ def add_alpha(c, alpha):

 def eval_color(x, color_range=255, force_alpha=False):
     """Evaluate colors and return tuples."""
-    if color_range not in [1, 255]:
+    if color_range not in (1, 255):
         raise ValueError('"color_range" must be 1 or 255')

     if isinstance(x, str):
@@ -570,12 +579,18 @@ def eval_color(x, color_range=255, force_alpha=False):
             raise
     elif isinstance(x, dict):
         return {k: eval_color(v, color_range=color_range) for k, v in x.items()}
-    elif isinstance(x, (list, tuple, np.ndarray)):
+    elif isinstance(x, (list, tuple)):
         # If is this is not a list of RGB values:
         if any([not isinstance(elem, numbers.Number) for elem in x]):
             return [eval_color(c, color_range=color_range) for c in x]
         # If this is a single RGB color:
         c = x
+    elif isinstance(x, np.ndarray):
+        # If this is not a list of RGB values:
+        if any([not isinstance(elem, numbers.Number) for elem in x]):
+            return np.array([eval_color(c, color_range=color_range) for c in x])
+        # If this is a single RGB color:
+        c = x
     elif isinstance(x, type(None)):
         return None
     else:
diff --git a/navis/plotting/plotly/graph_objs.py b/navis/plotting/plotly/graph_objs.py
index 8e7536cc..70f1b3f4 100644
--- a/navis/plotting/plotly/graph_objs.py
+++ b/navis/plotting/plotly/graph_objs.py
@@ -220,9 +220,9 @@ def mesh2plotly(neuron, legendgroup, showlegend, label, color, **kwargs):
         if len(color) == len(neuron.vertices):
             # For
some reason single colors are 0-255 but face/vertex colors # have to be 0-1 - color_kwargs = dict(vertexcolor=color / [255, 255, 255, 1]) + color_kwargs = dict(vertexcolor=color / [255, 255, 255, 1][: color.shape[1]]) elif len(color) == len(neuron.faces): - color_kwargs = dict(facecolor=color / [255, 255, 255, 1]) + color_kwargs = dict(facecolor=color / [255, 255, 255, 1][: color.shape[1]]) else: color_kwargs = dict(color=color) else: From 26f4fa9ba53b2bd8c3f8e22184c657b55fa39643 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Tue, 30 Jul 2024 13:36:20 +0100 Subject: [PATCH 26/34] fix synblast: - remove accidental return - fix single query target --- navis/nbl/synblast_funcs.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/navis/nbl/synblast_funcs.py b/navis/nbl/synblast_funcs.py index 1b33beaa..7f943383 100644 --- a/navis/nbl/synblast_funcs.py +++ b/navis/nbl/synblast_funcs.py @@ -196,8 +196,11 @@ def single_query_target(self, q_idx: int, t_idx: int, scores='forward'): t_trees[ty] = KDTree(t_trees[ty]) tt = t_trees[ty] + if isinstance(qt, KDTree): + data = qt.data + else: + data = qt - data = qt.data # pykdtree tracks data as flat array if data.ndim == 1: data = data.reshape((qt.n, qt.ndim)) @@ -412,8 +415,6 @@ def get_connectors(n): t_idx=this.targets, scores=scores)] = this - return futures - # Collect results if futures and len(futures) > 1: # Prepare empty score matrix From acfbc17bf4dcebacabb55c9a8fd2d2278de1f766 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Tue, 30 Jul 2024 14:41:06 +0100 Subject: [PATCH 27/34] fix morph analysis notebook --- docs/source/tutorials/morph_analysis.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/tutorials/morph_analysis.ipynb b/docs/source/tutorials/morph_analysis.ipynb index ac778114..71eeba16 100644 --- a/docs/source/tutorials/morph_analysis.ipynb +++ b/docs/source/tutorials/morph_analysis.ipynb @@ -737,7 +737,7 @@ "import networkx as nx\n", "\n", "# First we need to find the path between the soma and the terminal node\n", - "path = nx.shortest_path(n.graph.to_undirected(), n.soma[0], end)\n", + "path = nx.shortest_path(n.graph.to_undirected(), n.soma, end)\n", "\n", "# Get coordinates for the path\n", "path_co = n.nodes.set_index('node_id').loc[path, ['x', 'y', 'z']].copy()\n", @@ -754,7 +754,7 @@ "\n", "# Add Euclidian path\n", "end_loc = n.nodes.set_index('node_id').loc[end, ['x', 'y', 'z']]\n", - "soma_loc = n.nodes.set_index('node_id').loc[n.soma[0], ['x', 'y', 'z']]\n", + "soma_loc = n.nodes.set_index('node_id').loc[n.soma, ['x', 'y', 'z']]\n", "ax.plot([soma_loc.x, end_loc.x], [-soma_loc.z, -end_loc.z], c='g', ls='--')\n", "\n", "d_eucl = np.sqrt(np.sum((end_loc - soma_loc)**2)) * n.units\n", From 52f2004f20a92d6fd9bfc12c418b38d7a0c57f1f Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Tue, 30 Jul 2024 15:08:02 +0100 Subject: [PATCH 28/34] subset_neuron (skeletons): drop soma if not part of neuron anymore --- navis/morpho/subset.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/navis/morpho/subset.py b/navis/morpho/subset.py index 0bd876dd..a003dba0 100644 --- a/navis/morpho/subset.py +++ b/navis/morpho/subset.py @@ -302,6 +302,11 @@ def _subset_treeneuron(x, subset, keep_disc_cn, prevent_fragments): vs = x.igraph.vs[indices] x._igraph = x.igraph.subgraph(vs) + if hasattr(x, '_soma') and x._soma is not None: + # Check if soma is still in the neuron + if x._soma not in x.nodes.node_id.values: + x._soma = None + # Reset indices of data tables 
x.nodes.reset_index(inplace=True, drop=True) From 173de406e5c7cea41cbaf9c7190b1b2352cc8079 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Tue, 30 Jul 2024 15:08:21 +0100 Subject: [PATCH 29/34] prepare_colormap: fix issue with numpy array of colors --- navis/plotting/colors.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/navis/plotting/colors.py b/navis/plotting/colors.py index 775d9587..3e8d08b3 100644 --- a/navis/plotting/colors.py +++ b/navis/plotting/colors.py @@ -509,7 +509,15 @@ def prepare_colormap(colors, volumes_cmap = [getattr(v, 'color', (.95, .95, .95, .1)) for v in volumes] # If list of colors elif isinstance(colors, (list, tuple, np.ndarray)): - # If color is a single color, convert to list + if isinstance(colors, np.ndarray): + # If this is an array of a single color convert to rgba tuple + if colors.ndim == 1 and colors.shape[0] in (3, 4): + colors = colors.tolist() + # If this is an array of multiple colors convert to list of rgba arrays + elif colors.ndim == 2: + colors = [c for c in colors] + + # If color is a single color, convert to list of colors, one for each neuron if all([isinstance(elem, numbers.Number) for elem in colors]): # Generate at least one color colors = [colors] * max(colors_required, 1) From 241658f479276c81dfa1ee7370f357017a6beede Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Fri, 2 Aug 2024 09:33:34 +0100 Subject: [PATCH 30/34] some formatting in various files --- navis/graph/graph_utils.py | 596 +++++++++++++++++++---------------- navis/morpho/mmetrics.py | 506 +++++++++++++++-------------- navis/plotting/plot_utils.py | 78 +++-- 3 files changed, 637 insertions(+), 543 deletions(-) diff --git a/navis/graph/graph_utils.py b/navis/graph/graph_utils.py index da2e5c68..01ee5721 100644 --- a/navis/graph/graph_utils.py +++ b/navis/graph/graph_utils.py @@ -293,9 +293,9 @@ def _break_segments(x: "core.NeuronObject") -> list: @utils.lock_neuron -def dist_to_root(x: 'core.TreeNeuron', - weight=None, - igraph_indices: bool = False) -> dict: +def dist_to_root( + x: "core.TreeNeuron", weight=None, igraph_indices: bool = False +) -> dict: """Calculate distance to root for each node. Parameters @@ -328,7 +328,7 @@ def dist_to_root(x: 'core.TreeNeuron', """ if not isinstance(x, core.TreeNeuron): - raise TypeError(f'Expected TreeNeuron, got {type(x)}') + raise TypeError(f"Expected TreeNeuron, got {type(x)}") dist = {} for root in x.root: @@ -337,21 +337,21 @@ def dist_to_root(x: 'core.TreeNeuron', # Map node ID to vertex index for igraph if igraph_indices: if not x.igraph: - raise ValueError('Neuron does not have an igraph representation.') - id2ix = dict(zip(x.igraph.vs['node_id'], range(len(x.igraph.vs)))) + raise ValueError("Neuron does not have an igraph representation.") + id2ix = dict(zip(x.igraph.vs["node_id"], range(len(x.igraph.vs)))) dist = {id2ix[k]: v for k, v in dist.items()} return dist -def _edge_count_to_root_old(x: 'core.TreeNeuron') -> dict: +def _edge_count_to_root_old(x: "core.TreeNeuron") -> dict: """Return a map of nodeID vs number of edges. Starts from the first node that lacks successors (aka the root). 
""" current_level: List[int] - g: Union['igraph.Graph', 'nx.DiGraph'] # noqa + g: Union["igraph.Graph", "nx.DiGraph"] # noqa if x.igraph and config.use_igraph: g = x.igraph current_level = g.vs(_outdegree=0).indices @@ -376,16 +376,16 @@ def _edge_count_to_root_old(x: 'core.TreeNeuron') -> dict: if x.igraph and config.use_igraph: # Grab graph once to avoid overhead from stale checks g = x.igraph - dist = {g.vs[k]['node_id']: v for k, v in dist.items()} + dist = {g.vs[k]["node_id"]: v for k, v in dist.items()} return dist -@utils.map_neuronlist(desc='Classifying', allow_parallel=True) +@utils.map_neuronlist(desc="Classifying", allow_parallel=True) @utils.lock_neuron -def _classify_nodes_old(x: 'core.NeuronObject', - inplace: bool = True - ) -> Optional['core.NeuronObject']: +def _classify_nodes_old( + x: "core.NeuronObject", inplace: bool = True +) -> Optional["core.NeuronObject"]: """Classify neuron's nodes into end nodes, branches, slabs or root. Adds ``'type'`` column to ``x.nodes``. @@ -424,13 +424,13 @@ def _classify_nodes_old(x: 'core.NeuronObject', # Get graph representation of neuron vs = x.igraph.vs # Get branch/end nodes based on their degree of connectivity - ends = vs.select(_indegree=0).get_attribute_values('node_id') - branches = vs.select(_indegree_gt=1).get_attribute_values('node_id') + ends = vs.select(_indegree=0).get_attribute_values("node_id") + branches = vs.select(_indegree_gt=1).get_attribute_values("node_id") else: # Get graph representation of neuron g = x.graph # Get branch/end nodes based on their degree of connectivity - deg = pd.DataFrame.from_dict(dict(g.degree()), orient='index') + deg = pd.DataFrame.from_dict(dict(g.degree()), orient="index") # [ n for n in g.nodes if g.degree(n) == 1 ] ends = deg[deg.iloc[:, 0] == 1].index.values # [ n for n in g.nodes if g.degree(n) > 2 ] @@ -439,26 +439,26 @@ def _classify_nodes_old(x: 'core.NeuronObject', # This also resets the column if it already exists. This is important # because an existing column will be categorical and if we try setting # types that didn't previously exist, it will throw exceptions. - x.nodes['type'] = 'slab' + x.nodes["type"] = "slab" - x.nodes.loc[x.nodes.node_id.isin(ends), 'type'] = 'end' - x.nodes.loc[x.nodes.node_id.isin(branches), 'type'] = 'branch' - x.nodes.loc[x.nodes.parent_id < 0, 'type'] = 'root' + x.nodes.loc[x.nodes.node_id.isin(ends), "type"] = "end" + x.nodes.loc[x.nodes.node_id.isin(branches), "type"] = "branch" + x.nodes.loc[x.nodes.parent_id < 0, "type"] = "root" else: - x.nodes['type'] = None + x.nodes["type"] = None # Turn into categorical data - saves tons of memory # Note that we have to make sure all categories are set even if they # don't exist (e.g. if a neuron has no branch points) - cat_types = CategoricalDtype(categories=["end", "branch", "root", "slab"], - ordered=False) - x.nodes['type'] = x.nodes['type'].astype(cat_types) + cat_types = CategoricalDtype( + categories=["end", "branch", "root", "slab"], ordered=False + ) + x.nodes["type"] = x.nodes["type"].astype(cat_types) return x - -@utils.map_neuronlist(desc='Classifying', allow_parallel=True) +@utils.map_neuronlist(desc="Classifying", allow_parallel=True) @utils.lock_neuron def classify_nodes(x: "core.NeuronObject", categorical=True, inplace: bool = True): """Classify neuron's nodes into end nodes, branches, slabs or root. 
@@ -519,7 +519,9 @@ def classify_nodes(x: "core.NeuronObject", categorical=True, inplace: bool = Tru cl[np.isin(node_ids, bp)] = "branch" cl[parent_ids < 0] = "root" if categorical: - cl = pd.Categorical(cl, categories=["end", "branch", "root", "slab"], ordered=False) + cl = pd.Categorical( + cl, categories=["end", "branch", "root", "slab"], ordered=False + ) x.nodes["type"] = cl return x @@ -527,36 +529,40 @@ def classify_nodes(x: "core.NeuronObject", categorical=True, inplace: bool = Tru # only this combination will return a single bool @overload -def distal_to(x: 'core.TreeNeuron', - a: Union[str, str], - b: Union[str, int], - ) -> bool: +def distal_to( + x: "core.TreeNeuron", + a: Union[str, str], + b: Union[str, int], +) -> bool: pass # if above types don't a DataFrame will be returned @overload -def distal_to(x: 'core.TreeNeuron', - a: Optional[List[Union[str, int]]], - b: Optional[Union[str, int, List[Union[str, int]]]], - ) -> pd.DataFrame: +def distal_to( + x: "core.TreeNeuron", + a: Optional[List[Union[str, int]]], + b: Optional[Union[str, int, List[Union[str, int]]]], +) -> pd.DataFrame: pass # if above types don't a DataFrame will be returned @overload -def distal_to(x: 'core.TreeNeuron', - a: Optional[Union[str, int, List[Union[str, int]]]], - b: Optional[List[Union[str, int]]], - ) -> pd.DataFrame: +def distal_to( + x: "core.TreeNeuron", + a: Optional[Union[str, int, List[Union[str, int]]]], + b: Optional[List[Union[str, int]]], +) -> pd.DataFrame: pass @utils.lock_neuron -def distal_to(x: 'core.TreeNeuron', - a: Optional[Union[str, int, List[Union[str, int]]]] = None, - b: Optional[Union[str, int, List[Union[str, int]]]] = None, - ) -> Union[bool, pd.DataFrame]: +def distal_to( + x: "core.TreeNeuron", + a: Optional[Union[str, int, List[Union[str, int]]]] = None, + b: Optional[Union[str, int, List[Union[str, int]]]] = None, +) -> Union[bool, pd.DataFrame]: """Check if nodes A are distal to nodes B. 
Important @@ -626,7 +632,7 @@ def distal_to(x: 'core.TreeNeuron', x = x[0] if not isinstance(x, core.TreeNeuron): - raise ValueError(f'Please pass a single TreeNeuron, got {type(x)}') + raise ValueError(f"Please pass a single TreeNeuron, got {type(x)}") # At this point x is TreeNeuron x: core.TreeNeuron @@ -647,8 +653,7 @@ def distal_to(x: 'core.TreeNeuron', if x.igraph and config.use_igraph: # Map node ID to index - id2ix = {n: v for v, n in zip(x.igraph.vs.indices, - x.igraph.vs['node_id'])} + id2ix = {n: v for v, n in zip(x.igraph.vs.indices, x.igraph.vs["node_id"])} # Convert node IDs to indices tnA = [id2ix[n] for n in tnA] # type: ignore @@ -661,22 +666,26 @@ def distal_to(x: 'core.TreeNeuron', le = np.asarray(le) # Convert to True/False - le = le != float('inf') + le = le != float("inf") - df = pd.DataFrame(le, - index=x.igraph.vs[tnA]['node_id'], - columns=x.igraph.vs[tnB]['node_id']) + df = pd.DataFrame( + le, index=x.igraph.vs[tnA]["node_id"], columns=x.igraph.vs[tnB]["node_id"] + ) else: # Generate empty DataFrame - df = pd.DataFrame(np.zeros((len(tnA), len(tnB)), dtype=bool), - columns=tnB, index=tnA) + df = pd.DataFrame( + np.zeros((len(tnA), len(tnB)), dtype=bool), columns=tnB, index=tnA + ) # Iterate over all targets # Grab graph once to avoid overhead from stale checks g = x.graph - for nB in config.tqdm(tnB, desc='Querying paths', - disable=(len(tnB) < 1000) | config.pbar_hide, - leave=config.pbar_leave): + for nB in config.tqdm( + tnB, + desc="Querying paths", + disable=(len(tnB) < 1000) | config.pbar_hide, + leave=config.pbar_leave, + ): # Get all paths TO this target. This function returns a dictionary: # { source1 : path_length, source2 : path_length, ... } containing # all nodes distal to this node. @@ -878,15 +887,14 @@ def segment_length(x: "core.TreeNeuron", segment: List[int]) -> float: # Get graph once to avoid overhead from validation - do NOT change this graph = x.graph - dist = np.array([graph.edges[(c, p)]['weight'] - for c, p in zip(segment[:-1], segment[1:])]) + dist = np.array( + [graph.edges[(c, p)]["weight"] for c, p in zip(segment[:-1], segment[1:])] + ) return sum(dist) @utils.lock_neuron -def dist_between(x: 'core.NeuronObject', - a: int, - b: int) -> float: +def dist_between(x: "core.NeuronObject", a: int, b: int) -> float: """Get the geodesic distance between nodes in nanometers. Parameters @@ -924,23 +932,29 @@ def dist_between(x: 'core.NeuronObject', if len(x) == 1: x = x[0] else: - raise ValueError(f'Need a single TreeNeuron, got {len(x)}') + raise ValueError(f"Need a single TreeNeuron, got {len(x)}") if isinstance(x, (core.TreeNeuron, core.MeshNeuron)): - G: Union['igraph.Graph', # noqa - 'nx.DiGraph'] = x.igraph if (x.igraph and config.use_igraph) else x.graph + G: Union[ + "igraph.Graph", # noqa + "nx.DiGraph", + ] = x.igraph if (x.igraph and config.use_igraph) else x.graph elif isinstance(x, nx.DiGraph): G = x - elif 'igraph' in str(type(x.igraph)): + elif "igraph" in str(type(x.igraph)): # We can't use isinstance here because igraph library might not be installed G = x else: - raise ValueError(f'Unable to process data of type {type(x)}') - - if ((utils.is_iterable(a) and len(a) > 1) # type: ignore # this is just a check - or (utils.is_iterable(b) and len(b) > 1)): # type: ignore # this is just a check - raise ValueError('Can only process single nodes/vertices. 
Use ' - 'navis.geodesic_matrix instead.') + raise ValueError(f"Unable to process data of type {type(x)}") + + if ( + (utils.is_iterable(a) and len(a) > 1) # type: ignore # this is just a check + or (utils.is_iterable(b) and len(b) > 1) + ): # type: ignore # this is just a check + raise ValueError( + "Can only process single nodes/vertices. Use " + "navis.geodesic_matrix instead." + ) a = utils.make_non_iterable(a) b = utils.make_non_iterable(b) @@ -949,13 +963,15 @@ def dist_between(x: 'core.NeuronObject', _ = int(a) _ = int(b) except BaseException: - raise ValueError('a, b need to be node IDs or vertex indices!') + raise ValueError("a, b need to be node IDs or vertex indices!") # If we're working with network X DiGraph if isinstance(G, nx.DiGraph): - return int(nx.algorithms.shortest_path_length(G.to_undirected(as_view=True), - a, b, - weight='weight')) + return int( + nx.algorithms.shortest_path_length( + G.to_undirected(as_view=True), a, b, weight="weight" + ) + ) else: if isinstance(x, core.TreeNeuron): a = G.vs.find(node_id=a) @@ -965,13 +981,14 @@ def dist_between(x: 'core.NeuronObject', return G.distances(a, b, weights="weight", mode="ALL")[0][0] -@utils.map_neuronlist(desc='Searching', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_to_vertex') -def find_main_branchpoint(x: 'core.NeuronObject', - method: Union[Literal['longest_neurite'], - Literal['betweenness']] = 'betweenness', - threshold: float = .95, - reroot_soma: bool = False) -> Union[int, List[int]]: +@utils.map_neuronlist(desc="Searching", allow_parallel=True) +@utils.meshneuron_skeleton(method="node_to_vertex") +def find_main_branchpoint( + x: "core.NeuronObject", + method: Union[Literal["longest_neurite"], Literal["betweenness"]] = "betweenness", + threshold: float = 0.95, + reroot_soma: bool = False, +) -> Union[int, List[int]]: """Find main branch point of unipolar (e.g. insect) neurons. Note that this might produce garbage if the neuron is fragmented. 
@@ -1018,8 +1035,9 @@ def find_main_branchpoint(x: 'core.NeuronObject', 2 TreeNeuron 3656 0 63 66 648285.745750 None """ - utils.eval_param(method, name='method', - allowed_values=('longest_neurite', 'betweenness')) + utils.eval_param( + method, name="method", allowed_values=("longest_neurite", "betweenness") + ) if not isinstance(x, core.TreeNeuron): raise TypeError(f'Expected TreeNeuron(s), got "{type(x)}"') @@ -1028,33 +1046,35 @@ def find_main_branchpoint(x: 'core.NeuronObject', x: core.TreeNeuron # If no branches - if x.nodes[x.nodes.type == 'branch'].empty: - raise ValueError('Neuron has no branch points.') + if x.nodes[x.nodes.type == "branch"].empty: + raise ValueError("Neuron has no branch points.") if reroot_soma and not isinstance(x.soma, type(None)): x = x.reroot(x.soma, inplace=False) - if method == 'longest_neurite': + if method == "longest_neurite": G = x.graph # First, find longest path - longest = nx.dag_longest_path(G, weight='weight') + longest = nx.dag_longest_path(G, weight="weight") # Remove longest path # (use subgraph to avoid editing original or copying raph) keep = ~np.isin(G.nodes, longest) G = G.subgraph(np.array(G.nodes)[keep]) - # Find second longst path - sc_longest = nx.dag_longest_path(G, weight='weight') + # Find second longest path + sc_longest = nx.dag_longest_path(G, weight="weight") # Parent of the last node in sc_longest is the common branch point bp = list(x.graph.successors(sc_longest[-1]))[0] else: # Get betweenness for each node - x = morpho.betweeness_centrality(x, directed=True, from_='branch_points') + x = morpho.betweeness_centrality(x, directed=True, from_="branch_points") # Get branch points with highest centrality - high_between = x.branch_points.betweenness >= x.branch_points.betweenness.max() * threshold + high_between = ( + x.branch_points.betweenness >= x.branch_points.betweenness.max() * threshold + ) candidates = x.branch_points[high_between] # If only one nodes just go with it @@ -1063,20 +1083,20 @@ def find_main_branchpoint(x: 'core.NeuronObject', else: # If multiple points get the farthest one from the root root_dists = dist_to_root(x) - bp = sorted(candidates.node_id.values, - key=lambda x: root_dists[x])[-1] - + bp = sorted(candidates.node_id.values, key=lambda x: root_dists[x])[-1] # This makes sure we get the same data type as in the node table # -> Network X seems to sometimes convert integers to floats return x.nodes.node_id.dtype.type(bp) -@utils.meshneuron_skeleton(method='split') -def split_into_fragments(x: 'core.NeuronObject', - n: int = 2, - min_size: Optional[Union[float, str]] = None, - reroot_soma: bool = False) -> 'core.NeuronList': +@utils.meshneuron_skeleton(method="split") +def split_into_fragments( + x: "core.NeuronObject", + n: int = 2, + min_size: Optional[Union[float, str]] = None, + reroot_soma: bool = False, +) -> "core.NeuronList": """Split neuron into fragments. Cuts are based on longest neurites: the first cut is made where the second @@ -1117,19 +1137,21 @@ def split_into_fragments(x: 'core.NeuronObject', if len(x) == 1: x = x[0] else: - raise Exception(f'{x.shape[0]} neurons provided. Please provide ' - 'only a single neuron!') + raise Exception( + f"{x.shape[0]} neurons provided. Please provide " + "only a single neuron!" 
+ ) if not isinstance(x, core.TreeNeuron): raise TypeError(f'Expected a single TreeNeuron, got "{type(x)}"') if n < 2: - raise ValueError('Number of fragments must be at least 2.') + raise ValueError("Number of fragments must be at least 2.") # At this point x is TreeNeuron x: core.TreeNeuron - min_size = x.map_units(min_size, on_error='raise') + min_size = x.map_units(min_size, on_error="raise") if reroot_soma and not isinstance(x.soma, type(None)): x.reroot(x.soma, inplace=True) @@ -1153,8 +1175,13 @@ def split_into_fragments(x: 'core.NeuronObject', # Check if fragment is still long enough if min_size: - this_length = sum([v for k, v in nx.get_edge_attributes( - g, 'weight').items() if k[1] in longest_path]) + this_length = sum( + [ + v + for k, v in nx.get_edge_attributes(g, "weight").items() + if k[1] in longest_path + ] + ) if this_length <= min_size: break @@ -1176,7 +1203,7 @@ def split_into_fragments(x: 'core.NeuronObject', # Next, we need to remove nodes that are in subsequent graphs from # those graphs for i, g in enumerate(graphs): - for g2 in graphs[i + 1:]: + for g2 in graphs[i + 1 :]: g.remove_nodes_from(g2.nodes) # Now make neurons @@ -1185,14 +1212,16 @@ def split_into_fragments(x: 'core.NeuronObject', return nl -@utils.map_neuronlist(desc='Pruning', allow_parallel=True) -@utils.meshneuron_skeleton(method='subset') -def longest_neurite(x: 'core.NeuronObject', - n: int = 1, - reroot_soma: bool = False, - from_root: bool = True, - inverse: bool = False, - inplace: bool = False) -> 'core.NeuronObject': +@utils.map_neuronlist(desc="Pruning", allow_parallel=True) +@utils.meshneuron_skeleton(method="subset") +def longest_neurite( + x: "core.NeuronObject", + n: int = 1, + reroot_soma: bool = False, + from_root: bool = True, + inverse: bool = False, + inplace: bool = False, +) -> "core.NeuronObject": """Return a neuron consisting of only the longest neurite(s). Based on geodesic distances. @@ -1244,7 +1273,7 @@ def longest_neurite(x: 'core.NeuronObject', raise TypeError(f'Expected TreeNeuron(s), got "{type(x)}"') if isinstance(n, numbers.Number) and n < 1: - raise ValueError('Number of longest neurites to preserve must be >=1') + raise ValueError("Number of longest neurites to preserve must be >=1") # At this point x is TreeNeuron x: core.TreeNeuron @@ -1266,7 +1295,7 @@ def longest_neurite(x: 'core.NeuronObject', elif reroot_soma and not isinstance(x.soma, type(None)): x.reroot(x.soma, inplace=True) - segments = _generate_segments(x, weight='weight') + segments = _generate_segments(x, weight="weight") if isinstance(n, (int, np.integer)): tn_to_preserve: List[int] = [tn for s in segments[:n] for tn in s] @@ -1278,16 +1307,17 @@ def longest_neurite(x: 'core.NeuronObject', if not inverse: _ = morpho.subset_neuron(x, tn_to_preserve, inplace=True) else: - _ = morpho.subset_neuron(x, ~np.isin(x.nodes.node_id.values, tn_to_preserve), - inplace=True) + _ = morpho.subset_neuron( + x, ~np.isin(x.nodes.node_id.values, tn_to_preserve), inplace=True + ) return x @utils.lock_neuron -def reroot_skeleton(x: 'core.NeuronObject', - new_root: Union[int, str], - inplace: bool = False) -> 'core.TreeNeuron': +def reroot_skeleton( + x: "core.NeuronObject", new_root: Union[int, str], inplace: bool = False +) -> "core.TreeNeuron": """Reroot neuron to new root. 
Parameters @@ -1324,7 +1354,7 @@ def reroot_skeleton(x: 'core.NeuronObject', if len(x) == 1: x = x[0] else: - raise ValueError(f'Expected a single neuron, got {len(x)}') + raise ValueError(f"Expected a single neuron, got {len(x)}") if not isinstance(x, core.TreeNeuron): raise ValueError(f'Unable to reroot object of type "{type(x)}"') @@ -1335,7 +1365,7 @@ def reroot_skeleton(x: 'core.NeuronObject', # Parse new roots for i, root in enumerate(new_roots): if root is None: - raise ValueError('New root can not be ') + raise ValueError("New root can not be ") # If new root is a tag, rather than a ID, try finding that node if isinstance(root, str): @@ -1343,12 +1373,15 @@ def reroot_skeleton(x: 'core.NeuronObject', raise ValueError("Neuron does not have tags") if root not in x.tags: - raise ValueError(f'#{x.id}: Found no nodes with tag {root}' - ' - please double check!') + raise ValueError( + f"#{x.id}: Found no nodes with tag {root}" " - please double check!" + ) elif len(x.tags[root]) > 1: - raise ValueError(f'#{x.id}: Found multiple node with tag ' - f'{root} - please double check!') + raise ValueError( + f"#{x.id}: Found multiple node with tag " + f"{root} - please double check!" + ) else: new_roots[i] = x.tags[root][0] @@ -1383,11 +1416,14 @@ def reroot_skeleton(x: 'core.NeuronObject', warnings.simplefilter("ignore") # Find paths to all roots - path = g.get_shortest_paths(g.vs.find(node_id=new_root), - [g.vs.find(node_id=r) for r in x.root]) - epath = g.get_shortest_paths(g.vs.find(node_id=new_root), - [g.vs.find(node_id=r) for r in x.root], - output='epath') + path = g.get_shortest_paths( + g.vs.find(node_id=new_root), [g.vs.find(node_id=r) for r in x.root] + ) + epath = g.get_shortest_paths( + g.vs.find(node_id=new_root), + [g.vs.find(node_id=r) for r in x.root], + output="epath", + ) # Extract paths that actually worked (i.e. 
within a continuous fragment) path = [p for p in path if p][0] @@ -1395,16 +1431,16 @@ def reroot_skeleton(x: 'core.NeuronObject', edges = [(s, t) for s, t in zip(path[:-1], path[1:])] - weights = [g.es[e]['weight'] for e in epath] + weights = [g.es[e]["weight"] for e in epath] # Get all weights and append inversed new weights - all_weights = g.es['weight'] + weights + all_weights = g.es["weight"] + weights # Add inverse edges: old_root->new_root g.add_edges([(e[1], e[0]) for e in edges]) # Re-set weights - g.es['weight'] = all_weights + g.es["weight"] = all_weights # Remove new_root->old_root g.delete_edges(edges) @@ -1413,19 +1449,21 @@ def reroot_skeleton(x: 'core.NeuronObject', old_root_deg = len(g.es.select(_target=path[-1])) # Translate path indices to node IDs - ix2id = {ix: n for ix, n in zip(g.vs.indices, - g.vs.get_attribute_values('node_id'))} + ix2id = { + ix: n + for ix, n in zip(g.vs.indices, g.vs.get_attribute_values("node_id")) + } path = [ix2id[i] for i in path] else: # Grab graph once to avoid overhead from stale checks g = x.graph # If this NetworkX graph is just an (immutable) view, turn it into a # full, independent graph - nx_main_version = '.'.join(nx.__version__.split(".")[:2]) + nx_main_version = ".".join(nx.__version__.split(".")[:2]) if float(nx_main_version) < 2.2: if isinstance(g, nx.classes.graphviews.ReadOnlyGraph): x._graph_nx = g = nx.DiGraph(g) - elif hasattr(g, '_NODE_OK'): + elif hasattr(g, "_NODE_OK"): x._graph_nx = g = nx.DiGraph(g) elif nx.is_frozen(g): x._graph_nx = g = nx.DiGraph(g) @@ -1439,14 +1477,16 @@ def reroot_skeleton(x: 'core.NeuronObject', path = [new_root] weights = [] while parent is not None: - weights.append(g[path[-1]][parent]['weight']) + weights.append(g[path[-1]][parent]["weight"]) g.remove_edge(path[-1], parent) path.append(parent) parent = next(g.successors(parent), None) # Invert path and add weights - new_edges = [(path[i + 1], path[i], - {'weight': weights[i]}) for i in range(len(path) - 1)] + new_edges = [ + (path[i + 1], path[i], {"weight": weights[i]}) + for i in range(len(path) - 1) + ] # Add inverted path between old and new root g.add_edges_from(new_edges) @@ -1455,45 +1495,44 @@ def reroot_skeleton(x: 'core.NeuronObject', old_root_deg = g.in_degree(path[-1]) # Set index to node ID for later - x.nodes.set_index('node_id', inplace=True) + x.nodes.set_index("node_id", inplace=True) # Propagate changes in graph back to node table # Assign new node type to old root - x.nodes.loc[path[1:], 'parent_id'] = path[:-1] + x.nodes.loc[path[1:], "parent_id"] = path[:-1] if old_root_deg == 1: - x.nodes.loc[path[-1], 'type'] = 'slab' + x.nodes.loc[path[-1], "type"] = "slab" elif old_root_deg > 1: - x.nodes.loc[path[-1], 'type'] = 'branch' + x.nodes.loc[path[-1], "type"] = "branch" else: - x.nodes.loc[path[-1], 'type'] = 'end' + x.nodes.loc[path[-1], "type"] = "end" # Make new root node type "root" - x.nodes.loc[path[0], 'type'] = 'root' + x.nodes.loc[path[0], "type"] = "root" # Set new root's parent to None - x.nodes.loc[new_root, 'parent_id'] = -1 + x.nodes.loc[new_root, "parent_id"] = -1 # Reset index x.nodes.reset_index(drop=False, inplace=True) # Make sure node ID has the same datatype as before if x.nodes.node_id.dtype != nodeid_dtype: - x.nodes['node_id'] = x.nodes.node_id.astype(nodeid_dtype, copy=False) + x.nodes["node_id"] = x.nodes.node_id.astype(nodeid_dtype, copy=False) # Finally: only reset non-graph related attributes if x.igraph and config.use_igraph: - x._clear_temp_attr(exclude=['igraph', 'classify_nodes']) + 
x._clear_temp_attr(exclude=["igraph", "classify_nodes"]) else: - x._clear_temp_attr(exclude=['graph', 'classify_nodes']) + x._clear_temp_attr(exclude=["graph", "classify_nodes"]) return x -def cut_skeleton(x: 'core.NeuronObject', - where: Union[int, str, List[Union[int, str]]], - ret: Union[Literal['both'], - Literal['proximal'], - Literal['distal']] = 'both' - ) -> 'core.NeuronList': +def cut_skeleton( + x: "core.NeuronObject", + where: Union[int, str, List[Union[int, str]]], + ret: Union[Literal["both"], Literal["proximal"], Literal["distal"]] = "both", +) -> "core.NeuronList": """Split skeleton at given point and returns two new neurons. Split is performed between cut node and its parent node. The cut node itself @@ -1549,22 +1588,23 @@ def cut_skeleton(x: 'core.NeuronObject', Returns a neuron consisting of a subset of its nodes. """ - utils.eval_param(ret, name='ret', - allowed_values=('proximal', 'distal', 'both')) + utils.eval_param(ret, name="ret", allowed_values=("proximal", "distal", "both")) if isinstance(x, core.NeuronList): if len(x) == 1: x = x[0] else: - raise Exception(f'Expected a single TreeNeuron, got {len(x)}') + raise Exception(f"Expected a single TreeNeuron, got {len(x)}") if not isinstance(x, core.TreeNeuron): raise TypeError(f'Expected a single TreeNeuron, got "{type(x)}"') if x.n_trees != 1: - raise ValueError(f'Unable to cut: neuron {x.id} consists of multiple ' - 'disconnected trees. Use navis.heal_skeleton()' - ' to fix.') + raise ValueError( + f"Unable to cut: neuron {x.id} consists of multiple " + "disconnected trees. Use navis.heal_skeleton()" + " to fix." + ) # At this point x is TreeNeuron x: core.TreeNeuron @@ -1581,8 +1621,9 @@ def cut_skeleton(x: 'core.NeuronObject', if x.tags is None: raise ValueError(f"Neuron {x.id} has no tags") if cn not in x.tags: - raise ValueError(f'#{x.id}: Found no node with tag {cn}' - ' - please double check!') + raise ValueError( + f"#{x.id}: Found no node with tag {cn}" " - please double check!" + ) cn_ids += x.tags[cn] elif cn not in x.nodes.node_id.values: raise ValueError(f'No node with ID "{cn}" found.') @@ -1596,7 +1637,7 @@ def cut_skeleton(x: 'core.NeuronObject', cn_ids = [cn for cn in cn_ids if not (cn in seen or seen.add(cn))] # Warn if not all returned - if len(cn_ids) > 1 and ret != 'both': + if len(cn_ids) > 1 and ret != "both": logger.warning('Multiple cuts should use `ret = "both"`.') # Go over all cut_nodes -> order matters! @@ -1626,11 +1667,9 @@ def cut_skeleton(x: 'core.NeuronObject', return core.NeuronList(res) -def _cut_igraph(x: 'core.TreeNeuron', - cut_node: int, - ret: str) -> Union['core.TreeNeuron', - Tuple['core.TreeNeuron', - 'core.TreeNeuron']]: +def _cut_igraph( + x: "core.TreeNeuron", cut_node: int, ret: str +) -> Union["core.TreeNeuron", Tuple["core.TreeNeuron", "core.TreeNeuron"]]: """Use iGraph to cut a neuron.""" # Make a copy g = x.igraph.copy() @@ -1647,112 +1686,101 @@ def _cut_igraph(x: 'core.TreeNeuron', # Make graph undirected -> otherwise .decompose() throws an error # This issue is fixed in the up-to-date branch of igraph-python # (which is not on PyPI O_o ) - g.to_undirected(combine_edges='first') + g.to_undirected(combine_edges="first") # Get subgraph -> fastest way to get sets of nodes for subsetting - a, b = g.decompose(mode='WEAK') + a, b = g.decompose(mode="WEAK") # IMPORTANT: a,b are now UNDIRECTED graphs -> we must not keep using them! 
- if x.root[0] in a.vs['node_id']: + if x.root[0] in a.vs["node_id"]: dist_graph, prox_graph = b, a else: dist_graph, prox_graph = a, b - if ret == 'distal' or ret == 'both': - dist = morpho.subset_neuron(x, - subset=dist_graph.vs['node_id'], - inplace=False) + if ret == "distal" or ret == "both": + dist = morpho.subset_neuron(x, subset=dist_graph.vs["node_id"], inplace=False) # Change new root for dist - dist.nodes.loc[dist.nodes.node_id == cut_node, 'type'] = 'root' + dist.nodes.loc[dist.nodes.node_id == cut_node, "type"] = "root" # Clear other temporary attributes - dist._clear_temp_attr(exclude=['igraph', 'type', 'classify_nodes']) + dist._clear_temp_attr(exclude=["igraph", "type", "classify_nodes"]) - if ret == 'proximal' or ret == 'both': - ss: Sequence[int] = prox_graph.vs['node_id'] + [cut_node] - prox = morpho.subset_neuron(x, - subset=ss, - inplace=False) + if ret == "proximal" or ret == "both": + ss: Sequence[int] = prox_graph.vs["node_id"] + [cut_node] + prox = morpho.subset_neuron(x, subset=ss, inplace=False) # Change new root for dist - prox.nodes.loc[prox.nodes.node_id == cut_node, 'type'] = 'end' + prox.nodes.loc[prox.nodes.node_id == cut_node, "type"] = "end" # Clear other temporary attributes - prox._clear_temp_attr(exclude=['igraph', 'type', 'classify_nodes']) + prox._clear_temp_attr(exclude=["igraph", "type", "classify_nodes"]) - if ret == 'both': + if ret == "both": return dist, prox - elif ret == 'distal': + elif ret == "distal": return dist else: # elif ret == 'proximal': return prox -def _cut_networkx(x: 'core.TreeNeuron', - cut_node: Union[int, str], - ret: str) -> Union['core.TreeNeuron', - Tuple['core.TreeNeuron', - 'core.TreeNeuron']]: +def _cut_networkx( + x: "core.TreeNeuron", cut_node: Union[int, str], ret: str +) -> Union["core.TreeNeuron", Tuple["core.TreeNeuron", "core.TreeNeuron"]]: """Use networkX graph to cut a neuron.""" # Get subgraphs consisting of nodes distal to cut node dist_graph: nx.DiGraph = nx.bfs_tree(x.graph, cut_node, reverse=True) - if ret == 'distal' or ret == 'both': + if ret == "distal" or ret == "both": # bfs_tree does not preserve 'weight' # -> need to subset original graph by those nodes dist_graph = x.graph.subgraph(dist_graph.nodes) # Generate new neurons # This is the actual bottleneck of the function: ~70% of time - dist = morpho.subset_neuron(x, - subset=dist_graph, - inplace=False) # type: ignore # doesn't know nx.DiGraph + dist = morpho.subset_neuron(x, subset=dist_graph, inplace=False) # type: ignore # doesn't know nx.DiGraph # Change new root for dist - dist.nodes.loc[dist.nodes.node_id == cut_node, 'parent_id'] = -1 - dist.nodes.loc[dist.nodes.node_id == cut_node, 'type'] = 'root' + dist.nodes.loc[dist.nodes.node_id == cut_node, "parent_id"] = -1 + dist.nodes.loc[dist.nodes.node_id == cut_node, "type"] = "root" # Reassign graphs dist._graph_nx = dist_graph # Clear other temporary attributes - dist._clear_temp_attr(exclude=['graph', 'type', 'classify_nodes']) + dist._clear_temp_attr(exclude=["graph", "type", "classify_nodes"]) - if ret == 'proximal' or ret == 'both': + if ret == "proximal" or ret == "both": # bfs_tree does not preserve 'weight' # need to subset original graph by those nodes - ss_nodes = [n for n in x.graph.nodes if n not in dist_graph.nodes] + \ - [cut_node] + ss_nodes = [n for n in x.graph.nodes if n not in dist_graph.nodes] + [cut_node] prox_graph: nx.DiGraph = x.graph.subgraph(ss_nodes) # Generate new neurons # This is the actual bottleneck of the function: ~70% of time - prox = morpho.subset_neuron(x, - 
subset=prox_graph, - inplace=False) + prox = morpho.subset_neuron(x, subset=prox_graph, inplace=False) # Change cut node to end node for prox - prox.nodes.loc[prox.nodes.node_id == cut_node, 'type'] = 'end' + prox.nodes.loc[prox.nodes.node_id == cut_node, "type"] = "end" # Reassign graphs prox._graph_nx = prox_graph # Clear other temporary attributes - prox._clear_temp_attr(exclude=['graph', 'type', 'classify_nodes']) + prox._clear_temp_attr(exclude=["graph", "type", "classify_nodes"]) # ATTENTION: prox/dist_graph contain pointers to the original graph # -> changes to attributes will propagate back - if ret == 'both': + if ret == "both": return dist, prox - elif ret == 'distal': + elif ret == "distal": return dist else: # elif ret == 'proximal': return prox -def generate_list_of_childs(x: 'core.NeuronObject') -> Dict[int, List[int]]: +def generate_list_of_childs(x: "core.NeuronObject") -> Dict[int, List[int]]: """Return list of childs. Parameters @@ -1772,8 +1800,9 @@ def generate_list_of_childs(x: 'core.NeuronObject') -> Dict[int, List[int]]: return {n: [e[0] for e in g.in_edges(n)] for n in g.nodes} -def node_label_sorting(x: 'core.TreeNeuron', - weighted: bool = False) -> List[Union[str, int]]: +def node_label_sorting( + x: "core.TreeNeuron", weighted: bool = False +) -> List[Union[str, int]]: """Return nodes ordered by node label sorting according to Cuntz et al., PLoS Computational Biology (2010). @@ -1798,28 +1827,36 @@ def node_label_sorting(x: 'core.TreeNeuron', raise TypeError(f'Expected a singleTreeNeuron, got "{type(x)}"') if len(x.root) > 1: - raise ValueError('Unable to process multi-root neurons!') + raise ValueError("Unable to process multi-root neurons!") # Get relevant terminal nodes - term = x.nodes[x.nodes.type == 'end'].node_id.values + term = x.nodes[x.nodes.type == "end"].node_id.values # Get distance from terminals to all other nodes - geo = geodesic_matrix(x, from_=term, directed=True, weight='weight' if weighted else None) + geo = geodesic_matrix( + x, from_=term, directed=True, weight="weight" if weighted else None + ) # Set distance between unreachable points to None # Need to reinitialise SparseMatrix to replace float('inf') with NaN # dist_mat[geo == float('inf')] = None - dist_mat = pd.DataFrame(np.where(geo == float('inf'), # type: ignore # no stubs for SparseDataFrame - np.nan, - geo), - columns=geo.columns, - index=geo.index) + dist_mat = pd.DataFrame( + np.where( + geo == float("inf"), # type: ignore # no stubs for SparseDataFrame + np.nan, + geo, + ), + columns=geo.columns, + index=geo.index, + ) # Get starting points (i.e. 
    # Get starting points (i.e. branches off the root) and sort by longest
    # path to a terminal (note we're operating on the simplified version
    # of the skeleton)
-    curr_points = sorted(list(x.simple.graph.predecessors(x.root[0])),
-                         key=lambda n: dist_mat[n].max(),
-                         reverse=True)
+    curr_points = sorted(
+        list(x.simple.graph.predecessors(x.root[0])),
+        key=lambda n: dist_mat[n].max(),
+        reverse=True,
+    )

     # Walk from root towards terminals, prioritising longer branches
     nodes_walked = []
@@ -1829,9 +1866,11 @@
         if nodes_walked[-1] in term:
             pass
         else:
-            new_points = sorted(list(x.simple.graph.predecessors(nodes_walked[-1])),
-                                key=lambda n: dist_mat[n].max(),
-                                reverse=True)
+            new_points = sorted(
+                list(x.simple.graph.predecessors(nodes_walked[-1])),
+                key=lambda n: dist_mat[n].max(),
+                reverse=True,
+            )
             curr_points = new_points + curr_points

     # Translate into segments
@@ -1860,12 +1899,14 @@ def _igraph_to_sparse(graph, weight_attr=None):
     # Note: previously, we used a generator (weights, zip(*edges)) as input to
     # csr_matrix but with Scipy 1.13.0 this has stopped working
     edges = np.array(edges)
-    return csr_matrix((weights, (edges[:,0], edges[:,1])),
-                      shape=(len(graph.vs), len(graph.vs)))
+    return csr_matrix(
+        (weights, (edges[:, 0], edges[:, 1])), shape=(len(graph.vs), len(graph.vs))
+    )


-def connected_subgraph(x: Union['core.TreeNeuron', nx.DiGraph],
-                       ss: Sequence[Union[str, int]]) -> Tuple[np.ndarray, Union[int, str]]:
+def connected_subgraph(
+    x: Union["core.TreeNeuron", nx.DiGraph], ss: Sequence[Union[str, int]]
+) -> Tuple[np.ndarray, Union[int, str]]:
     """Return set of nodes necessary to connect all nodes in subset ``ss``.

     Parameters
@@ -1958,8 +1999,7 @@ def connected_subgraph(x: Union['core.TreeNeuron', nx.DiGraph],
         this_ss = ss & cc
         if this_ss - include:
             # Make sure the new root is set correctly
-            nr = sorted(this_ss - include,
-                        key=lambda x: longest_path.index(x))[-1]
+            nr = sorted(this_ss - include, key=lambda x: longest_path.index(x))[-1]
             new_roots.append(nr)
             # Add those nodes to be included
             include = set.union(include, this_ss)
@@ -1969,11 +2009,13 @@
     return np.array(list(include)), new_roots


-def insert_nodes(x: 'core.TreeNeuron',
-                 where: List[tuple],
-                 coords: List[tuple] = None,
-                 validate: bool = True,
-                 inplace: bool = False) -> Optional['core.TreeNeuron']:
+def insert_nodes(
+    x: "core.TreeNeuron",
+    where: List[tuple],
+    coords: List[tuple] = None,
+    validate: bool = True,
+    inplace: bool = False,
+) -> Optional["core.TreeNeuron"]:
     """Insert new nodes between existing nodes.

     Parameters
@@ -2017,17 +2059,18 @@ def insert_nodes(x: 'core.TreeNeuron',
     4565

     """
-    utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, ))
+    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

     where = np.asarray(where)
     if where.ndim != 2 or where.shape[1] != 2:
" f"Got {where.shape}" + ) # Validate if that's desired if validate: # Setup to get parents - parent = x.nodes.set_index('node_id').parent_id + parent = x.nodes.set_index("node_id").parent_id # Get parents of the left and the right nodes of each pair parent_left = parent.loc[where[:, 0]].values @@ -2039,8 +2082,9 @@ def insert_nodes(x: 'core.TreeNeuron', not_connected = ~(correct_order | swapped) if np.any(not_connected): - raise ValueError('The following pairs are not connected: ' - f'{where[not_connected]}') + raise ValueError( + "The following pairs are not connected: " f"{where[not_connected]}" + ) # Flip nodes where necessary to sure we have (parent, child) order if np.any(swapped): @@ -2048,7 +2092,7 @@ def insert_nodes(x: 'core.TreeNeuron', # If not provided, generate coordinates in the center between each node pair if isinstance(coords, type(None)): - node_locs = x.nodes.set_index('node_id')[['x', 'y', 'z']] + node_locs = x.nodes.set_index("node_id")[["x", "y", "z"]] left_loc = node_locs.loc[where[:, 0]].values right_loc = node_locs.loc[where[:, 1]].values @@ -2058,12 +2102,14 @@ def insert_nodes(x: 'core.TreeNeuron', coords = np.asarray(coords) # Make sure we have correct coordinates if coords.shape[0] != where.shape[0]: - raise ValueError(f'Expected {where.shape[0]} coordinates or distances, ' - f'got {coords.shape[0]}') + raise ValueError( + f"Expected {where.shape[0]} coordinates or distances, " + f"got {coords.shape[0]}" + ) # If array of fractional distances translate to coordinates if coords.ndim == 1: - node_locs = x.nodes.set_index('node_id')[['x', 'y', 'z']] + node_locs = x.nodes.set_index("node_id")[["x", "y", "z"]] left_loc = node_locs.loc[where[:, 0]].values right_loc = node_locs.loc[where[:, 1]].values @@ -2071,27 +2117,30 @@ def insert_nodes(x: 'core.TreeNeuron', coords = left_loc + (right_loc - left_loc) * coords.reshape(-1, 1) # For the moment, we will interpolate the radius - rad = x.nodes.set_index('node_id').radius + rad = x.nodes.set_index("node_id").radius new_rad = (rad.loc[where[:, 0]].values + rad.loc[where[:, 1]].values) / 2 # Generate table for new nodes new_nodes = pd.DataFrame() max_id = x.nodes.node_id.max() + 1 - new_nodes['node_id'] = np.arange(max_id, max_id + where.shape[0]).astype(int) - new_nodes['parent_id'] = where[:, 0] - new_nodes['x'] = coords[:, 0] - new_nodes['y'] = coords[:, 1] - new_nodes['z'] = coords[:, 2] - new_nodes['radius'] = new_rad + new_nodes["node_id"] = np.arange(max_id, max_id + where.shape[0]).astype(int) + new_nodes["parent_id"] = where[:, 0] + new_nodes["x"] = coords[:, 0] + new_nodes["y"] = coords[:, 1] + new_nodes["z"] = coords[:, 2] + new_nodes["radius"] = new_rad # Merge tables - nodes = pd.concat([x.nodes, new_nodes], - join='outer', axis=0, sort=True, ignore_index=True) + nodes = pd.concat( + [x.nodes, new_nodes], join="outer", axis=0, sort=True, ignore_index=True + ) # Remap nodes new_parents = dict(zip(where[:, 1], new_nodes.node_id.values)) to_rewire = nodes.node_id.isin(new_parents) - nodes.loc[to_rewire, 'parent_id'] = nodes.loc[to_rewire, 'node_id'].map(new_parents) + nodes.loc[to_rewire, "parent_id"] = nodes.loc[to_rewire, "node_id"].map(new_parents).values.astype( + nodes.dtypes["parent_id"], copy=False + ) if not inplace: x = x.copy() @@ -2101,9 +2150,9 @@ def insert_nodes(x: 'core.TreeNeuron', return x -def remove_nodes(x: 'core.TreeNeuron', - which: List[int], - inplace: bool = False) -> Optional['core.TreeNeuron']: +def remove_nodes( + x: "core.TreeNeuron", which: List[int], inplace: bool = False +) -> 
-def remove_nodes(x: 'core.TreeNeuron',
-                 which: List[int],
-                 inplace: bool = False) -> Optional['core.TreeNeuron']:
+def remove_nodes(
+    x: "core.TreeNeuron", which: List[int], inplace: bool = False
+) -> Optional["core.TreeNeuron"]:
     """Drop nodes from neuron without disconnecting it.

     Dropping node 2 from 1->2->3 will lead to connectivity 1->3.
@@ -2136,7 +2185,7 @@
     4365

     """
-    utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, ))
+    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

     if not utils.is_iterable(which):
         which = [which]

     miss = ~np.isin(which, x.nodes.node_id.values)
     if np.any(miss):
-        raise ValueError(f'{len(miss)} node IDs not found in neuron')
+        raise ValueError(f"{miss.sum()} node IDs not found in neuron")

     if not inplace:
         x = x.copy()
@@ -2157,7 +2206,7 @@
         lop.update({c: lop[n] for c, p in lop.items() if p == n})

     # Rewire neuron
-    x.nodes['parent_id'] = x.nodes.node_id.map(lop)
+    x.nodes["parent_id"] = x.nodes.node_id.map(lop)

     # Drop nodes
     x.nodes = x.nodes[~x.nodes.node_id.isin(which)].copy()
@@ -2168,10 +2217,9 @@
     return x


-def rewire_skeleton(x: 'core.TreeNeuron',
-                    g: nx.Graph,
-                    root: Optional[id] = None,
-                    inplace: bool = False) -> Optional['core.TreeNeuron']:
+def rewire_skeleton(
+    x: "core.TreeNeuron", g: nx.Graph, root: Optional[int] = None, inplace: bool = False
+) -> Optional["core.TreeNeuron"]:
     """Rewire neuron from graph.

     This function takes a graph representation of a neuron and rewires its
@@ -2214,8 +2262,8 @@
     2

     """
-    assert isinstance(x, core.TreeNeuron), f'Expected TreeNeuron, got {type(x)}'
-    assert isinstance(g, nx.Graph), f'Expected networkx graph, got {type(g)}'
+    assert isinstance(x, core.TreeNeuron), f"Expected TreeNeuron, got {type(x)}"
+    assert isinstance(g, nx.Graph), f"Expected networkx graph, got {type(g)}"

     if not inplace:
         x = x.copy()

     if g.is_directed():
         g = g.to_undirected()

-    g = nx.minimum_spanning_tree(g, weight='weight')
+    g = nx.minimum_spanning_tree(g, weight="weight")

     if not root:
         root = x.root[0] if x.root[0] in g.nodes else next(iter(g.nodes))
@@ -2243,7 +2291,7 @@
         lop.update({e[1]: e[0] for e in tree.edges})

     # Update parent IDs
-    x.nodes['parent_id'] = x.nodes.node_id.map(lambda x: lop.get(x, -1))
+    x.nodes["parent_id"] = x.nodes.node_id.map(lambda x: lop.get(x, -1))

     x._clear_temp_attr()

diff --git a/navis/morpho/mmetrics.py b/navis/morpho/mmetrics.py
index 6475e806..bf0593a0 100644
--- a/navis/morpho/mmetrics.py
+++ b/navis/morpho/mmetrics.py
@@ -378,46 +378,46 @@ def segment_analysis(x: "core.NeuronObject") -> "core.NeuronObject":
     1734350908     761.0     363.0      203.0     116.0       20.0      33.0  NaN

     """
-    utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, ))
+    utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,))

-    if 'strahler_index' not in x.nodes:
+    if "strahler_index" not in x.nodes:
         strahler_index(x)

     # Get small segments for this neuron
     segs = graph._break_segments(x)

     # For each segment get the SI
-    nodes = x.nodes.set_index('node_id')
-    SI = nodes.loc[[s[0] for s in segs], 'strahler_index'].values
+    nodes = x.nodes.set_index("node_id")
+    SI = nodes.loc[[s[0] for s in segs], "strahler_index"].values

     # Get segment lengths
     seg_lengths = np.array([graph.segment_length(x, s) for s in segs])

     # Get tortuosity
"z"]].values + end = nodes.loc[[s[-1] for s in segs], ["x", "y", "z"]].values L = np.sqrt(((start - end) ** 2).sum(axis=1)) tort = seg_lengths / L # Get distance from root - root_dists_dict = graph.dist_to_root(x, weight='weight') + root_dists_dict = graph.dist_to_root(x, weight="weight") root_dists = np.array([root_dists_dict[s[-1]] for s in segs]) # Compile results res = pd.DataFrame() - res['length'] = seg_lengths - res['tortuosity'] = tort - res['root_dist'] = root_dists - res['strahler_index'] = SI + res["length"] = seg_lengths + res["tortuosity"] = tort + res["root_dist"] = root_dists + res["strahler_index"] = SI - if 'radius' in nodes: + if "radius" in nodes: # Generate radius dict radii = nodes.radius.to_dict() seg_radii = [[radii.get(n, 0) for n in s] for s in segs] - res['radius_mean'] = [np.nanmean(s) for s in seg_radii] - res['radius_min'] = [np.nanmin(s) for s in seg_radii] - res['radius_max'] = [np.nanmax(s) for s in seg_radii] + res["radius_mean"] = [np.nanmean(s) for s in seg_radii] + res["radius_min"] = [np.nanmin(s) for s in seg_radii] + res["radius_max"] = [np.nanmax(s) for s in seg_radii] # Get radii for each cylinder r1 = nodes.index.map(radii).values @@ -428,16 +428,16 @@ def segment_analysis(x: "core.NeuronObject") -> "core.NeuronObject": h = parent_dist(x, root_dist=0) # Radii for top and bottom of tapered cylinder - vols = (1/3 * np.pi * (r1**2 + r1 * r2 + r2**2) * h) + vols = 1 / 3 * np.pi * (r1**2 + r1 * r2 + r2**2) * h vols_dict = dict(zip(nodes.index.values, vols)) # For each segment get the volume - res['volume'] = [np.nansum([vols_dict.get(n, 0) for n in s[:-1]]) for s in segs] + res["volume"] = [np.nansum([vols_dict.get(n, 0) for n in s[:-1]]) for s in segs] return res -def segregation_index(x: Union['core.NeuronObject', dict]) -> float: +def segregation_index(x: Union["core.NeuronObject", dict]) -> float: """Calculate segregation index (SI). The segregation index as established by Schneider-Mizell et al. 
@@ -476,38 +476,40 @@
     raise ValueError(f'Expected NeuronList or list, got "{type(x)}"')

     if isinstance(x, core.NeuronList) and len(x) <= 1:
-        raise ValueError(f'Expected multiple neurons, got {len(x)}')
+        raise ValueError(f"Expected multiple neurons, got {len(x)}")

     # Turn NeuronList into records
     if isinstance(x, core.NeuronList):
-        x = [{'presynapses': n.n_presynapses, 'postsynapses': n.n_postsynapses}
-             for n in x]
+        x = [
+            {"presynapses": n.n_presynapses, "postsynapses": n.n_postsynapses}
+            for n in x
+        ]

     # Extract the total number of pre- and postsynapses
-    total_pre = sum([n['presynapses'] for n in x])
-    total_post = sum([n['postsynapses'] for n in x])
+    total_pre = sum([n["presynapses"] for n in x])
+    total_post = sum([n["postsynapses"] for n in x])
     total_syn = total_pre + total_post

     # Calculate entropy for each fragment
     entropy = []
     for n in x:
-        n['total_syn'] = n['postsynapses'] + n['presynapses']
+        n["total_syn"] = n["postsynapses"] + n["presynapses"]

         # This is to avoid warnings
-        if n['total_syn']:
-            p = n['postsynapses'] / n['total_syn']
+        if n["total_syn"]:
+            p = n["postsynapses"] / n["total_syn"]
         else:
-            p = float('inf')
+            p = float("inf")

         if 0 < p < 1:
-            S = - (p * math.log(p) + (1 - p) * math.log(1 - p))
+            S = -(p * math.log(p) + (1 - p) * math.log(1 - p))
         else:
             S = 0

         entropy.append(S)

     # Calc entropy between fragments
-    S = 1 / total_syn * sum([e * n['total_syn'] for n, e in zip(x, entropy)])
+    S = 1 / total_syn * sum([e * n["total_syn"] for n, e in zip(x, entropy)])

     # Normalize to entropy in whole neuron
     p_norm = total_post / total_syn
@@ -521,10 +523,9 @@
     return H


-@utils.map_neuronlist(desc='Calc. seg.', allow_parallel=True)
-@utils.meshneuron_skeleton(method='node_properties',
-                           node_props=['segregation_index'])
-def arbor_segregation_index(x: 'core.NeuronObject') -> 'core.NeuronObject':
+@utils.map_neuronlist(desc="Calc. seg.", allow_parallel=True)
+@utils.meshneuron_skeleton(method="node_properties", node_props=["segregation_index"])
+def arbor_segregation_index(x: "core.NeuronObject") -> "core.NeuronObject":
     """Per arbor segregation index (SI).

     The segregation index (SI) as established by Schneider-Mizell et al. (eLife,
@@ -573,16 +574,16 @@
     raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

     if not x.has_connectors:
-        raise ValueError('Neuron must have connectors.')
+        raise ValueError("Neuron must have connectors.")

     # Figure out how connector types are labeled
     cn_types = x.connectors.type.unique()
-    if all(np.isin(['pre', 'post'], cn_types)):
-        pre, post = 'pre', 'post'
+    if all(np.isin(["pre", "post"], cn_types)):
+        pre, post = "pre", "post"
     elif all(np.isin([0, 1], cn_types)):
         pre, post = 0, 1
     else:
-        raise ValueError(f'Unable to parse connector types for neuron {x.id}')
+        raise ValueError(f"Unable to parse connector types for neuron {x.id}")

     # Get list of nodes with pre/postsynapses
     pre_node_ids = x.connectors[x.connectors.type == pre].node_id.values
@@ -590,27 +591,24 @@
     # Get list of points to calculate SI for:
     # branch points and their children plus nodes with connectors
-    is_bp = x.nodes['type'].isin(['branch', 'root'])
-    is_bp_child = x.nodes.parent_id.isin(x.nodes.loc[is_bp, 'node_id'].values)
+    is_bp = x.nodes["type"].isin(["branch", "root"])
+    is_bp_child = x.nodes.parent_id.isin(x.nodes.loc[is_bp, "node_id"].values)
     is_cn = x.nodes.node_id.isin(x.connectors.node_id)
     calc_node_ids = x.nodes[is_bp | is_bp_child | is_cn].node_id.values

     # We will be processing a super downsampled version of the neuron to speed
     # up calculations
     current_level = logger.level
-    logger.setLevel('ERROR')
-    y = x.downsample(factor=float('inf'),
-                     preserve_nodes=calc_node_ids,
-                     inplace=False)
+    logger.setLevel("ERROR")
+    y = x.downsample(factor=float("inf"), preserve_nodes=calc_node_ids, inplace=False)
     logger.setLevel(current_level)

     # Get number of pre/postsynapses distal to each branch's children
     # Note that we're using geodesic matrix here because it is much more
     # efficient than `distal_to` for larger queries/neurons
-    dists = graph.geodesic_matrix(y,
-                                  from_=np.append(pre_node_ids, post_node_ids),
-                                  directed=True,
-                                  weight=None)
+    dists = graph.geodesic_matrix(
+        y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None
+    )
     distal = dists[calc_node_ids] < np.inf

     # Since nodes can have multiple pre-/postsynapses but they show up only
@@ -632,8 +630,10 @@
     # Get the SI if we were to cut at this point
     post = distal_post_sum[n]
     pre = distal_pre_sum[n]
-    n_syn = [{'presynapses': pre, 'postsynapses': post},
-             {'presynapses': total_pre - pre, 'postsynapses': total_post - post}]
+    n_syn = [
+        {"presynapses": pre, "postsynapses": post},
+        {"presynapses": total_pre - pre, "postsynapses": total_post - post},
+    ]
     SI[n] = segregation_index(n_syn)

     # At this point there are only segregation indices for branch points and
@@ -657,17 +657,19 @@
     SI.update({n: this_SI for n in s[:-2]})

     # Add segregation index to node table
-    x.nodes['segregation_index'] = x.nodes.node_id.map(SI)
+    x.nodes["segregation_index"] = x.nodes.node_id.map(SI)

     return x


-@utils.map_neuronlist(desc='Calc. flow', allow_parallel=True)
-@utils.meshneuron_skeleton(method='node_properties',
-                           include_connectors=True,
-                           heal=True,
-                           node_props=['bending_flow'])
-def bending_flow(x: 'core.NeuronObject') -> 'core.NeuronObject':
+@utils.map_neuronlist(desc="Calc. 
flow", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", + include_connectors=True, + heal=True, + node_props=["bending_flow"], +) +def bending_flow(x: "core.NeuronObject") -> "core.NeuronObject": """Calculate synapse "bending" flow. This is a variation of the algorithm for calculating synapse flow from @@ -720,35 +722,33 @@ def bending_flow(x: 'core.NeuronObject') -> 'core.NeuronObject': raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"') if not x.has_connectors: - raise ValueError('Neuron must have connectors.') + raise ValueError("Neuron must have connectors.") if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)): - logger.warning(f'Neuron {x.id} is not rooted to its soma!') + logger.warning(f"Neuron {x.id} is not rooted to its soma!") # We will be processing a super downsampled version of the neuron to speed # up calculations current_level = logger.level - logger.setLevel('ERROR') - y = x.downsample(factor=float('inf'), - preserve_nodes='connectors', - inplace=False) + logger.setLevel("ERROR") + y = x.downsample(factor=float("inf"), preserve_nodes="connectors", inplace=False) logger.setLevel(current_level) # Figure out how connector types are labeled cn_types = y.connectors.type.unique() - if all(np.isin(['pre', 'post'], cn_types)): - pre, post = 'pre', 'post' + if all(np.isin(["pre", "post"], cn_types)): + pre, post = "pre", "post" elif all(np.isin([0, 1], cn_types)): pre, post = 0, 1 else: - raise ValueError(f'Unable to parse connector types for neuron {y.id}') + raise ValueError(f"Unable to parse connector types for neuron {y.id}") # Get list of nodes with pre/postsynapses pre_node_ids = y.connectors[y.connectors.type == pre].node_id.values post_node_ids = y.connectors[y.connectors.type == post].node_id.values # Get list of branch_points - bp_node_ids = y.nodes[y.nodes.type == 'branch'].node_id.values.tolist() + bp_node_ids = y.nodes[y.nodes.type == "branch"].node_id.values.tolist() # Add root if it is also a branch point for root in y.root: if y.graph.degree(root) > 1: @@ -761,10 +761,9 @@ def bending_flow(x: 'core.NeuronObject') -> 'core.NeuronObject': # Get number of pre/postsynapses distal to each branch's childs # Note that we're using geodesic matrix here because it is much more # efficient than for `distal_to` for larger queries/neurons - dists = graph.geodesic_matrix(y, - from_=np.append(pre_node_ids, post_node_ids), - directed=True, - weight=None) + dists = graph.geodesic_matrix( + y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None + ) distal = dists[childs] < np.inf # Since nodes can have multiple pre-/postsynapses but they show up only @@ -801,16 +800,15 @@ def bending_flow(x: 'core.NeuronObject') -> 'core.NeuronObject': flow.update({n: this_flow for n in s}) # Set flow centrality to None for all nodes - x.nodes['bending_flow'] = x.nodes.node_id.map(flow) + x.nodes["bending_flow"] = x.nodes.node_id.map(flow) return x -def _flow_centrality_igraph(x: 'core.NeuronObject', - mode: Union[Literal['centrifugal'], - Literal['centripetal'], - Literal['sum']] = 'sum' - ) -> 'core.NeuronObject': +def _flow_centrality_igraph( + x: "core.NeuronObject", + mode: Union[Literal["centrifugal"], Literal["centripetal"], Literal["sum"]] = "sum", +) -> "core.NeuronObject": """iGraph-based flow centrality. Turns out this is much slower than the other implementation. Will keep it @@ -835,37 +833,41 @@ def _flow_centrality_igraph(x: 'core.NeuronObject', (for MeshNeurons). 
""" - if mode not in ['centrifugal', 'centripetal', 'sum']: + if mode not in ["centrifugal", "centripetal", "sum"]: raise ValueError(f'Unknown "mode" parameter: {mode}') if not isinstance(x, core.TreeNeuron): raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"') if not x.has_connectors: - raise ValueError('Neuron must have connectors.') + raise ValueError("Neuron must have connectors.") if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)): - logger.warning(f'Neuron {x.id} is not rooted to its soma!') + logger.warning(f"Neuron {x.id} is not rooted to its soma!") # Figure out how connector types are labeled cn_types = x.connectors.type.unique() - if any(np.isin(['pre', 'post'], cn_types)): - pre, post = 'pre', 'post' + if any(np.isin(["pre", "post"], cn_types)): + pre, post = "pre", "post" elif any(np.isin([0, 1], cn_types)): pre, post = 0, 1 else: - raise ValueError(f'Unable to parse connector types "{cn_types}" for neuron {x.id}') + raise ValueError( + f'Unable to parse connector types "{cn_types}" for neuron {x.id}' + ) # Get list of nodes with pre/postsynapses - pre_node_ids, pre_counts = np.unique(x.connectors[x.connectors.type == pre].node_id.values, - return_counts=True) + pre_node_ids, pre_counts = np.unique( + x.connectors[x.connectors.type == pre].node_id.values, return_counts=True + ) pre_counts_dict = dict(zip(pre_node_ids, pre_counts)) - post_node_ids, post_counts = np.unique(x.connectors[x.connectors.type == post].node_id.values, - return_counts=True) + post_node_ids, post_counts = np.unique( + x.connectors[x.connectors.type == post].node_id.values, return_counts=True + ) post_counts_dict = dict(zip(post_node_ids, post_counts)) # Note: even downsampling the neuron doesn't really speed things up much - #y = sampling.downsample_neuron(x=x, + # y = sampling.downsample_neuron(x=x, # downsampling_factor=float('inf'), # inplace=False, # preserve_nodes=np.append(pre_node_ids, post_node_ids)) @@ -874,15 +876,15 @@ def _flow_centrality_igraph(x: 'core.NeuronObject', sources = G.vs.select(node_id_in=post_node_ids) targets = G.vs.select(node_id_in=pre_node_ids) - if mode in ('centrifugal', ): - paths = [G.get_all_shortest_paths(so, targets, mode='in') for so in sources] + if mode in ("centrifugal",): + paths = [G.get_all_shortest_paths(so, targets, mode="in") for so in sources] # Mode "in" paths are sorted by child -> parent # We'll invert them to make our life easier paths = [v[::-1] for p in paths for v in p] - elif mode in ('centripetal', ): - paths = [G.get_all_shortest_paths(so, targets, mode='out') for so in sources] + elif mode in ("centripetal",): + paths = [G.get_all_shortest_paths(so, targets, mode="out") for so in sources] else: - paths = [G.get_all_shortest_paths(so, targets, mode='all') for so in sources] + paths = [G.get_all_shortest_paths(so, targets, mode="all") for so in sources] # Calculate the flow flow = {} @@ -895,12 +897,12 @@ def _flow_centrality_igraph(x: 'core.NeuronObject', # Get the source for all these paths so = G.vs[pp[0][0]] # Find the post (i.e. source) multiplier - post_mul = post_counts_dict[so.attributes()['node_id']] + post_mul = post_counts_dict[so.attributes()["node_id"]] # Go over individual paths for vs in pp: # Translate path to node IDs - v_id = G.vs[vs].get_attribute_values('node_id') + v_id = G.vs[vs].get_attribute_values("node_id") # Get the pre (i.e. 
target) multiplier
            pre_mul = pre_counts_dict[v_id[-1]]

@@ -910,7 +912,7 @@
            for i in v_id:
                flow[i] = flow.get(i, 0) + post_mul * pre_mul

-    x.nodes['synapse_flow_centrality'] = x.nodes.node_id.map(flow).fillna(0).astype(int)
+    x.nodes["synapse_flow_centrality"] = x.nodes.node_id.map(flow).fillna(0).astype(int)

    # Add info on method/mode used for flow centrality
    x.centrality_method = mode  # type: ignore
@@ -1059,7 +1061,7 @@

    # Get list of points to calculate flow centrality for:
    # branches and their children
-    is_bp = x.nodes['type'] == 'branch'
+    is_bp = x.nodes["type"] == "branch"
    is_cn = x.nodes.node_id.isin(x.connectors.node_id)
    calc_node_ids = x.nodes[is_bp | is_cn].node_id.values

@@ -1067,22 +1069,23 @@
    # speed up calculations
    current_level = logger.level
    current_state = config.pbar_hide
-    logger.setLevel('ERROR')
+    logger.setLevel("ERROR")
    config.pbar_hide = True
-    y = sampling.downsample_neuron(x=x,
-                                   downsampling_factor=float('inf'),
-                                   inplace=False,
-                                   preserve_nodes=calc_node_ids)
+    y = sampling.downsample_neuron(
+        x=x,
+        downsampling_factor=float("inf"),
+        inplace=False,
+        preserve_nodes=calc_node_ids,
+    )
    logger.setLevel(current_level)
    config.pbar_hide = current_state

    # Get number of pre/postsynapses distal to each branch's children
    # Note that we're using geodesic matrix here because it is much more
    # efficient than `distal_to` for larger queries/neurons
-    dists = graph.geodesic_matrix(y,
-                                  from_=np.append(pre_node_ids, post_node_ids),
-                                  directed=True,
-                                  weight=None)
+    dists = graph.geodesic_matrix(
+        y, from_=np.append(pre_node_ids, post_node_ids), directed=True, weight=None
+    )
    distal = dists[calc_node_ids] < np.inf

    # Since nodes can have multiple pre-/postsynapses but they show up only
@@ -1095,20 +1098,24 @@
    distal_pre = distal_pre.sum(axis=0)
    distal_post = distal_post.sum(axis=0)

-    if mode != 'centripetal':
+    if mode != "centripetal":
        # Centrifugal is the flow from all proximal post- to all distal presynapses
-        centrifugal = {n: (total_post - distal_post[n]) * distal_pre[n] for n in calc_node_ids}
+        centrifugal = {
+            n: (total_post - distal_post[n]) * distal_pre[n] for n in calc_node_ids
+        }

-    if mode != 'centrifugal':
+    if mode != "centrifugal":
        # Centripetal is the flow from all distal post- to all non-distal presynapses
-        centripetal = {n: distal_post[n] * (total_pre - distal_pre[n]) for n in calc_node_ids}
+        centripetal = {
+            n: distal_post[n] * (total_pre - distal_pre[n]) for n in calc_node_ids
+        }

    # Now map this onto our neuron
-    if mode == 'centrifugal':
+    if mode == "centrifugal":
        flow = centrifugal
-    elif mode == 'centripetal':
+    elif mode == "centripetal":
        flow = centripetal
-    elif mode == 'sum':
+    elif mode == "sum":
        flow = {n: centrifugal[n] + centripetal[n] for n in centrifugal}

    # At this point there is only flow for branch points and connector nodes.
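As a quick sanity check of the centrifugal/centripetal formulas in the hunk above, consider a toy example (made-up synapse counts for a single node `n`; not part of the patch):

    total_pre, total_post = 10, 20     # synapses on the entire neuron
    distal_pre, distal_post = 4, 5     # synapses distal to node n

    centrifugal = (total_post - distal_post) * distal_pre  # (20 - 5) * 4 = 60
    centripetal = distal_post * (total_pre - distal_pre)   # 5 * (10 - 4) = 30
    assert centrifugal + centripetal == 90                 # flow for mode="sum"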
@@ -1122,20 +1129,20 @@ def synapse_flow_centrality(
    # For each node get the flow of its child
    for i in range(1, len(s)):
        if s[i] not in flow:
-            flow[s[i]] = flow[s[i-1]]
+            flow[s[i]] = flow[s[i - 1]]

-    x.nodes['synapse_flow_centrality'] = x.nodes.node_id.map(flow).fillna(0).astype(int)
+    x.nodes["synapse_flow_centrality"] = x.nodes.node_id.map(flow).fillna(0).astype(int)

    # We need to add a restriction: a branchpoint cannot have a lower
    # flow than its highest child -> this happens at the main branch point to
    # the cell body fiber because the flow doesn't go "through" it in
    # child -> parent direction but rather "across" it from one child to the
    # other
-    bp = x.nodes.loc[is_bp, 'node_id'].values
+    bp = x.nodes.loc[is_bp, "node_id"].values
    bp_childs = x.nodes[x.nodes.parent_id.isin(bp)]
-    max_flow = bp_childs.groupby('parent_id').synapse_flow_centrality.max()
-    x.nodes.loc[is_bp, 'synapse_flow_centrality'] = max_flow.loc[bp].values
-    x.nodes['synapse_flow_centrality'] = x.nodes.synapse_flow_centrality.astype(int)
+    max_flow = bp_childs.groupby("parent_id").synapse_flow_centrality.max()
+    x.nodes.loc[is_bp, "synapse_flow_centrality"] = max_flow.loc[bp].values
+    x.nodes["synapse_flow_centrality"] = x.nodes.synapse_flow_centrality.astype(int)

    # Add info on method/mode used for flow centrality
    x.centrality_method = mode  # type: ignore
@@ -1143,12 +1150,14 @@
    return x


-@utils.map_neuronlist(desc='Calc. flow', allow_parallel=True)
-@utils.meshneuron_skeleton(method='node_properties',
-                           include_connectors=True,
-                           heal=True,
-                           node_props=['flow_centrality'])
-def flow_centrality(x: 'core.NeuronObject') -> 'core.NeuronObject':
+@utils.map_neuronlist(desc="Calc. flow", allow_parallel=True)
+@utils.meshneuron_skeleton(
+    method="node_properties",
+    include_connectors=True,
+    heal=True,
+    node_props=["flow_centrality"],
+)
+def flow_centrality(x: "core.NeuronObject") -> "core.NeuronObject":
    """Calculate flow between leaf nodes.

    Parameters
@@ -1183,11 +1192,13 @@
    # implementation using igraph + shortest paths which works like a charm and
    # causes less headaches. It is, however, about >10X slower than this version!
    # Note to self: do not go down that rabbit hole again!
-    msg = ("Synapse-based flow centrality has been moved to "
-           "`navis.synapse_flow_centrality` in navis "
-           "version 1.4.0. `navis.flow_centrality` now calculates "
-           "morphology-only flow."
-           "This warning will be removed in a future version of navis.")
+    msg = (
+        "Synapse-based flow centrality has been moved to "
+        "`navis.synapse_flow_centrality` in navis "
+        "version 1.4.0. `navis.flow_centrality` now calculates "
+        "morphology-only flow. "
+        "This warning will be removed in a future version of navis."
+    )
    warnings.warn(msg, DeprecationWarning)
    logger.warning(msg)

@@ -1195,7 +1206,7 @@
    if not isinstance(x, core.TreeNeuron):
        raise ValueError(f'Expected TreeNeuron(s), got "{type(x)}"')

    if np.any(x.soma) and not np.all(np.isin(x.soma, x.root)):
-        logger.warning(f'Neuron {x.id} is not rooted to its soma!')
+        logger.warning(f"Neuron {x.id} is not rooted to its soma!")

    # Get list of leafs
    leafs = x.leafs.node_id.values
@@ -1208,22 +1219,21 @@
    # speed up calculations
    current_level = logger.level
    current_state = config.pbar_hide
-    logger.setLevel('ERROR')
+    logger.setLevel("ERROR")
    config.pbar_hide = True
-    y = sampling.downsample_neuron(x=x,
-                                   downsampling_factor=float('inf'),
-                                   inplace=False,
-                                   preserve_nodes=calc_node_ids)
+    y = sampling.downsample_neuron(
+        x=x,
+        downsampling_factor=float("inf"),
+        inplace=False,
+        preserve_nodes=calc_node_ids,
+    )
    logger.setLevel(current_level)
    config.pbar_hide = current_state

    # Get number of leafs distal to each branch's children
    # Note that we're using geodesic matrix here because it is much more
    # efficient than `distal_to` for larger queries/neurons
-    dists = graph.geodesic_matrix(y,
-                                  from_=leafs,
-                                  directed=True,
-                                  weight=None)
+    dists = graph.geodesic_matrix(y, from_=leafs, directed=True, weight=None)
    distal = (dists[calc_node_ids] < np.inf).sum(axis=0)

    # Calculate the flow
@@ -1240,31 +1250,29 @@
    # For each node get the flow of its child
    for i in range(1, len(s)):
        if s[i] not in flow:
-            flow[s[i]] = flow[s[i-1]]
+            flow[s[i]] = flow[s[i - 1]]

-    x.nodes['flow_centrality'] = x.nodes.node_id.map(flow).fillna(0).astype(int)
+    x.nodes["flow_centrality"] = x.nodes.node_id.map(flow).fillna(0).astype(int)

-    # Need to add a restriction, that a branchpoint cannot have a lower
+    # We need to add a restriction: a branchpoint cannot have a lower
    # flow than its highest child -> this happens at the main branch point to
    # the cell body fiber because the flow doesn't go "through" it in
    # child -> parent direction but rather "across" it from one child to the
    # other
-    is_bp = x.nodes['type'] == 'branch'
-    bp = x.nodes.loc[is_bp, 'node_id'].values
+    is_bp = x.nodes["type"] == "branch"
+    bp = x.nodes.loc[is_bp, "node_id"].values
    bp_childs = x.nodes[x.nodes.parent_id.isin(bp)]
-    max_flow = bp_childs.groupby('parent_id').flow_centrality.max()
-    x.nodes.loc[is_bp, 'flow_centrality'] = max_flow.loc[bp].values
-    x.nodes['flow_centrality'] = x.nodes.flow_centrality.astype(int)
+    max_flow = bp_childs.groupby("parent_id").flow_centrality.max()
+    x.nodes.loc[is_bp, "flow_centrality"] = max_flow.loc[bp].values
+    x.nodes["flow_centrality"] = x.nodes.flow_centrality.astype(int)

    return x


-def tortuosity(x: 'core.NeuronObject',
-               seg_length: Union[int, float, str,
-                                 Sequence[Union[int, float, str]]] = 10
-               ) -> Union[float,
-                          Sequence[float],
-                          pd.DataFrame]:
+def tortuosity(
+    x: "core.NeuronObject",
+    seg_length: Union[int, float, str, Sequence[Union[int, float, str]]] = 10,
+) -> Union[float, Sequence[float], pd.DataFrame]:
    """Calculate tortuosity of a neuron.

    See Stepanyants et al., Neuron (2004) for detailed explanation. Briefly,
    tortuosity index `T` is defined as the ratio of a segment's geodesic length
    `L` (``seg_length``) to the Euclidean distance `R` between its ends.

    The way this is implemented in `navis`:
+
    1. Each linear stretch (i.e. 
between branch points or branch points to a leaf node) is divided into segments of exactly ``seg_length`` geodesic length. Any remainder is skipped. @@ -1283,7 +1292,7 @@ def tortuosity(x: 'core.NeuronObject', Note ---- If you want to make sure that segments are as close to length `L` as - possible, consider resampling the neuron using :func:`navis.resample_skeleton`. + possible, consider resampling the neuron using [`navis.resample_skeleton`][]. Parameters ---------- @@ -1323,35 +1332,44 @@ def tortuosity(x: 'core.NeuronObject', if isinstance(x, core.NeuronList): if not isinstance(seg_length, (list, np.ndarray, tuple)): seg_length = [seg_length] # type: ignore - df = pd.DataFrame([tortuosity(n, - seg_length=seg_length) for n in config.tqdm(x, - desc='Tortuosity', - disable=config.pbar_hide, - leave=config.pbar_leave)], - index=x.id, columns=seg_length).T - df.index.name = 'seg_length' + df = pd.DataFrame( + [ + tortuosity(n, seg_length=seg_length) + for n in config.tqdm( + x, + desc="Tortuosity", + disable=config.pbar_hide, + leave=config.pbar_leave, + ) + ], + index=x.id, + columns=seg_length, + ).T + df.index.name = "seg_length" return df if isinstance(x, core.MeshNeuron): x = x.skeleton elif not isinstance(x, core.TreeNeuron): - raise TypeError(f'Expected TreeNeuron(s), got {type(x)}') + raise TypeError(f"Expected TreeNeuron(s), got {type(x)}") if isinstance(seg_length, (list, np.ndarray)): return [tortuosity(x, l) for l in seg_length] # From here on out seg length is single value - seg_length: float = x.map_units(seg_length, on_error='raise') + seg_length: float = x.map_units(seg_length, on_error="raise") if seg_length <= 0: - raise ValueError('`seg_length` must be > 0.') + raise ValueError("`seg_length` must be > 0.") res = x.sampling_resolution if seg_length <= res: - raise ValueError('`seg_length` must not be smaller than the sampling ' - f'resolution of the neuron ({res:.2f}).') + raise ValueError( + "`seg_length` must not be smaller than the sampling " + f"resolution of the neuron ({res:.2f})." 
+ ) # Iterate over segments - locs = x.nodes.set_index('node_id')[['x', 'y', 'z']].astype(float) + locs = x.nodes.set_index("node_id")[["x", "y", "z"]].astype(float) T_all = [] for i, seg in enumerate(x.small_segments): # Get coordinates @@ -1372,12 +1390,9 @@ def tortuosity(x: 'core.NeuronObject', new_dist = np.arange(0, dist[-1], seg_length) try: - sampleX = scipy.interpolate.interp1d(dist, coords[:, 0], - kind='linear') - sampleY = scipy.interpolate.interp1d(dist, coords[:, 1], - kind='linear') - sampleZ = scipy.interpolate.interp1d(dist, coords[:, 2], - kind='linear') + sampleX = scipy.interpolate.interp1d(dist, coords[:, 0], kind="linear") + sampleY = scipy.interpolate.interp1d(dist, coords[:, 1], kind="linear") + sampleZ = scipy.interpolate.interp1d(dist, coords[:, 2], kind="linear") except ValueError: continue @@ -1398,17 +1413,13 @@ def tortuosity(x: 'core.NeuronObject', return T_all.mean() -@utils.map_neuronlist(desc='Sholl analysis', allow_parallel=True) -def sholl_analysis(x: 'core.NeuronObject', - radii: Union[int, list] = 10, - center: Union[Literal['root'], - Literal['soma'], - list, - int] = 'centermass', - geodesic=False, - ) -> Union[float, - Sequence[float], - pd.DataFrame]: +@utils.map_neuronlist(desc="Sholl analysis", allow_parallel=True) +def sholl_analysis( + x: "core.NeuronObject", + radii: Union[int, list] = 10, + center: Union[Literal["root"], Literal["soma"], list, int] = "centermass", + geodesic=False, +) -> Union[float, Sequence[float], pd.DataFrame]: """Run Sholl analysis for given neuron(s). Parameters @@ -1461,57 +1472,68 @@ def sholl_analysis(x: 'core.NeuronObject', x = x.skeleton if not isinstance(x, core.TreeNeuron): - raise TypeError(f'Expected TreeNeuron or MeshNeuron(s), got {type(x)}') + raise TypeError(f"Expected TreeNeuron or MeshNeuron(s), got {type(x)}") if geodesic and len(x.root) > 1: - raise ValueError('Unable to use `geodesic=True` with fragmented ' - 'neurons. Use `navis.heal_fragmented_neuron` first.') + raise ValueError( + "Unable to use `geodesic=True` with fragmented " + "neurons. Use `navis.heal_fragmented_neuron` first." + ) - if center == 'soma' and not x.has_soma: - raise ValueError(f'Neuron {x.id} has no soma.') + if center == "soma" and not x.has_soma: + raise ValueError(f"Neuron {x.id} has no soma.") elif utils.is_iterable(center): center = np.asarray(center) if center.ndim != 1 or len(center) != 3: - raise ValueError('`center` must be (3, ) list-like when providing ' - f'a coordinate. Got {center.shape}') + raise ValueError( + "`center` must be (3, ) list-like when providing " + f"a coordinate. Got {center.shape}" + ) if geodesic: - raise ValueError('Must not provide a `center` as coordinate when ' - 'geodesic=True') - elif center == 'root' and len(x.root) > 1: - raise ValueError(f'Neuron {x.id} has multiple roots. Please specify ' - 'which node/coordinate to use as center.') + raise ValueError( + "Must not provide a `center` as coordinate when " "geodesic=True" + ) + elif center == "root" and len(x.root) > 1: + raise ValueError( + f"Neuron {x.id} has multiple roots. Please specify " + "which node/coordinate to use as center." 
+ ) - if center == 'centermass': - center = x.nodes[['x', 'y', 'z']].mean(axis=0).values + if center == "centermass": + center = x.nodes[["x", "y", "z"]].mean(axis=0).values # Calculate distances for each node - nodes = x.nodes.set_index('node_id').copy() + nodes = x.nodes.set_index("node_id").copy() if not geodesic: if isinstance(center, int): if center not in nodes.index.values: - raise ValueError(f'{center} is not a valid node ID.') + raise ValueError(f"{center} is not a valid node ID.") - center = nodes.loc[center, ['x', 'y', 'z']].values - elif center == 'soma': - center = nodes.loc[utils.make_iterable(x.soma)[0], ['x', 'y', 'z']].values - elif center == 'root': - center = nodes.loc[utils.make_iterable(x.root)[0], ['x', 'y', 'z']].values + center = nodes.loc[center, ["x", "y", "z"]].values + elif center == "soma": + center = nodes.loc[utils.make_iterable(x.soma)[0], ["x", "y", "z"]].values + elif center == "root": + center = nodes.loc[utils.make_iterable(x.root)[0], ["x", "y", "z"]].values center = center.astype(float) - nodes['dist'] = np.sqrt(((x.nodes[['x', 'y', 'z']].values - center)**2).sum(axis=1)) + nodes["dist"] = np.sqrt( + ((x.nodes[["x", "y", "z"]].values - center) ** 2).sum(axis=1) + ) else: - if center == 'soma': + if center == "soma": center = x.soma[0] - elif center == 'root': + elif center == "root": center = x.root[0] - nodes['dist'] = graph.geodesic_matrix(x, from_=center)[x.nodes.node_id.values].values[0] + nodes["dist"] = graph.geodesic_matrix(x, from_=center)[ + x.nodes.node_id.values + ].values[0] not_root = nodes.parent_id >= 0 - dists = nodes.loc[not_root, 'dist'].values - pdists = nodes.loc[nodes[not_root].parent_id.values, 'dist'].values + dists = nodes.loc[not_root, "dist"].values + pdists = nodes.loc[nodes[not_root].parent_id.values, "dist"].values le = parent_dist(x)[not_root] - ty = nodes.loc[not_root, 'type'].values + ty = nodes.loc[not_root, "type"].values # Generate radii for the Sholl spheres if isinstance(radii, int): @@ -1530,25 +1552,24 @@ def sholl_analysis(x: 'core.NeuronObject', cable = le[this_sphere].sum() # The number of branch points in this sphere - n_branchpoints = (ty[this_sphere] == 'branch').sum() + n_branchpoints = (ty[this_sphere] == "branch").sum() data.append([radii[i], crossings, cable, n_branchpoints]) - return pd.DataFrame(data, - columns=['radius', 'intersections', - 'cable_length', 'branch_points']).set_index('radius') - - -@utils.map_neuronlist(desc='Calc. betweeness', allow_parallel=True) -@utils.meshneuron_skeleton(method='node_properties', - reroot_soma=True, - node_props=['betweenness']) -def betweeness_centrality(x: 'core.NeuronObject', - from_: Optional[Union[Literal['leafs'], - Literal['branch_points'], - Sequence] - ] = None, - directed: bool = True) -> 'core.NeuronObject': + return pd.DataFrame( + data, columns=["radius", "intersections", "cable_length", "branch_points"] + ).set_index("radius") + + +@utils.map_neuronlist(desc="Calc. betweeness", allow_parallel=True) +@utils.meshneuron_skeleton( + method="node_properties", reroot_soma=True, node_props=["betweenness"] +) +def betweeness_centrality( + x: "core.NeuronObject", + from_: Optional[Union[Literal["leafs"], Literal["branch_points"], Sequence]] = None, + directed: bool = True, +) -> "core.NeuronObject": """Calculate vertex/node betweenness. 
Betweenness is (roughly) defined by the number of shortest paths going @@ -1589,43 +1610,46 @@ def betweeness_centrality(x: 'core.NeuronObject', 59637 """ - utils.eval_param(x, name='x', allowed_types=(core.TreeNeuron, )) + utils.eval_param(x, name="x", allowed_types=(core.TreeNeuron,)) if isinstance(from_, str): - utils.eval_param(from_, name='from_', - allowed_values=('leafs', 'branch_points')) + utils.eval_param(from_, name="from_", allowed_values=("leafs", "branch_points")) else: - utils.eval_param(from_, name='from_', - allowed_types=(type(None), np.ndarray, list, tuple, set)) + utils.eval_param( + from_, + name="from_", + allowed_types=(type(None), np.ndarray, list, tuple, set), + ) G = x.igraph if isinstance(from_, type(None)): - bc = dict(zip(G.vs.get_attribute_values('node_id'), - G.betweenness(directed=directed))) + bc = dict( + zip(G.vs.get_attribute_values("node_id"), G.betweenness(directed=directed)) + ) else: if not directed: - raise ValueError('`from_!=None` only implemented for `directed=True`') + raise ValueError("`from_!=None` only implemented for `directed=True`") paths = [] - if from_ == 'leafs': + if from_ == "leafs": sources = G.vs.select(_indegree=0) - elif from_ == 'branch_points': + elif from_ == "branch_points": sources = G.vs.select(_indegree_ge=2) else: sources = G.vs.select(node_id_in=from_) roots = G.vs.select(_outdegree=0) for r in roots: - paths += G.get_shortest_paths(r, to=sources, mode='in') + paths += G.get_shortest_paths(r, to=sources, mode="in") # Drop too short paths paths = [p for p in paths if len(p) > 2] flat_ix = [i for p in paths for i in p[:-1]] ix, counts = np.unique(flat_ix, return_counts=True) - ids = [G.vs[i]['node_id'] for i in ix] + ids = [G.vs[i]["node_id"] for i in ix] bc = {i: 0 for i in x.nodes.node_id.values} bc.update(dict(zip(ids, counts))) - x.nodes['betweenness'] = x.nodes.node_id.map(bc).astype(int) + x.nodes["betweenness"] = x.nodes.node_id.map(bc).astype(int) return x diff --git a/navis/plotting/plot_utils.py b/navis/plotting/plot_utils.py index 11b28294..f25313a3 100644 --- a/navis/plotting/plot_utils.py +++ b/navis/plotting/plot_utils.py @@ -27,16 +27,15 @@ from collections.abc import Iterable from typing import Tuple, Optional, List, Dict -__all__ = ['tn_pairs_to_coords', 'segments_to_coords', 'fibonacci_sphere', 'make_tube'] + +__all__ = ["tn_pairs_to_coords", "segments_to_coords", "fibonacci_sphere", "make_tube"] logger = config.get_logger(__name__) -def tn_pairs_to_coords(x: core.TreeNeuron, - modifier: Optional[Tuple[float, - float, - float]] = (1, 1, 1) - ) -> np.ndarray: +def tn_pairs_to_coords( + x: core.TreeNeuron, modifier: Optional[Tuple[float, float, float]] = (1, 1, 1) +) -> np.ndarray: """Return pairs of child->parent node coordinates. Parameters @@ -57,9 +56,10 @@ def tn_pairs_to_coords(x: core.TreeNeuron, nodes = x.nodes[x.nodes.parent_id >= 0] - tn_co = nodes.loc[:, ['x', 'y', 'z']].values - parent_co = x.nodes.set_index('node_id').loc[nodes.parent_id.values, - ['x', 'y', 'z']].values + tn_co = nodes.loc[:, ["x", "y", "z"]].values + parent_co = ( + x.nodes.set_index("node_id").loc[nodes.parent_id.values, ["x", "y", "z"]].values + ) coords = np.append(tn_co, parent_co, axis=1) @@ -215,14 +215,28 @@ def make_tube(segments, radii=1.0, tube_points=8, use_normals=True): # Vertices for each point on the circle verts = np.repeat(points, tube_points, axis=0) - v = np.arange(tube_points, - dtype=np.float_) / tube_points * 2 * np.pi - - all_cx = (radius * -1. 
* np.tile(np.cos(v), points.shape[0]).reshape((tube_points, points.shape[0]), order='F')).T - cx_norm = (all_cx[:, :, np.newaxis] * normals[:, np.newaxis, :]).reshape(verts.shape) + v = np.arange(tube_points, dtype=np.float_) / tube_points * 2 * np.pi - all_cy = (radius * np.tile(np.sin(v), points.shape[0]).reshape((tube_points, points.shape[0]), order='F')).T - cy_norm = (all_cy[:, :, np.newaxis] * binormals[:, np.newaxis, :]).reshape(verts.shape) + all_cx = ( + radius + * -1.0 + * np.tile(np.cos(v), points.shape[0]).reshape( + (tube_points, points.shape[0]), order="F" + ) + ).T + cx_norm = (all_cx[:, :, np.newaxis] * normals[:, np.newaxis, :]).reshape( + verts.shape + ) + + all_cy = ( + radius + * np.tile(np.sin(v), points.shape[0]).reshape( + (tube_points, points.shape[0]), order="F" + ) + ).T + cy_norm = (all_cy[:, :, np.newaxis] * binormals[:, np.newaxis, :]).reshape( + verts.shape + ) verts = verts + cx_norm + cy_norm @@ -248,8 +262,12 @@ def make_tube(segments, radii=1.0, tube_points=8, use_normals=True): ix_d = np.append(ix_d[:, 1:], ix_d[:, [0]], axis=1) ix_d = ix_d.ravel() - faces1 = np.concatenate((ix_a, ix_b, ix_d), axis=0).reshape((n_segments * tube_points, 3), order='F') - faces2 = np.concatenate((ix_b, ix_c, ix_d), axis=0).reshape((n_segments * tube_points, 3), order='F') + faces1 = np.concatenate((ix_a, ix_b, ix_d), axis=0).reshape( + (n_segments * tube_points, 3), order="F" + ) + faces2 = np.concatenate((ix_b, ix_c, ix_d), axis=0).reshape( + (n_segments * tube_points, 3), order="F" + ) faces = np.append(faces1, faces2, axis=0) @@ -293,7 +311,7 @@ def _frenet_frames(points): smallest = np.argmin(t) normal = np.zeros(3) - normal[smallest] = 1. + normal[smallest] = 1.0 vec = np.cross(tangents[0], normal) normals[0] = np.cross(tangents[0], vec) @@ -314,13 +332,12 @@ def _frenet_frames(points): # Compute normal and binormal vectors along the path for i in range(1, len(points)): - normals[i] = normals[i-1] + normals[i] = normals[i - 1] - vec_norm = all_vec_norm[i-1] - vec = all_vec[i-1] + vec_norm = all_vec_norm[i - 1] + vec = all_vec[i - 1] if vec_norm > epsilon: - normals[i] = rotate(-np.degrees(th[i-1]), - vec)[:3, :3].dot(normals[i]) + normals[i] = rotate(-np.degrees(th[i - 1]), vec)[:3, :3].dot(normals[i]) binormals = np.cross(tangents, normals) @@ -350,8 +367,13 @@ def rotate(angle, axis, dtype=None): x, y, z = axis / np.linalg.norm(axis) c, s = math.cos(angle), math.sin(angle) cx, cy, cz = (1 - c) * x, (1 - c) * y, (1 - c) * z - M = np.array([[cx * x + c, cy * x - z * s, cz * x + y * s, .0], - [cx * y + z * s, cy * y + c, cz * y - x * s, 0.], - [cx * z - y * s, cy * z + x * s, cz * z + c, 0.], - [0., 0., 0., 1.]], dtype).T + M = np.array( + [ + [cx * x + c, cy * x - z * s, cz * x + y * s, 0.0], + [cx * y + z * s, cy * y + c, cz * y - x * s, 0.0], + [cx * z - y * s, cy * z + x * s, cz * z + c, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype, + ).T return M From d7aa6fdd112733fc744bb7a54b0e591f1b018904 Mon Sep 17 00:00:00 2001 From: Philipp Schlegel Date: Fri, 2 Aug 2024 09:34:10 +0100 Subject: [PATCH 31/34] small fixes in transforms README --- navis/transforms/README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/navis/transforms/README.md b/navis/transforms/README.md index eb040e8e..1df2efdf 100644 --- a/navis/transforms/README.md +++ b/navis/transforms/README.md @@ -1,8 +1,8 @@ # Transforms The landscape between template (brains), different types of transforms and bridging sequences is fairly complex. 
This document will attempt to give a brief
-overview to disentangle this. Because I work with Drosophila (fruit flies)
-examples will be insect-centric but navis is general-purpose.
+overview to disentangle this. Because I work with _Drosophila_ (fruit flies),
+examples will be insect-centric, but `navis` is general-purpose.

 `navis` also does not ship with any transform/template data as these are
 typically large-ish files. Check out
@@ -10,7 +10,8 @@
 how to make data available to navis transforms.

 ## Terminology
-### Template
+
+### Templates
 Very generally speaking, "templates" or "template brains" refer to landmark
 brains. These can be artificial templates generated by averaging across
 multiple brains (e.g. the JRC2018U fly template brain, Bogovic et al. 2018), or single
@@ -59,6 +60,6 @@
 That's what `navis.transforms.registry` is for: it keeps track of transforms,
 templates and plots paths to get from A to B.

 On import, the `registry` scans paths set in an `NAVIS_TRANSFORMS` environment
-variable for transform files/directories it understands (`.h5`, '.list').
+variable for transform files/directories it understands (`.h5`, `.list`).
 Alternatively, you can add more paths via `registry.register_path()` or add
 already constructed transforms via ``registry.add_transforms()``.

From 63fe8f0cad6963c2201ed8c32619f23a8a319ad2 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Fri, 2 Aug 2024 09:34:42 +0100
Subject: [PATCH 32/34] parse_objects: return pygfx objects as visuals

---
 navis/utils/misc.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/navis/utils/misc.py b/navis/utils/misc.py
index 3e75aabb..619a1722 100644
--- a/navis/utils/misc.py
+++ b/navis/utils/misc.py
@@ -293,7 +293,7 @@ def parse_objects(x) -> Tuple['core.NeuronList',
     Neurons : navis.NeuronList
     Volume : list of navis.Volume (trimesh.Trimesh will be converted)
     Points : list of arrays
-    Visuals : list of vispy visuals
+    Visuals : list of vispy and pygfx visuals

     Examples
     --------
@@ -316,8 +316,8 @@ def parse_objects(x) -> Tuple['core.NeuronList',
     (<class 'list'>, 1)
     >>> type(points[0])
     <class 'numpy.ndarray'>
-    >>> type(vis), len(points)
-    (<class 'list'>, 1)
+    >>> type(vis), len(vis)
+    (<class 'list'>, 0)

     """
     # Make sure this is a list.
@@ -339,7 +339,7 @@ def parse_objects(x) -> Tuple['core.NeuronList',
                                       make_copy=False)

     # Collect visuals
-    visuals = [ob for ob in x if 'vispy' in str(type(ob))]
+    visuals = [ob for ob in x if 'vispy' in str(type(ob)) or 'pygfx.objects' in str(type(ob))]

     # Collect and parse volumes
     volumes = [ob for ob in x if not isinstance(ob, (core.BaseNeuron,

From 73ca67464d5e195c53834ddc814ed0c2488532ca Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Fri, 2 Aug 2024 09:36:24 +0100
Subject: [PATCH 33/34] small fixes in neuPrint interface:

1. Ignore LOD if the mesh source doesn't support it (change in CV?)
2. Don't break if NeuronList is empty

---
 navis/interfaces/neuprint.py | 41 +++++++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/navis/interfaces/neuprint.py b/navis/interfaces/neuprint.py
index 3cad1993..bf38c002 100644
--- a/navis/interfaces/neuprint.py
+++ b/navis/interfaces/neuprint.py
@@ -107,11 +107,11 @@ def fetch_mesh_neuron(x, *, lod=1, with_synapses=False, missing_mesh='raise',
     Parameters
     ----------
     x :             str | int | list-like | pandas.DataFrame | SegmentCriteria
-                    Body ID(s). Multiple Ids can be provided as list-like or
+                    Body ID(s). Multiple IDs can be provided as list-like or
                    DataFrame with "bodyId" or "bodyid" column.
     lod :           int
                    Level of detail. Higher ``lod`` = coarser. Ignored if mesh
-                    source is DVID.
+                    source does not support LODs (e.g. for DVID).
     with_synapses : bool, optional
                    If True will download and attach synapses as ``.connectors``.
     missing_mesh :  'raise' | 'warn' | 'skip'
@@ -160,28 +160,45 @@ def fetch_mesh_neuron(x, *, lod=1, with_synapses=False, missing_mesh='raise',
    if isinstance(seg_source, str) and seg_source.startswith('dvid'):
        try:
            import dvid as dv
-        except ImportError:
-            raise ImportError('This looks like a DVID mesh source. For this we '
-                              'need the `dvid-tools` library:\n'
-                              '  pip3 install dvidtools -U')
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError(
+                'This looks like a DVID mesh source. For this we '
+                'need the `dvid-tools` library:\n'
+                '  pip3 install dvidtools -U')

        o = urlparse(seg_source.replace('dvid://', ''))
        server = f'{o.scheme}://{o.netloc}'
        node = o.path.split('/')[1]
+
+        if lod is not None:
+            logger.warning(
+                'This dataset does not support LODs. '
+                'Will ignore the `lod` argument. '
+                'You can silence this warning by setting `lod=None`.')
+            lod = None
    else:
        try:
            from cloudvolume import CloudVolume
-        except ImportError:
-            raise ImportError("You need to install the `cloudvolume` library"
-                              'to fetch meshes from this mesh source:\n'
-                              '  pip3 install cloud-volume -U')
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError(
+                "You need to install the `cloudvolume` library "
+                "to fetch meshes from this mesh source:\n"
+                "  pip3 install cloud-volume -U")

        # Initialize volume
        if isinstance(seg_source, CloudVolume):
            vol = seg_source
        else:
-            defaults = dict(use_https=True)
+            defaults = dict(use_https=True, progress=False)
            defaults.update(kwargs)
            vol = CloudVolume(seg_source, **defaults)

+        # Check if vol.mesh.get has a lod argument
+        if lod is not None and 'lod' not in vol.mesh.get.__code__.co_varnames:
+            logger.warning(
+                'This dataset does not support LODs. '
+                'Will ignore the `lod` argument. '
+                'You can silence this warning by setting `lod=None`.')
+            lod = None
+
    if isinstance(x, NeuronCriteria):
        query = x
        wanted_ids = None
@@ -281,7 +298,7 @@ def fetch_mesh_neuron(x, *, lod=1, with_synapses=False, missing_mesh='raise',
            n.connectors = syn[['type', 'x', 'y', 'z', 'roi', 'confidence']]

    # Make an effort to retain the original order
-    if not isinstance(x, NeuronCriteria):
+    if not isinstance(x, NeuronCriteria) and not nl.empty:
        nl = nl.idx[np.asarray(x)[np.isin(x, nl.id)]]

    return nl

From 7dc50111457f408b2169933be68b63e73a4d2009 Mon Sep 17 00:00:00 2001
From: Philipp Schlegel
Date: Sun, 11 Aug 2024 13:41:44 +0100
Subject: [PATCH 34/34] make_tube: skip single point branches

---
 navis/plotting/plot_utils.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/navis/plotting/plot_utils.py b/navis/plotting/plot_utils.py
index f25313a3..6ed0a262 100644
--- a/navis/plotting/plot_utils.py
+++ b/navis/plotting/plot_utils.py
@@ -200,6 +200,10 @@ def make_tube(segments, radii=1.0, tube_points=8, use_normals=True):
     # Need to make sure points are floats
     points = np.array(points).astype(float)
 
+    # Skip segments that consist of a single point
+    if len(points) < 2:
+        continue
+
     if use_normals:
         tangents, normals, binormals = _frenet_frames(points)
     else:
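To illustrate what the new guard buys us, a minimal sketch (toy coordinates; this assumes `make_tube` is fed a list of (N, 3) point arrays as per its signature, and leaves the return value unpacked since it is not shown in this hunk):

    import numpy as np
    from navis.plotting.plot_utils import make_tube

    segments = [
        np.array([[0, 0, 0], [0, 0, 100], [0, 100, 200]], dtype=float),  # a real branch
        np.array([[50, 50, 50]], dtype=float),  # single-point branch -> now skipped
    ]
    # Previously the one-point segment would reach _frenet_frames() and fail;
    # with the guard above it is silently dropped.
    tube = make_tube(segments, tube_points=8)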