diff --git a/navis/core/base.py b/navis/core/base.py index 009b0a66..219f71c1 100644 --- a/navis/core/base.py +++ b/navis/core/base.py @@ -46,7 +46,7 @@ def Neuron( - x: Union[nx.DiGraph, str, pd.DataFrame, "TreeNeuron", "MeshNeuron"], + x: Union[nx.DiGraph, str, pd.DataFrame, "TreeNeuron", "MeshNeuron"], # noqa: F821 **metadata, # noqa: F821 ): """Constructor for Neuron objects. Depending on the input, either a @@ -664,6 +664,10 @@ def copy(self, deepcopy=False) -> "BaseNeuron": return x + def view(self) -> "BaseNeuron": + """Create a view of the neuron without copying data.""" + raise NotImplementedError(f"View not implemented for neuron of type {type(self)}.") + def summary(self, add_props=None) -> pd.Series: """Get a summary of this neuron.""" @@ -687,6 +691,11 @@ def summary(self, add_props=None) -> pd.Series: warnings.simplefilter("ignore") s = pd.Series([getattr(self, at, "NA") for at in props], index=props) + # Show mask status + if self.is_masked: + if "masked" not in s.index: + s["masked"] = True + return s def plot2d(self, **kwargs): @@ -750,7 +759,20 @@ def is_masked(self): return hasattr(self, "_masked_data") def mask(self, mask): - """Mask neuron.""" + """Mask neuron. + + Implementation details depend on the neuron type (see below). + + See Also + -------- + [`navis.TreeNeuron.mask`][] + Mask skeleton. + [`navis.MeshNeuron.mask`][] + Mask mesh. + [`navis.Dotprops.mask`][] + Mask dotprops. + + """ raise NotImplementedError( f"Masking not implemented for neuron of type {type(self)}." ) diff --git a/navis/core/dotprop.py b/navis/core/dotprop.py index d3a69cdc..7d83d5e8 100644 --- a/navis/core/dotprop.py +++ b/navis/core/dotprop.py @@ -37,7 +37,7 @@ except ModuleNotFoundError: from scipy.spatial import cKDTree as KDTree -__all__ = ['Dotprops'] +__all__ = ["Dotprops"] # Set up logging logger = config.get_logger(__name__) @@ -93,34 +93,35 @@ class Dotprops(BaseNeuron): points: np.ndarray alpha: np.ndarray - vect: np.ndarray + vect: np.ndarray k: Optional[int] soma: Optional[Union[list, np.ndarray]] #: Attributes used for neuron summary - SUMMARY_PROPS = ['type', 'name', 'k', 'units', 'n_points'] + SUMMARY_PROPS = ["type", "name", "k", "units", "n_points"] #: Attributes to be used when comparing two neurons. 
- EQ_ATTRIBUTES = ['name', 'n_points', 'k'] + EQ_ATTRIBUTES = ["name", "n_points", "k"] #: Temporary attributes that need clearing when neuron data changes - TEMP_ATTR = ['_memory_usage', "_tree"] + TEMP_ATTR = ["_memory_usage", "_tree"] #: Core data table(s) used to calculate hash - _CORE_DATA = ['points', 'vect'] + _CORE_DATA = ["points", "vect"] #: Property used to calculate length of neuron - _LENGTH_DATA = 'points' - - def __init__(self, - points: np.ndarray, - k: int, - vect: Optional[np.ndarray] = None, - alpha: Optional[np.ndarray] = None, - units: Union[pint.Unit, str] = None, - **metadata - ): + _LENGTH_DATA = "points" + + def __init__( + self, + points: np.ndarray, + k: int, + vect: Optional[np.ndarray] = None, + alpha: Optional[np.ndarray] = None, + units: Union[pint.Unit, str] = None, + **metadata, + ): """Initialize Dotprops Neuron.""" super().__init__() @@ -144,13 +145,13 @@ def __truediv__(self, other, copy=True): if isinstance(other, numbers.Number) or utils.is_iterable(other): # If a number, consider this an offset for coordinates n = self.copy() if copy else self - _ = np.divide(n.points, other, out=n.points, casting='unsafe') + _ = np.divide(n.points, other, out=n.points, casting="unsafe") if n.has_connectors: - n.connectors.loc[:, ['x', 'y', 'z']] /= other + n.connectors.loc[:, ["x", "y", "z"]] /= other # Force recomputing of KDTree - if hasattr(n, '_tree'): - delattr(n, '_tree') + if hasattr(n, "_tree"): + delattr(n, "_tree") # Convert units # Note: .to_compact() throws a RuntimeWarning and returns unchanged @@ -167,13 +168,13 @@ def __mul__(self, other, copy=True): if isinstance(other, numbers.Number) or utils.is_iterable(other): # If a number, consider this an offset for coordinates n = self.copy() if copy else self - _ = np.multiply(n.points, other, out=n.points, casting='unsafe') + _ = np.multiply(n.points, other, out=n.points, casting="unsafe") if n.has_connectors: - n.connectors.loc[:, ['x', 'y', 'z']] *= other + n.connectors.loc[:, ["x", "y", "z"]] *= other # Force recomputing of KDTree - if hasattr(n, '_tree'): - delattr(n, '_tree') + if hasattr(n, "_tree"): + delattr(n, "_tree") # Convert units # Note: .to_compact() throws a RuntimeWarning and returns unchanged @@ -190,13 +191,13 @@ def __add__(self, other, copy=True): if isinstance(other, numbers.Number) or utils.is_iterable(other): # If a number, consider this an offset for coordinates n = self.copy() if copy else self - _ = np.add(n.points, other, out=n.points, casting='unsafe') + _ = np.add(n.points, other, out=n.points, casting="unsafe") if n.has_connectors: - n.connectors.loc[:, ['x', 'y', 'z']] += other + n.connectors.loc[:, ["x", "y", "z"]] += other # Force recomputing of KDTree - if hasattr(n, '_tree'): - delattr(n, '_tree') + if hasattr(n, "_tree"): + delattr(n, "_tree") return n # If another neuron, return a list of neurons @@ -209,13 +210,13 @@ def __sub__(self, other, copy=True): if isinstance(other, numbers.Number) or utils.is_iterable(other): # If a number, consider this an offset for coordinates n = self.copy() if copy else self - _ = np.subtract(n.points, other, out=n.points, casting='unsafe') + _ = np.subtract(n.points, other, out=n.points, casting="unsafe") if n.has_connectors: - n.connectors.loc[:, ['x', 'y', 'z']] -= other + n.connectors.loc[:, ["x", "y", "z"]] -= other # Force recomputing of KDTree - if hasattr(n, '_tree'): - delattr(n, '_tree') + if hasattr(n, "_tree"): + delattr(n, "_tree") return n return NotImplemented @@ -227,9 +228,9 @@ def __getstate__(self): # The KDTree from 
pykdtree does not like being pickled # We will have to remove it which will force it to be regenerated # after unpickling - if '_tree' in state: - if 'pykdtree' in str(type(state['_tree'])): - _ = state.pop('_tree') + if "_tree" in state: + if "pykdtree" in str(type(state["_tree"])): + _ = state.pop("_tree") return state @@ -238,8 +239,10 @@ def alpha(self): """Alpha value for tangent vectors (optional).""" if isinstance(self._alpha, type(None)): if isinstance(self.k, type(None)) or (self.k <= 0): - raise ValueError('Unable to calculate `alpha` for Dotprops not ' - 'generated using k-nearest-neighbors.') + raise ValueError( + "Unable to calculate `alpha` for Dotprops not " + "generated using k-nearest-neighbors." + ) self.recalculate_tangents(self.k, inplace=True) return self._alpha @@ -249,7 +252,7 @@ def alpha(self, value): if not isinstance(value, type(None)): value = np.asarray(value) if value.ndim != 1: - raise ValueError(f'alpha must be (N, ) array, got {value.shape}') + raise ValueError(f"alpha must be (N, ) array, got {value.shape}") self._alpha = value @property @@ -259,8 +262,8 @@ def bbox(self) -> np.ndarray: mx = np.max(self.points, axis=0) if self.has_connectors: - cn_mn = np.min(self.connectors[['x', 'y', 'z']].values, axis=0) - cn_mx = np.max(self.connectors[['x', 'y', 'z']].values, axis=0) + cn_mn = np.min(self.connectors[["x", "y", "z"]].values, axis=0) + cn_mx = np.max(self.connectors[["x", "y", "z"]].values, axis=0) mn = np.min(np.vstack((mn, cn_mn)), axis=0) mx = np.max(np.vstack((mx, cn_mx)), axis=0) @@ -270,12 +273,16 @@ def bbox(self) -> np.ndarray: @property def datatables(self) -> List[str]: """Names of all DataFrames attached to this neuron.""" - return [k for k, v in self.__dict__.items() if isinstance(v, pd.DataFrame, np.ndarray)] + return [ + k + for k, v in self.__dict__.items() + if isinstance(v, pd.DataFrame, np.ndarray) + ] @property def kdtree(self): """KDTree for points.""" - if not getattr(self, '_tree', None): + if not getattr(self, "_tree", None): self._tree = KDTree(self.points) return self._tree @@ -290,7 +297,7 @@ def points(self, value): value = np.zeros((0, 3)) value = np.asarray(value) if value.ndim != 2 or value.shape[1] != 3: - raise ValueError(f'points must be (N, 3) array, got {value.shape}') + raise ValueError(f"points must be (N, 3) array, got {value.shape}") self._points = value # Also reset KDtree self._tree = None @@ -307,7 +314,7 @@ def vect(self, value): if not isinstance(value, type(None)): value = np.asarray(value) if value.ndim != 2 or value.shape[1] != 3: - raise ValueError(f'vectors must be (N, 3) array, got {value.shape}') + raise ValueError(f"vectors must be (N, 3) array, got {value.shape}") self._vect = value @property @@ -336,11 +343,11 @@ def soma(self) -> Optional[int]: if not any(soma): soma = None elif any(np.array(soma) < 0) or any(np.array(soma) > self.points.shape[0]): - logger.warning(f'Soma(s) {soma} not found in points.') + logger.warning(f"Soma(s) {soma} not found in points.") soma = None else: if 0 < soma < self.points.shape[0]: - logger.warning(f'Soma {soma} not found in node table.') + logger.warning(f"Soma {soma} not found in node table.") soma = None return soma @@ -348,7 +355,7 @@ def soma(self) -> Optional[int]: @soma.setter def soma(self, value: Union[Callable, int, None]) -> None: """Set soma.""" - if hasattr(value, '__call__'): + if hasattr(value, "__call__"): self._soma = types.MethodType(value, self) elif isinstance(value, type(None)): self._soma = None @@ -358,20 +365,22 @@ def soma(self, value: 
Union[Callable, int, None]) -> None: if 0 < value < self.points.shape[0]: self._soma = value else: - raise ValueError('Soma must be function, None or a valid node index.') + raise ValueError("Soma must be function, None or a valid node index.") @property def type(self) -> str: """Neuron type.""" - return 'navis.Dotprops' - - def dist_dots(self, - other: 'Dotprops', - alpha: bool = False, - distance_upper_bound: Optional[float] = None, - **kwargs) -> Union[ - Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray] - ]: + return "navis.Dotprops" + + def dist_dots( + self, + other: "Dotprops", + alpha: bool = False, + distance_upper_bound: Optional[float] = None, + **kwargs, + ) -> Union[ + Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray] + ]: """Query this Dotprops against another. This function is mainly for `navis.nblast`. @@ -419,9 +428,9 @@ def dist_dots(self, # Scipy's KDTree does not like the distance to be None diub = distance_upper_bound if distance_upper_bound else np.inf - fast_dists, fast_idxs = other.kdtree.query(points, - distance_upper_bound=diub, - **kwargs) + fast_dists, fast_idxs = other.kdtree.query( + points, distance_upper_bound=diub, **kwargs + ) # If upper distance we have to worry about infinite distances if distance_upper_bound: @@ -479,7 +488,7 @@ def downsample(self, factor=5, inplace=False, **kwargs): return x return None - def copy(self) -> 'Dotprops': + def copy(self) -> "Dotprops": """Return a copy of the dotprops. Returns @@ -489,17 +498,40 @@ def copy(self) -> 'Dotprops': """ # Don't copy the KDtree - when using pykdtree, copy.copy throws an # error and the construction is super fast anyway - no_copy = ['_lock', '_tree'] + no_copy = ["_lock", "_tree"] # Generate new empty neuron - note we pass vect and alpha to # prevent calculation on initialization - x = self.__class__(points=np.zeros((0, 3)), k=1, - vect=np.zeros((0, 3)), alpha=np.zeros(0)) + x = self.__class__( + points=np.zeros((0, 3)), k=1, vect=np.zeros((0, 3)), alpha=np.zeros(0) + ) # Populate with this neuron's data - x.__dict__.update({k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy}) + x.__dict__.update( + {k: copy.copy(v) for k, v in self.__dict__.items() if k not in no_copy} + ) return x - def drop_fluff(self, epsilon, keep_size: int = None, n_largest: int = None, inplace=False): + def view(self) -> "Dotprops": + """Create a view of the neuron without copying data. + + Be aware that changes to the view may affect the original neuron! + + """ + no_copy = ["_lock"] + + # Generate new empty neuron + x = self.__class__( + points=np.zeros((0, 3)), k=1, vect=np.zeros((0, 3)), alpha=np.zeros(0) + ) + + # Override with this neuron's data + x.__dict__.update({k: v for k, v in self.__dict__.items() if k not in no_copy}) + + return x + + def drop_fluff( + self, epsilon, keep_size: int = None, n_largest: int = None, inplace=False + ): """Remove fluff from neuron. By default, this function will remove all but the largest connected @@ -534,16 +566,20 @@ def drop_fluff(self, epsilon, keep_size: int = None, n_largest: int = None, inpl Base function. See for details and examples. 
""" - x = morpho.drop_fluff(self, epsilon=epsilon, keep_size=keep_size, n_largest=n_largest, inplace=inplace) + x = morpho.drop_fluff( + self, + epsilon=epsilon, + keep_size=keep_size, + n_largest=n_largest, + inplace=inplace, + ) if not inplace: return x - def mask(self, mask, copy=True): + def mask(self, mask, inplace=False, copy=False) -> "Dotprops": """Mask neuron with given mask. - This is always done in-place! - Parameters ---------- mask : np.ndarray @@ -551,10 +587,16 @@ def mask(self, mask, copy=True): - 1D array with boolean values - callable that accepts a neuron and returns a mask - string with property name + inplace : bool, optional + Whether to mask the neuron inplace. + copy : bool, optional + Whether to copy data (points, vectors, alpha, etc.) after masking. + This is useful if you want to avoid accidentally modifying + the original nodes table. Returns ------- - self + n : Dotprops The masked neuron. See Also @@ -572,56 +614,60 @@ def mask(self, mask, copy=True): "Neuron already masked. Layering multiple masks is currently not supported, please unmask first." ) + n = self + if not inplace: + n = self.view() + if callable(mask): - mask = mask(self) + mask = mask(n) elif isinstance(mask, str): - mask = getattr(self, mask) + mask = getattr(n, mask) mask = np.asarray(mask) if mask.dtype != bool: raise ValueError("Mask must be boolean array.") - elif mask.shape[0] != len(self): + elif mask.shape[0] != len(n): raise ValueError("Mask must have same length as points.") - self._mask = mask - self._masked_data = {} - self._masked_data['_points'] = self.points + n._mask = mask + n._masked_data = {} + n._masked_data["_points"] = n.points # Drop soma if masked out - if self.soma is not None: - if isinstance(self.soma, (list, np.ndarray)): - soma_left = self.soma[mask[self.soma]] - self._masked_data['_soma'] = self.soma + if n.soma is not None: + if isinstance(n.soma, (list, np.ndarray)): + soma_left = n.soma[mask[n.soma]] + n._masked_data["_soma"] = n.soma if any(soma_left): - self.soma = soma_left + n.soma = soma_left else: - self.soma = None - elif not mask[self.soma]: - self._masked_data['_soma'] = self.soma - self.soma = None + n.soma = None + elif not mask[n.soma]: + n._masked_data["_soma"] = n.soma + n.soma = None - # N.B. we're directly setting `._nodes`` to avoid overhead from checks + # Apply the mask and make copy if requested for att in ("_points", "_vect", "_alpha"): - if hasattr(self, att): - self._masked_data[att] = getattr(self, att) - setattr(self, att, getattr(self, att)[mask]) + if hasattr(n, att): + n._masked_data[att] = getattr(n, att) # save original data + setattr(n, att, getattr(n, att)[mask]) # apply mask if copy: - setattr(self, att, getattr(self, att).copy()) + setattr(n, att, getattr(n, att).copy()) # copy masked data if requested - if hasattr(self, "_connectors") and "point_ix" in self._connectors.columns: - self._masked_data['connectors'] = self.connectors - self._connectors = self._connectors.loc[ - self.connectors.point_ix.isin(np.arange(len(mask))[mask]) - ] + if hasattr(n, "_connectors") and "point_ix" in n._connectors.columns: + n._masked_data["connectors"] = n.connectors + n._connectors = n._connectors.loc[ + n.connectors.point_ix.isin(np.arange(len(mask))[mask]) + ] if copy: - self._connectors = self._connectors.copy() + n._connectors = n._connectors.copy() - self._clear_temp_attr() + n._clear_temp_attr() - return self + return n def unmask(self, reset=True): """Unmask neuron. 
@@ -699,8 +745,9 @@ def recalculate_tangents(self, k: int, inplace=False): # Checks and balances n_points = x.points.shape[0] if n_points < k: - raise ValueError(f"Too few points ({n_points}) to calculate {k} " - "nearest-neighbors") + raise ValueError( + f"Too few points ({n_points}) to calculate {k} " "nearest-neighbors" + ) # Create the KDTree and get the k-nearest neighbors for each point dist, ix = self.kdtree.query(x.points, k=k) @@ -728,7 +775,7 @@ def recalculate_tangents(self, k: int, inplace=False): if not inplace: return x - def snap(self, locs, to='points'): + def snap(self, locs, to="points"): """Snap xyz location(s) to closest point or synapse. Parameters @@ -757,15 +804,16 @@ def snap(self, locs, to='points'): """ locs = np.asarray(locs).astype(np.float64) - is_single = (locs.ndim == 1 and len(locs) == 3) - is_multi = (locs.ndim == 2 and locs.shape[1] == 3) + is_single = locs.ndim == 1 and len(locs) == 3 + is_multi = locs.ndim == 2 and locs.shape[1] == 3 if not is_single and not is_multi: - raise ValueError('Expected a single (x, y, z) location or a ' - '(N, 3) array of multiple locations') + raise ValueError( + "Expected a single (x, y, z) location or a " + "(N, 3) array of multiple locations" + ) - if to not in ['points', 'connectors']: - raise ValueError('`to` must be "points" or "connectors", ' - f'got {to}') + if to not in ["points", "connectors"]: + raise ValueError('`to` must be "points" or "connectors", ' f"got {to}") # Generate tree tree = graph.neuron2KDTree(self, data=to) @@ -775,9 +823,9 @@ def snap(self, locs, to='points'): return ix, dist - def to_skeleton(self, - scale_vec: Union[float, Literal['auto']] = 'auto' - ) -> core.TreeNeuron: + def to_skeleton( + self, scale_vec: Union[float, Literal["auto"]] = "auto" + ) -> core.TreeNeuron: """Turn Dotprop into a TreeNeuron. This does *not* skeletonize the neuron but rather generates a line @@ -801,12 +849,13 @@ def to_skeleton(self, TreeNeuron """ - if not isinstance(scale_vec, numbers.Number) and scale_vec != 'auto': - raise ValueError('`scale_vect` must be "auto" or a number, ' - f'got {scale_vec}') + if not isinstance(scale_vec, numbers.Number) and scale_vec != "auto": + raise ValueError( + '`scale_vect` must be "auto" or a number, ' f"got {scale_vec}" + ) - if scale_vec == 'auto': - scale_vec = self.sampling_resolution * .8 + if scale_vec == "auto": + scale_vec = self.sampling_resolution * 0.8 # Prepare segments - this is based on nat:::plot3d.dotprops halfvect = self.vect / 2 * scale_vec @@ -819,16 +868,16 @@ def to_skeleton(self, segs[1::2] = ends # Generate node table - nodes = pd.DataFrame(segs, columns=['x', 'y', 'z']) - nodes['node_id'] = nodes.index - nodes['parent_id'] = -1 - nodes.loc[1::2, 'parent_id'] = nodes.index.values[::2] + nodes = pd.DataFrame(segs, columns=["x", "y", "z"]) + nodes["node_id"] = nodes.index + nodes["parent_id"] = -1 + nodes.loc[1::2, "parent_id"] = nodes.index.values[::2] # Produce a minimal TreeNeuron tn = core.TreeNeuron(nodes, units=self.units, id=self.id) # Carry over the label - if getattr(self, '_label', None): + if getattr(self, "_label", None): tn._label = self._label # Add some other relevant attributes directly @@ -837,4 +886,3 @@ def to_skeleton(self, tn._soma = self._soma return tn - diff --git a/navis/core/masking.py b/navis/core/masking.py index c5a78861..be74976e 100644 --- a/navis/core/masking.py +++ b/navis/core/masking.py @@ -20,7 +20,6 @@ from .voxel import VoxelNeuron from .mesh import MeshNeuron -from .. 
import utils __all__ = ["NeuronMask"] @@ -30,7 +29,7 @@ class NeuronMask: Parameters ---------- - x : Neuron/List + x : Neuron/List Neuron(s) to mask. mask : str | array | callable | list | dict The mask to apply: @@ -43,10 +42,8 @@ class NeuronMask: above copy_data : bool Whether to copy the neuron data (e.g. node table for skeletons) - when masking. Setting this to False will may some time and - memory but may lead to e.g. pandas setting-on-copy warnings - if the data is modified. Only set to `True` if you know your - code won't modify the data. + when masking. Set this to `True` if you know your code will modify + the masked data and you want to prevent changes to the original. reset_neurons : bool If True, reset the neurons to their original state after the context manager exits. If False, will try to incorporate any @@ -84,7 +81,7 @@ class NeuronMask: """ - def __init__(self, x, mask, reset_neurons=True, copy_data=True, validate_mask=True): + def __init__(self, x, mask, reset_neurons=True, copy_data=False, validate_mask=True): self.neurons = x if validate_mask: @@ -159,7 +156,7 @@ def __enter__(self): else: mask = self.mask[i] - n.mask(mask, copy=self.copy) + n.mask(mask, copy=self.copy, inplace=True) return self diff --git a/navis/core/mesh.py b/navis/core/mesh.py index d6f78db0..5cca369c 100644 --- a/navis/core/mesh.py +++ b/navis/core/mesh.py @@ -428,10 +428,24 @@ def copy(self) -> "MeshNeuron": return x - def mask(self, mask, copy=True): - """Mask neuron with given mask. + def view(self) -> "MeshNeuron": + """Create a view of the neuron without copying data. + + Be aware that changes to the view may affect the original neuron! + + """ + no_copy = ["_lock"] + + # Generate new empty neuron + x = self.__class__(None) + + # Override with this neuron's data + x.__dict__.update({k: v for k, v in self.__dict__.items() if k not in no_copy}) + + return x - This is always done in-place! + def mask(self, mask, inplace=False, copy=False): + """Mask neuron with given mask. Parameters ---------- @@ -443,8 +457,12 @@ def mask(self, mask, copy=True): The mask can be either for vertices or faces but will ultimately be used to mask out faces. Vertices not participating in any face will be removed regardless of the mask. - copy : bool - Whether to copy mask a copy of the data. Only applies for connectors. + inplace : bool, optional + Whether to mask the neuron inplace. + copy : bool, optional + Whether to copy data (faces, vertices, etc.) after masking. This + is useful if you want to avoid accidentally modifying + the original nodes table. Returns ------- @@ -466,101 +484,111 @@ def mask(self, mask, copy=True): "Please either apply the existing mask or unmask first." 
) + n = self + if not inplace: + n = self.view() + if callable(mask): - mask = mask(self) + mask = mask(n) elif isinstance(mask, str): - mask = getattr(self, mask) + mask = getattr(n, mask) mask = np.asarray(mask) # Some checks if mask.dtype != bool: raise ValueError("Mask must be boolean array.") - elif len(mask) not in (self.vertices.shape[0], self.faces.shape[0]): + elif len(mask) not in (n.vertices.shape[0], n.faces.shape[0]): raise ValueError("Mask length does not match number of vertices or faces.") # Transate vertex mask to face mask - if mask.shape[0] == self.vertices.shape[0]: + if mask.shape[0] == n.vertices.shape[0]: vert_mask = mask - face_mask = np.all(mask[self.faces], axis=1) + face_mask = np.all(mask[n.faces], axis=1) + + # Apply mask + verts_new, faces_new, vert_map, face_map = morpho.subset.submesh( + n, vertex_index=np.where(vert_mask)[0], return_map=True + ) else: face_mask = mask - vert_mask = np.zeros(self.vertices.shape[0], dtype=bool) - vert_mask[np.unique(self.faces[face_mask])] = True + vert_mask = np.zeros(n.vertices.shape[0], dtype=bool) + vert_mask[np.unique(n.faces[face_mask])] = True - # Apply mask - verts_new, faces_new, vert_map, face_map = morpho.subset.submesh( - self, vertex_index=np.where(vert_mask)[0], return_map=True - ) + # Apply mask + verts_new, faces_new, vert_map, face_map = morpho.subset.submesh( + n, faces_index=np.where(face_mask)[0], return_map=True + ) # The above will have likely dropped some vertices - we need to update the vertex mask - vert_mask = np.zeros(self.vertices.shape[0], dtype=bool) + vert_mask = np.zeros(n.vertices.shape[0], dtype=bool) vert_mask[np.where(vert_map != -1)[0]] = True # Track mask, vertices and faces before masking - self._mask = face_mask # mask is always the face mask - self._masked_data = {} - self._masked_data["_vertices"] = self._vertices - self._masked_data["_faces"] = self._faces + n._mask = face_mask # mask is always the face mask + n._masked_data = {} + n._masked_data["_vertices"] = n._vertices + n._masked_data["_faces"] = n._faces # Update vertices and faces - self._vertices = verts_new - self._faces = faces_new + n._vertices = verts_new + n._faces = faces_new # See if we can mask the mesh's skeleton as well - if hasattr(self, "_skeleton"): + if hasattr(n, "_skeleton"): # If the skeleton has a vertex map, we can use it to mask the skeleton - if hasattr(self._skeleton, "vertex_map"): + if hasattr(n._skeleton, "vertex_map"): # Generate a mask for the skeleton # (keep in mind vertex_map are node IDs, not indices) - sk_mask = self._skeleton.nodes.node_id.isin( - self._skeleton.vertex_map[vert_mask] + sk_mask = n._skeleton.nodes.node_id.isin( + n._skeleton.vertex_map[vert_mask] ) # Apply mask - self._skeleton.mask(sk_mask) + n._skeleton.mask(sk_mask) # Last but not least: we need to update the vertex map # Track the old map. N.B. we're not adding this to # skeleton._masked_data since the remapping is done by # the MeshNeuron itself! - self._skeleton._vertex_map_unmasked = self._skeleton.vertex_map + n._skeleton._vertex_map_unmasked = n._skeleton.vertex_map # Subset the vertex map to the surviving mesh vertices # N.B. that the node IDs don't change when masking skeletons! 
- self._skeleton.vertex_map = self._skeleton.vertex_map[vert_mask] + n._skeleton.vertex_map = n._skeleton.vertex_map[vert_mask] # If the skeleton has no vertex map, we have to ditch it and # let it be regenerated when needed else: - self._masked_data["_skeleton"] = self._skeleton - self._skeleton = None # Clear the skeleton + n._masked_data["_skeleton"] = n._skeleton + n._skeleton = None # Clear the skeleton # See if we need to mask any connectors as well - if hasattr(self, "_connectors"): + if hasattr(n, "_connectors"): # Only mask if there is an actual "vertex_ind" or "face_ind" column - cn_mask = None - if "vertex_ind" in self._connectors.columns: - cn_mask = self._connectors.vertex_id.isin(np.where(vert_mask)[0]) - elif "face_ind" in self._connectors.columns: - cn_mask = self._connectors.face_id.isin(np.where(face_mask)[0]) + if "vertex_ind" in n._connectors.columns: + cn_mask = n._connectors.vertex_id.isin(np.where(vert_mask)[0]) + elif "face_ind" in n._connectors.columns: + cn_mask = n._connectors.face_id.isin(np.where(face_mask)[0]) + else: + cn_mask = None if cn_mask is not None: - self._masked_data["_connectors"] = self._connectors - self._connectors = self._connectors.loc[mask] + n._masked_data["_connectors"] = n._connectors + n._connectors = n._connectors.loc[mask] if copy: - self._connectors = self._connectors.copy() + n._connectors = n._connectors.copy() # Check if we need to drop the soma position - if hasattr(self, "soma_pos"): - vid = self.snap(self.soma_pos, to="vertices")[0] + if hasattr(n, "soma_pos"): + vid = n.snap(self.soma_pos, to="vertices")[0] if not vert_mask[vid]: - self._masked_data["_soma_pos"] = self.soma_pos - self.soma_pos = None + n._masked_data["_soma_pos"] = n.soma_pos + n.soma_pos = None - # Clear temporary attributes but keep the skeleton since we already fixed that - self._clear_temp_attr(exclude=["_skeleton"]) + # Clear temporary attributes but keep the skeleton since we already fixed that manually + n._clear_temp_attr(exclude=["_skeleton"]) - return self + return n def unmask(self, reset=True): """Unmask neuron. @@ -631,7 +659,9 @@ def unmask(self, reset=True): # Generate a mesh for the masked-out data: # The mesh prior to masking - pre_mesh = tm.Trimesh(self._masked_data["_vertices"], self._masked_data["_faces"]) + pre_mesh = tm.Trimesh( + self._masked_data["_vertices"], self._masked_data["_faces"] + ) # The vertices and faces that were masked out pre_vertices, pre_faces, vert_map, face_map = morpho.subset.submesh( pre_mesh, faces_index=np.where(~face_mask)[0], return_map=True @@ -661,7 +691,7 @@ def unmask(self, reset=True): # Check if the vertex map is still valid # Note to self: we could do some elaborate checks here to map old to # most likely new vertex / nodes but that's a bit overkill for now. - if hasattr(skeleton, 'vertex_map'): + if hasattr(skeleton, "vertex_map"): if skeleton.vertex_map.shape[0] != self._vertices.shape[0]: skeleton = None elif skeleton.vertex_map.max() >= self._faces.shape[0]: diff --git a/navis/core/skeleton.py b/navis/core/skeleton.py index 41937e5d..62e97302 100644 --- a/navis/core/skeleton.py +++ b/navis/core/skeleton.py @@ -930,6 +930,22 @@ def copy(self, deepcopy: bool = False) -> "TreeNeuron": return x + def view(self) -> "TreeNeuron": + """Create a view of the neuron without copying data. + + Be aware that changes to the view may affect the original neuron! 
+ + """ + no_copy = ["_lock"] + + # Generate new empty neuron + x = self.__class__(None) + + # Override with this neuron's data + x.__dict__.update({k: v for k, v in self.__dict__.items() if k not in no_copy}) + + return x + def get_graph_nx(self) -> nx.DiGraph: """Calculate and return networkX representation of neuron. @@ -962,11 +978,9 @@ def get_igraph(self) -> "igraph.Graph": # type: ignore self._igraph = graph.neuron2igraph(self, raise_not_installed=False) return self._igraph - def mask(self, mask, copy=True): + def mask(self, mask, inplace=False, copy=False) -> "TreeNeuron": """Mask neuron with given mask. - This is always done in-place! - Parameters ---------- mask : np.ndarray @@ -974,10 +988,17 @@ def mask(self, mask, copy=True): - 1D array with boolean values - callable that accepts a neuron and returns a mask - string with column name in nodes table + inplace : bool, optional + Whether to mask the neuron inplace. + copy : bool, optional + Whether to copy data such as the node table after masking. This + is useful if you want to avoid accidentally modifying + the original nodes table. Returns ------- - self + n : TreeNeuron + The masked neuron. See Also -------- @@ -992,45 +1013,49 @@ def mask(self, mask, copy=True): "Neuron already masked. Layering multiple masks is currently not supported, please unmask first." ) + n = self + if not inplace: + n = self.view() + if callable(mask): - mask = mask(self) + mask = mask(n) elif isinstance(mask, str): - mask = self.nodes[mask].values + mask = n.nodes[mask].values mask = np.asarray(mask) if mask.dtype != bool: raise ValueError("Mask must be boolean array.") - elif mask.shape[0] != self.nodes.shape[0]: + elif mask.shape[0] != n.nodes.shape[0]: raise ValueError("Mask must have same length as nodes table.") - self._mask = mask - self._masked_data = {} - self._masked_data["_nodes"] = self.nodes + n._mask = mask + n._masked_data = {} + n._masked_data["_nodes"] = n.nodes # N.B. we're directly setting `._nodes`` to avoid overhead from checks - self._nodes = self._nodes.loc[mask].drop("type", axis=1, errors="ignore") + n._nodes = n._nodes.loc[mask].drop("type", axis=1, errors="ignore") if copy: - self._nodes = self._nodes.copy() + n._nodes = n._nodes.copy() # See if any parent IDs have ceased to exist - missing_parents = ~self._nodes.parent_id.isin(self._nodes.node_id) & ( - self._nodes.parent_id >= 0 + missing_parents = ~n._nodes.parent_id.isin(n._nodes.node_id) & ( + n._nodes.parent_id >= 0 ) if any(missing_parents): - self.nodes.loc[missing_parents, "parent_id"] = -1 + n.nodes.loc[missing_parents, "parent_id"] = -1 - if hasattr(self, "_connectors"): - self._masked_data["_connectors"] = self.connectors - self._connectors = self._connectors.loc[ - self._connectors.node_id.isin(self.nodes.node_id) + if hasattr(n, "_connectors"): + n._masked_data["_connectors"] = n.connectors + n._connectors = n._connectors.loc[ + n._connectors.node_id.isin(n.nodes.node_id) ] if copy: - self._connectors = self._connectors.copy() + n._connectors = n._connectors.copy() - self._clear_temp_attr() + n._clear_temp_attr() - return self + return n def unmask(self, reset=True): """Unmask neuron. 
diff --git a/navis/morpho/subset.py b/navis/morpho/subset.py
index c828f13d..7b0d21fd 100644
--- a/navis/morpho/subset.py
+++ b/navis/morpho/subset.py
@@ -401,7 +401,7 @@ def submesh(mesh, *, faces_index=None, vertex_index=None, return_map=False):
     if faces_index is None and vertex_index is None:
         raise ValueError("Either `faces_index` or `vertex_index` must be provided.")
     elif faces_index is not None and vertex_index is not None:
-        raise ValueError("Only one of `faces_index` or `vertex_index` can be provided.")
+        raise ValueError("Must provide either `faces_index` or `vertex_index`, not both.")
 
     # First check if we can return either an empty mesh or the original mesh right away
     if faces_index is not None:
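# --- Usage sketch for the NeuronMask context manager with its new default
# copy_data=False (see navis/core/masking.py above). This is an illustrative
# example, assuming the bundled `navis.example_neurons` helper and that the
# class is exported as navis.NeuronMask.
import navis
import numpy as np

nl = navis.example_neurons(2)
full_cable = nl.cable_length.copy()

# Callable masks are evaluated per neuron; keep only the dorsal half of each
def dorsal_half(n):
    return (n.nodes.z > n.nodes.z.median()).values

# Neurons are masked in place for the duration of the context. With
# copy_data=False (the new default) the masked node tables are not copied;
# pass copy_data=True if code inside the block modifies them.
with navis.NeuronMask(nl, dorsal_half):
    dorsal_cable = nl.cable_length

# On exit the masks are removed and the neurons restored
assert np.all(dorsal_cable <= full_cable)
assert np.all(nl.cable_length == full_cable)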