
Commit

volume with c order
xiuliren committed Aug 2, 2022
1 parent 0192b91 commit 1a3daa1
Showing 8 changed files with 208 additions and 31 deletions.
2 changes: 2 additions & 0 deletions chunkflow/__init__.py
@@ -5,3 +5,5 @@
# import gevent.monkey
#from gevent import monkey
#monkey.patch_all(thread=True)

from .__version__ import *
3 changes: 1 addition & 2 deletions chunkflow/__version__.py
@@ -1,2 +1 @@

__version__ = "1.0.9"
version = "1.0.9"
9 changes: 4 additions & 5 deletions chunkflow/chunk/base.py
@@ -284,7 +284,7 @@ def from_h5(cls, file_name: str,
voxel_size = Cartesian(*f['voxel_size'])
else:
voxel_size = Cartesian(1, 1, 1)

if cutout_start is None:
cutout_start = voxel_offset
if cutout_size is None:
@@ -294,7 +294,7 @@ def from_h5(cls, file_name: str,

for c, v in zip(cutout_start, voxel_offset):
assert c >= v, "can only cutout after the global voxel offset."

assert len(cutout_start) == 3
assert len(cutout_stop) == 3
dset = dset[...,
@@ -538,14 +538,13 @@ def transpose(self, only_array: bool=False):
voxel_size = self.voxel_size
return Chunk(new_array, voxel_offset=voxel_offset, voxel_size=voxel_size)


def fill(self, x):
self.array.fill(x)

def squeeze_channel(self) -> np.ndarray:
def squeeze_channel(self, axis: int = 0) -> np.ndarray:
"""given a 4D array, squeeze the channel axis."""
assert self.array.ndim == 4
new_array = np.squeeze(self, axis=0)
new_array = np.squeeze(self, axis=axis)
return Chunk(new_array, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size)

# @profile(precision=0)
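For reference, a minimal sketch of the updated `squeeze_channel`, which now takes an explicit `axis` argument (the shape and offset below are illustrative, not part of the commit):

```python
import numpy as np
from chunkflow.chunk import Chunk
from chunkflow.lib.bounding_boxes import Cartesian

arr = np.random.rand(1, 8, 64, 64).astype(np.float32)   # (c, z, y, x)
chunk = Chunk(arr, voxel_offset=Cartesian(0, 0, 0))

# drop the singleton channel axis; axis=0 remains the default
image = chunk.squeeze_channel(axis=0)   # -> 3D (z, y, x) chunk
```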
35 changes: 22 additions & 13 deletions chunkflow/flow/flow.py
@@ -66,8 +66,10 @@
type=click.INT, nargs=3, default=None, callback=default_none,
help='size of region of interest')
@click.option('--chunk-size', '-c',
type=click.INT, required=True, nargs=3,
type=click.INT, default=None, nargs=3,
help='(z y x), size/shape of chunks')
@click.option('--bounding-box', '-b', type=str, default=None,
help='the string representation of a bounding box')
@click.option('--grid-size', '-g',
type=click.INT, default=None, nargs=3, callback=default_none,
help='(z y x), grid size of output blocks')
@@ -90,23 +92,30 @@
default=False, help='use disBatch environment variable or not')
@generator
def generate_tasks(
layer_path: str, mip: int, roi_start: tuple, roi_stop: tuple,roi_size, chunk_size,
grid_size: tuple, file_path: str, queue_name: str, respect_chunk_size: bool,
aligned_block_size: tuple, task_index_start: tuple,
task_index_stop: tuple, disbatch: bool ):
layer_path: str, mip: int, roi_start: tuple, roi_stop: tuple,
roi_size: tuple, chunk_size: tuple, bounding_box:str,
grid_size: tuple, file_path: str, queue_name: str,
respect_chunk_size: bool, aligned_block_size: tuple,
task_index_start: tuple, task_index_stop: tuple, disbatch: bool ):
"""Generate a batch of tasks."""
if mip is None:
mip = state['mip']
assert mip >=0

"""Generate tasks."""
bboxes = BoundingBoxes.from_manual_setup(
chunk_size, layer_path=layer_path,
roi_start=roi_start, roi_stop=roi_stop,
roi_size=roi_size, mip=mip, grid_size=grid_size,
respect_chunk_size=respect_chunk_size,
aligned_block_size=aligned_block_size
)
if bounding_box is not None:
bboxes = [BoundingBox.from_string(bounding_box)]
if chunk_size is None:
chunk_size = bboxes[0].shape
else:
assert chunk_size == bboxes[0].shape
else:
bboxes = BoundingBoxes.from_manual_setup(
chunk_size, layer_path=layer_path,
roi_start=roi_start, roi_stop=roi_stop,
roi_size=roi_size, mip=mip, grid_size=grid_size,
respect_chunk_size=respect_chunk_size,
aligned_block_size=aligned_block_size
)

if task_index_start:
if task_index_stop is None:
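The new `--bounding-box` option lets a single box drive task generation, with `--chunk-size` becoming optional; a rough sketch of the equivalent logic in Python (the range string is hypothetical, since the format accepted by `BoundingBox.from_string` is not shown in this diff):

```python
from chunkflow.lib.bounding_boxes import BoundingBox

bounding_box = '4-32_64-384_64-384'   # hypothetical range string
chunk_size = None                     # --chunk-size is now optional

bboxes = [BoundingBox.from_string(bounding_box)]
if chunk_size is None:
    chunk_size = bboxes[0].shape      # default to the shape of the box
else:
    assert chunk_size == bboxes[0].shape
```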
83 changes: 73 additions & 10 deletions chunkflow/lib/synapses.py
@@ -2,6 +2,7 @@

import os
import json
import time
from typing import List
from copy import deepcopy

@@ -10,6 +11,7 @@

from scipy.spatial import KDTree

import chunkflow
from chunkflow.chunk import Chunk
from chunkflow.lib.bounding_boxes import BoundingBox, Cartesian

@@ -226,13 +228,19 @@ def from_dvid_list(cls, syns: list, resolution: Cartesian=None):
)

@classmethod
def from_json(cls, fname: str, resolution: tuple = None):
def from_json(cls, fname: str, resolution: tuple = None, c_order: bool = True):
with open(fname, 'r') as file:
syns = json.load(file)

if resolution is not None:
syns['resolution'] = resolution
return cls.from_dict(syns)

syns = cls.from_dict(syns)

if not c_order:
syns.transpose_axis()

return syns

@classmethod
def from_h5(cls, fname: str, resolution: tuple = None, c_order: bool = True):
@@ -282,18 +290,17 @@ def from_h5(cls, fname: str, resolution: tuple = None, c_order: bool = True):
else:
post_users = None

if not c_order:
# transform to C order
pre = pre[:, ::-1]
post[:, 1:] = post[:, 1:][:, ::-1]

return cls(
syns = cls(
pre, post=post, pre_confidence=pre_confidence,
post_confidence=post_confidence, resolution=resolution,
users = users,
pre_users = pre_users,
post_users = post_users,
)
if not c_order:
# transform to C order
syns.transpose_axis()
return syns

def to_h5(self, fname: str) -> None:
"""save to a HDF5 file
@@ -330,13 +337,58 @@ def to_h5(self, fname: str) -> None:
def from_file(cls, fname: str, resolution: tuple = None, c_order: bool = True):
assert os.path.exists(fname)
if fname.endswith('.json'):
assert c_order
return cls.from_json(fname, resolution = resolution)
return cls.from_json(fname, resolution = resolution, c_order=c_order)
elif fname.endswith('.h5'):
return cls.from_h5(fname, resolution=resolution, c_order=c_order)
else:
raise ValueError(f'only support JSON and HDF5 file, but got {fname}')

def to_neutu_task(self, fname: str,
software_revision: int=4809,
description: str = "transformed using chunkflow",
file_version: int = 1,
body_id: int = None
):
"""transform to a JSON file as an input to NeuTu.
        Note that the current version only supports presynapses;
        post-synapses are not transformed.
Args:
fname (str): file name with extension of .json
"""
assert fname.endswith('.json')
task = {
'metadata': {
"date": time.strftime('%d-%B-%Y %H:%M'),
"session path": "",
"software revision": software_revision,
"description": description,
"coordinate system": "dvid",
"software": "chunkflow",
"file version": file_version,
"username": "chunkflow",
"software version": chunkflow.version,
"computer": "localhost"
}
}

if body_id is None:
body_id = ""

data = []
for idx in range(self.pre_num):
z, y, x = self.pre[idx, :]
data.append({
"body ID": body_id,
"location": [int(x), int(y), int(z)]
})

task['data'] = data

with open(fname, 'w') as jf:
json.dump(task, jf)
return

def add_pre(self, pre: np.ndarray, confidence: float = 1.):
"""add some additional pre synapses
@@ -353,6 +405,13 @@ def add_pre(self, pre: np.ndarray, confidence: float = 1.):
self.pre_confidence = np.concatenate((self.pre_confidence, confidences), axis=None)
return self

def transpose_axis(self):
# transform to C order
self.pre = self.pre[:, ::-1]
self.resolution = self.resolution[::-1]
if self.post is not None:
self.post[:, 1:] = self.post[:, 1:][:, ::-1]

def __len__(self):
return self.post_num

@@ -506,10 +565,14 @@ def remove_pre(self, indices: List[int]):
self.pre = np.delete(self.pre, indices, axis=0)
if self.pre_confidence is not None:
self.pre_confidence = np.delete(self.pre_confidence, indices)
if self.pre_users is not None:
self.pre_users = np.delete(self.pre_users, indices)

if self.post is not None:
post_indices = np.isin(self.post[:, 0], indices)
self.post = np.delete(self.post, post_indices, axis=0)
if self.post_users is not None:
self.post_users = np.delete(self.post_users, post_indices)
for idx in range(self.post_num):
self.post[idx, 0] = old2new[self.post[idx, 0]]

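Taken together, the synapse changes allow a Fortran-order (x, y, z) file to be loaded into C order and exported as a NeuTu task. A hedged usage sketch, assuming the class defined in `chunkflow/lib/synapses.py` is `Synapses`, with illustrative file names and body ID:

```python
from chunkflow.lib.synapses import Synapses

# load synapses stored in Fortran (x, y, z) order;
# transpose_axis() is applied internally to get C order
syns = Synapses.from_file('synapses.h5', c_order=False)

# export the presynapse locations as a NeuTu task JSON
syns.to_neutu_task('neutu_task.json', body_id=12345)
```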
68 changes: 68 additions & 0 deletions chunkflow/volume.py
@@ -0,0 +1,68 @@
from typing import Union

import numpy as np

from cloudvolume import CloudVolume
from .lib.bounding_boxes import BoundingBox, Cartesian
from .chunk import Chunk


class Volume:
"""The major difference with CloudVolume is that we use C order here.
ZYX indexing.
Args:
CloudVolume (class): the cloud-volume class
"""
def __init__(self, vol: CloudVolume) -> None:
self.vol = vol

@property
def dtype(self):
return self.vol.dtype

@classmethod
def from_numpy(cls, arr: np.ndarray, vol_path: str):
vol = CloudVolume.from_numpy(np.transpose(arr), vol_path=vol_path)
return cls(vol)

def cutout(self, key: Union[BoundingBox, list]):
if isinstance(key, BoundingBox):
chunk = self.vol[ key.to_slices()[::-1] ]
voxel_offset = key.start
elif isinstance(key, list):
chunk = self.vol[key[::-1]]
voxel_offset = Cartesian(key[0].start, key[1].start, key[2].start)
else:
raise ValueError('we only support BoundingBox or a list of slices')

# transpose
chunk = np.transpose(chunk)
chunk = Chunk(np.asarray(chunk), voxel_offset=voxel_offset)
return chunk

def _auto_convert_dtype(self, chunk: Chunk):
"""convert the data type to fit volume datatype"""
if np.issubdtype(self.dtype, np.floating) and \
np.issubdtype(chunk.dtype, np.uint8):
chunk = chunk.astype(self.dtype)
chunk /= 255.
# chunk = chunk / chunk.array.max() * np.iinfo(volume.dtype).max
elif np.issubdtype(self.dtype, np.uint8) and \
np.issubdtype(chunk.dtype, np.floating):
            assert chunk.max() <= 1.
chunk *= 255

if self.dtype != chunk.dtype:
print(f'converting chunk data type {chunk.dtype} ' +
f'to volume data type: {self.dtype}')
return chunk.astype(self.dtype)
else:
return chunk

def save(self, chunk: Chunk):
chunk = self._auto_convert_dtype(chunk)

# transpose czyx to xyzc order
arr = np.transpose(chunk.array)
        self.vol[chunk.slices[::-1]] = arr
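A minimal sketch of how the new `Volume` wrapper is meant to be used, mirroring the test added below (the volume path is illustrative):

```python
import numpy as np
from chunkflow.lib.bounding_boxes import BoundingBox, Cartesian
from chunkflow.volume import Volume

# build a small image volume on disk via cloud-volume
img = np.random.randint(0, 256, size=(36, 448, 448), dtype=np.uint8)
vol = Volume.from_numpy(img, 'file:///tmp/example/volume')

# cutout takes a C-order (z, y, x) bounding box and returns a Chunk
bbox = BoundingBox.from_delta(Cartesian(4, 64, 64), (28, 320, 320))
chunk = vol.cutout(bbox).squeeze_channel()
assert chunk.voxel_offset == Cartesian(4, 64, 64)
# vol.save(chunk)  # would convert dtype if needed and write the chunk back
```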
2 changes: 1 addition & 1 deletion setup.py
@@ -22,7 +22,7 @@

VERSIONFILE = os.path.join(PACKAGE_DIR, "chunkflow/__version__.py")
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
VSRE = r"^version = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
version = mo.group(1)
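A quick self-contained check of the updated version regex against the new `__version__.py` format:

```python
import re

VSRE = r"^version = ['\"]([^'\"]*)['\"]"
verstrline = 'version = "1.0.9"\n'   # mirrors the new chunkflow/__version__.py
mo = re.search(VSRE, verstrline, re.M)
assert mo is not None and mo.group(1) == "1.0.9"
```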
37 changes: 37 additions & 0 deletions tests/test_volume.py
@@ -0,0 +1,37 @@
import shutil

import numpy as np

from cloudvolume.lib import generate_random_string
from chunkflow.lib.bounding_boxes import BoundingBox, Cartesian
from chunkflow.volume import Volume


def test_volume():
print('test volume cutout...')
# compute parameters
mip = 0
size = (36, 448, 448)

# create image dataset using cloud-volume
img = np.random.randint(0, 256, size=size)
img = img.astype(np.uint8)
# save the input to disk
volume_path = 'file:///tmp/test/volume/' + \
generate_random_string()

vol = Volume.from_numpy(
img,
volume_path
)

offset = Cartesian(4, 64, 64)
shape = (28, 320, 320)
bbox = BoundingBox.from_delta(offset, shape)
chunk = vol.cutout(bbox)
chunk = chunk.squeeze_channel()

assert offset == chunk.voxel_offset
np.testing.assert_array_equal(chunk, img[4:-4, 64:-64, 64:-64])

shutil.rmtree('/tmp/test')
