Skip to content

feat: heterogeneous feature generation, img_utils #135

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions src/deep_neurographs/geometry.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ def fill_path(img, path, val=-1):
"""
for xyz in path:
x, y, z = tuple(np.floor(xyz).astype(int))
img[x - 1 : x + 2, y - 1 : y + 2, z - 1 : z + 2] = val
img[x - 1: x + 2, y - 1: y + 2, z - 1: z + 2] = val
return img


Expand Down Expand Up @@ -425,9 +425,9 @@ def align(neurograph, img, branch_1, branch_2, depth):
best_d2 = None
best_score = 0
for d1 in range(min(depth, len(branch_1) - 1)):
xyz_1 = neurograph.to_img(branch_1[d1], shift=True)
xyz_1 = neurograph.to_voxels(branch_1[d1], shift=True)
for d2 in range(min(depth, len(branch_2) - 1)):
xyz_2 = neurograph.to_img(branch_2[d2], shift=True)
xyz_2 = neurograph.to_voxels(branch_2[d2], shift=True)
line = make_line(xyz_1, xyz_2, 10)
score = np.mean(get_profile(img, line, window=[3, 3, 3]))
if score > best_score:
Expand Down
202 changes: 202 additions & 0 deletions src/deep_neurographs/img_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
"""
Created on Sat May 9 11:00:00 2024

@author: Anna Grim
@email: anna.grim@alleninstitute.org

Helper routines for working with images.

"""

from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy

import numpy as np
import tensorstore as ts
import zarr
from skimage.color import label2rgb

SUPPORTED_DRIVERS = ["neuroglancer_precomputed", "n5", "zarr"]


# --- io utils ---
def open_tensorstore(path, driver):
    """
    Opens an image volume stored in a GCS bucket as a tensorstore array.

    Parameters
    ----------
    path : str
        Path to the dataset within the "allen-nd-goog" GCS bucket.
    driver : str
        Storage driver needed to read the data at "path". Must be one of
        SUPPORTED_DRIVERS.

    Returns
    -------
    tensorstore.TensorStore
        Lazy view of the image volume; no voxels are read until the result
        is sliced.

    """
    assert driver in SUPPORTED_DRIVERS, "Error! Driver is not supported!"
    arr = ts.open(
        {
            "driver": driver,
            "kvstore": {
                "driver": "gcs",
                "bucket": "allen-nd-goog",
                "path": path,
            },
            "context": {
                "cache_pool": {"total_bytes_limit": 1000000000},
                "cache_pool#remote": {"total_bytes_limit": 1000000000},
                "data_copy_concurrency": {"limit": 8},
            },
            "recheck_cached_data": "open",
        }
    ).result()
    if driver == "neuroglancer_precomputed":
        # Drop the channel dimension so downstream code sees a 3D volume.
        return arr[ts.d["channel"][0]]
    elif driver == "zarr":
        # Strip the leading (time, channel) axes, then swap axes 0 and 2 so
        # the volume is indexed in xyz order.
        arr = arr[0, 0, :, :, :]
        arr = arr[ts.d[0].transpose[2]]
        arr = arr[ts.d[0].transpose[1]]
        return arr
    # Bug fix: the "n5" driver is in SUPPORTED_DRIVERS but previously fell
    # through both branches and silently returned None.
    return arr


def open_zarr(path):
    """
    Opens the zarr (n5-backed) dataset at "path".

    Parameters
    ----------
    path : str
        Path to the dataset to be opened. Must contain one of the known
        specimen ids ("653980" or "653158"), which determines the internal
        group layout.

    Returns
    -------
    zarr array
        Contents of the dataset at "path".

    Raises
    ------
    ValueError
        If "path" does not match a known specimen id.

    """
    n5store = zarr.N5FSStore(path, "r")
    # NOTE(review): group layout is keyed off hard-coded specimen ids;
    # extend this mapping when new datasets are added.
    if "653980" in path:
        return zarr.open(n5store).ch488.s0
    elif "653158" in path:
        return zarr.open(n5store).s0
    # Bug fix: previously fell through and silently returned None for
    # unrecognized paths; fail loudly instead.
    raise ValueError(f"Unsupported dataset path: {path}")


def read_tensorstore(arr, xyz, shape, from_center=True):
    """
    Reads a chunk of data from a tensorstore array, given the coordinates
    and shape of the chunk.

    Parameters
    ----------
    arr : tensorstore array
        Array from which data is to be read.
    xyz : tuple
        xyz coordinates of the chunk to be read.
    shape : tuple
        Shape (dimensions) of the chunk to be read.
    from_center : bool, optional
        If True, "xyz" is the center of the chunk; if False, it is the
        starting corner. The default is True.

    Returns
    -------
    numpy.ndarray
        Chunk of data read from the tensorstore array.

    """
    # Slice lazily, then materialize the voxels with a blocking read.
    view = read_chunk(arr, xyz, shape, from_center=from_center)
    return view.read().result()


def read_tensorstore_with_bbox(img, bbox):
    """
    Reads the chunk of "img" spanned by the bounding box "bbox".

    Parameters
    ----------
    img : tensorstore array
        Array from which data is to be read.
    bbox : dict
        Bounding box with "min" and "max" keys, each an xyz coordinate.

    Returns
    -------
    numpy.ndarray
        Chunk of data read from "img".

    """
    (x0, y0, z0), (x1, y1, z1) = bbox["min"], bbox["max"]
    chunk = img[x0:x1, y0:y1, z0:z1]
    return chunk.read().result()


def read_chunk(arr, xyz, shape, from_center=True):
    """
    Lazily slices a chunk of the given shape out of "arr".

    Parameters
    ----------
    arr : tensorstore array
        Array to slice from.
    xyz : tuple
        Center (from_center=True) or starting corner (False) of the chunk.
    shape : tuple
        Shape (dimensions) of the chunk.
    from_center : bool, optional
        See "get_start_end". The default is True.

    Returns
    -------
    Deep copy of the sliced view of "arr".

    """
    start, end = get_start_end(xyz, shape, from_center=from_center)
    (x0, y0, z0), (x1, y1, z1) = start, end
    return deepcopy(arr[x0:x1, y0:y1, z0:z1])


def get_start_end(xyz, shape, from_center=True):
    """
    Computes the start and end coordinates of a 3D chunk.

    Parameters
    ----------
    xyz : tuple
        Center of the chunk if "from_center" is True, otherwise its
        starting corner.
    shape : tuple
        Shape (dimensions) of the chunk.
    from_center : bool, optional
        Indication of whether "xyz" is the center or the starting corner of
        the chunk. The default is True.

    Returns
    -------
    tuple
        (start, end) coordinates such that end[i] - start[i] == shape[i].

    """
    if from_center:
        start = [xyz[i] - shape[i] // 2 for i in range(3)]
        # Bug fix: derive "end" from "start" so odd-sized shapes yield a
        # chunk of exactly the requested size (the original computed
        # xyz + shape // 2, which was one short along odd dimensions).
        end = [start[i] + shape[i] for i in range(3)]
    else:
        start = xyz
        end = [xyz[i] + shape[i] for i in range(3)]
    return start, end


def read_superchunks(img_path, labels_path, xyz, shape, from_center=True):
    """
    Concurrently reads matching chunks from an image volume and its
    segmentation (labels) volume.

    Parameters
    ----------
    img_path : str
        Path to the raw image volume (n5 or zarr).
    labels_path : str
        Path to the segmentation volume (neuroglancer_precomputed).
    xyz : tuple
        Center (from_center=True) or starting corner (False) of the chunk.
    shape : tuple
        Shape (dimensions) of the chunk.
    from_center : bool, optional
        See "get_start_end". The default is True.

    Returns
    -------
    tuple
        (img, labels) chunks with identical shapes, cast to int16 and
        int64 respectively.

    """
    img_driver = "n5" if ".n5" in img_path else "zarr"
    with ThreadPoolExecutor() as executor:
        # Kick off both reads in parallel; each opens its own store.
        img_future = executor.submit(
            get_superchunk,
            img_path,
            img_driver,
            xyz,
            shape,
            from_center=from_center,
        )
        labels_future = executor.submit(
            get_superchunk,
            labels_path,
            "neuroglancer_precomputed",
            xyz,
            shape,
            from_center=from_center,
        )
        img = img_future.result().astype(np.int16)
        labels = labels_future.result().astype(np.int64)
    assert img.shape == labels.shape, "img.shape != labels.shape"
    return img, labels


def get_superchunk(path, driver, xyz, shape, from_center=True):
    """
    Opens the volume at "path" and reads a single chunk from it.

    Parameters
    ----------
    path : str
        Path to the volume to read from.
    driver : str
        Storage driver needed to read the data at "path".
    xyz : tuple
        Center (from_center=True) or starting corner (False) of the chunk.
    shape : tuple
        Shape (dimensions) of the chunk.
    from_center : bool, optional
        See "get_start_end". The default is True.

    Returns
    -------
    numpy.ndarray
        Chunk of data read from the volume.

    """
    store = open_tensorstore(path, driver)
    return read_tensorstore(store, xyz, shape, from_center=from_center)


# -- Image Operations --
def normalize_img(img):
    """
    Linearly rescales the intensities of "img" to the range [0, 1].

    Parameters
    ----------
    img : numpy.ndarray
        Image to normalize.

    Returns
    -------
    numpy.ndarray
        Normalized copy of "img"; for a constant-valued image, an array of
        zeros is returned.

    """
    # Bug fix: compute on a copy so the caller's array is not mutated
    # (the original shifted "img" in place with "-=").
    shifted = img - np.min(img)
    peak = np.max(shifted)
    # Guard against division by zero for constant-valued images.
    return shifted / peak if peak > 0 else shifted


def get_mip(img, axis=0):
    """
    Computes the maximum intensity projection (MIP) of "img" along "axis".

    Parameters
    ----------
    img : numpy.ndarray
        Image to compute the MIP of.
    axis : int, optional
        Projection axis. The default is 0.

    Returns
    -------
    numpy.ndarray
        MIP of "img".

    """
    return img.max(axis=axis)


def get_labels_mip(img, axis=0):
    """
    Computes the MIP of a label mask and renders it as an RGB image.

    Parameters
    ----------
    img : numpy.ndarray
        Label mask to project.
    axis : int, optional
        Projection axis. The default is 0.

    Returns
    -------
    numpy.ndarray
        uint8 RGB rendering of the MIP.

    """
    # Colorize distinct labels, then scale [0, 1] floats to uint8.
    rgb = label2rgb(np.max(img, axis=axis))
    return (255 * rgb).astype(np.uint8)
Loading
Loading