Merge pull request #69 from Leengit/support_ndpi
ENH: Use large_image for NDPI, etc., not just SVS files
Leengit authored Mar 15, 2022
2 parents aeb2b81 + ea665e9 commit ded55f8
Showing 2 changed files with 15 additions and 38 deletions.
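
For context on what the change enables: a minimal sketch (file path hypothetical; assumes large_image plus a suitable tile-source plugin such as large-image-source-openslide is installed) of opening a non-SVS whole-slide image and reading its magnification metadata with large_image, the same call the updated code relies on.

import large_image

# Hypothetical path; any format with an installed tile source works, not just .svs.
ts = large_image.open("slide.ndpi")

# getMetadata() reports pyramid and resolution information, including the
# scan ("native") magnification when the source provides it.
metadata = ts.getMetadata()
print(metadata["sizeX"], metadata["sizeY"], metadata["levels"])
print("scan magnification:", metadata.get("magnification"))

Because large_image dispatches on file format through its tile-source plugins, the same code path covers SVS, NDPI, and other vendor formats, which is why configure.py can relax its filename test from matching .svs to merely excluding .zarr.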
19 changes: 2 additions & 17 deletions histomics_stream/configure.py
@@ -105,7 +105,7 @@ def __call__(self, slide):
         filename = slide["filename"]
 
         # Do the work.
-        if re.compile(r"\.svs$").search(filename):
+        if not re.compile(r"\.zarr$").search(filename):
            import large_image
 
             # read whole-slide image file and create large_image object
@@ -202,7 +202,7 @@ def __call__(self, slide):
                 / read_magnification
             )
 
-        elif re.compile(r"\.zarr$").search(filename):
+        else:
             import zarr
             import openslide as os
 
@@ -253,21 +253,6 @@ def __call__(self, slide):
                     f"Couldn't find magnification {self.target_magnification}X in Zarr storage."
                 )
 
-        else:
-            from PIL import Image
-
-            # We don't know magnifications so assume reasonable values
-            level = 0
-            slide["target_magnification"] = self.target_magnification
-            slide["scan_magnification"] = None
-            slide["read_magnification"] = None
-            slide["returned_magnification"] = None
-
-            pil_obj = Image.open(filename)
-            # (Note that number_pixel_columns_for_slide is before
-            # number_pixel_rows_for_slide)
-            number_pixel_columns_for_slide, number_pixel_rows_for_slide = pil_obj.size
-
         slide["level"] = level
         # Note that slide size is defined by the requested magnification, which may not
         # be the same as the magnification for the selected level. To get the slide
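
The non-Zarr branch above now leaves format handling and magnification bookkeeping to large_image. As a rough, standalone illustration of the scan/target/returned-magnification relationship this file tracks (not the package's actual logic; the helper below is hypothetical and assumes a power-of-two pyramid with full resolution at level 0):

def nearest_level(scan_magnification, num_levels, target_magnification):
    # Hypothetical helper: choose the pyramid level whose magnification is
    # closest to the requested target, assuming each level halves the one above.
    candidates = [scan_magnification / 2 ** level for level in range(num_levels)]
    level = min(range(num_levels), key=lambda i: abs(candidates[i] - target_magnification))
    return level, candidates[level]

# A 40X scan with 7 levels, requested at 10X, would be read from level 2 at 10.0X.
print(nearest_level(40.0, 7, 10.0))  # (2, 10.0)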
34 changes: 13 additions & 21 deletions histomics_stream/tensorflow.py
@@ -349,28 +349,20 @@ def _py_read_chunk_pixels(
         chunk_right = math.floor(chunk_right.numpy() / factor.numpy() + 0.01)
         returned_magnification = returned_magnification.numpy()
 
-        if re.compile(r"\.svs$").search(filename):
-            import large_image
+        import large_image
 
-            ts = large_image.open(filename)
-            chunk = ts.getRegion(
-                scale=dict(magnification=returned_magnification),
-                format=large_image.constants.TILE_FORMAT_NUMPY,
-                region=dict(
-                    left=chunk_left,
-                    top=chunk_top,
-                    width=chunk_right - chunk_left,
-                    height=chunk_bottom - chunk_top,
-                    units="mag_pixels",
-                ),
-            )[0]
-        else:
-            from PIL import Image
-
-            pil_obj = Image.open(filename)
-            chunk = np.asarray(pil_obj)[
-                chunk_left:chunk_right, chunk_top:chunk_bottom, :
-            ]
+        ts = large_image.open(filename)
+        chunk = ts.getRegion(
+            scale=dict(magnification=returned_magnification),
+            format=large_image.constants.TILE_FORMAT_NUMPY,
+            region=dict(
+                left=chunk_left,
+                top=chunk_top,
+                width=chunk_right - chunk_left,
+                height=chunk_bottom - chunk_top,
+                units="mag_pixels",
+            ),
+        )[0]
 
         # Do we want to support other than RGB and/or other than uint8?!!!
         return tf.convert_to_tensor(chunk[..., :3], dtype=tf.uint8)
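
Putting the new reader path together outside of the dataset pipeline: a standalone sketch (filename and coordinates hypothetical; requires large_image and tensorflow) of reading one chunk at a requested magnification and handing it to TensorFlow as an RGB uint8 tensor, mirroring what _py_read_chunk_pixels now does for every supported format.

import large_image
import tensorflow as tf

ts = large_image.open("slide.ndpi")  # hypothetical filename

# Read a 512 x 512 pixel chunk whose top-left corner is (left=1024, top=2048)
# in the coordinate system of the 20X rendering of the slide.
chunk, _ = ts.getRegion(
    scale=dict(magnification=20.0),
    format=large_image.constants.TILE_FORMAT_NUMPY,
    region=dict(left=1024, top=2048, width=512, height=512, units="mag_pixels"),
)

# Keep only the RGB channels and convert to a uint8 tensor.
tensor = tf.convert_to_tensor(chunk[..., :3], dtype=tf.uint8)
print(tensor.shape)  # (512, 512, 3)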
