Commit 14627c5

Fix for crop + workflow

multimeric committed Aug 14, 2024
1 parent 4dfeb7c commit 14627c5

Showing 5 changed files with 160 additions and 93 deletions.
core/lls_core/models/lattice_data.py (9 changes: 5 additions & 4 deletions)
@@ -333,8 +333,8 @@ def _process_crop(self) -> Iterable[ImageSlice]:
                     decon_processing=self.deconvolution.decon_processing
                 )
 
-            yield slice.copy_with_data(
-                crop_volume_deskew(
+            yield slice.copy(update={
+                "data": crop_volume_deskew(
                     original_volume=slice.data,
                     deconvolution=self.deconv_enabled,
                     get_deskew_and_decon=False,
@@ -349,8 +349,9 @@ def _process_crop(self) -> Iterable[ImageSlice]:
                     z_start=self.crop.z_range[0],
                     z_end=self.crop.z_range[1],
                     **deconv_args
-                )
-            )
+                ),
+                "roi_index": roi_index
+            })
 
     def _process_non_crop(self) -> Iterable[ImageSlice]:
         """
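
The key change above swaps the bespoke `copy_with_data` helper for pydantic's built-in `copy(update=...)`, which can replace several fields at once and so lets the crop path carry `roi_index` along with the cropped data. A minimal sketch of the pattern, assuming a pydantic v1-style model (`ImageSlice` here is a stand-in, not the real lls_core class):

    from typing import Any, Optional
    from pydantic import BaseModel

    class ImageSlice(BaseModel):
        data: Any
        roi_index: Optional[int] = None

    original = ImageSlice(data=[[1, 2], [3, 4]])
    # copy(update=...) returns a new instance with the given fields replaced;
    # note that pydantic does not re-validate the updated values.
    cropped = original.copy(update={"data": [[1]], "roi_index": 0})
    print(cropped.roi_index)  # -> 0
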
core/lls_core/models/results.py (7 changes: 4 additions & 3 deletions)
@@ -6,7 +6,7 @@
 from typing_extensions import Generic, TypeVar
 from pydantic import BaseModel, NonNegativeInt
 from lls_core.types import ArrayLike, is_arraylike
-from lls_core.utils import make_filename_prefix
+from lls_core.utils import make_filename_suffix
 from lls_core.writers import RoiIndex, Writer
 from pandas import DataFrame, Series
 
@@ -95,8 +95,9 @@ def process(self) -> Iterable[Tuple[RoiIndex, ProcessedWorkflowOutput]]:
         """
         import pandas as pd
 
+        # Handle each ROI separately
         for roi, roi_results in groupby(self.slices, key=lambda it: it.roi_index):
-            values = []
+            values: list[Writer, dict, tuple, list] = []
             for result in roi_results:
                 # Ensure the data is in a tuple
                 data = (result.data,) if is_arraylike(result.data) else result.data
@@ -135,7 +136,7 @@ def save(self) -> Iterable[Path]:
         """
         for roi, result in self.process():
             if isinstance(result, DataFrame):
-                path = self.lattice_data.make_filepath_df(make_filename_prefix(roi_index=roi),result)
+                path = self.lattice_data.make_filepath_df(make_filename_suffix(roi_index=roi),result)
                 result = result.apply(Series.explode)
                 result.to_csv(str(path))
                 yield path
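
One subtlety in `process()` above: `itertools.groupby` only groups consecutive elements, so the per-ROI grouping is only correct if `self.slices` is already ordered by `roi_index`. A small self-contained illustration of that behaviour:

    from itertools import groupby

    slices = [
        {"roi_index": 0, "data": "a"},
        {"roi_index": 0, "data": "b"},
        {"roi_index": 1, "data": "c"},
    ]
    # groupby merges only adjacent equal keys; unsorted input would
    # produce duplicate groups for the same roi_index
    for roi, group in groupby(slices, key=lambda it: it["roi_index"]):
        print(roi, [it["data"] for it in group])
    # 0 ['a', 'b']
    # 1 ['c']
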
core/lls_core/utils.py (2 changes: 1 addition & 1 deletion)
@@ -320,7 +320,7 @@ def array_to_dask(arr: ArrayLike) -> DaskArray:
     else:
         return from_array(arr)
 
-def make_filename_prefix(prefix: Optional[str] = None, roi_index: Optional[str] = None, channel: Optional[str] = None, time: Optional[str] = None) -> str:
+def make_filename_suffix(prefix: Optional[str] = None, roi_index: Optional[str] = None, channel: Optional[str] = None, time: Optional[str] = None) -> str:
     """
     Generates a filename for this result
     """
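
Only the signature of the renamed helper is visible in this hunk; its body is collapsed. Purely as a hypothetical sketch of what a suffix builder with this signature could look like (an assumption for illustration, not the actual lls_core implementation):

    from typing import Optional

    def make_filename_suffix(
        prefix: Optional[str] = None,
        roi_index: Optional[str] = None,
        channel: Optional[str] = None,
        time: Optional[str] = None,
    ) -> str:
        # Hypothetical: join whichever parts were supplied, e.g. "ROI_0_C1_T2"
        parts = []
        if prefix is not None:
            parts.append(prefix)
        if roi_index is not None:
            parts.append(f"ROI_{roi_index}")
        if channel is not None:
            parts.append(f"C{channel}")
        if time is not None:
            parts.append(f"T{time}")
        return "_".join(parts)
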
core/lls_core/writers.py (8 changes: 4 additions & 4 deletions)
@@ -2,13 +2,13 @@
 
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from lls_core.types import ArrayLike
 
 from pydantic import NonNegativeInt
 
-from lls_core.utils import make_filename_prefix
+from lls_core.utils import make_filename_suffix
 RoiIndex = Optional[NonNegativeInt]
 
 if TYPE_CHECKING:
@@ -51,7 +51,7 @@ class BdvWriter(Writer):
 
     def __post_init__(self):
         import npy2bdv
-        path = self.lattice.make_filepath(make_filename_prefix(roi_index=self.roi_index))
+        path = self.lattice.make_filepath("_" + make_filename_suffix(roi_index=self.roi_index))
         self.bdv_writer = npy2bdv.BdvWriter(
             filename=str(path),
             compression='gzip',
@@ -100,7 +100,7 @@ def flush(self):
         ).astype("uint16")
         # ImageJ TIFF can only handle 16-bit uints, not 32
         path = self.lattice.make_filepath(
-            make_filename_prefix(
+            make_filename_suffix(
                 channel=first_result.channel,
                 time=first_result.time,
                 roi_index=first_result.roi_index
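
For context on the `BdvWriter` path fix: npy2bdv writes BigDataViewer/BigStitcher-style HDF5 files. A rough standalone sketch of the library's typical usage, based on npy2bdv's README; exact method names vary between releases, so treat the details as assumptions:

    import numpy as np
    import npy2bdv

    stack = np.zeros((64, 128, 128), dtype=np.uint16)  # one Z-stack
    writer = npy2bdv.BdvWriter("example.h5", nchannels=1, compression='gzip')
    writer.append_view(stack, time=0, channel=0)
    writer.write_xml()  # older releases used write_xml_file(ntimes=1)
    writer.close()
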
core/tests/test_process.py (227 changes: 146 additions & 81 deletions)
@@ -2,6 +2,7 @@
 import pytest
 from lls_core.models import LatticeData
 from lls_core.models.crop import CropParams
+from lls_core.models.output import SaveFileType
 from lls_core.sample import resources
 from importlib_resources import as_file
 import tempfile
@@ -11,134 +12,198 @@
 
 from .params import parameterized
 
-root = Path(__file__).parent / "data"
+root = Path(__file__).parent / "data"
 
 
 def open_psf(name: str):
     with as_file(resources / "psfs" / "zeiss_simulated" / name) as path:
         return path
 
 
 @parameterized
 def test_process(minimal_image_path: str, args: dict):
     # Processes a minimal set of images, with multiple parameter combinations
-    for slice in LatticeData.parse_obj({
-        "input_image": minimal_image_path,
-        **args
-    }).process().slices:
+    for slice in (
+        LatticeData.parse_obj({"input_image": minimal_image_path, **args})
+        .process()
+        .slices
+    ):
         assert slice.data.ndim == 3
 
 
 def test_process_all(image_path: str):
     # Processes all input images, but without parameter combinations
-    for slice in LatticeData.parse_obj({
-        "input_image": image_path
-    }).process().slices:
+    for slice in (
+        LatticeData.parse_obj({"input_image": image_path}).process().slices
+    ):
         assert slice.data.ndim == 3
 
 
 @parameterized
 def test_save(minimal_image_path: str, args: dict):
     with tempfile.TemporaryDirectory() as tempdir:
-        LatticeData.parse_obj({
-            "input_image": minimal_image_path,
-            "save_dir": tempdir,
-            **args
-        }).process().save_image()
+        LatticeData.parse_obj(
+            {"input_image": minimal_image_path, "save_dir": tempdir, **args}
+        ).process().save_image()
         results = list(Path(tempdir).iterdir())
         assert len(results) > 0
 
 
 def test_process_deconv_crop():
-    for slice in LatticeData.parse_obj({
-        "input_image": root / "raw.tif",
-        "deconvolution": {
-            "psf": [root / "psf.tif"],
-        },
-        "crop": CropParams(roi_list = [[[0, 0], [0, 110], [95, 0], [95, 110]]])
-    }).process().slices:
+    for slice in (
+        LatticeData.parse_obj(
+            {
+                "input_image": root / "raw.tif",
+                "deconvolution": {
+                    "psf": [root / "psf.tif"],
+                },
+                "crop": CropParams(
+                    roi_list=[[[0, 0], [0, 110], [95, 0], [95, 110]]]
+                ),
+            }
+        )
+        .process()
+        .slices
+    ):
         assert slice.data.ndim == 3
 
 
 def test_process_time_range(multi_channel_time: Path):
-    from lls_core.models.output import SaveFileType
-
     with tempfile.TemporaryDirectory() as outdir:
-        LatticeData.parse_obj({
-            "input_image": multi_channel_time,
-            # Channels 2 & 3
-            "channel_range": range(1, 3),
-            # Time point 2
-            "time_range": range(1, 2),
-            "save_dir": outdir,
-            "save_type": SaveFileType.h5
-        }).save()
-
-@pytest.mark.parametrize(["background"], [(1, ), ("auto",), ("second_last",)])
+        LatticeData.parse_obj(
+            {
+                "input_image": multi_channel_time,
+                # Channels 2 & 3
+                "channel_range": range(1, 3),
+                # Time point 2
+                "time_range": range(1, 2),
+                "save_dir": outdir,
+                "save_type": SaveFileType.h5,
+            }
+        ).save()
+
+
+@pytest.mark.parametrize(["background"], [(1,), ("auto",), ("second_last",)])
 @parameterized
 def test_process_deconvolution(args: dict, background: Any):
-    for slice in LatticeData.parse_obj({
-        "input_image": root / "raw.tif",
-        "deconvolution": {
-            "psf": [root / "psf.tif"],
-            "background": background
-        },
-        **args
-    }).process().slices:
+    for slice in (
+        LatticeData.parse_obj(
+            {
+                "input_image": root / "raw.tif",
+                "deconvolution": {
+                    "psf": [root / "psf.tif"],
+                    "background": background,
+                },
+                **args,
+            }
+        )
+        .process()
+        .slices
+    ):
         assert slice.data.ndim == 3
 
 
 @parameterized
-@pytest.mark.parametrize(["workflow_name"], [("image_workflow", ), ("table_workflow", )])
-def test_process_workflow(args: dict, request: FixtureRequest, workflow_name: str):
+@pytest.mark.parametrize(
+    ["workflow_name"], [("image_workflow",), ("table_workflow",)]
+)
+def test_process_workflow(
+    args: dict, request: FixtureRequest, workflow_name: str
+):
     from pandas import DataFrame
 
     workflow: Workflow = request.getfixturevalue(workflow_name)
     with tempfile.TemporaryDirectory() as tmpdir:
-        for roi, output in LatticeData.parse_obj({
-            "input_image": root / "raw.tif",
-            "workflow": workflow,
-            "save_dir": tmpdir,
-            **args
-        }).process_workflow().process():
+        for roi, output in (
+            LatticeData.parse_obj(
+                {
+                    "input_image": root / "raw.tif",
+                    "workflow": workflow,
+                    "save_dir": tmpdir,
+                    **args,
+                }
+            )
+            .process_workflow()
+            .process()
+        ):
             assert roi is None or isinstance(roi, int)
             assert isinstance(output, (Path, DataFrame))
 
-@pytest.mark.parametrize(["roi_subset"], [
-    [None],
-    [[0]],
-    [[0, 1]],
-])
+
+@pytest.mark.parametrize(
+    ["roi_subset"],
+    [
+        [None],
+        [[0]],
+        [[0, 1]],
+    ],
)
 @parameterized
 def test_process_crop_roi_file(args: dict, roi_subset: Optional[List[int]]):
     # Test cropping with a roi zip file, selecting different subsets from that file
     with as_file(resources / "RBC_tiny.czi") as lattice_path:
         rois = root / "crop" / "two_rois.zip"
-        for slice in LatticeData.parse_obj({
-            "input_image": lattice_path,
-            "crop": {
-                "roi_list": [rois],
-                "roi_subset": roi_subset
-            },
-            **args
-        }).process().slices:
+        slices = list(
+            LatticeData.parse_obj(
+                {
+                    "input_image": lattice_path,
+                    "crop": {"roi_list": [rois], "roi_subset": roi_subset},
+                    **args,
+                }
+            )
+            .process()
+            .slices
+        )
+        # Check we made the correct number of slices
+        assert len(slices) == len(roi_subset) if roi_subset is not None else 2
+        for slice in slices:
+            # Check correct dimensionality
             assert slice.data.ndim == 3
 
-@pytest.mark.parametrize(["roi"], [
-    [[[
-        (174.0, 24.0),
-        (174.0, 88.0),
-        (262.0, 88.0),
-        (262.0, 24.0)
-    ]]],
-    [[[
-        (174.13, 24.2),
-        (173.98, 87.87),
-        (262.21, 88.3),
-        (261.99, 23.79)
-    ]]],
-])
+
+def test_process_crop_workflow(table_workflow: Workflow):
+    # Test cropping with a roi zip file, selecting different subsets from that file
+    with as_file(
+        resources / "RBC_tiny.czi"
+    ) as lattice_path, tempfile.TemporaryDirectory() as outdir:
+        LatticeData.parse_obj(
+            {
+                "input_image": lattice_path,
+                "workflow": table_workflow,
+                "save_dir": outdir,
+                "save_type": SaveFileType.h5,
+                "crop": {
+                    "roi_list": [root / "crop" / "two_rois.zip"],
+                },
+            }
+        ).process().save_image()
+        # Two separate H5 files should be created in this scenario: one for each ROI
+        results = list(Path(outdir).glob("*.h5"))
+        assert len(results) == 2
+
+
+@pytest.mark.parametrize(
+    ["roi"],
+    [
+        [[[(174.0, 24.0), (174.0, 88.0), (262.0, 88.0), (262.0, 24.0)]]],
+        [[[(174.13, 24.2), (173.98, 87.87), (262.21, 88.3), (261.99, 23.79)]]],
+    ],
+)
 @parameterized
 def test_process_crop_roi_manual(args: dict, roi: List):
     # Test manually provided ROIs, both with integer and float values
     with as_file(resources / "RBC_tiny.czi") as lattice_path:
-        for slice in LatticeData.parse_obj({
-            "input_image": lattice_path,
-            "crop": {
-                "roi_list": roi
-            },
-            **args
-        }).process().slices:
+        for slice in (
+            LatticeData.parse_obj(
+                {
+                    "input_image": lattice_path,
+                    "crop": {"roi_list": roi},
+                    **args,
+                }
+            )
+            .process()
+            .slices
+        ):
             assert slice.data.ndim == 3
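
Taken together, the tests exercise a small public surface. A condensed sketch of that API, mirroring the calls above (the file path here is a placeholder):

    from lls_core.models import LatticeData

    params = LatticeData.parse_obj({
        "input_image": "raw.tif",  # placeholder path
        "crop": {"roi_list": [[[0, 0], [0, 110], [95, 0], [95, 110]]]},
    })
    # process() yields one 3D volume per ROI/time/channel combination
    for slice in params.process().slices:
        print(slice.roi_index, slice.data.ndim)  # ndim == 3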
