Usability Update and some bugfixes #162

Merged · 5 commits · Dec 18, 2024

examples/example_spine_instance.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -32,7 +32,7 @@
 def main():
     with cProfile.Profile() as pr:
         results = evaluator.evaluate(prediction_mask, reference_mask, verbose=False)
-        for groupname, (result, intermediate_steps_data) in results.items():
+        for groupname, result in results.items():
            print()
            print("### Group", groupname)
            print(result)
```

examples/example_spine_instance_config.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -18,7 +18,7 @@
 def main():
     with cProfile.Profile() as pr:
         results = evaluator.evaluate(prediction_mask, reference_mask, verbose=False)
-        for groupname, (result, intermediate_steps_data) in results.items():
+        for groupname, result in results.items():
            print()
            print("### Group", groupname)
            print(result)
```

examples/example_spine_semantic.py (6 changes: 3 additions & 3 deletions)
```diff
@@ -27,13 +27,13 @@
 
 def main():
     with cProfile.Profile() as pr:
-        result, intermediate_steps_data = evaluator.evaluate(
-            prediction_mask, reference_mask
-        )["ungrouped"]
+        result = evaluator.evaluate(prediction_mask, reference_mask)["ungrouped"]
 
     # To print the results, just call print
     print(result)
 
+    intermediate_steps_data = result.intermediate_steps_data
+    assert intermediate_steps_data is not None
     # To get the different intermediate arrays, just use the second returned object
     intermediate_steps_data.original_prediction_arr  # Input prediction array, untouched
     intermediate_steps_data.original_reference_arr  # Input reference array, untouched
```

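Taken together, the example scripts now follow one pattern: `evaluate` returns a plain `dict[str, PanopticaResult]`, and the intermediate arrays live on the result object instead of arriving as a second tuple element. A minimal sketch of the new calling convention (the `evaluator` and the two mask variables are placeholders standing in for the setup these example scripts do earlier; `"ungrouped"` is the default group name seen in the semantic example):

```python
results = evaluator.evaluate(prediction_mask, reference_mask, verbose=False)

# Per-group results are the PanopticaResult objects themselves ...
for groupname, result in results.items():
    print("### Group", groupname)
    print(result)

# ... and the intermediate arrays are an attribute, not a second return value.
steps = results["ungrouped"].intermediate_steps_data
```
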
panoptica/instance_approximator.py (33 changes: 15 additions & 18 deletions)
```diff
@@ -4,7 +4,8 @@
 
 from panoptica.utils.constants import CCABackend
 from panoptica._functionals import _connected_components
-from panoptica.utils.numpy_utils import _get_smallest_fitting_uint
+
+# from panoptica.utils.numpy_utils import _get_smallest_fitting_uint
 from panoptica.utils.processing_pair import (
     MatchedInstancePair,
     SemanticPair,
@@ -80,7 +81,7 @@ def approximate_instances(
             AssertionError: If there are negative values in the semantic maps, which is not allowed.
         """
         # Check validity
-        pred_labels, ref_labels = semantic_pair._pred_labels, semantic_pair._ref_labels
+        pred_labels, ref_labels = semantic_pair.pred_labels, semantic_pair.ref_labels
         pred_label_range = (
             (np.min(pred_labels), np.max(pred_labels))
             if len(pred_labels) > 0
@@ -95,10 +96,10 @@
             min_value >= 0
         ), "There are negative values in the semantic maps. This is not allowed!"
         # Set dtype to smalles fitting uint
-        max_value = max(np.max(pred_label_range[1]), np.max(ref_label_range[1]))
-        dtype = _get_smallest_fitting_uint(max_value)
-        semantic_pair.set_dtype(dtype)
-        print(f"-- Set dtype to {dtype}") if verbose else None
+        # max_value = max(np.max(pred_label_range[1]), np.max(ref_label_range[1]))
+        # dtype = _get_smallest_fitting_uint(max_value)
+        # semantic_pair.set_dtype(dtype)
+        # print(f"-- Set dtype to {dtype}") if verbose else None
 
         # Call algorithm
         instance_pair = self._approximate_instances(semantic_pair, **kwargs)
@@ -153,26 +154,22 @@ def _approximate_instances(
         )
         assert cca_backend is not None
 
-        empty_prediction = len(semantic_pair._pred_labels) == 0
-        empty_reference = len(semantic_pair._ref_labels) == 0
+        empty_prediction = len(semantic_pair.pred_labels) == 0
+        empty_reference = len(semantic_pair.ref_labels) == 0
         prediction_arr, n_prediction_instance = (
-            _connected_components(semantic_pair._prediction_arr, cca_backend)
+            _connected_components(semantic_pair.prediction_arr, cca_backend)
             if not empty_prediction
-            else (semantic_pair._prediction_arr, 0)
+            else (semantic_pair.prediction_arr, 0)
         )
         reference_arr, n_reference_instance = (
-            _connected_components(semantic_pair._reference_arr, cca_backend)
+            _connected_components(semantic_pair.reference_arr, cca_backend)
             if not empty_reference
-            else (semantic_pair._reference_arr, 0)
-        )
-
-        dtype = _get_smallest_fitting_uint(
-            max(prediction_arr.max(), reference_arr.max())
+            else (semantic_pair.reference_arr, 0)
         )
 
         return UnmatchedInstancePair(
-            prediction_arr=prediction_arr.astype(dtype),
-            reference_arr=reference_arr.astype(dtype),
+            prediction_arr=prediction_arr,
+            reference_arr=reference_arr,
             n_prediction_instance=n_prediction_instance,
             n_reference_instance=n_reference_instance,
         )
```

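The behavioral change in this file: connected-component arrays are no longer downcast to the smallest unsigned integer type that fits the largest label; they keep whatever dtype the CCA backend produced. For readers unfamiliar with the commented-out helper, here is a sketch of what a smallest-fitting-uint lookup typically does (this is a reimplementation for illustration, not the actual code in `panoptica.utils.numpy_utils`):

```python
import numpy as np

def smallest_fitting_uint(max_value: int) -> np.dtype:
    """Return the narrowest unsigned integer dtype that can hold max_value."""
    for candidate in (np.uint8, np.uint16, np.uint32, np.uint64):
        if max_value <= np.iinfo(candidate).max:
            return np.dtype(candidate)
    raise ValueError(f"{max_value} does not fit into any numpy uint type")

print(smallest_fitting_uint(255))    # uint8
print(smallest_fitting_uint(70000))  # uint32
```
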
panoptica/instance_matcher.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -131,7 +131,7 @@ def map_instance_labels(
     # Build a MatchedInstancePair out of the newly derived data
     matched_instance_pair = MatchedInstancePair(
         prediction_arr=prediction_arr_relabeled,
-        reference_arr=processing_pair._reference_arr,
+        reference_arr=processing_pair.reference_arr,
     )
     return matched_instance_pair
 
```

panoptica/panoptica_aggregator.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -192,7 +192,7 @@ def _save_one_subject(self, subject_name, result_grouped):
         #
         content = [subject_name]
         for groupname in self.__class_group_names:
-            result: PanopticaResult = result_grouped[groupname][0]
+            result: PanopticaResult = result_grouped[groupname]
             result_dict = result.to_dict()
             if result.computation_time is not None:
                 result_dict[COMPUTATION_TIME_KEY] = result.computation_time
```

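With the tuple gone, the aggregator indexes the grouped results directly. A small sketch of the shape `_save_one_subject` now consumes (`to_dict` and `computation_time` appear in the diff above; the `"computation_time"` key string here is illustrative, as the real code uses the `COMPUTATION_TIME_KEY` constant):

```python
# result_grouped maps group name -> PanopticaResult (no more tuples).
result_grouped = evaluator.evaluate(prediction_mask, reference_mask)
for name, result in result_grouped.items():
    row = result.to_dict()
    if result.computation_time is not None:  # populated when save_group_times is on
        row["computation_time"] = result.computation_time
```
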
panoptica/panoptica_evaluator.py (32 changes: 18 additions & 14 deletions)
```diff
@@ -3,7 +3,7 @@
 from panoptica.instance_approximator import InstanceApproximator
 from panoptica.instance_evaluator import evaluate_matched_instance
 from panoptica.instance_matcher import InstanceMatchingAlgorithm
-from panoptica.metrics import Metric, _Metric
+from panoptica.metrics import Metric
 from panoptica.panoptica_result import PanopticaResult
 from panoptica.utils.timing import measure_time
 from panoptica.utils import EdgeCaseHandler
@@ -12,7 +12,6 @@
     MatchedInstancePair,
     SemanticPair,
     UnmatchedInstancePair,
-    _ProcessingPair,
     InputType,
     EvaluateInstancePair,
     IntermediateStepsData,
@@ -54,14 +53,18 @@ def __init__(
             expected_input (type, optional): Expected DataPair Input Type. Defaults to InputType.MATCHED_INSTANCE (which is type(MatchedInstancePair)).
             instance_approximator (InstanceApproximator | None, optional): Determines which instance approximator is used if necessary. Defaults to None.
             instance_matcher (InstanceMatchingAlgorithm | None, optional): Determines which instance matching algorithm is used if necessary. Defaults to None.
-            iou_threshold (float, optional): Iou Threshold for evaluation. Defaults to 0.5.
+
             edge_case_handler (edge_case_handler, optional): EdgeCaseHandler to be used. If none, will create the default one
-            segmentation_class_groups (SegmentationClassGroups, optional): If not none, will evaluate per class group defined, instead of over all at the same time.
+            segmentation_class_groups (SegmentationClassGroups, optional): If not none, will evaluate per class group defined, instead of over all at the same time. A class group is a collection of labels that are considered of the same class / structure.
+
             instance_metrics (list[Metric]): List of all metrics that should be calculated between all instances
             global_metrics (list[Metric]): List of all metrics that should be calculated on the global binary masks
+
             decision_metric: (Metric | None, optional): This metric is the final decision point between True Positive and False Positive. Can be left away if the matching algorithm is used (it will match by a metric and threshold already)
             decision_threshold: (float | None, optional): Threshold for the decision_metric
-            log_times (bool): If true, will printout the times for the different phases of the pipeline.
+
+            save_group_times(bool): If true, will save the computation time of each sample and put that into the result object.
+            log_times (bool): If true, will print the times for the different phases of the pipeline.
             verbose (bool): If true, will spit out more details than you want.
         """
         self.__expected_input = expected_input
@@ -117,7 +120,7 @@ def evaluate(
         save_group_times: bool | None = None,
         log_times: bool | None = None,
         verbose: bool | None = None,
-    ) -> dict[str, tuple[PanopticaResult, IntermediateStepsData]]:
+    ) -> dict[str, PanopticaResult]:
         processing_pair = self.__expected_input(prediction_arr, reference_arr)
         assert isinstance(
             processing_pair, self.__expected_input.value
@@ -130,7 +133,7 @@
             processing_pair.reference_arr, raise_error=True
         )
 
-        result_grouped: dict[str, tuple[PanopticaResult, IntermediateStepsData]] = {}
+        result_grouped: dict[str, PanopticaResult] = {}
         for group_name, label_group in self.__segmentation_class_groups.items():
             result_grouped[group_name] = self._evaluate_group(
                 group_name,
@@ -144,7 +147,7 @@
                 ),
                 log_times=log_times,
                 verbose=verbose,
-            )[1:]
+            )
         return result_grouped
 
     @property
@@ -166,7 +169,7 @@ def resulting_metric_keys(self) -> list[str]:
         dummy_input = MatchedInstancePair(
             np.ones((1, 1, 1), dtype=np.uint8), np.ones((1, 1, 1), dtype=np.uint8)
         )
-        _, res, _ = self._evaluate_group(
+        res = self._evaluate_group(
             group_name="",
             label_group=LabelGroup(1, single_instance=False),
             processing_pair=dummy_input,
@@ -188,7 +191,7 @@ def _evaluate_group(
         verbose: bool | None = None,
         log_times: bool | None = None,
         save_group_times: bool = False,
-    ):
+    ) -> PanopticaResult:
         assert isinstance(label_group, LabelGroup)
         if self.__save_group_times:
             start_time = perf_counter()
@@ -208,7 +211,7 @@
             )
             decision_threshold = 0.0
 
-        result, intermediate_steps_data = panoptic_evaluate(
+        result = panoptic_evaluate(
             input_pair=processing_pair_grouped,
             edge_case_handler=self.__edge_case_handler,
             instance_approximator=self.__instance_approximator,
@@ -225,7 +228,7 @@
         if save_group_times:
             duration = perf_counter() - start_time
             result.computation_time = duration
-        return group_name, result, intermediate_steps_data
+        return result
 
 
 def panoptic_evaluate(
@@ -242,7 +245,7 @@ def panoptic_evaluate(
     verbose=False,
     verbose_calc=False,
     **kwargs,
-) -> tuple[PanopticaResult, IntermediateStepsData]:
+) -> PanopticaResult:
     """
     Perform panoptic evaluation on the given processing pair.
 
@@ -364,13 +367,14 @@
         list_metrics=processing_pair.list_metrics,
         global_metrics=global_metrics,
         edge_case_handler=edge_case_handler,
+        intermediate_steps_data=intermediate_steps_data,
     )
 
     if isinstance(processing_pair, PanopticaResult):
         processing_pair._global_metrics = global_metrics
         if result_all:
             processing_pair.calculate_all(print_errors=verbose_calc)
-        return processing_pair, intermediate_steps_data
+        return processing_pair
 
     raise RuntimeError("End of panoptic pipeline reached without results")
```

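Callers of the lower-level `panoptic_evaluate` see the same simplification as users of `Panoptica_Evaluator.evaluate`: one `PanopticaResult` instead of a `(result, intermediate_steps_data)` pair. A hedged sketch of adapting a direct call (keyword names are taken from the `_evaluate_group` call site above; the real call passes further pipeline components that this diff truncates):

```python
# Before: result, intermediate_steps_data = panoptic_evaluate(...)
result = panoptic_evaluate(
    input_pair=processing_pair_grouped,
    edge_case_handler=edge_case_handler,
    instance_approximator=instance_approximator,
)
steps = result.intermediate_steps_data  # travels with the result now
```
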
panoptica/panoptica_result.py (3 changes: 3 additions & 0 deletions)
```diff
@@ -13,6 +13,7 @@
     MetricType,
 )
 from panoptica.utils import EdgeCaseHandler
+from panoptica.utils.processing_pair import IntermediateStepsData
 
 
 class PanopticaResult(object):
@@ -27,6 +28,7 @@ def __init__(
         list_metrics: dict[Metric, list[float]],
         edge_case_handler: EdgeCaseHandler,
         global_metrics: list[Metric] = [],
+        intermediate_steps_data: IntermediateStepsData | None = None,
         computation_time: float | None = None,
     ):
         """Result object for Panoptica, contains all calculatable metrics
@@ -45,6 +47,7 @@
         empty_list_std = self._edge_case_handler.handle_empty_list_std().value
         self._global_metrics: list[Metric] = global_metrics
         self.computation_time = computation_time
+        self.intermediate_steps_data = intermediate_steps_data
         ######################
         # Evaluation Metrics #
         ######################
```

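Because the new constructor keyword defaults to `None`, `PanopticaResult` objects built outside the full pipeline simply carry no intermediate data; that is why the updated `example_spine_semantic.py` asserts the attribute is not `None` before using it. A sketch of the defensive read, assuming `result` came from an evaluation run as in the examples above:

```python
steps = result.intermediate_steps_data  # IntermediateStepsData | None
if steps is not None:
    prediction_in = steps.original_prediction_arr  # untouched input arrays
    reference_in = steps.original_reference_arr
```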