Commit
refactored regression metrics to new dir
Ido Amos [email protected] committed Sep 10, 2024
1 parent 8aed967 commit da7b348
Showing 4 changed files with 134 additions and 133 deletions.
2 changes: 1 addition & 1 deletion fuse/eval/examples/examples_stats.py
@@ -17,7 +17,7 @@
"""

-from fuse.eval.metrics.stat.metrics_stat_common import MetricPearsonCorrelation
+from fuse.eval.metrics.regression.metrics import MetricPearsonCorrelation
import numpy as np
import pandas as pd
from collections import OrderedDict
Empty file.
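Side note for consumers of the library: this refactor only changes the import path, exactly as the examples_stats.py hunk above shows. A minimal migration sketch:

# Old path (removed by this commit):
# from fuse.eval.metrics.stat.metrics_stat_common import MetricPearsonCorrelation
# New path:
from fuse.eval.metrics.regression.metrics import MetricPearsonCorrelation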
131 changes: 131 additions & 0 deletions fuse/eval/metrics/regression/metrics.py
@@ -0,0 +1,131 @@
from typing import List, Optional, Union
from fuse.eval.metrics.libs.stat import Stat
from fuse.eval.metrics.metrics_common import MetricDefault
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error


class MetricPearsonCorrelation(MetricDefault):
def __init__(
self, pred: str, target: str, mask: Optional[str] = None, **kwargs: dict
) -> None:
super().__init__(
pred=pred,
target=target,
mask=mask,
metric_func=Stat.pearson_correlation,
**kwargs,
)


class MetricSpearmanCorrelation(MetricDefault):
def __init__(
self, pred: str, target: str, mask: Optional[str] = None, **kwargs: dict
) -> None:
super().__init__(
pred=pred,
target=target,
mask=mask,
metric_func=Stat.spearman_correlation,
**kwargs,
)


class MetricMAE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
super().__init__(
pred=pred,
target=target,
metric_func=self.mae,
**kwargs,
)

def mae(
self,
pred: Union[List, np.ndarray],
target: Union[List, np.ndarray],
**kwargs: dict,
) -> float:
return mean_absolute_error(y_true=target, y_pred=pred)


class MetricMSE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
Our implementation of standard MSE, current version of scikit dones't support it as a metric.
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
super().__init__(
pred=pred,
target=target,
metric_func=self.mse,
**kwargs,
)

def mse(
self,
pred: Union[List, np.ndarray],
target: Union[List, np.ndarray],
**kwargs: dict,
) -> float:
return mean_squared_error(y_true=target, y_pred=pred)


class MetricRMSE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
        super().__init__(
            pred=pred,
            target=target,
            metric_func=self.rmse,
            **kwargs,
        )

    def rmse(
        self,
        pred: Union[List, np.ndarray],
        target: Union[List, np.ndarray],
        **kwargs: dict,
    ) -> float:
        pred = np.array(pred).flatten()
        target = np.array(target).flatten()

        assert len(pred) == len(
            target
        ), f"Expected pred and target to have the same number of elements but found: {len(pred)} elements in pred and {len(target)} in target"

        squared_diff = (pred - target) ** 2
        return float(np.sqrt(squared_diff.mean()))
134 changes: 2 additions & 132 deletions fuse/eval/metrics/stat/metrics_stat_common.py
@@ -1,10 +1,6 @@
-from typing import Any, Dict, Hashable, Optional, Sequence, Union, List
+from typing import Any, Dict, Hashable, Optional, Sequence
from collections import Counter
-from fuse.eval.metrics.metrics_common import MetricDefault, MetricWithCollectorBase
-from fuse.eval.metrics.libs.stat import Stat
-
-import numpy as np  # is this import an issue here? if so can move to other dir
-from sklearn.metrics import mean_absolute_error, mean_squared_error
+from fuse.eval.metrics.metrics_common import MetricWithCollectorBase


class MetricUniqueValues(MetricWithCollectorBase):
@@ -23,129 +19,3 @@ def eval(
counter = Counter(values)

return list(counter.items())


class MetricPearsonCorrelation(MetricDefault):
def __init__(
self, pred: str, target: str, mask: Optional[str] = None, **kwargs: dict
) -> None:
super().__init__(
pred=pred,
target=target,
mask=mask,
metric_func=Stat.pearson_correlation,
**kwargs,
)


class MetricSpearmanCorrelation(MetricDefault):
def __init__(
self, pred: str, target: str, mask: Optional[str] = None, **kwargs: dict
) -> None:
super().__init__(
pred=pred,
target=target,
mask=mask,
metric_func=Stat.spearman_correlation,
**kwargs,
)


class MetricMAE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
super().__init__(
pred=pred,
target=target,
metric_func=self.mae,
**kwargs,
)

def mae(
self,
pred: Union[List, np.ndarray],
target: Union[List, np.ndarray],
**kwargs: dict,
) -> float:
return mean_absolute_error(y_true=target, y_pred=pred)


class MetricMSE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
Our implementation of standard MSE, current version of scikit doesn't support it as a metric.
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
super().__init__(
pred=pred,
target=target,
metric_func=self.mse,
**kwargs,
)

def mse(
self,
pred: Union[List, np.ndarray],
target: Union[List, np.ndarray],
**kwargs: dict,
) -> float:
return mean_squared_error(y_true=target, y_pred=pred)


class MetricRMSE(MetricDefault):
def __init__(
self,
pred: str,
target: str,
**kwargs: dict,
) -> None:
"""
See MetricDefault for the missing params
:param pred: scalar predictions
:param target: ground truth scalar labels
:param threshold: threshold to apply to both pred and target
:param balanced: optionally to use balanced accuracy (from sklearn) instead of regular accuracy.
"""
super().__init__(
pred=pred,
target=target,
metric_func=self.mse,
**kwargs,
)

def mse(
self,
pred: Union[List, np.ndarray],
target: Union[List, np.ndarray],
**kwargs: dict,
) -> float:

pred = np.array(pred).flatten()
target = np.array(target).flatten()

assert len(pred) == len(
target
), f"Expected pred and target to have the dimensions but found: {len(pred)} elements in pred and {len(target)} in target"

squared_diff = (pred - target) ** 2
return squared_diff.mean()

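For reference, the RMSE produced by the new regression/metrics.py (mean of squared differences, then square root) can be sanity-checked against scikit-learn; a minimal sketch with illustrative values:

import numpy as np
from sklearn.metrics import mean_squared_error

pred = np.array([2.0, 4.0, 6.0])
target = np.array([1.0, 3.0, 8.0])

# Hand-rolled RMSE, mirroring MetricRMSE: mean of squared differences, then sqrt.
rmse_manual = np.sqrt(((pred - target) ** 2).mean())  # sqrt((1 + 1 + 4) / 3) ~= 1.414

# Reference value: square root of scikit-learn's MSE.
rmse_reference = np.sqrt(mean_squared_error(y_true=target, y_pred=pred))

assert np.isclose(rmse_manual, rmse_reference)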