diff --git a/README.md b/README.md
index 58c95cc8d09..beb3e14cbd4 100644
--- a/README.md
+++ b/README.md
@@ -241,7 +241,7 @@ def function_before_backprop(engine):
## Out-of-the-box metrics
- [Metrics](https://pytorch.org/ignite/metrics.html#complete-list-of-metrics) for various tasks:
- Precision, Recall, Accuracy, Confusion Matrix, IoU etc, ~20 [regression metrics](https://pytorch.org/ignite/contrib/metrics.html#regression-metrics).
+  Precision, Recall, Accuracy, Confusion Matrix, IoU, etc., ~20 [regression metrics](https://pytorch.org/ignite/metrics.html#complete-list-of-metrics).
- Users can also [compose their metrics](https://pytorch.org/ignite/metrics.html#metric-arithmetics) with ease from
existing ones using arithmetic operations or torch methods.
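
For orientation, a minimal sketch of the metric arithmetic mentioned above, assuming a toy evaluator `Engine` whose `process_function` simply forwards `(y_pred, y)` batches; the F1 composition follows the pattern described in the linked metrics guide.

```python
import torch
from ignite.engine import Engine
from ignite.metrics import Precision, Recall

# Toy evaluator: the process_function simply forwards (y_pred, y) batches.
evaluator = Engine(lambda engine, batch: batch)

precision = Precision(average=False)
recall = Recall(average=False)
# Arithmetic on metrics builds a new lambda metric; .mean() averages per-class F1.
f1 = (precision * recall * 2 / (precision + recall)).mean()
f1.attach(evaluator, "f1")

y_pred = torch.tensor([[0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
y_true = torch.tensor([0, 1, 1])
state = evaluator.run([(y_pred, y_true)])
print(state.metrics["f1"])
```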
@@ -315,13 +315,15 @@ List of available pre-built images
Base
-- `pytorchignite/base:latest`
+
+- `pytorchignite/base:latest`
- `pytorchignite/apex:latest`
- `pytorchignite/hvd-base:latest`
-- `pytorchignite/hvd-apex:latest`
+- `pytorchignite/hvd-apex:latest`
- `pytorchignite/msdp-apex:latest`
Vision:
+
- `pytorchignite/vision:latest`
- `pytorchignite/hvd-vision:latest`
- `pytorchignite/apex-vision:latest`
@@ -329,10 +331,11 @@ Vision:
- `pytorchignite/msdp-apex-vision:latest`
NLP:
+
- `pytorchignite/nlp:latest`
- `pytorchignite/hvd-nlp:latest`
-- `pytorchignite/apex-nlp:latest`
-- `pytorchignite/hvd-apex-nlp:latest`
+- `pytorchignite/apex-nlp:latest`
+- `pytorchignite/hvd-apex-nlp:latest`
- `pytorchignite/msdp-apex-nlp:latest`
@@ -416,8 +419,8 @@ Features:
## Code-Generator application
The easiest way to create your training scripts with PyTorch-Ignite:
-- https://code-generator.pytorch-ignite.ai/
+- https://code-generator.pytorch-ignite.ai/
@@ -502,7 +505,7 @@ Blog articles, tutorials, books
- [The Hero Rises: Build Your Own SSD](https://allegro.ai/blog/the-hero-rises-build-your-own-ssd/)
- [Using Optuna to Optimize PyTorch Ignite Hyperparameters](https://medium.com/pytorch/using-optuna-to-optimize-pytorch-ignite-hyperparameters-626ffe6d4783)
- [PyTorch Ignite - Classifying Tiny ImageNet with EfficientNet](https://towardsdatascience.com/pytorch-ignite-classifying-tiny-imagenet-with-efficientnet-e5b1768e5e8f)
-
+
@@ -516,7 +519,7 @@ Toolkits
- [Nussl - a flexible, object-oriented Python audio source separation library](https://github.com/nussl/nussl)
- [PyTorch Adapt - A fully featured and modular domain adaptation library](https://github.com/KevinMusgrave/pytorch-adapt)
- [gnina-torch: PyTorch implementation of GNINA scoring function](https://github.com/RMeli/gnina-torch)
-
+
diff --git a/docs/source/contrib/handlers.rst b/docs/source/contrib/handlers.rst
index e9d125e50b8..214f23a843b 100644
--- a/docs/source/contrib/handlers.rst
+++ b/docs/source/contrib/handlers.rst
@@ -28,5 +28,5 @@ Time profilers [deprecated]
Loggers [deprecated]
--------------------
-.. deprecated:: 0.4.14
+.. deprecated:: 0.5.1
Loggers moved to :ref:`Loggers`.
diff --git a/docs/source/contrib/metrics.rst b/docs/source/contrib/metrics.rst
index eccaf9e7808..fdaa0432f5c 100644
--- a/docs/source/contrib/metrics.rst
+++ b/docs/source/contrib/metrics.rst
@@ -1,56 +1,15 @@
ignite.contrib.metrics
-======================
+=======================
-Contrib module metrics
-----------------------
+Contrib module metrics [deprecated]
+-----------------------------------
-.. currentmodule:: ignite.contrib.metrics
+.. deprecated:: 0.5.1
+ All metrics moved to :ref:`Complete list of metrics`.
-.. autosummary::
- :nosignatures:
- :toctree: ../generated
- AveragePrecision
- CohenKappa
- GpuInfo
- PrecisionRecallCurve
- ROC_AUC
- RocCurve
+Regression metrics [deprecated]
+--------------------------------
-Regression metrics
-------------------
-
-.. currentmodule:: ignite.contrib.metrics.regression
-
-.. automodule:: ignite.contrib.metrics.regression
-
-
-Module :mod:`ignite.contrib.metrics.regression` provides implementations of
-metrics useful for regression tasks. Definitions of metrics are based on `Botchkarev 2018`_, page 30 "Appendix 2. Metrics mathematical definitions".
-
-.. _`Botchkarev 2018`:
- https://arxiv.org/abs/1809.03006
-
-Complete list of metrics:
-
-.. currentmodule:: ignite.contrib.metrics.regression
-
-.. autosummary::
- :nosignatures:
- :toctree: ../generated
-
- CanberraMetric
- FractionalAbsoluteError
- FractionalBias
- GeometricMeanAbsoluteError
- GeometricMeanRelativeAbsoluteError
- ManhattanDistance
- MaximumAbsoluteError
- MeanAbsoluteRelativeError
- MeanError
- MeanNormalizedBias
- MedianAbsoluteError
- MedianAbsolutePercentageError
- MedianRelativeAbsoluteError
- R2Score
- WaveHedgesDistance
+.. deprecated:: 0.5.1
+ All metrics moved to :ref:`Complete list of metrics`.
diff --git a/docs/source/defaults.rst b/docs/source/defaults.rst
index e47633d8dcd..0a8409e9127 100644
--- a/docs/source/defaults.rst
+++ b/docs/source/defaults.rst
@@ -12,9 +12,8 @@
from ignite.engine import *
from ignite.handlers import *
from ignite.metrics import *
+ from ignite.metrics.regression import *
from ignite.utils import *
- from ignite.contrib.metrics.regression import *
- from ignite.contrib.metrics import *
# create default evaluator for doctests
@@ -46,4 +45,4 @@
('fc', nn.Linear(2, 1))
]))
- manual_seed(666)
\ No newline at end of file
+ manual_seed(666)
diff --git a/docs/source/metrics.rst b/docs/source/metrics.rst
index 0ed0290bd2b..ee660e99d20 100644
--- a/docs/source/metrics.rst
+++ b/docs/source/metrics.rst
@@ -352,6 +352,35 @@ Complete list of metrics
FID
CosineSimilarity
Entropy
+ AveragePrecision
+ CohenKappa
+ GpuInfo
+ PrecisionRecallCurve
+ RocCurve
+ ROC_AUC
+ regression.CanberraMetric
+ regression.FractionalAbsoluteError
+ regression.FractionalBias
+ regression.GeometricMeanAbsoluteError
+ regression.GeometricMeanRelativeAbsoluteError
+ regression.ManhattanDistance
+ regression.MaximumAbsoluteError
+ regression.MeanAbsoluteRelativeError
+ regression.MeanError
+ regression.MeanNormalizedBias
+ regression.MedianAbsoluteError
+ regression.MedianAbsolutePercentageError
+ regression.MedianRelativeAbsoluteError
+ regression.R2Score
+ regression.WaveHedgesDistance
+
+
+.. note::
+
+   Module :mod:`ignite.metrics.regression` provides implementations of metrics useful
+   for regression tasks. Definitions of metrics are based on
+   `Botchkarev 2018`_, page 30 "Appendix 2. Metrics mathematical definitions".
+
Helpers for customizing metrics
-------------------------------
@@ -393,3 +422,6 @@ reinit__is_reduced
sync_all_reduce
~~~~~~~~~~~~~~~
.. autofunction:: sync_all_reduce
+
+.. _`Botchkarev 2018`:
+ https://arxiv.org/abs/1809.03006
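
A hedged usage sketch for the relocated regression metrics documented in the note above; the `default_evaluator` here is a stand-in for the doctest evaluator configured in `defaults.rst`, and the tensors are illustrative only.

```python
import torch
from ignite.engine import Engine
from ignite.metrics.regression import R2Score  # new import path after this change

# Stand-in for the doctest default_evaluator: forwards (y_pred, y) batches.
default_evaluator = Engine(lambda engine, batch: batch)

metric = R2Score()
metric.attach(default_evaluator, "r2")

y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
y_true = y_pred * 1.2
state = default_evaluator.run([(y_pred, y_true)])
print(state.metrics["r2"])
```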
diff --git a/examples/mnist/mnist_with_tensorboard_logger.py b/examples/mnist/mnist_with_tensorboard_logger.py
index eaea1a532c9..b7c12631fe2 100644
--- a/examples/mnist/mnist_with_tensorboard_logger.py
+++ b/examples/mnist/mnist_with_tensorboard_logger.py
@@ -91,7 +91,7 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_dir):
trainer.logger = setup_logger("Trainer")
if sys.version_info > (3,):
- from ignite.contrib.metrics.gpu_info import GpuInfo
+ from ignite.metrics.gpu_info import GpuInfo
try:
GpuInfo().attach(trainer)
diff --git a/examples/mnist/mnist_with_tqdm_logger.py b/examples/mnist/mnist_with_tqdm_logger.py
index 980d7029f9f..c0ca7934ec7 100644
--- a/examples/mnist/mnist_with_tqdm_logger.py
+++ b/examples/mnist/mnist_with_tqdm_logger.py
@@ -64,7 +64,7 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, display_gpu_info
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
if display_gpu_info:
- from ignite.contrib.metrics import GpuInfo
+ from ignite.metrics import GpuInfo
GpuInfo().attach(trainer, name="gpu")
diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 0bc52a0ea02..09f769a18d0 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -15,7 +15,6 @@
from torch.optim.lr_scheduler import _LRScheduler as PyTorchLRScheduler
import ignite.distributed as idist
-from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, Events
from ignite.handlers import (
Checkpoint,
@@ -35,7 +34,7 @@
from ignite.handlers.base_logger import BaseLogger
from ignite.handlers.checkpoint import BaseSaveHandler
from ignite.handlers.param_scheduler import ParamScheduler
-from ignite.metrics import RunningAverage
+from ignite.metrics import GpuInfo, RunningAverage
from ignite.metrics.metric import RunningBatchWise
from ignite.utils import deprecated
@@ -78,14 +77,14 @@ def setup_common_training_handlers(
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
- with_gpu_stats: if True, :class:`~ignite.contrib.metrics.GpuInfo` is attached to the
+ with_gpu_stats: if True, :class:`~ignite.metrics.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
- log_every_iters: logging interval for :class:`~ignite.contrib.metrics.GpuInfo` and for
+ log_every_iters: logging interval for :class:`~ignite.metrics.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.terminate_on_nan.TerminateOnNan` handler is added to the trainer.
Default, True.
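
A minimal sketch of the `with_gpu_stats` option described in this docstring; the trainer below is a stand-in `Engine`, and attaching GPU stats assumes `pynvml` and at least one visible CUDA device.

```python
from ignite.contrib.engines import common
from ignite.engine import Engine

# Stand-in trainer; a real update function would perform the optimization step.
trainer = Engine(lambda engine, batch: {"batch loss": 0.0})

common.setup_common_training_handlers(
    trainer,
    with_gpu_stats=True,   # attaches ignite.metrics.GpuInfo (needs pynvml + GPU)
    log_every_iters=100,   # interval for GpuInfo and the epoch-wise progress bar
)
```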
diff --git a/ignite/contrib/metrics/__init__.py b/ignite/contrib/metrics/__init__.py
index a9163ea6a73..d89ce1a4643 100644
--- a/ignite/contrib/metrics/__init__.py
+++ b/ignite/contrib/metrics/__init__.py
@@ -1,6 +1,7 @@
-import ignite.contrib.metrics.regression
-from ignite.contrib.metrics.average_precision import AveragePrecision
-from ignite.contrib.metrics.cohen_kappa import CohenKappa
-from ignite.contrib.metrics.gpu_info import GpuInfo
-from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
-from ignite.contrib.metrics.roc_auc import ROC_AUC, RocCurve
+import ignite.metrics.regression
+from ignite.metrics import average_precision, cohen_kappa, gpu_info, precision_recall_curve, roc_auc
+from ignite.metrics.average_precision import AveragePrecision
+from ignite.metrics.cohen_kappa import CohenKappa
+from ignite.metrics.gpu_info import GpuInfo
+from ignite.metrics.precision_recall_curve import PrecisionRecallCurve
+from ignite.metrics.roc_auc import ROC_AUC, RocCurve
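
A hedged sketch of the backward compatibility these shims are meant to preserve: the old `ignite.contrib.metrics` paths still resolve to the relocated classes, and importing an old submodule emits a `DeprecationWarning` (run in a fresh interpreter so the shim module is imported for the first time).

```python
import warnings

# New, preferred import path:
from ignite.metrics import AveragePrecision

# Old submodule path still works via the shim above, but warns on first import.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from ignite.contrib.metrics.average_precision import AveragePrecision as OldAP

assert OldAP is AveragePrecision
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```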
diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py
index 5aae0848ddf..41c02def34b 100644
--- a/ignite/contrib/metrics/average_precision.py
+++ b/ignite/contrib/metrics/average_precision.py
@@ -1,81 +1,22 @@
-from typing import Callable, Union
-
-import torch
-
-from ignite.metrics import EpochMetric
-
-
-def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
- from sklearn.metrics import average_precision_score
-
- y_true = y_targets.cpu().numpy()
- y_pred = y_preds.cpu().numpy()
- return average_precision_score(y_true, y_pred)
-
-
-class AveragePrecision(EpochMetric):
- """Computes Average Precision accumulating predictions and the ground-truth during an epoch
- and applying `sklearn.metrics.average_precision_score `_ .
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- check_compute_fn: Default False. If True, `average_precision_score
- `_ is run on the first batch of data to ensure there are
- no issues. User will be warned in case there are any issues computing the function.
- device: optional device specification for internal storage.
-
- Note:
- AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
- confidence values. To apply an activation to y_pred, use output_transform as shown below:
-
- .. code-block:: python
-
- def activated_output_transform(output):
- y_pred, y = output
- y_pred = torch.softmax(y_pred, dim=1)
- return y_pred, y
- avg_precision = AveragePrecision(activated_output_transform)
-
- Examples:
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
- y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])
-
- avg_precision = AveragePrecision()
- avg_precision.attach(default_evaluator, 'average_precision')
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['average_precision'])
-
- .. testoutput::
-
- 0.9166...
-
- """
-
- def __init__(
- self,
- output_transform: Callable = lambda x: x,
- check_compute_fn: bool = False,
- device: Union[str, torch.device] = torch.device("cpu"),
- ):
- try:
- from sklearn.metrics import average_precision_score # noqa: F401
- except ImportError:
- raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
-
- super(AveragePrecision, self).__init__(
- average_precision_compute_fn,
- output_transform=output_transform,
- check_compute_fn=check_compute_fn,
- device=device,
- )
+""" ``ignite.contrib.metrics.average_precision`` was moved to ``ignite.metrics.average_precision``.
+Note:
+ ``ignite.contrib.metrics.average_precision`` was moved to ``ignite.metrics.average_precision``.
+ Please refer to :mod:`~ignite.metrics.average_precision`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/average_precision.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.average_precision import AveragePrecision
+
+__all__ = [
+ "AveragePrecision",
+]
+
+AveragePrecision = AveragePrecision
diff --git a/ignite/contrib/metrics/cohen_kappa.py b/ignite/contrib/metrics/cohen_kappa.py
index 942a394fb7e..0bccae50f77 100644
--- a/ignite/contrib/metrics/cohen_kappa.py
+++ b/ignite/contrib/metrics/cohen_kappa.py
@@ -1,86 +1,22 @@
-from typing import Callable, Optional, Union
-
-import torch
-
-from ignite.metrics import EpochMetric
-
-
-class CohenKappa(EpochMetric):
- """Compute different types of Cohen's Kappa: Non-Wieghted, Linear, Quadratic.
- Accumulating predictions and the ground-truth during an epoch and applying
- `sklearn.metrics.cohen_kappa_score `_ .
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- weights: a string is used to define the type of Cohen's Kappa whether Non-Weighted or Linear
- or Quadratic. Default, None.
- check_compute_fn: Default False. If True, `cohen_kappa_score
- `_
- is run on the first batch of data to ensure there are
- no issues. User will be warned in case there are any issues computing the function.
- device: optional device specification for internal storage.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in the format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_tranform`` can be added
- to the metric to transform the output into the form expected by the metric.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = CohenKappa()
- metric.attach(default_evaluator, 'ck')
- y_true = torch.tensor([2, 0, 2, 2, 0, 1])
- y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['ck'])
-
- .. testoutput::
-
- 0.4285...
-
- """
-
- def __init__(
- self,
- output_transform: Callable = lambda x: x,
- weights: Optional[str] = None,
- check_compute_fn: bool = False,
- device: Union[str, torch.device] = torch.device("cpu"),
- ):
- try:
- from sklearn.metrics import cohen_kappa_score # noqa: F401
- except ImportError:
- raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
- if weights not in (None, "linear", "quadratic"):
- raise ValueError("Kappa Weighting type must be None or linear or quadratic.")
-
- # initalize weights
- self.weights = weights
-
- self.cohen_kappa_compute = self.get_cohen_kappa_fn()
-
- super(CohenKappa, self).__init__(
- self.cohen_kappa_compute,
- output_transform=output_transform,
- check_compute_fn=check_compute_fn,
- device=device,
- )
-
- def get_cohen_kappa_fn(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
- """Return a function computing Cohen Kappa from scikit-learn."""
- from sklearn.metrics import cohen_kappa_score
-
- def wrapper(y_targets: torch.Tensor, y_preds: torch.Tensor) -> float:
- y_true = y_targets.cpu().numpy()
- y_pred = y_preds.cpu().numpy()
- return cohen_kappa_score(y_true, y_pred, weights=self.weights)
-
- return wrapper
+""" ``ignite.contrib.metrics.cohen_kappa`` was moved to ``ignite.metrics.cohen_kappa``.
+Note:
+ ``ignite.contrib.metrics.cohen_kappa`` was moved to ``ignite.metrics.cohen_kappa``.
+ Please refer to :mod:`~ignite.metrics.cohen_kappa`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+ f"{__file__} has been moved to ignite/metrics/cohen_kappa.py"
+ + (f" and will be removed in version {removed_in}" if removed_in else "")
+ + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.cohen_kappa import CohenKappa
+
+__all__ = [
+ "CohenKappa",
+]
+
+CohenKappa = CohenKappa
diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py
index 64d467f58fe..71c07093a9a 100644
--- a/ignite/contrib/metrics/gpu_info.py
+++ b/ignite/contrib/metrics/gpu_info.py
@@ -1,106 +1,22 @@
-# -*- coding: utf-8 -*-
-import warnings
-from typing import Any, Dict, List, Tuple, Union
-
-import torch
-
-from ignite.engine import Engine, EventEnum, Events
-from ignite.metrics import Metric
-
-
-class GpuInfo(Metric):
- """Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
- on each iterations.
-
- .. Note ::
-
- In case if gpu utilization reports "N/A" on a given GPU, corresponding metric value is not set.
-
- Examples:
- .. code-block:: python
-
- # Default GPU measurements
- GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
-
- # Logging with TQDM
- ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
- # Progress bar will looks like
- # Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
+""" ``ignite.contrib.metrics.gpu_info`` was moved to ``ignite.metrics.gpu_info``.
+Note:
+ ``ignite.contrib.metrics.gpu_info`` was moved to ``ignite.metrics.gpu_info``.
+ Please refer to :mod:`~ignite.metrics.gpu_info`.
+"""
- # Logging with Tensorboard
- tb_logger.attach(trainer,
- log_handler=OutputHandler(tag="training", metric_names='all'),
- event_name=Events.ITERATION_COMPLETED)
- """
-
- def __init__(self) -> None:
- try:
- from pynvml.smi import nvidia_smi
- except ImportError:
- raise ModuleNotFoundError(
- "This contrib module requires pynvml to be installed. "
- "Please install it with command: \n pip install pynvml"
- )
- # Let's check available devices
- if not torch.cuda.is_available():
- raise RuntimeError("This contrib module requires available GPU")
-
- # Let it fail if no libnvidia drivers or NMVL library found
- self.nvsmi = nvidia_smi.getInstance()
- super(GpuInfo, self).__init__()
-
- def reset(self) -> None:
- pass
-
- def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- pass
-
- def compute(self) -> List[Dict[str, Any]]:
- data: Dict[str, List[Dict[str, Any]]] = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
- if len(data) == 0 or ("gpu" not in data):
- warnings.warn("No GPU information available")
- return []
- return data["gpu"]
-
- def completed(self, engine: Engine, name: str) -> None:
- data = self.compute()
- if len(data) < 1:
- warnings.warn("No GPU information available")
- return
-
- for i, data_by_rank in enumerate(data):
- mem_name = f"{name}:{i} mem(%)"
-
- if "fb_memory_usage" not in data_by_rank:
- warnings.warn(f"No GPU memory usage information available in {data_by_rank}")
- continue
- mem_report = data_by_rank["fb_memory_usage"]
- if not ("used" in mem_report and "total" in mem_report):
- warnings.warn(
- "GPU memory usage information does not provide used/total "
- f"memory consumption information in {mem_report}"
- )
- continue
+import warnings
- engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
+removed_in = "0.6.0"
+deprecation_warning = (
+ f"{__file__} has been moved to ignite/metrics/gpu_info.py"
+ + (f" and will be removed in version {removed_in}" if removed_in else "")
+ + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.gpu_info import GpuInfo
- for i, data_by_rank in enumerate(data):
- util_name = f"{name}:{i} util(%)"
- if "utilization" not in data_by_rank:
- warnings.warn(f"No GPU utilization information available in {data_by_rank}")
- continue
- util_report = data_by_rank["utilization"]
- if not ("gpu_util" in util_report):
- warnings.warn(f"GPU utilization information does not provide 'gpu_util' information in {util_report}")
- continue
- try:
- engine.state.metrics[util_name] = int(util_report["gpu_util"])
- except ValueError:
- # Do not set GPU utilization information
- pass
+__all__ = [
+ "GpuInfo",
+]
- # TODO: see issue https://github.com/pytorch/ignite/issues/1405
- def attach( # type: ignore
- self, engine: Engine, name: str = "gpu", event_name: Union[str, EventEnum] = Events.ITERATION_COMPLETED
- ) -> None:
- engine.add_event_handler(event_name, self.completed, name)
+GpuInfo = GpuInfo
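
For reference, a short sketch of the relocated `GpuInfo`, reconstructed from the docstring removed above; the trainer is a stand-in `Engine`, and the metric assumes `pynvml` and a visible GPU.

```python
from ignite.engine import Engine
from ignite.metrics import GpuInfo  # new import path

trainer = Engine(lambda engine, batch: batch)  # stand-in trainer

# Adds metrics such as 'gpu:0 mem(%)' and 'gpu:0 util(%)' on each iteration
# (requires pynvml and at least one visible CUDA device).
GpuInfo().attach(trainer, name="gpu")
```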
diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
index 5021315904b..9ea7d4a56a3 100644
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,120 +1,25 @@
-from typing import Any, Callable, cast, Tuple, Union
-
-import torch
-
-import ignite.distributed as idist
-from ignite.exceptions import NotComputableError
-from ignite.metrics import EpochMetric
-
-
-def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
- try:
- from sklearn.metrics import precision_recall_curve
- except ImportError:
- raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
-
- y_true = y_targets.cpu().numpy()
- y_pred = y_preds.cpu().numpy()
- return precision_recall_curve(y_true, y_pred)
-
-
-class PrecisionRecallCurve(EpochMetric):
- """Compute precision-recall pairs for different probability thresholds for binary classification task
- by accumulating predictions and the ground-truth during an epoch and applying
- `sklearn.metrics.precision_recall_curve `_ .
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- check_compute_fn: Default False. If True, `precision_recall_curve
- `_ is run on the first batch of data to ensure there are
- no issues. User will be warned in case there are any issues computing the function.
-
- Note:
- PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
- or confidence values. To apply an activation to y_pred, use output_transform as shown below:
-
- .. code-block:: python
-
- def sigmoid_output_transform(output):
- y_pred, y = output
- y_pred = torch.sigmoid(y_pred)
- return y_pred, y
- avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
-
- Examples:
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
- y_true = torch.tensor([0, 0, 1, 1])
- prec_recall_curve = PrecisionRecallCurve()
- prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
- state = default_evaluator.run([[y_pred, y_true]])
-
- print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
- print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
- print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
-
- .. testoutput::
-
- Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
- Recall [1.0, 1.0, 1.0, 0.5, 0.0]
- Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
-
- """
-
- def __init__(
- self,
- output_transform: Callable = lambda x: x,
- check_compute_fn: bool = False,
- device: Union[str, torch.device] = torch.device("cpu"),
- ) -> None:
- super(PrecisionRecallCurve, self).__init__(
- precision_recall_curve_compute_fn, # type: ignore[arg-type]
- output_transform=output_transform,
- check_compute_fn=check_compute_fn,
- device=device,
- )
-
- def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
- if len(self._predictions) < 1 or len(self._targets) < 1:
- raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
-
- if self._result is None:
- _prediction_tensor = torch.cat(self._predictions, dim=0)
- _target_tensor = torch.cat(self._targets, dim=0)
-
- ws = idist.get_world_size()
- if ws > 1:
- # All gather across all processes
- _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
- _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
-
- if idist.get_rank() == 0:
- # Run compute_fn on zero rank only
- precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
- precision = torch.tensor(precision, device=_prediction_tensor.device)
- recall = torch.tensor(recall, device=_prediction_tensor.device)
- # thresholds can have negative strides, not compatible with torch tensors
- # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
- thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
- else:
- precision, recall, thresholds = None, None, None
-
- if ws > 1:
- # broadcast result to all processes
- precision = idist.broadcast(precision, src=0, safe_mode=True)
- recall = idist.broadcast(recall, src=0, safe_mode=True)
- thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
-
- self._result = (precision, recall, thresholds) # type: ignore[assignment]
-
- return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
+""" ``ignite.contrib.metrics.precision_recall_curve`` was moved to ``ignite.metrics.precision_recall_curve``.
+Note:
+ ``ignite.contrib.metrics.precision_recall_curve`` was moved to ``ignite.metrics.precision_recall_curve``.
+ Please refer to :mod:`~ignite.metrics.precision_recall_curve`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+ f"{__file__} has been moved to ignite/metrics/precision_recall_curve.py"
+ + (f" and will be removed in version {removed_in}" if removed_in else "")
+ + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.precision_recall_curve import precision_recall_curve_compute_fn, PrecisionRecallCurve
+
+__all__ = [
+ "PrecisionRecallCurve",
+ "precision_recall_curve_compute_fn",
+]
+
+
+PrecisionRecallCurve = PrecisionRecallCurve
+precision_recall_curve_compute_fn = precision_recall_curve_compute_fn
diff --git a/ignite/contrib/metrics/regression/__init__.py b/ignite/contrib/metrics/regression/__init__.py
index fbee310e3b2..3f0c83214b4 100644
--- a/ignite/contrib/metrics/regression/__init__.py
+++ b/ignite/contrib/metrics/regression/__init__.py
@@ -1,15 +1,66 @@
-from ignite.contrib.metrics.regression.canberra_metric import CanberraMetric
-from ignite.contrib.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
-from ignite.contrib.metrics.regression.fractional_bias import FractionalBias
-from ignite.contrib.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
-from ignite.contrib.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
-from ignite.contrib.metrics.regression.manhattan_distance import ManhattanDistance
-from ignite.contrib.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
-from ignite.contrib.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
-from ignite.contrib.metrics.regression.mean_error import MeanError
-from ignite.contrib.metrics.regression.mean_normalized_bias import MeanNormalizedBias
-from ignite.contrib.metrics.regression.median_absolute_error import MedianAbsoluteError
-from ignite.contrib.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
-from ignite.contrib.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
-from ignite.contrib.metrics.regression.r2_score import R2Score
-from ignite.contrib.metrics.regression.wave_hedges_distance import WaveHedgesDistance
+from ignite.metrics.regression import (
+ canberra_metric,
+ fractional_absolute_error,
+ fractional_bias,
+ geometric_mean_absolute_error,
+ geometric_mean_relative_absolute_error,
+ manhattan_distance,
+ maximum_absolute_error,
+ mean_absolute_relative_error,
+ mean_error,
+ mean_normalized_bias,
+ median_absolute_error,
+ median_absolute_percentage_error,
+ median_relative_absolute_error,
+ r2_score,
+ wave_hedges_distance,
+)
+from ignite.metrics.regression.canberra_metric import CanberraMetric
+from ignite.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
+from ignite.metrics.regression.fractional_bias import FractionalBias
+from ignite.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
+from ignite.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
+from ignite.metrics.regression.manhattan_distance import ManhattanDistance
+from ignite.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
+from ignite.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
+from ignite.metrics.regression.mean_error import MeanError
+from ignite.metrics.regression.mean_normalized_bias import MeanNormalizedBias
+from ignite.metrics.regression.median_absolute_error import MedianAbsoluteError
+from ignite.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
+from ignite.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
+from ignite.metrics.regression.r2_score import R2Score
+from ignite.metrics.regression.wave_hedges_distance import WaveHedgesDistance
+
+
+__all__ = [
+ "CanberraMetric",
+ "FractionalAbsoluteError",
+ "FractionalBias",
+ "GeometricMeanAbsoluteError",
+ "GeometricMeanRelativeAbsoluteError",
+ "ManhattanDistance",
+ "MaximumAbsoluteError",
+ "MeanAbsoluteRelativeError",
+ "MeanError",
+ "MeanNormalizedBias",
+ "MedianAbsoluteError",
+ "MedianAbsolutePercentageError",
+ "MedianRelativeAbsoluteError",
+ "R2Score",
+ "WaveHedgesDistance",
+ "canberra_metric",
+ "fractional_absolute_error",
+ "fractional_bias",
+ "geometric_mean_absolute_error",
+ "geometric_mean_relative_absolute_error",
+ "manhattan_distance",
+ "maximum_absolute_error",
+ "mean_absolute_relative_error",
+ "mean_error",
+ "mean_normalized_bias",
+ "median_absolute_error",
+ "median_absolute_percentage_error",
+ "median_relative_absolute_error",
+ "r2_score",
+ "wave_hedges_distance",
+]
diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py
index 19e5ddf73ca..8fe5e8a5365 100644
--- a/ignite/contrib/metrics/regression/canberra_metric.py
+++ b/ignite/contrib/metrics/regression/canberra_metric.py
@@ -1,80 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class CanberraMetric(_BaseRegression):
- r"""Calculates the Canberra Metric.
-
- .. math::
- \text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}
-
- where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- .. _scikit-learn distance metrics:
- https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- .. _`Botchkarev 2018`:
- https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = CanberraMetric()
- metric.attach(default_evaluator, 'canberra')
- y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
- y_true = y_pred * 1.5
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['canberra'])
-
- .. testoutput::
-
- 0.8000...
-
- .. versionchanged:: 0.4.3
-
- - Fixed implementation: ``abs`` in denominator.
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors",)
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + 1e-15)
- self._sum_of_errors += torch.sum(errors).to(self._device)
-
- @sync_all_reduce("_sum_of_errors")
- def compute(self) -> float:
- return self._sum_of_errors.item()
+""" ``ignite.contrib.metrics.regression.canberra_metric`` was moved to ``ignite.metrics.regression.canberra_metric``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.canberra_metric`` was moved to ``ignite.metrics.regression.canberra_metric``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.canberra_metric`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/canberra_metric.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.canberra_metric import CanberraMetric
+
+__all__ = ["CanberraMetric"]
+
+CanberraMetric = CanberraMetric
diff --git a/ignite/contrib/metrics/regression/fractional_absolute_error.py b/ignite/contrib/metrics/regression/fractional_absolute_error.py
index 9a141341b49..0d373d0bcf9 100644
--- a/ignite/contrib/metrics/regression/fractional_absolute_error.py
+++ b/ignite/contrib/metrics/regression/fractional_absolute_error.py
@@ -1,81 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class FractionalAbsoluteError(_BaseRegression):
- r"""Calculates the Fractional Absolute Error.
-
- .. math::
- \text{FAE} = \frac{1}{n}\sum_{j=1}^n\frac{2 |A_j - P_j|}{|A_j| + |P_j|}
-
- where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = FractionalAbsoluteError()
- metric.attach(default_evaluator, 'fractional_abs_error')
- y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
- y_true = y_pred * 0.8
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['fractional_abs_error'])
-
- .. testoutput::
-
- 0.2222...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
- self._num_examples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))
- self._sum_of_errors += torch.sum(errors).to(self._device)
- self._num_examples += y.shape[0]
-
- @sync_all_reduce("_num_examples", "_sum_of_errors")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError(
- "FractionalAbsoluteError must have at least one example before it can be computed."
- )
- return self._sum_of_errors.item() / self._num_examples
+""" ``ignite.contrib.metrics.regression.fractional_absolute_error`` was moved to ``ignite.metrics.regression.fractional_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.fractional_absolute_error`` was moved to ``ignite.metrics.regression.fractional_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.fractional_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/fractional_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
+
+__all__ = ["FractionalAbsoluteError"]
+
+FractionalAbsoluteError = FractionalAbsoluteError
diff --git a/ignite/contrib/metrics/regression/fractional_bias.py b/ignite/contrib/metrics/regression/fractional_bias.py
index 75684519a75..f7bdbababa4 100644
--- a/ignite/contrib/metrics/regression/fractional_bias.py
+++ b/ignite/contrib/metrics/regression/fractional_bias.py
@@ -1,79 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class FractionalBias(_BaseRegression):
- r"""Calculates the Fractional Bias.
-
- .. math::
- \text{FB} = \frac{1}{n}\sum_{j=1}^n\frac{2 (A_j - P_j)}{A_j + P_j}
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = FractionalBias()
- metric.attach(default_evaluator, 'fractional_bias')
- y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
- y_true = y_pred * 1.5
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['fractional_bias'])
-
- .. testoutput::
-
- 0.4000...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
- self._num_examples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = 2 * (y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred) + 1e-30)
- self._sum_of_errors += torch.sum(errors).to(self._device)
- self._num_examples += y.shape[0]
-
- @sync_all_reduce("_sum_of_errors", "_num_examples")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError("FractionalBias must have at least one example before it can be computed.")
- return self._sum_of_errors.item() / self._num_examples
+""" ``ignite.contrib.metrics.regression.fractional_bias`` was moved to ``ignite.metrics.regression.fractional_bias``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.fractional_bias`` was moved to ``ignite.metrics.regression.fractional_bias``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.fractional_bias`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/fractional_bias.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.fractional_bias import FractionalBias
+
+__all__ = ["FractionalBias"]
+
+FractionalBias = FractionalBias
diff --git a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
index 79d8000fe17..5b6b396431c 100644
--- a/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
+++ b/ignite/contrib/metrics/regression/geometric_mean_absolute_error.py
@@ -1,81 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class GeometricMeanAbsoluteError(_BaseRegression):
- r"""Calculates the Geometric Mean Absolute Error.
-
- .. math::
- \text{GMAE} = \exp(\frac{1}{n}\sum_{j=1}^n\ln(|A_j - P_j|))
-
- where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = GeometricMeanAbsoluteError()
- metric.attach(default_evaluator, 'gmae')
- y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
- y_true = y_pred * 1.5
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['gmae'])
-
- .. testoutput::
-
- 2.2723...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
- self._num_examples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
- self._sum_of_errors += torch.sum(errors).to(self._device)
- self._num_examples += y.shape[0]
-
- @sync_all_reduce("_sum_of_errors", "_num_examples")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError(
- "GeometricMeanAbsoluteError must have at least one example before it can be computed."
- )
- return torch.exp((self._sum_of_errors) / self._num_examples).item()
+""" ``ignite.contrib.metrics.regression.geometric_mean_absolute_error`` was moved to ``ignite.metrics.regression.geometric_mean_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.geometric_mean_absolute_error`` was moved to ``ignite.metrics.regression.geometric_mean_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.geometric_mean_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/geometric_mean_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
+
+__all__ = ["GeometricMeanAbsoluteError"]
+
+GeometricMeanAbsoluteError = GeometricMeanAbsoluteError
diff --git a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
index 303e24e9d95..15e4a25d13c 100644
--- a/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
+++ b/ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py
@@ -1,108 +1,21 @@
-from typing import cast, List, Tuple
-
-import torch
-
-import ignite.distributed as idist
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced
-
-
-class GeometricMeanRelativeAbsoluteError(_BaseRegression):
- r"""Calculates the Geometric Mean Relative Absolute Error.
-
- .. math::
- \text{GMRAE} = \exp(\frac{1}{n}\sum_{j=1}^n \ln\frac{|A_j - P_j|}{|A_j - \bar{A}|})
-
- where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value
- and :math: `bar{A}` is the mean of the ground truth.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- .. warning::
-
- Current implementation of GMRAE stores all input data (output and target)
- as tensors before computing the metric.
- This can potentially lead to a memory error if the input data is larger than available RAM.
-
- In distributed configuration, all stored data (output and target) is mutually collected across all processes
- using all gather collective operation. This can potentially lead to a memory error.
-
- Compute method compute the metric on zero rank process only and final result is broadcasted to
- all processes.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = GeometricMeanRelativeAbsoluteError()
- metric.attach(default_evaluator, 'gmare')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['gmare'])
-
- .. testoutput::
-
- 0.0...
- """
-
- _state_dict_all_req_keys = ("_predictions", "_targets")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._predictions: List[torch.Tensor] = []
- self._targets: List[torch.Tensor] = []
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
-
- y_pred = y_pred.clone().to(self._device)
- y = y.clone().to(self._device)
-
- self._predictions.append(y_pred)
- self._targets.append(y)
-
- def compute(self) -> float:
- if len(self._predictions) < 1 or len(self._targets) < 1:
- raise NotComputableError(
- "GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed."
- )
-
- _prediction_tensor = torch.cat(self._predictions, dim=0)
- _target_tensor = torch.cat(self._targets, dim=0)
-
- # All gather across all processes
- _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
- _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
-
- result = torch.exp(
- torch.log(
- torch.abs(_target_tensor - _prediction_tensor) / torch.abs(_target_tensor - _target_tensor.mean())
- ).mean()
- ).item()
-
- return result
+""" ``ignite.contrib.metrics.regression.geometric_mean_relative_absolute_error`` was moved to ``ignite.metrics.regression.geometric_mean_relative_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.geometric_mean_relative_absolute_error`` was moved to ``ignite.metrics.regression.geometric_mean_relative_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.geometric_mean_relative_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/geometric_mean_relative_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
+
+__all__ = ["GeometricMeanRelativeAbsoluteError"]
+
+GeometricMeanRelativeAbsoluteError = GeometricMeanRelativeAbsoluteError
diff --git a/ignite/contrib/metrics/regression/manhattan_distance.py b/ignite/contrib/metrics/regression/manhattan_distance.py
index b9971cc03d2..48c90046ab4 100644
--- a/ignite/contrib/metrics/regression/manhattan_distance.py
+++ b/ignite/contrib/metrics/regression/manhattan_distance.py
@@ -1,76 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class ManhattanDistance(_BaseRegression):
- r"""Calculates the Manhattan Distance.
-
- .. math::
- \text{MD} = \sum_{j=1}^n |A_j - P_j|
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `scikit-learn distance metrics`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = ManhattanDistance()
- metric.attach(default_evaluator, 'manhattan')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['manhattan'])
-
- .. testoutput::
-
- 3.75...
-
- .. versionchanged:: 0.4.3
-
- - Fixed sklearn compatibility.
- - Workes with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors",)
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output
- errors = torch.abs(y - y_pred)
- self._sum_of_errors += torch.sum(errors).to(self._device)
-
- @sync_all_reduce("_sum_of_errors")
- def compute(self) -> float:
- return self._sum_of_errors.item()
+""" ``ignite.contrib.metrics.regression.manhattan_distance`` was moved to ``ignite.metrics.regression.manhattan_distance``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.manhattan_distance`` was moved to ``ignite.metrics.regression.manhattan_distance``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.manhattan_distance`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/manhattan_distance.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.manhattan_distance import ManhattanDistance
+
+__all__ = ["ManhattanDistance"]
+
+ManhattanDistance = ManhattanDistance
diff --git a/ignite/contrib/metrics/regression/maximum_absolute_error.py b/ignite/contrib/metrics/regression/maximum_absolute_error.py
index 318e5c37361..f391924bfd1 100644
--- a/ignite/contrib/metrics/regression/maximum_absolute_error.py
+++ b/ignite/contrib/metrics/regression/maximum_absolute_error.py
@@ -1,78 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class MaximumAbsoluteError(_BaseRegression):
- r"""Calculates the Maximum Absolute Error.
-
- .. math::
- \text{MaxAE} = \max_{j=1,n} \left( \lvert A_j-P_j \rvert \right)
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MaximumAbsoluteError()
- metric.attach(default_evaluator, 'mae')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mae'])
-
- .. testoutput::
-
- 1.25...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_max_of_absolute_errors",)
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._max_of_absolute_errors: float = -1
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- mae = torch.abs(y_pred - y.view_as(y_pred)).max().item()
- if self._max_of_absolute_errors < mae:
- self._max_of_absolute_errors = mae
-
- @sync_all_reduce("_max_of_absolute_errors:MAX")
- def compute(self) -> float:
- if self._max_of_absolute_errors < 0:
- raise NotComputableError("MaximumAbsoluteError must have at least one example before it can be computed.")
- return self._max_of_absolute_errors
+""" ``ignite.contrib.metrics.regression.maximum_absolute_error`` was moved to ``ignite.metrics.regression.maximum_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.maximum_absolute_error`` was moved to ``ignite.metrics.regression.maximum_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.maximum_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/maximum_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
+
+__all__ = ["MaximumAbsoluteError"]
+
+MaximumAbsoluteError = MaximumAbsoluteError
diff --git a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py
index 86542d9c5cd..964bb3c4eb1 100644
--- a/ignite/contrib/metrics/regression/mean_absolute_relative_error.py
+++ b/ignite/contrib/metrics/regression/mean_absolute_relative_error.py
@@ -1,83 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class MeanAbsoluteRelativeError(_BaseRegression):
- r"""Calculate Mean Absolute Relative Error (MARE), also known as Mean Absolute Percentage Error (MAPE).
-
- .. math::
- \text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|}
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in the reference `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MeanAbsoluteRelativeError()
- metric.attach(default_evaluator, 'mare')
- y_true = torch.tensor([1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mare'])
-
- .. testoutput::
-
- 0.25...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_absolute_relative_errors", "_num_samples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device)
- self._num_samples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- if (y == 0).any():
- raise NotComputableError("The ground truth has 0.")
- absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))
- self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)
- self._num_samples += y.size()[0]
-
- @sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples")
- def compute(self) -> float:
- if self._num_samples == 0:
- raise NotComputableError(
- "MeanAbsoluteRelativeError must have at least one sample before it can be computed."
- )
- return self._sum_of_absolute_relative_errors.item() / self._num_samples
+""" ``ignite.contrib.metrics.regression.mean_absolute_relative_error`` was moved to ``ignite.metrics.regression.mean_absolute_relative_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.mean_absolute_relative_error`` was moved to ``ignite.metrics.regression.mean_absolute_relative_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.mean_absolute_relative_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/mean_absolute_relative_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
+
+__all__ = ["MeanAbsoluteRelativeError"]
+
+MeanAbsoluteRelativeError = MeanAbsoluteRelativeError
diff --git a/ignite/contrib/metrics/regression/mean_error.py b/ignite/contrib/metrics/regression/mean_error.py
index 142133c7ec4..45b239fbf82 100644
--- a/ignite/contrib/metrics/regression/mean_error.py
+++ b/ignite/contrib/metrics/regression/mean_error.py
@@ -1,76 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class MeanError(_BaseRegression):
- r"""Calculates the Mean Error.
-
- .. math::
- \text{ME} = \frac{1}{n}\sum_{j=1}^n (A_j - P_j)
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in the reference `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MeanError()
- metric.attach(default_evaluator, 'me')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['me'])
-
- .. testoutput::
-
- 0.625...
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
- self._num_examples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = y.view_as(y_pred) - y_pred
- self._sum_of_errors += torch.sum(errors).item()
- self._num_examples += y.shape[0]
-
- @sync_all_reduce("_sum_of_errors", "_num_examples")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError("MeanError must have at least one example before it can be computed.")
- return self._sum_of_errors.item() / self._num_examples
+""" ``ignite.contrib.metrics.regression.mean_error`` was moved to ``ignite.metrics.regression.mean_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.mean_error`` was moved to ``ignite.metrics.regression.mean_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.mean_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/mean_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.mean_error import MeanError
+
+__all__ = ["MeanError"]
+
+MeanError = MeanError
diff --git a/ignite/contrib/metrics/regression/mean_normalized_bias.py b/ignite/contrib/metrics/regression/mean_normalized_bias.py
index 35bfd7f73b1..b44fa7043e1 100644
--- a/ignite/contrib/metrics/regression/mean_normalized_bias.py
+++ b/ignite/contrib/metrics/regression/mean_normalized_bias.py
@@ -1,83 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class MeanNormalizedBias(_BaseRegression):
- r"""Calculates the Mean Normalized Bias.
-
- .. math::
- \text{MNB} = \frac{1}{n}\sum_{j=1}^n\frac{A_j - P_j}{A_j}
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in the reference `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MeanNormalizedBias()
- metric.attach(default_evaluator, 'mnb')
- y_true = torch.tensor([1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mnb'])
-
- .. testoutput::
-
- 0.25...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
- self._num_examples = 0
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
-
- if (y == 0).any():
- raise NotComputableError("The ground truth has 0.")
-
- errors = (y.view_as(y_pred) - y_pred) / y
- self._sum_of_errors += torch.sum(errors).to(self._device)
- self._num_examples += y.shape[0]
-
- @sync_all_reduce("_sum_of_errors", "_num_examples")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError("MeanNormalizedBias must have at least one example before it can be computed.")
- return self._sum_of_errors.item() / self._num_examples
+""" ``ignite.contrib.metrics.regression.mean_normalized_bias`` was moved to ``ignite.metrics.regression.mean_normalized_bias``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.mean_normalized_bias`` was moved to ``ignite.metrics.regression.mean_normalized_bias``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.mean_normalized_bias`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/mean_normalized_bias.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.mean_normalized_bias import MeanNormalizedBias
+
+__all__ = ["MeanNormalizedBias"]
+
+MeanNormalizedBias = MeanNormalizedBias
diff --git a/ignite/contrib/metrics/regression/median_absolute_error.py b/ignite/contrib/metrics/regression/median_absolute_error.py
index d7f376a323b..7cabcdbf162 100644
--- a/ignite/contrib/metrics/regression/median_absolute_error.py
+++ b/ignite/contrib/metrics/regression/median_absolute_error.py
@@ -1,72 +1,21 @@
-from typing import Callable, Union
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _torch_median
-
-from ignite.metrics import EpochMetric
-
-
-def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
- e = torch.abs(y.view_as(y_pred) - y_pred)
- return _torch_median(e)
-
-
-class MedianAbsoluteError(EpochMetric):
- r"""Calculates the Median Absolute Error.
-
- .. math::
- \text{MdAE} = \text{MD}_{j=1,n} \left( |A_j - P_j| \right)
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
-
- .. warning::
-
- Current implementation stores all input data (output and target) in as tensors before computing a metric.
- This can potentially lead to a memory error if the input data is larger than available RAM.
-
-
- __ https://arxiv.org/abs/1809.03006
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: optional device specification for internal storage.
-
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MedianAbsoluteError()
- metric.attach(default_evaluator, 'mae')
- y_true = torch.tensor([0, 1, 2, 3, 4, 5])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mae'])
-
- .. testoutput::
-
- 0.625
- """
-
- def __init__(
- self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
- ):
- super(MedianAbsoluteError, self).__init__(
- median_absolute_error_compute_fn, output_transform=output_transform, device=device
- )
+""" ``ignite.contrib.metrics.regression.median_absolute_error`` was moved to ``ignite.metrics.regression.median_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.median_absolute_error`` was moved to ``ignite.metrics.regression.median_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.median_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/median_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.median_absolute_error import MedianAbsoluteError
+
+__all__ = ["MedianAbsoluteError"]
+
+MedianAbsoluteError = MedianAbsoluteError
diff --git a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py
index 0d602ba9c68..76ec688d111 100644
--- a/ignite/contrib/metrics/regression/median_absolute_percentage_error.py
+++ b/ignite/contrib/metrics/regression/median_absolute_percentage_error.py
@@ -1,70 +1,21 @@
-from typing import Callable, Union
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _torch_median
-
-from ignite.metrics import EpochMetric
-
-
-def median_absolute_percentage_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
- e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred))
- return 100.0 * _torch_median(e)
-
-
-class MedianAbsolutePercentageError(EpochMetric):
- r"""Calculates the Median Absolute Percentage Error.
-
- .. math::
- \text{MdAPE} = 100 \cdot \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j|} \right)
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
-
- .. warning::
-
- Current implementation stores all input data (output and target) in as tensors before computing a metric.
- This can potentially lead to a memory error if the input data is larger than available RAM.
-
- __ https://arxiv.org/abs/1809.03006
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: optional device specification for internal storage.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MedianAbsolutePercentageError()
- metric.attach(default_evaluator, 'mape')
- y_true = torch.tensor([1, 2, 3, 4, 5])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mape'])
-
- .. testoutput::
-
- 25.0...
- """
-
- def __init__(
- self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
- ):
- super(MedianAbsolutePercentageError, self).__init__(
- median_absolute_percentage_error_compute_fn, output_transform=output_transform, device=device
- )
+""" ``ignite.contrib.metrics.regression.median_absolute_percentage_error`` was moved to ``ignite.metrics.regression.median_absolute_percentage_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.median_absolute_percentage_error`` was moved to ``ignite.metrics.regression.median_absolute_percentage_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.median_absolute_percentage_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/median_absolute_percentage_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
+
+__all__ = ["MedianAbsolutePercentageError"]
+
+MedianAbsolutePercentageError = MedianAbsolutePercentageError
diff --git a/ignite/contrib/metrics/regression/median_relative_absolute_error.py b/ignite/contrib/metrics/regression/median_relative_absolute_error.py
index 13bf5d23819..80f53b7d776 100644
--- a/ignite/contrib/metrics/regression/median_relative_absolute_error.py
+++ b/ignite/contrib/metrics/regression/median_relative_absolute_error.py
@@ -1,70 +1,21 @@
-from typing import Callable, Union
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _torch_median
-
-from ignite.metrics import EpochMetric
-
-
-def median_relative_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
- e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred) - torch.mean(y))
- return _torch_median(e)
-
-
-class MedianRelativeAbsoluteError(EpochMetric):
- r"""Calculates the Median Relative Absolute Error.
-
- .. math::
- \text{MdRAE} = \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j - \bar{A}|} \right)
-
- where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
-
- .. warning::
-
- Current implementation stores all input data (output and target) in as tensors before computing a metric.
- This can potentially lead to a memory error if the input data is larger than available RAM.
-
- __ https://arxiv.org/abs/1809.03006
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: optional device specification for internal storage.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = MedianRelativeAbsoluteError()
- metric.attach(default_evaluator, 'mrae')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['mrae'])
-
- .. testoutput::
-
- 0.5...
- """
-
- def __init__(
- self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
- ):
- super(MedianRelativeAbsoluteError, self).__init__(
- median_relative_absolute_error_compute_fn, output_transform=output_transform, device=device
- )
+""" ``ignite.contrib.metrics.regression.median_relative_absolute_error`` was moved to ``ignite.metrics.regression.median_relative_absolute_error``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.median_relative_absolute_error`` was moved to ``ignite.metrics.regression.median_relative_absolute_error``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.median_relative_absolute_error`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/median_relative_absolute_error.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
+
+__all__ = ["MedianRelativeAbsoluteError"]
+
+MedianRelativeAbsoluteError = MedianRelativeAbsoluteError
diff --git a/ignite/contrib/metrics/regression/r2_score.py b/ignite/contrib/metrics/regression/r2_score.py
index f2baf27e801..565cae0d937 100644
--- a/ignite/contrib/metrics/regression/r2_score.py
+++ b/ignite/contrib/metrics/regression/r2_score.py
@@ -1,81 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.exceptions import NotComputableError
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class R2Score(_BaseRegression):
- r"""Calculates the R-Squared, the
- `coefficient of determination `_.
-
- .. math::
- R^2 = 1 - \frac{\sum_{j=1}^n(A_j - P_j)^2}{\sum_{j=1}^n(A_j - \bar{A})^2}
-
- where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and
- :math:`\bar{A}` is the mean of the ground truth.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = R2Score()
- metric.attach(default_evaluator, 'r2')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['r2'])
-
- .. testoutput::
-
- 0.8035...
-
- .. versionchanged:: 0.4.3
- Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._num_examples = 0
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
- self._y_sq_sum = torch.tensor(0.0, device=self._device)
- self._y_sum = torch.tensor(0.0, device=self._device)
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output
- self._num_examples += y.shape[0]
- self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
-
- self._y_sum += torch.sum(y).to(self._device)
- self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
-
- @sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
- def compute(self) -> float:
- if self._num_examples == 0:
- raise NotComputableError("R2Score must have at least one example before it can be computed.")
- return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)
+""" ``ignite.contrib.metrics.regression.r2_score`` was moved to ``ignite.metrics.regression.r2_score``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.r2_score`` was moved to ``ignite.metrics.regression.r2_score``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.r2_score`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/r2_score.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.r2_score import R2Score
+
+__all__ = ["R2Score"]
+
+R2Score = R2Score
diff --git a/ignite/contrib/metrics/regression/wave_hedges_distance.py b/ignite/contrib/metrics/regression/wave_hedges_distance.py
index f084ac9415b..9972f31d1b4 100644
--- a/ignite/contrib/metrics/regression/wave_hedges_distance.py
+++ b/ignite/contrib/metrics/regression/wave_hedges_distance.py
@@ -1,74 +1,21 @@
-from typing import Tuple
-
-import torch
-
-from ignite.contrib.metrics.regression._base import _BaseRegression
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
-
-
-class WaveHedgesDistance(_BaseRegression):
- r"""Calculates the Wave Hedges Distance.
-
- .. math::
- \text{WHD} = \sum_{j=1}^n\frac{|A_j - P_j|}{max(A_j, P_j)}
-
- where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
-
- More details can be found in `Botchkarev 2018`__.
-
- - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
-
- __ https://arxiv.org/abs/1809.03006
-
- Parameters are inherited from ``Metric.__init__``.
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- device: specifies which device updates are accumulated on. Setting the
- metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
- non-blocking. By default, CPU.
-
- Examples:
- To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
- The output of the engine's ``process_function`` needs to be in format of
- ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- metric = WaveHedgesDistance()
- metric.attach(default_evaluator, 'whd')
- y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
- y_pred = y_true * 0.75
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['whd'])
-
- .. testoutput::
-
- 1.25...
-
- .. versionchanged:: 0.4.5
- - Works with DDP.
- """
-
- _state_dict_all_req_keys = ("_sum_of_errors",)
-
- @reinit__is_reduced
- def reset(self) -> None:
- self._sum_of_errors = torch.tensor(0.0, device=self._device)
-
- def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
- y_pred, y = output[0].detach(), output[1].detach()
- errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)
- self._sum_of_errors += torch.sum(errors).to(self._device)
-
- @sync_all_reduce("_sum_of_errors")
- def compute(self) -> float:
- return self._sum_of_errors.item()
+""" ``ignite.contrib.metrics.regression.wave_hedges_distance`` was moved to ``ignite.metrics.regression.wave_hedges_distance``. # noqa
+Note:
+ ``ignite.contrib.metrics.regression.wave_hedges_distance`` was moved to ``ignite.metrics.regression.wave_hedges_distance``. # noqa
+ Please refer to :mod:`~ignite.metrics.regression.wave_hedges_distance`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+    f"{__file__} has been moved to ignite/metrics/regression/wave_hedges_distance.py"
+    + (f" and will be removed in version {removed_in}" if removed_in else "")
+    + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.regression.wave_hedges_distance import WaveHedgesDistance
+
+__all__ = ["WaveHedgesDistance"]
+
+WaveHedgesDistance = WaveHedgesDistance
diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py
index 381e2715861..fd9ced979d4 100644
--- a/ignite/contrib/metrics/roc_auc.py
+++ b/ignite/contrib/metrics/roc_auc.py
@@ -1,194 +1,21 @@
-from typing import Any, Callable, cast, Tuple, Union
-
-import torch
-
-from ignite import distributed as idist
-from ignite.exceptions import NotComputableError
-from ignite.metrics import EpochMetric
-
-
-def roc_auc_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
- from sklearn.metrics import roc_auc_score
-
- y_true = y_targets.cpu().numpy()
- y_pred = y_preds.cpu().numpy()
- return roc_auc_score(y_true, y_pred)
-
-
-def roc_auc_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
- from sklearn.metrics import roc_curve
-
- y_true = y_targets.cpu().numpy()
- y_pred = y_preds.cpu().numpy()
- return roc_curve(y_true, y_pred)
-
-
-class ROC_AUC(EpochMetric):
- """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC)
- accumulating predictions and the ground-truth during an epoch and applying
- `sklearn.metrics.roc_auc_score `_ .
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- check_compute_fn: Default False. If True, `roc_curve
- `_ is run on the first batch of data to ensure there are
- no issues. User will be warned in case there are any issues computing the function.
- device: optional device specification for internal storage.
-
- Note:
-
- ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
- values. To apply an activation to y_pred, use output_transform as shown below:
-
- .. code-block:: python
-
- def sigmoid_output_transform(output):
- y_pred, y = output
- y_pred = torch.sigmoid(y_pred)
- return y_pred, y
- avg_precision = ROC_AUC(sigmoid_output_transform)
-
- Examples:
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- roc_auc = ROC_AUC()
- #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
- roc_auc.attach(default_evaluator, 'roc_auc')
- y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
- y_true = torch.tensor([[0], [0], [1], [0]])
- state = default_evaluator.run([[y_pred, y_true]])
- print(state.metrics['roc_auc'])
-
- .. testoutput::
-
- 0.6666...
- """
-
- def __init__(
- self,
- output_transform: Callable = lambda x: x,
- check_compute_fn: bool = False,
- device: Union[str, torch.device] = torch.device("cpu"),
- ):
- try:
- from sklearn.metrics import roc_auc_score # noqa: F401
- except ImportError:
- raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
-
- super(ROC_AUC, self).__init__(
- roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device
- )
-
-
-class RocCurve(EpochMetric):
- """Compute Receiver operating characteristic (ROC) for binary classification task
- by accumulating predictions and the ground-truth during an epoch and applying
- `sklearn.metrics.roc_curve `_ .
-
- Args:
- output_transform: a callable that is used to transform the
- :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
- form expected by the metric. This can be useful if, for example, you have a multi-output model and
- you want to compute the metric with respect to one of the outputs.
- check_compute_fn: Default False. If True, `sklearn.metrics.roc_curve
- `_ is run on the first batch of data to ensure there are
- no issues. User will be warned in case there are any issues computing the function.
- device: optional device specification for internal storage.
-
- Note:
- RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
- values. To apply an activation to y_pred, use output_transform as shown below:
-
- .. code-block:: python
-
- def sigmoid_output_transform(output):
- y_pred, y = output
- y_pred = torch.sigmoid(y_pred)
- return y_pred, y
- avg_precision = RocCurve(sigmoid_output_transform)
-
- Examples:
-
- .. include:: defaults.rst
- :start-after: :orphan:
-
- .. testcode::
-
- roc_auc = RocCurve()
- #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
- roc_auc.attach(default_evaluator, 'roc_auc')
- y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
- y_true = torch.tensor([0, 0, 1, 0])
- state = default_evaluator.run([[y_pred, y_true]])
- print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
- print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
- print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])
-
- .. testoutput::
-
- FPR [0.0, 0.333, 0.333, 1.0]
- TPR [0.0, 0.0, 1.0, 1.0]
- Thresholds [inf, 1.0, 0.711, 0.047]
-
- .. versionchanged:: 0.4.11
- added `device` argument
- """
-
- def __init__(
- self,
- output_transform: Callable = lambda x: x,
- check_compute_fn: bool = False,
- device: Union[str, torch.device] = torch.device("cpu"),
- ) -> None:
- try:
- from sklearn.metrics import roc_curve # noqa: F401
- except ImportError:
- raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
-
- super(RocCurve, self).__init__(
- roc_auc_curve_compute_fn, # type: ignore[arg-type]
- output_transform=output_transform,
- check_compute_fn=check_compute_fn,
- device=device,
- )
-
- def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
- if len(self._predictions) < 1 or len(self._targets) < 1:
- raise NotComputableError("RocCurve must have at least one example before it can be computed.")
-
- _prediction_tensor = torch.cat(self._predictions, dim=0)
- _target_tensor = torch.cat(self._targets, dim=0)
-
- ws = idist.get_world_size()
- if ws > 1:
- # All gather across all processes
- _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
- _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
-
- if idist.get_rank() == 0:
- # Run compute_fn on zero rank only
- fpr, tpr, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
- fpr = torch.tensor(fpr, device=_prediction_tensor.device)
- tpr = torch.tensor(tpr, device=_prediction_tensor.device)
- thresholds = torch.tensor(thresholds, device=_prediction_tensor.device)
- else:
- fpr, tpr, thresholds = None, None, None
-
- if ws > 1:
- # broadcast result to all processes
- fpr = idist.broadcast(fpr, src=0, safe_mode=True)
- tpr = idist.broadcast(tpr, src=0, safe_mode=True)
- thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
-
- return fpr, tpr, thresholds
+""" ``ignite.contrib.metrics.roc_auc`` was moved to ``ignite.metrics.roc_auc``.
+Note:
+ ``ignite.contrib.metrics.roc_auc`` was moved to ``ignite.metrics.roc_auc``.
+ Please refer to :mod:`~ignite.metrics.roc_auc`.
+"""
+
+import warnings
+
+removed_in = "0.6.0"
+deprecation_warning = (
+ f"{__file__} has been moved to ignite/metrics/roc_auc.py"
+ + (f" and will be removed in version {removed_in}" if removed_in else "")
+ + ".\n Please refer to the documentation for more details."
+)
+warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
+from ignite.metrics.roc_auc import ROC_AUC, RocCurve
+
+__all__ = ["RocCurve", "ROC_AUC"]
+
+RocCurve = RocCurve
+ROC_AUC = ROC_AUC
diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
index 83e76cf8db6..865be7e7800 100644
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -96,7 +96,7 @@ def supervised_training_step(
Added `model_transform` to transform model's output
.. versionchanged:: 0.4.13
Added `model_fn` to customize model's application on the sample
- .. versionchanged:: 0.4.14
+ .. versionchanged:: 0.5.1
Added support for ``mps`` device
"""
@@ -551,7 +551,7 @@ def output_transform_fn(x, y, y_pred, loss):
Added ``model_transform`` to transform model's output
.. versionchanged:: 0.4.13
Added `model_fn` to customize model's application on the sample
- .. versionchanged:: 0.4.14
+ .. versionchanged:: 0.5.1
Added support for ``mps`` device
"""
@@ -799,7 +799,7 @@ def create_supervised_evaluator(
Added ``model_transform`` to transform model's output
.. versionchanged:: 0.4.13
Added `model_fn` to customize model's application on the sample
- .. versionchanged:: 0.4.14
+ .. versionchanged:: 0.5.1
Added support for ``mps`` device
"""
device_type = device.type if isinstance(device, torch.device) else device
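For context, the ``mps`` entries being re-dated above refer to passing an Apple-silicon device to the supervised factory functions. A minimal sketch, assuming ``torch.backends.mps`` is available on the host; the model, optimizer, and loss below are placeholders for illustration only:

```python
import torch
from torch import nn
from ignite.engine import create_supervised_evaluator, create_supervised_trainer

# Placeholder model/optimizer/loss purely for illustration.
model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

device = "mps" if torch.backends.mps.is_available() else "cpu"
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
evaluator = create_supervised_evaluator(model, device=device)
```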
diff --git a/ignite/metrics/__init__.py b/ignite/metrics/__init__.py
index 04b490b9486..1b23257d4aa 100644
--- a/ignite/metrics/__init__.py
+++ b/ignite/metrics/__init__.py
@@ -1,6 +1,10 @@
+import ignite.metrics.regression
+
from ignite.metrics.accumulation import Average, GeometricAverage, VariableAccumulation
from ignite.metrics.accuracy import Accuracy
+from ignite.metrics.average_precision import AveragePrecision
from ignite.metrics.classification_report import ClassificationReport
+from ignite.metrics.cohen_kappa import CohenKappa
from ignite.metrics.confusion_matrix import ConfusionMatrix, DiceCoefficient, IoU, JaccardIndex, mIoU
from ignite.metrics.cosine_similarity import CosineSimilarity
from ignite.metrics.entropy import Entropy
@@ -9,6 +13,7 @@
from ignite.metrics.frequency import Frequency
from ignite.metrics.gan.fid import FID
from ignite.metrics.gan.inception_score import InceptionScore
+from ignite.metrics.gpu_info import GpuInfo
from ignite.metrics.loss import Loss
from ignite.metrics.mean_absolute_error import MeanAbsoluteError
from ignite.metrics.mean_pairwise_distance import MeanPairwiseDistance
@@ -19,8 +24,10 @@
from ignite.metrics.nlp.bleu import Bleu
from ignite.metrics.nlp.rouge import Rouge, RougeL, RougeN
from ignite.metrics.precision import Precision
+from ignite.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.metrics.psnr import PSNR
from ignite.metrics.recall import Recall
+from ignite.metrics.roc_auc import ROC_AUC, RocCurve
from ignite.metrics.root_mean_squared_error import RootMeanSquaredError
from ignite.metrics.running_average import RunningAverage
from ignite.metrics.ssim import SSIM
@@ -62,4 +69,11 @@
"Rouge",
"RougeN",
"RougeL",
+ "regression",
+ "AveragePrecision",
+ "CohenKappa",
+ "GpuInfo",
+ "PrecisionRecallCurve",
+ "RocCurve",
+ "ROC_AUC",
]
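With these re-exports in place, the former contrib metrics resolve directly from ``ignite.metrics``. A minimal sketch of the new import surface, limited to names visible in the ``__init__`` diff above (assumes scikit-learn is installed, since these metrics import it lazily):

```python
from ignite.metrics import ROC_AUC, AveragePrecision, CohenKappa, PrecisionRecallCurve

roc_auc = ROC_AUC()
ap = AveragePrecision()
kappa = CohenKappa(weights="linear")
pr_curve = PrecisionRecallCurve()
```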
diff --git a/ignite/metrics/average_precision.py b/ignite/metrics/average_precision.py
new file mode 100644
index 00000000000..e2dab8b09ab
--- /dev/null
+++ b/ignite/metrics/average_precision.py
@@ -0,0 +1,81 @@
+from typing import Callable, Union
+
+import torch
+
+from ignite.metrics.epoch_metric import EpochMetric
+
+
+def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
+ from sklearn.metrics import average_precision_score
+
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
+ return average_precision_score(y_true, y_pred)
+
+
+class AveragePrecision(EpochMetric):
+ """Computes Average Precision accumulating predictions and the ground-truth during an epoch
+ and applying `sklearn.metrics.average_precision_score `_ .
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ check_compute_fn: Default False. If True, `average_precision_score
+ `_ is run on the first batch of data to ensure there are
+ no issues. User will be warned in case there are any issues computing the function.
+ device: optional device specification for internal storage.
+
+ Note:
+ AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
+ confidence values. To apply an activation to y_pred, use output_transform as shown below:
+
+ .. code-block:: python
+
+ def activated_output_transform(output):
+ y_pred, y = output
+ y_pred = torch.softmax(y_pred, dim=1)
+ return y_pred, y
+ avg_precision = AveragePrecision(activated_output_transform)
+
+ Examples:
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
+ y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])
+
+ avg_precision = AveragePrecision()
+ avg_precision.attach(default_evaluator, 'average_precision')
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['average_precision'])
+
+ .. testoutput::
+
+ 0.9166...
+
+ """
+
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ):
+ try:
+ from sklearn.metrics import average_precision_score # noqa: F401
+ except ImportError:
+ raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
+
+ super(AveragePrecision, self).__init__(
+ average_precision_compute_fn,
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
+ )
diff --git a/ignite/metrics/cohen_kappa.py b/ignite/metrics/cohen_kappa.py
new file mode 100644
index 00000000000..92d9b07aa4a
--- /dev/null
+++ b/ignite/metrics/cohen_kappa.py
@@ -0,0 +1,86 @@
+from typing import Callable, Optional, Union
+
+import torch
+
+from ignite.metrics.epoch_metric import EpochMetric
+
+
+class CohenKappa(EpochMetric):
+ """Compute different types of Cohen's Kappa: Non-Wieghted, Linear, Quadratic.
+ Accumulating predictions and the ground-truth during an epoch and applying
+ `sklearn.metrics.cohen_kappa_score `_ .
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+        weights: a string defining the type of Cohen's Kappa: non-weighted (``None``), ``"linear"``
+            or ``"quadratic"``. Default, None.
+ check_compute_fn: Default False. If True, `cohen_kappa_score
+ `_
+ is run on the first batch of data to ensure there are
+ no issues. User will be warned in case there are any issues computing the function.
+ device: optional device specification for internal storage.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in the format of
+        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
+ to the metric to transform the output into the form expected by the metric.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = CohenKappa()
+ metric.attach(default_evaluator, 'ck')
+ y_true = torch.tensor([2, 0, 2, 2, 0, 1])
+ y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['ck'])
+
+ .. testoutput::
+
+ 0.4285...
+
+ """
+
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ weights: Optional[str] = None,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ):
+ try:
+ from sklearn.metrics import cohen_kappa_score # noqa: F401
+ except ImportError:
+ raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
+ if weights not in (None, "linear", "quadratic"):
+ raise ValueError("Kappa Weighting type must be None or linear or quadratic.")
+
+        # initialize weights
+ self.weights = weights
+
+ self.cohen_kappa_compute = self.get_cohen_kappa_fn()
+
+ super(CohenKappa, self).__init__(
+ self.cohen_kappa_compute,
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
+ )
+
+ def get_cohen_kappa_fn(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
+ """Return a function computing Cohen Kappa from scikit-learn."""
+ from sklearn.metrics import cohen_kappa_score
+
+ def wrapper(y_targets: torch.Tensor, y_preds: torch.Tensor) -> float:
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
+ return cohen_kappa_score(y_true, y_pred, weights=self.weights)
+
+ return wrapper
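Because ``CohenKappa`` builds on ``EpochMetric``, it can also be exercised outside an ``Engine`` through the inherited ``update``/``compute`` pair. A minimal sketch of the three weighting modes accepted by ``weights`` (assumes scikit-learn is installed; the tensors are made up for illustration):

```python
import torch
from ignite.metrics import CohenKappa

y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
y_true = torch.tensor([2, 0, 2, 2, 0, 1])

for weights in (None, "linear", "quadratic"):
    metric = CohenKappa(weights=weights)
    metric.update((y_pred, y_true))  # EpochMetric accumulates the pair
    print(weights, metric.compute())
```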
diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py
index a55bbedebb8..75a9f9848a2 100644
--- a/ignite/metrics/confusion_matrix.py
+++ b/ignite/metrics/confusion_matrix.py
@@ -80,9 +80,10 @@ class ConfusionMatrix(Metric):
.. testcode:: 2
def binary_one_hot_output_transform(output):
+ from ignite import utils
y_pred, y = output
y_pred = torch.sigmoid(y_pred).round().long()
- y_pred = ignite.utils.to_onehot(y_pred, 2)
+ y_pred = utils.to_onehot(y_pred, 2)
y = y.long()
return y_pred, y
diff --git a/ignite/metrics/gpu_info.py b/ignite/metrics/gpu_info.py
new file mode 100644
index 00000000000..96ed4f07c57
--- /dev/null
+++ b/ignite/metrics/gpu_info.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+import warnings
+from typing import Any, Dict, List, Tuple, Union
+
+import torch
+
+from ignite.engine import Engine, EventEnum, Events
+from ignite.metrics.metric import Metric
+
+
+class GpuInfo(Metric):
+ """Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
+    on each iteration.
+
+ .. Note ::
+
+        If gpu utilization reports "N/A" on a given GPU, the corresponding metric value is not set.
+
+ Examples:
+ .. code-block:: python
+
+ # Default GPU measurements
+ GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
+
+ # Logging with TQDM
+ ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
+            # Progress bar will look like
+ # Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
+
+ # Logging with Tensorboard
+ tb_logger.attach(trainer,
+ log_handler=OutputHandler(tag="training", metric_names='all'),
+ event_name=Events.ITERATION_COMPLETED)
+ """
+
+ def __init__(self) -> None:
+ try:
+ from pynvml.smi import nvidia_smi
+ except ImportError:
+ raise ModuleNotFoundError(
+ "This contrib module requires pynvml to be installed. "
+ "Please install it with command: \n pip install pynvml"
+ )
+ # Let's check available devices
+ if not torch.cuda.is_available():
+ raise RuntimeError("This contrib module requires available GPU")
+
+        # Let it fail if no libnvidia drivers or NVML library found
+ self.nvsmi = nvidia_smi.getInstance()
+ super(GpuInfo, self).__init__()
+
+ def reset(self) -> None:
+ pass
+
+ def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ pass
+
+ def compute(self) -> List[Dict[str, Any]]:
+ data: Dict[str, List[Dict[str, Any]]] = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
+ if len(data) == 0 or ("gpu" not in data):
+ warnings.warn("No GPU information available")
+ return []
+ return data["gpu"]
+
+ def completed(self, engine: Engine, name: str) -> None:
+ data = self.compute()
+ if len(data) < 1:
+ warnings.warn("No GPU information available")
+ return
+
+ for i, data_by_rank in enumerate(data):
+ mem_name = f"{name}:{i} mem(%)"
+
+ if "fb_memory_usage" not in data_by_rank:
+ warnings.warn(f"No GPU memory usage information available in {data_by_rank}")
+ continue
+ mem_report = data_by_rank["fb_memory_usage"]
+ if not ("used" in mem_report and "total" in mem_report):
+ warnings.warn(
+ "GPU memory usage information does not provide used/total "
+ f"memory consumption information in {mem_report}"
+ )
+ continue
+
+ engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
+
+ for i, data_by_rank in enumerate(data):
+ util_name = f"{name}:{i} util(%)"
+ if "utilization" not in data_by_rank:
+ warnings.warn(f"No GPU utilization information available in {data_by_rank}")
+ continue
+ util_report = data_by_rank["utilization"]
+ if not ("gpu_util" in util_report):
+ warnings.warn(f"GPU utilization information does not provide 'gpu_util' information in {util_report}")
+ continue
+ try:
+ engine.state.metrics[util_name] = int(util_report["gpu_util"])
+ except ValueError:
+ # Do not set GPU utilization information
+ pass
+
+ # TODO: see issue https://github.com/pytorch/ignite/issues/1405
+ def attach( # type: ignore
+ self, engine: Engine, name: str = "gpu", event_name: Union[str, EventEnum] = Events.ITERATION_COMPLETED
+ ) -> None:
+ engine.add_event_handler(event_name, self.completed, name)
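A minimal usage sketch for the new ``GpuInfo`` metric, assuming ``pynvml`` is installed and at least one CUDA device is visible; the ``train_step`` function below is a placeholder:

```python
from ignite.engine import Engine, Events
from ignite.metrics.gpu_info import GpuInfo

def train_step(engine, batch):
    # placeholder training step; a real step would run forward/backward here
    return batch

trainer = Engine(train_step)

# Attach with the default name and event (ITERATION_COMPLETED), producing
# 'gpu:0 mem(%)' and 'gpu:0 util(%)' entries in engine.state.metrics
GpuInfo().attach(trainer, name="gpu")

# Sampling less frequently is possible through a filtered event, e.g.
# GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=10))

@trainer.on(Events.ITERATION_COMPLETED)
def log_gpu_metrics(engine):
    print({k: v for k, v in engine.state.metrics.items() if k.startswith("gpu")})

trainer.run([0, 1, 2], max_epochs=1)
```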
diff --git a/ignite/metrics/nlp/rouge.py b/ignite/metrics/nlp/rouge.py
index 9aa87a269e6..1d3f08c593e 100644
--- a/ignite/metrics/nlp/rouge.py
+++ b/ignite/metrics/nlp/rouge.py
@@ -5,10 +5,9 @@
import torch
from ignite.exceptions import NotComputableError
-from ignite.metrics import Metric
 # These decorators help with distributed settings
-from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.nlp.utils import lcs, ngrams
__all__ = ["Rouge", "RougeN", "RougeL"]
diff --git a/ignite/metrics/precision_recall_curve.py b/ignite/metrics/precision_recall_curve.py
new file mode 100644
index 00000000000..29b3710b58c
--- /dev/null
+++ b/ignite/metrics/precision_recall_curve.py
@@ -0,0 +1,120 @@
+from typing import Any, Callable, cast, Tuple, Union
+
+import torch
+
+import ignite.distributed as idist
+from ignite.exceptions import NotComputableError
+from ignite.metrics.epoch_metric import EpochMetric
+
+
+def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
+ try:
+ from sklearn.metrics import precision_recall_curve
+ except ImportError:
+ raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
+
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
+ return precision_recall_curve(y_true, y_pred)
+
+
+class PrecisionRecallCurve(EpochMetric):
+ """Compute precision-recall pairs for different probability thresholds for binary classification task
+ by accumulating predictions and the ground-truth during an epoch and applying
+ `sklearn.metrics.precision_recall_curve `_ .
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+        check_compute_fn: Default False. If True, `precision_recall_curve
+        <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html>`_ is run
+        on the first batch of data to ensure there are no issues. The user will be warned in case there are any
+        issues computing the function.
+
+ Note:
+ PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
+ or confidence values. To apply an activation to y_pred, use output_transform as shown below:
+
+ .. code-block:: python
+
+ def sigmoid_output_transform(output):
+ y_pred, y = output
+ y_pred = torch.sigmoid(y_pred)
+ return y_pred, y
+ avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
+
+ Examples:
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+ y_true = torch.tensor([0, 0, 1, 1])
+ prec_recall_curve = PrecisionRecallCurve()
+ prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
+ state = default_evaluator.run([[y_pred, y_true]])
+
+ print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
+ print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
+ print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
+
+ .. testoutput::
+
+ Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
+ Recall [1.0, 1.0, 1.0, 0.5, 0.0]
+ Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
+
+ """
+
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ) -> None:
+ super(PrecisionRecallCurve, self).__init__(
+ precision_recall_curve_compute_fn, # type: ignore[arg-type]
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
+ )
+
+ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
+ if len(self._predictions) < 1 or len(self._targets) < 1:
+ raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
+
+ if self._result is None: # type: ignore
+ _prediction_tensor = torch.cat(self._predictions, dim=0)
+ _target_tensor = torch.cat(self._targets, dim=0)
+
+ ws = idist.get_world_size()
+ if ws > 1:
+ # All gather across all processes
+ _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
+ _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
+
+ if idist.get_rank() == 0:
+ # Run compute_fn on zero rank only
+ precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
+ precision = torch.tensor(precision, device=_prediction_tensor.device)
+ recall = torch.tensor(recall, device=_prediction_tensor.device)
+ # thresholds can have negative strides, not compatible with torch tensors
+ # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
+ thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
+ else:
+ precision, recall, thresholds = None, None, None
+
+ if ws > 1:
+ # broadcast result to all processes
+ precision = idist.broadcast(precision, src=0, safe_mode=True)
+ recall = idist.broadcast(recall, src=0, safe_mode=True)
+ thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
+
+ self._result = (precision, recall, thresholds) # type: ignore[assignment]
+
+ return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result) # type: ignore
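For reference, the same numbers as in the testcode above can be obtained by calling the module-level ``precision_recall_curve_compute_fn`` directly on the accumulated tensors; a sketch, assuming scikit-learn is installed:

```python
import torch

from ignite.metrics.precision_recall_curve import precision_recall_curve_compute_fn

y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])

# Thin wrapper around sklearn.metrics.precision_recall_curve on CPU numpy arrays
precision, recall, thresholds = precision_recall_curve_compute_fn(y_pred, y_true)
print(precision)   # ~ [0.5, 0.6667, 1.0, 1.0, 1.0]
print(recall)      # ~ [1.0, 1.0, 1.0, 0.5, 0.0]
print(thresholds)  # ~ [0.0474, 0.5987, 0.7109, 0.9997]
```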
diff --git a/ignite/metrics/regression/__init__.py b/ignite/metrics/regression/__init__.py
new file mode 100644
index 00000000000..7a3fc3e56a9
--- /dev/null
+++ b/ignite/metrics/regression/__init__.py
@@ -0,0 +1,15 @@
+from ignite.metrics.regression.canberra_metric import CanberraMetric
+from ignite.metrics.regression.fractional_absolute_error import FractionalAbsoluteError
+from ignite.metrics.regression.fractional_bias import FractionalBias
+from ignite.metrics.regression.geometric_mean_absolute_error import GeometricMeanAbsoluteError
+from ignite.metrics.regression.geometric_mean_relative_absolute_error import GeometricMeanRelativeAbsoluteError
+from ignite.metrics.regression.manhattan_distance import ManhattanDistance
+from ignite.metrics.regression.maximum_absolute_error import MaximumAbsoluteError
+from ignite.metrics.regression.mean_absolute_relative_error import MeanAbsoluteRelativeError
+from ignite.metrics.regression.mean_error import MeanError
+from ignite.metrics.regression.mean_normalized_bias import MeanNormalizedBias
+from ignite.metrics.regression.median_absolute_error import MedianAbsoluteError
+from ignite.metrics.regression.median_absolute_percentage_error import MedianAbsolutePercentageError
+from ignite.metrics.regression.median_relative_absolute_error import MedianRelativeAbsoluteError
+from ignite.metrics.regression.r2_score import R2Score
+from ignite.metrics.regression.wave_hedges_distance import WaveHedgesDistance
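With the new package in place, the regression metrics resolve under ``ignite.metrics.regression``; a short import sketch (the ``evaluator`` engine is assumed to exist elsewhere):

```python
from ignite.metrics.regression import CanberraMetric, MedianAbsoluteError, R2Score

metrics = {
    "canberra": CanberraMetric(),
    "median_abs_error": MedianAbsoluteError(),
    "r2": R2Score(),
}

# Each metric attaches to an evaluator Engine in the usual way:
# for name, metric in metrics.items():
#     metric.attach(evaluator, name)
```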
diff --git a/ignite/metrics/regression/_base.py b/ignite/metrics/regression/_base.py
new file mode 100644
index 00000000000..b4c08b58a4e
--- /dev/null
+++ b/ignite/metrics/regression/_base.py
@@ -0,0 +1,63 @@
+from abc import abstractmethod
+from typing import Tuple
+
+import torch
+
+from ignite.metrics.metric import Metric, reinit__is_reduced
+
+
+def _check_output_shapes(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output
+ c1 = y_pred.ndimension() == 2 and y_pred.shape[1] == 1
+ if not (y_pred.ndimension() == 1 or c1):
+ raise ValueError(f"Input y_pred should have shape (N,) or (N, 1), but given {y_pred.shape}")
+
+ c2 = y.ndimension() == 2 and y.shape[1] == 1
+ if not (y.ndimension() == 1 or c2):
+ raise ValueError(f"Input y should have shape (N,) or (N, 1), but given {y.shape}")
+
+ if y_pred.shape != y.shape:
+ raise ValueError(f"Input data shapes should be the same, but given {y_pred.shape} and {y.shape}")
+
+
+def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output
+ if y_pred.dtype not in (torch.float16, torch.float32, torch.float64):
+ raise TypeError(f"Input y_pred dtype should be float 16, 32 or 64, but given {y_pred.dtype}")
+
+ if y.dtype not in (torch.float16, torch.float32, torch.float64):
+ raise TypeError(f"Input y dtype should be float 16, 32 or 64, but given {y.dtype}")
+
+
+def _torch_median(output: torch.Tensor) -> float:
+ output = output.view(-1)
+ len_ = len(output)
+
+ if len_ % 2 == 0:
+ return float((torch.kthvalue(output, len_ // 2)[0] + torch.kthvalue(output, len_ // 2 + 1)[0]) / 2)
+ else:
+ return float(torch.kthvalue(output, len_ // 2 + 1)[0])
+
+
+class _BaseRegression(Metric):
+ # Base class for all regression metrics
+    # The `update` method checks the shapes and calls the internal
+    # overloaded method `_update`.
+
+ @reinit__is_reduced
+ def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ _check_output_shapes(output)
+ _check_output_types(output)
+ y_pred, y = output[0].detach(), output[1].detach()
+
+ if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
+ y_pred = y_pred.squeeze(dim=-1)
+
+ if y.ndimension() == 2 and y.shape[1] == 1:
+ y = y.squeeze(dim=-1)
+
+ self._update((y_pred, y))
+
+ @abstractmethod
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ pass
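``_BaseRegression.update`` performs the shape and dtype checks and squeezes ``(N, 1)`` inputs to ``(N,)`` before delegating to ``_update``, so subclasses only accumulate state. A hedged sketch of a custom metric built on this base class (the ``MeanSquaredErrorSketch`` class is illustrative and not part of this diff):

```python
from typing import Tuple

import torch

from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from ignite.metrics.regression._base import _BaseRegression


class MeanSquaredErrorSketch(_BaseRegression):
    # Illustrative subclass: shape/dtype validation happens in _BaseRegression.update,
    # so _update only sees 1-d float tensors of equal length.

    @reinit__is_reduced
    def reset(self) -> None:
        self._sum_of_squared_errors = torch.tensor(0.0, device=self._device)
        self._num_examples = 0

    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output
        self._sum_of_squared_errors += torch.sum((y - y_pred) ** 2).to(self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce("_sum_of_squared_errors", "_num_examples")
    def compute(self) -> float:
        return self._sum_of_squared_errors.item() / max(self._num_examples, 1)
```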
diff --git a/ignite/metrics/regression/canberra_metric.py b/ignite/metrics/regression/canberra_metric.py
new file mode 100644
index 00000000000..f8bd2732a38
--- /dev/null
+++ b/ignite/metrics/regression/canberra_metric.py
@@ -0,0 +1,81 @@
+from typing import Tuple
+
+import torch
+
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class CanberraMetric(_BaseRegression):
+ r"""Calculates the Canberra Metric.
+
+ .. math::
+ \text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}
+
+ where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ .. _scikit-learn distance metrics:
+ https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ .. _`Botchkarev 2018`:
+ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = CanberraMetric()
+ metric.attach(default_evaluator, 'canberra')
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_true = y_pred * 1.5
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['canberra'])
+
+ .. testoutput::
+
+ 0.8000...
+
+ .. versionchanged:: 0.4.3
+
+ - Fixed implementation: ``abs`` in denominator.
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors",)
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + 1e-15)
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+
+ @sync_all_reduce("_sum_of_errors")
+ def compute(self) -> float:
+ return self._sum_of_errors.item()
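A quick check of the ``0.8000...`` testoutput above: with ``y_true = 1.5 * y_pred`` every term of the sum collapses to the same constant,

.. math::
    \text{CM} = \sum_{j=1}^{4} \frac{|1.5 P_j - P_j|}{|1.5 P_j| + |P_j|}
              = \sum_{j=1}^{4} \frac{0.5\,|P_j|}{2.5\,|P_j|} = 4 \cdot 0.2 = 0.8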
diff --git a/ignite/metrics/regression/fractional_absolute_error.py b/ignite/metrics/regression/fractional_absolute_error.py
new file mode 100644
index 00000000000..c66e8e780f7
--- /dev/null
+++ b/ignite/metrics/regression/fractional_absolute_error.py
@@ -0,0 +1,82 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class FractionalAbsoluteError(_BaseRegression):
+ r"""Calculates the Fractional Absolute Error.
+
+ .. math::
+ \text{FAE} = \frac{1}{n}\sum_{j=1}^n\frac{2 |A_j - P_j|}{|A_j| + |P_j|}
+
+ where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = FractionalAbsoluteError()
+ metric.attach(default_evaluator, 'fractional_abs_error')
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_true = y_pred * 0.8
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['fractional_abs_error'])
+
+ .. testoutput::
+
+ 0.2222...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._num_examples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+ self._num_examples += y.shape[0]
+
+ @sync_all_reduce("_num_examples", "_sum_of_errors")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError(
+ "FractionalAbsoluteError must have at least one example before it can be computed."
+ )
+ return self._sum_of_errors.item() / self._num_examples
diff --git a/ignite/metrics/regression/fractional_bias.py b/ignite/metrics/regression/fractional_bias.py
new file mode 100644
index 00000000000..7164cd4f166
--- /dev/null
+++ b/ignite/metrics/regression/fractional_bias.py
@@ -0,0 +1,80 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class FractionalBias(_BaseRegression):
+ r"""Calculates the Fractional Bias.
+
+ .. math::
+ \text{FB} = \frac{1}{n}\sum_{j=1}^n\frac{2 (A_j - P_j)}{A_j + P_j}
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = FractionalBias()
+ metric.attach(default_evaluator, 'fractional_bias')
+ y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
+ y_true = y_pred * 1.5
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['fractional_bias'])
+
+ .. testoutput::
+
+ 0.4000...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
+ self._num_examples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = 2 * (y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred) + 1e-30)
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+ self._num_examples += y.shape[0]
+
+ @sync_all_reduce("_sum_of_errors", "_num_examples")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError("FractionalBias must have at least one example before it can be computed.")
+ return self._sum_of_errors.item() / self._num_examples
diff --git a/ignite/metrics/regression/geometric_mean_absolute_error.py b/ignite/metrics/regression/geometric_mean_absolute_error.py
new file mode 100644
index 00000000000..92f61192839
--- /dev/null
+++ b/ignite/metrics/regression/geometric_mean_absolute_error.py
@@ -0,0 +1,82 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class GeometricMeanAbsoluteError(_BaseRegression):
+ r"""Calculates the Geometric Mean Absolute Error.
+
+ .. math::
+ \text{GMAE} = \exp(\frac{1}{n}\sum_{j=1}^n\ln(|A_j - P_j|))
+
+ where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = GeometricMeanAbsoluteError()
+ metric.attach(default_evaluator, 'gmae')
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_true = y_pred * 1.5
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['gmae'])
+
+ .. testoutput::
+
+ 2.2723...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._num_examples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+ self._num_examples += y.shape[0]
+
+ @sync_all_reduce("_sum_of_errors", "_num_examples")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError(
+ "GeometricMeanAbsoluteError must have at least one example before it can be computed."
+ )
+ return torch.exp((self._sum_of_errors) / self._num_examples).item()
diff --git a/ignite/metrics/regression/geometric_mean_relative_absolute_error.py b/ignite/metrics/regression/geometric_mean_relative_absolute_error.py
new file mode 100644
index 00000000000..8e2bfb9c045
--- /dev/null
+++ b/ignite/metrics/regression/geometric_mean_relative_absolute_error.py
@@ -0,0 +1,108 @@
+from typing import cast, List, Tuple
+
+import torch
+
+import ignite.distributed as idist
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class GeometricMeanRelativeAbsoluteError(_BaseRegression):
+ r"""Calculates the Geometric Mean Relative Absolute Error.
+
+ .. math::
+ \text{GMRAE} = \exp(\frac{1}{n}\sum_{j=1}^n \ln\frac{|A_j - P_j|}{|A_j - \bar{A}|})
+
+ where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value
+    and :math:`\bar{A}` is the mean of the ground truth.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ .. warning::
+
+ Current implementation of GMRAE stores all input data (output and target)
+ as tensors before computing the metric.
+ This can potentially lead to a memory error if the input data is larger than available RAM.
+
+        In distributed configuration, all stored data (output and target) is mutually collected across all
+        processes using the all gather collective operation. This can potentially lead to a memory error.
+
+        The metric is then computed on every process from the gathered predictions and targets.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = GeometricMeanRelativeAbsoluteError()
+ metric.attach(default_evaluator, 'gmare')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['gmare'])
+
+ .. testoutput::
+
+ 0.0...
+ """
+
+ _state_dict_all_req_keys = ("_predictions", "_targets")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._predictions: List[torch.Tensor] = []
+ self._targets: List[torch.Tensor] = []
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+
+ y_pred = y_pred.clone().to(self._device)
+ y = y.clone().to(self._device)
+
+ self._predictions.append(y_pred)
+ self._targets.append(y)
+
+ def compute(self) -> float:
+ if len(self._predictions) < 1 or len(self._targets) < 1:
+ raise NotComputableError(
+ "GeometricMeanRelativeAbsoluteError must have at least one example before it can be computed."
+ )
+
+ _prediction_tensor = torch.cat(self._predictions, dim=0)
+ _target_tensor = torch.cat(self._targets, dim=0)
+
+ # All gather across all processes
+ _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
+ _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
+
+ result = torch.exp(
+ torch.log(
+ torch.abs(_target_tensor - _prediction_tensor) / torch.abs(_target_tensor - _target_tensor.mean())
+ ).mean()
+ ).item()
+
+ return result
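The ``0.0...`` testoutput above follows from the first sample: ``y_true`` contains ``0.`` and ``y_pred = 0.75 * y_true``, so ``|A_1 - P_1| = 0``, its log is ``-inf``, the mean over the batch is ``-inf`` and the exponential is ``0``. A standalone check of the same expression the metric evaluates after gathering:

```python
import torch

y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
y_pred = y_true * 0.75

ratio = torch.abs(y_true - y_pred) / torch.abs(y_true - y_true.mean())
print(torch.log(ratio))                    # first entry is -inf since |A_1 - P_1| == 0
print(torch.exp(torch.log(ratio).mean()))  # tensor(0.) -> the 0.0... testoutput
```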
diff --git a/ignite/metrics/regression/manhattan_distance.py b/ignite/metrics/regression/manhattan_distance.py
new file mode 100644
index 00000000000..9746fece91e
--- /dev/null
+++ b/ignite/metrics/regression/manhattan_distance.py
@@ -0,0 +1,77 @@
+from typing import Tuple
+
+import torch
+
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class ManhattanDistance(_BaseRegression):
+ r"""Calculates the Manhattan Distance.
+
+ .. math::
+ \text{MD} = \sum_{j=1}^n |A_j - P_j|
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `scikit-learn distance metrics`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://scikit-learn.org/stable/modules/generated/sklearn.metrics.DistanceMetric.html
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = ManhattanDistance()
+ metric.attach(default_evaluator, 'manhattan')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['manhattan'])
+
+ .. testoutput::
+
+ 3.75...
+
+ .. versionchanged:: 0.4.3
+
+ - Fixed sklearn compatibility.
+        - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors",)
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output
+ errors = torch.abs(y - y_pred)
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+
+ @sync_all_reduce("_sum_of_errors")
+ def compute(self) -> float:
+ return self._sum_of_errors.item()
diff --git a/ignite/metrics/regression/maximum_absolute_error.py b/ignite/metrics/regression/maximum_absolute_error.py
new file mode 100644
index 00000000000..5b22f5951b8
--- /dev/null
+++ b/ignite/metrics/regression/maximum_absolute_error.py
@@ -0,0 +1,79 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class MaximumAbsoluteError(_BaseRegression):
+ r"""Calculates the Maximum Absolute Error.
+
+ .. math::
+ \text{MaxAE} = \max_{j=1,n} \left( \lvert A_j-P_j \rvert \right)
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MaximumAbsoluteError()
+ metric.attach(default_evaluator, 'mae')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mae'])
+
+ .. testoutput::
+
+ 1.25...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_max_of_absolute_errors",)
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._max_of_absolute_errors: float = -1
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ mae = torch.abs(y_pred - y.view_as(y_pred)).max().item()
+ if self._max_of_absolute_errors < mae:
+ self._max_of_absolute_errors = mae
+
+ @sync_all_reduce("_max_of_absolute_errors:MAX")
+ def compute(self) -> float:
+ if self._max_of_absolute_errors < 0:
+ raise NotComputableError("MaximumAbsoluteError must have at least one example before it can be computed.")
+ return self._max_of_absolute_errors
diff --git a/ignite/metrics/regression/mean_absolute_relative_error.py b/ignite/metrics/regression/mean_absolute_relative_error.py
new file mode 100644
index 00000000000..1551ff9be72
--- /dev/null
+++ b/ignite/metrics/regression/mean_absolute_relative_error.py
@@ -0,0 +1,84 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class MeanAbsoluteRelativeError(_BaseRegression):
+ r"""Calculate Mean Absolute Relative Error (MARE), also known as Mean Absolute Percentage Error (MAPE).
+
+ .. math::
+ \text{MARE} = \frac{1}{n}\sum_{j=1}^n\frac{\left|A_j-P_j\right|}{\left|A_j\right|}
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in the reference `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MeanAbsoluteRelativeError()
+ metric.attach(default_evaluator, 'mare')
+ y_true = torch.tensor([1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mare'])
+
+ .. testoutput::
+
+ 0.25...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_absolute_relative_errors", "_num_samples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_absolute_relative_errors = torch.tensor(0.0, device=self._device)
+ self._num_samples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ if (y == 0).any():
+ raise NotComputableError("The ground truth has 0.")
+ absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))
+ self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)
+ self._num_samples += y.size()[0]
+
+ @sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples")
+ def compute(self) -> float:
+ if self._num_samples == 0:
+ raise NotComputableError(
+ "MeanAbsoluteRelativeError must have at least one sample before it can be computed."
+ )
+ return self._sum_of_absolute_relative_errors.item() / self._num_samples
diff --git a/ignite/metrics/regression/mean_error.py b/ignite/metrics/regression/mean_error.py
new file mode 100644
index 00000000000..91a8d90408f
--- /dev/null
+++ b/ignite/metrics/regression/mean_error.py
@@ -0,0 +1,77 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class MeanError(_BaseRegression):
+ r"""Calculates the Mean Error.
+
+ .. math::
+ \text{ME} = \frac{1}{n}\sum_{j=1}^n (A_j - P_j)
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in the reference `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MeanError()
+ metric.attach(default_evaluator, 'me')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['me'])
+
+ .. testoutput::
+
+ 0.625...
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._num_examples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = y.view_as(y_pred) - y_pred
+ self._sum_of_errors += torch.sum(errors).item()
+ self._num_examples += y.shape[0]
+
+ @sync_all_reduce("_sum_of_errors", "_num_examples")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError("MeanError must have at least one example before it can be computed.")
+ return self._sum_of_errors.item() / self._num_examples
diff --git a/ignite/metrics/regression/mean_normalized_bias.py b/ignite/metrics/regression/mean_normalized_bias.py
new file mode 100644
index 00000000000..9ac2e244dd0
--- /dev/null
+++ b/ignite/metrics/regression/mean_normalized_bias.py
@@ -0,0 +1,84 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class MeanNormalizedBias(_BaseRegression):
+ r"""Calculates the Mean Normalized Bias.
+
+ .. math::
+ \text{MNB} = \frac{1}{n}\sum_{j=1}^n\frac{A_j - P_j}{A_j}
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in the reference `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MeanNormalizedBias()
+ metric.attach(default_evaluator, 'mnb')
+ y_true = torch.tensor([1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mnb'])
+
+ .. testoutput::
+
+ 0.25...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors", "_num_examples")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._num_examples = 0
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+
+ if (y == 0).any():
+ raise NotComputableError("The ground truth has 0.")
+
+ errors = (y.view_as(y_pred) - y_pred) / y
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+ self._num_examples += y.shape[0]
+
+ @sync_all_reduce("_sum_of_errors", "_num_examples")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError("MeanNormalizedBias must have at least one example before it can be computed.")
+ return self._sum_of_errors.item() / self._num_examples
diff --git a/ignite/metrics/regression/median_absolute_error.py b/ignite/metrics/regression/median_absolute_error.py
new file mode 100644
index 00000000000..c745ec42c96
--- /dev/null
+++ b/ignite/metrics/regression/median_absolute_error.py
@@ -0,0 +1,72 @@
+from typing import Callable, Union
+
+import torch
+
+from ignite.metrics.epoch_metric import EpochMetric
+
+from ignite.metrics.regression._base import _torch_median
+
+
+def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
+ e = torch.abs(y.view_as(y_pred) - y_pred)
+ return _torch_median(e)
+
+
+class MedianAbsoluteError(EpochMetric):
+ r"""Calculates the Median Absolute Error.
+
+ .. math::
+ \text{MdAE} = \text{MD}_{j=1,n} \left( |A_j - P_j| \right)
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
+
+ .. warning::
+
+        Current implementation stores all input data (output and target) as tensors before computing the metric.
+ This can potentially lead to a memory error if the input data is larger than available RAM.
+
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: optional device specification for internal storage.
+
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MedianAbsoluteError()
+ metric.attach(default_evaluator, 'mae')
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mae'])
+
+ .. testoutput::
+
+ 0.625
+ """
+
+ def __init__(
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
+ ):
+ super(MedianAbsoluteError, self).__init__(
+ median_absolute_error_compute_fn, output_transform=output_transform, device=device
+ )
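``MedianAbsoluteError`` relies on ``_torch_median`` from ``_base.py`` above, which averages the two middle values for even-length inputs (plain ``torch.median`` would return the lower one). For the testcode the absolute errors are ``0.25 * [0, 1, 2, 3, 4, 5]``, so the two middle values are ``0.5`` and ``0.75`` and the result is the ``0.625`` shown:

```python
import torch

from ignite.metrics.regression._base import _torch_median

errors = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0, 1.25])  # |y_true - y_pred| from the testcode
print(_torch_median(errors))  # 0.625 -> average of the two middle values
print(torch.median(errors))   # tensor(0.5000) -> lower middle value
```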
diff --git a/ignite/metrics/regression/median_absolute_percentage_error.py b/ignite/metrics/regression/median_absolute_percentage_error.py
new file mode 100644
index 00000000000..5fe094636d3
--- /dev/null
+++ b/ignite/metrics/regression/median_absolute_percentage_error.py
@@ -0,0 +1,70 @@
+from typing import Callable, Union
+
+import torch
+
+from ignite.metrics.epoch_metric import EpochMetric
+
+from ignite.metrics.regression._base import _torch_median
+
+
+def median_absolute_percentage_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
+ e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred))
+ return 100.0 * _torch_median(e)
+
+
+class MedianAbsolutePercentageError(EpochMetric):
+ r"""Calculates the Median Absolute Percentage Error.
+
+ .. math::
+ \text{MdAPE} = 100 \cdot \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j|} \right)
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
+
+ .. warning::
+
+        Current implementation stores all input data (output and target) as tensors before computing the metric.
+ This can potentially lead to a memory error if the input data is larger than available RAM.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: optional device specification for internal storage.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MedianAbsolutePercentageError()
+ metric.attach(default_evaluator, 'mape')
+ y_true = torch.tensor([1, 2, 3, 4, 5])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mape'])
+
+ .. testoutput::
+
+ 25.0...
+ """
+
+ def __init__(
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
+ ):
+ super(MedianAbsolutePercentageError, self).__init__(
+ median_absolute_percentage_error_compute_fn, output_transform=output_transform, device=device
+ )
diff --git a/ignite/metrics/regression/median_relative_absolute_error.py b/ignite/metrics/regression/median_relative_absolute_error.py
new file mode 100644
index 00000000000..20b668b0c51
--- /dev/null
+++ b/ignite/metrics/regression/median_relative_absolute_error.py
@@ -0,0 +1,70 @@
+from typing import Callable, Union
+
+import torch
+
+from ignite.metrics.epoch_metric import EpochMetric
+
+from ignite.metrics.regression._base import _torch_median
+
+
+def median_relative_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
+ e = torch.abs(y.view_as(y_pred) - y_pred) / torch.abs(y.view_as(y_pred) - torch.mean(y))
+ return _torch_median(e)
+
+
+class MedianRelativeAbsoluteError(EpochMetric):
+ r"""Calculates the Median Relative Absolute Error.
+
+ .. math::
+ \text{MdRAE} = \text{MD}_{j=1,n} \left( \frac{|A_j - P_j|}{|A_j - \bar{A}|} \right)
+
+ where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
+
+ .. warning::
+
+        Current implementation stores all input data (output and target) as tensors before computing the metric.
+ This can potentially lead to a memory error if the input data is larger than available RAM.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: optional device specification for internal storage.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = MedianRelativeAbsoluteError()
+ metric.attach(default_evaluator, 'mrae')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['mrae'])
+
+ .. testoutput::
+
+ 0.5...
+ """
+
+ def __init__(
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
+ ):
+ super(MedianRelativeAbsoluteError, self).__init__(
+ median_relative_absolute_error_compute_fn, output_transform=output_transform, device=device
+ )
diff --git a/ignite/metrics/regression/r2_score.py b/ignite/metrics/regression/r2_score.py
new file mode 100644
index 00000000000..f4089a3e2e1
--- /dev/null
+++ b/ignite/metrics/regression/r2_score.py
@@ -0,0 +1,82 @@
+from typing import Tuple
+
+import torch
+
+from ignite.exceptions import NotComputableError
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class R2Score(_BaseRegression):
+ r"""Calculates the R-Squared, the
+ `coefficient of determination `_.
+
+ .. math::
+ R^2 = 1 - \frac{\sum_{j=1}^n(A_j - P_j)^2}{\sum_{j=1}^n(A_j - \bar{A})^2}
+
+ where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and
+ :math:`\bar{A}` is the mean of the ground truth.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = R2Score()
+ metric.attach(default_evaluator, 'r2')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['r2'])
+
+ .. testoutput::
+
+ 0.8035...
+
+ .. versionchanged:: 0.4.3
+ Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._num_examples = 0
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+ self._y_sq_sum = torch.tensor(0.0, device=self._device)
+ self._y_sum = torch.tensor(0.0, device=self._device)
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output
+ self._num_examples += y.shape[0]
+ self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
+
+ self._y_sum += torch.sum(y).to(self._device)
+ self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
+
+ @sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
+ def compute(self) -> float:
+ if self._num_examples == 0:
+ raise NotComputableError("R2Score must have at least one example before it can be computed.")
+ return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)
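The ``compute`` above uses a streaming form of the denominator: only running sums are kept, which works because of the identity

.. math::
    \sum_{j=1}^n (A_j - \bar{A})^2 = \sum_{j=1}^n A_j^2 - \frac{\left(\sum_{j=1}^n A_j\right)^2}{n},

so with ``_sum_of_errors`` :math:`= \sum_j (A_j - P_j)^2`, ``_y_sq_sum`` :math:`= \sum_j A_j^2` and ``_y_sum`` :math:`= \sum_j A_j`, the returned value is exactly :math:`1 - \sum_j (A_j - P_j)^2 / \left(\sum_j A_j^2 - (\sum_j A_j)^2 / n\right)`.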
diff --git a/ignite/metrics/regression/wave_hedges_distance.py b/ignite/metrics/regression/wave_hedges_distance.py
new file mode 100644
index 00000000000..c226a1eb2a8
--- /dev/null
+++ b/ignite/metrics/regression/wave_hedges_distance.py
@@ -0,0 +1,75 @@
+from typing import Tuple
+
+import torch
+
+from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
+
+from ignite.metrics.regression._base import _BaseRegression
+
+
+class WaveHedgesDistance(_BaseRegression):
+ r"""Calculates the Wave Hedges Distance.
+
+ .. math::
+        \text{WHD} = \sum_{j=1}^n\frac{|A_j - P_j|}{\max(A_j, P_j)}
+
+ where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
+
+ More details can be found in `Botchkarev 2018`__.
+
+ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
+
+ __ https://arxiv.org/abs/1809.03006
+
+ Parameters are inherited from ``Metric.__init__``.
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+ By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
+ device: specifies which device updates are accumulated on. Setting the
+ metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
+ non-blocking. By default, CPU.
+
+ Examples:
+ To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
+ The output of the engine's ``process_function`` needs to be in format of
+ ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ metric = WaveHedgesDistance()
+ metric.attach(default_evaluator, 'whd')
+ y_true = torch.tensor([0., 1., 2., 3., 4., 5.])
+ y_pred = y_true * 0.75
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['whd'])
+
+ .. testoutput::
+
+ 1.25...
+
+ .. versionchanged:: 0.4.5
+ - Works with DDP.
+ """
+
+ _state_dict_all_req_keys = ("_sum_of_errors",)
+
+ @reinit__is_reduced
+ def reset(self) -> None:
+ self._sum_of_errors = torch.tensor(0.0, device=self._device)
+
+ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
+ y_pred, y = output[0].detach(), output[1].detach()
+ errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)
+ self._sum_of_errors += torch.sum(errors).to(self._device)
+
+ @sync_all_reduce("_sum_of_errors")
+ def compute(self) -> float:
+ return self._sum_of_errors.item()
diff --git a/ignite/metrics/roc_auc.py b/ignite/metrics/roc_auc.py
new file mode 100644
index 00000000000..a4ff51a09a9
--- /dev/null
+++ b/ignite/metrics/roc_auc.py
@@ -0,0 +1,194 @@
+from typing import Any, Callable, cast, Tuple, Union
+
+import torch
+
+from ignite import distributed as idist
+from ignite.exceptions import NotComputableError
+from ignite.metrics.epoch_metric import EpochMetric
+
+
+def roc_auc_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
+ from sklearn.metrics import roc_auc_score
+
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
+ return roc_auc_score(y_true, y_pred)
+
+
+def roc_auc_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
+ from sklearn.metrics import roc_curve
+
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
+ return roc_curve(y_true, y_pred)
+
+
+class ROC_AUC(EpochMetric):
+ """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC)
+ accumulating predictions and the ground-truth during an epoch and applying
+ `sklearn.metrics.roc_auc_score `_ .
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+        check_compute_fn: Default False. If True, `sklearn.metrics.roc_auc_score
+            <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ is run on the first batch of data to ensure there are
+            no issues. The user will be warned if there are any issues computing the function.
+ device: optional device specification for internal storage.
+
+ Note:
+
+        ROC_AUC expects y to consist of 0's and 1's. y_pred must be either probability estimates or confidence
+        values. To apply an activation to y_pred, use output_transform as shown below:
+
+ .. code-block:: python
+
+ def sigmoid_output_transform(output):
+ y_pred, y = output
+ y_pred = torch.sigmoid(y_pred)
+ return y_pred, y
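+            # the sigmoid maps raw logits to probabilities before ROC_AUC accumulates them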
+            roc_auc = ROC_AUC(sigmoid_output_transform)
+
+ Examples:
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+ roc_auc = ROC_AUC()
+            # The ``output_transform`` arg of the metric can be used to apply a sigmoid to ``y_pred``.
+ roc_auc.attach(default_evaluator, 'roc_auc')
+ y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
+ y_true = torch.tensor([[0], [0], [1], [0]])
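+            # the single positive (0.7109) outranks two of the three negatives, so ROC AUC = 2/3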
+ state = default_evaluator.run([[y_pred, y_true]])
+ print(state.metrics['roc_auc'])
+
+ .. testoutput::
+
+ 0.6666...
+ """
+
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ):
+ try:
+ from sklearn.metrics import roc_auc_score # noqa: F401
+ except ImportError:
+ raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
+
+ super(ROC_AUC, self).__init__(
+ roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device
+ )
+
+
+class RocCurve(EpochMetric):
+ """Compute Receiver operating characteristic (ROC) for binary classification task
+ by accumulating predictions and the ground-truth during an epoch and applying
+ `sklearn.metrics.roc_curve `_ .
+
+ Args:
+ output_transform: a callable that is used to transform the
+ :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
+ form expected by the metric. This can be useful if, for example, you have a multi-output model and
+ you want to compute the metric with respect to one of the outputs.
+        check_compute_fn: Default False. If True, `sklearn.metrics.roc_curve
+            <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html#sklearn.metrics.roc_curve>`_ is run on the first batch of data to ensure there are
+            no issues. The user will be warned if there are any issues computing the function.
+ device: optional device specification for internal storage.
+
+ Note:
+        RocCurve expects y to consist of 0's and 1's. y_pred must be either probability estimates or confidence
+        values. To apply an activation to y_pred, use output_transform as shown below:
+
+ .. code-block:: python
+
+ def sigmoid_output_transform(output):
+ y_pred, y = output
+ y_pred = torch.sigmoid(y_pred)
+ return y_pred, y
+            roc_curve = RocCurve(sigmoid_output_transform)
+
+ Examples:
+
+ .. include:: defaults.rst
+ :start-after: :orphan:
+
+ .. testcode::
+
+            roc_curve = RocCurve()
+            # The ``output_transform`` arg of the metric can be used to apply a sigmoid to ``y_pred``.
+            roc_curve.attach(default_evaluator, 'roc_curve')
+            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+            y_true = torch.tensor([0, 0, 1, 0])
+            state = default_evaluator.run([[y_pred, y_true]])
+            print("FPR", [round(i, 3) for i in state.metrics['roc_curve'][0].tolist()])
+            print("TPR", [round(i, 3) for i in state.metrics['roc_curve'][1].tolist()])
+            print("Thresholds", [round(i, 3) for i in state.metrics['roc_curve'][2].tolist()])
+
+ .. testoutput::
+
+ FPR [0.0, 0.333, 0.333, 1.0]
+ TPR [0.0, 0.0, 1.0, 1.0]
+ Thresholds [inf, 1.0, 0.711, 0.047]
+
+ .. versionchanged:: 0.4.11
+        added ``device`` argument
+ """
+
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ) -> None:
+ try:
+ from sklearn.metrics import roc_curve # noqa: F401
+ except ImportError:
+ raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
+
+ super(RocCurve, self).__init__(
+ roc_auc_curve_compute_fn, # type: ignore[arg-type]
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
+ )
+
+ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore[override]
+ if len(self._predictions) < 1 or len(self._targets) < 1:
+ raise NotComputableError("RocCurve must have at least one example before it can be computed.")
+
+ _prediction_tensor = torch.cat(self._predictions, dim=0)
+ _target_tensor = torch.cat(self._targets, dim=0)
+
+ ws = idist.get_world_size()
+ if ws > 1:
+ # All gather across all processes
+ _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
+ _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
+
+ if idist.get_rank() == 0:
+ # Run compute_fn on zero rank only
+ fpr, tpr, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
+ fpr = torch.tensor(fpr, device=_prediction_tensor.device)
+ tpr = torch.tensor(tpr, device=_prediction_tensor.device)
+ thresholds = torch.tensor(thresholds, device=_prediction_tensor.device)
+ else:
+ fpr, tpr, thresholds = None, None, None
+
+ if ws > 1:
+ # broadcast result to all processes
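+            # safe_mode=True lets the non-zero ranks pass their None placeholders and still receive the tensors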
+ fpr = idist.broadcast(fpr, src=0, safe_mode=True)
+ tpr = idist.broadcast(tpr, src=0, safe_mode=True)
+ thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
+
+ return fpr, tpr, thresholds
diff --git a/tests/ignite/contrib/handlers/test_warnings_of_deprecation.py b/tests/ignite/contrib/handlers/test_warnings_of_deprecation_of_handlers.py
similarity index 100%
rename from tests/ignite/contrib/handlers/test_warnings_of_deprecation.py
rename to tests/ignite/contrib/handlers/test_warnings_of_deprecation_of_handlers.py
diff --git a/tests/ignite/contrib/metrics/regression/__init__.py b/tests/ignite/contrib/metrics/regression/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/tests/ignite/contrib/metrics/test_warnings_of_deprecation_of_metrics.py b/tests/ignite/contrib/metrics/test_warnings_of_deprecation_of_metrics.py
new file mode 100644
index 00000000000..45b4667d22c
--- /dev/null
+++ b/tests/ignite/contrib/metrics/test_warnings_of_deprecation_of_metrics.py
@@ -0,0 +1,35 @@
+from importlib import __import__
+
+import pytest
+
+
+@pytest.mark.parametrize(
+ "log_module,fromlist",
+ [
+ ("average_precision", ["AveragePrecision"]),
+ ("cohen_kappa", ["CohenKappa"]),
+ ("gpu_info", ["GpuInfo"]),
+ ("precision_recall_curve", ["PrecisionRecallCurve"]),
+ ("roc_auc", ["ROC_AUC", "RocCurve"]),
+ ("regression.canberra_metric", ["CanberraMetric"]),
+ ("regression.fractional_absolute_error", ["FractionalAbsoluteError"]),
+ ("regression.fractional_bias", ["FractionalBias"]),
+ ("regression.geometric_mean_absolute_error", ["GeometricMeanAbsoluteError"]),
+ ("regression.geometric_mean_relative_absolute_error", ["GeometricMeanRelativeAbsoluteError"]),
+ ("regression.manhattan_distance", ["ManhattanDistance"]),
+ ("regression.maximum_absolute_error", ["MaximumAbsoluteError"]),
+ ("regression.mean_absolute_relative_error", ["MeanAbsoluteRelativeError"]),
+ ("regression.mean_error", ["MeanError"]),
+ ("regression.mean_normalized_bias", ["MeanNormalizedBias"]),
+ ("regression.median_absolute_error", ["MedianAbsoluteError"]),
+ ("regression.median_absolute_percentage_error", ["MedianAbsolutePercentageError"]),
+ ("regression.median_relative_absolute_error", ["MedianRelativeAbsoluteError"]),
+ ("regression.r2_score", ["R2Score"]),
+ ("regression.wave_hedges_distance", ["WaveHedgesDistance"]),
+ ],
+)
+def test_imports(log_module, fromlist):
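+    # importing through the old ignite.contrib.metrics path must emit the scheduled-removal warning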
+ with pytest.warns(DeprecationWarning, match="will be removed in version 0.6.0"):
+ imported = __import__(f"ignite.contrib.metrics.{log_module}", globals(), locals(), fromlist)
+ for attr in fromlist:
+ getattr(imported, attr)
diff --git a/tests/ignite/contrib/metrics/__init__.py b/tests/ignite/metrics/regression/__init__.py
similarity index 100%
rename from tests/ignite/contrib/metrics/__init__.py
rename to tests/ignite/metrics/regression/__init__.py
diff --git a/tests/ignite/contrib/metrics/regression/test__base.py b/tests/ignite/metrics/regression/test__base.py
similarity index 96%
rename from tests/ignite/contrib/metrics/regression/test__base.py
rename to tests/ignite/metrics/regression/test__base.py
index ca7c71a79e6..8caaf0f9b51 100644
--- a/tests/ignite/contrib/metrics/regression/test__base.py
+++ b/tests/ignite/metrics/regression/test__base.py
@@ -7,7 +7,7 @@
import ignite.distributed as idist
-from ignite.contrib.metrics.regression._base import _BaseRegression, _torch_median
+from ignite.metrics.regression._base import _BaseRegression, _torch_median
def test_base_regression_shapes():
diff --git a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py b/tests/ignite/metrics/regression/test_canberra_metric.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_canberra_metric.py
rename to tests/ignite/metrics/regression/test_canberra_metric.py
index 93e2546aa82..eaaee884768 100644
--- a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py
+++ b/tests/ignite/metrics/regression/test_canberra_metric.py
@@ -6,8 +6,8 @@
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import CanberraMetric
from ignite.engine import Engine
+from ignite.metrics.regression import CanberraMetric
def test_wrong_input_shapes():
diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py b/tests/ignite/metrics/regression/test_fractional_absolute_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py
rename to tests/ignite/metrics/regression/test_fractional_absolute_error.py
index ef9784697c5..c1c3b080576 100644
--- a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_fractional_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import FractionalAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import FractionalAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py b/tests/ignite/metrics/regression/test_fractional_bias.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_fractional_bias.py
rename to tests/ignite/metrics/regression/test_fractional_bias.py
index 105e7fe4aac..bf78d4870d5 100644
--- a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
+++ b/tests/ignite/metrics/regression/test_fractional_bias.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import FractionalBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import FractionalBias
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py b/tests/ignite/metrics/regression/test_geometric_mean_absolute_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py
rename to tests/ignite/metrics/regression/test_geometric_mean_absolute_error.py
index e9d6e42ccf7..05f023691a5 100644
--- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_geometric_mean_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import GeometricMeanAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import GeometricMeanAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py b/tests/ignite/metrics/regression/test_geometric_mean_relative_absolute_error.py
similarity index 98%
rename from tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py
rename to tests/ignite/metrics/regression/test_geometric_mean_relative_absolute_error.py
index ccc7c28de2a..9f7b14422e4 100644
--- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_geometric_mean_relative_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import GeometricMeanRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import GeometricMeanRelativeAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py b/tests/ignite/metrics/regression/test_manhattan_distance.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
rename to tests/ignite/metrics/regression/test_manhattan_distance.py
index 5b5090d9080..50bdf321164 100644
--- a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
+++ b/tests/ignite/metrics/regression/test_manhattan_distance.py
@@ -6,8 +6,8 @@
from sklearn.metrics import DistanceMetric
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import ManhattanDistance
from ignite.engine import Engine
+from ignite.metrics.regression import ManhattanDistance
def test_wrong_input_shapes():
diff --git a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py b/tests/ignite/metrics/regression/test_maximum_absolute_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py
rename to tests/ignite/metrics/regression/test_maximum_absolute_error.py
index fe6ba11bb00..e5e0fb4369f 100644
--- a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_maximum_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MaximumAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py b/tests/ignite/metrics/regression/test_mean_absolute_relative_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py
rename to tests/ignite/metrics/regression/test_mean_absolute_relative_error.py
index 81b1fbbbe05..25b39860ed5 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py
+++ b/tests/ignite/metrics/regression/test_mean_absolute_relative_error.py
@@ -6,9 +6,9 @@
from pytest import approx, raises
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MeanAbsoluteRelativeError
def test_wrong_input_shapes():
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_error.py b/tests/ignite/metrics/regression/test_mean_error.py
similarity index 98%
rename from tests/ignite/contrib/metrics/regression/test_mean_error.py
rename to tests/ignite/metrics/regression/test_mean_error.py
index 39f90f01183..e0e7fc97560 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_error.py
+++ b/tests/ignite/metrics/regression/test_mean_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MeanError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MeanError
def test_zero_sample():
@@ -120,7 +120,7 @@ def _test(metric_device):
np_len = len(np_y_pred)
np_ans = np_sum / np_len
- assert m.compute() == pytest.approx(np_ans)
+ assert m.compute() == pytest.approx(np_ans, rel=1e-5)
for i in range(3):
torch.manual_seed(10 + rank + i)
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py b/tests/ignite/metrics/regression/test_mean_normalized_bias.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py
rename to tests/ignite/metrics/regression/test_mean_normalized_bias.py
index 7177b01e8c1..66129ee7d59 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py
+++ b/tests/ignite/metrics/regression/test_mean_normalized_bias.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MeanNormalizedBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MeanNormalizedBias
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py b/tests/ignite/metrics/regression/test_median_absolute_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_median_absolute_error.py
rename to tests/ignite/metrics/regression/test_median_absolute_error.py
index 615d90fbeb1..7ea373e46a7 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_median_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MedianAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MedianAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py b/tests/ignite/metrics/regression/test_median_absolute_percentage_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py
rename to tests/ignite/metrics/regression/test_median_absolute_percentage_error.py
index 2973a28f193..fa8c549a507 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py
+++ b/tests/ignite/metrics/regression/test_median_absolute_percentage_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MedianAbsolutePercentageError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MedianAbsolutePercentageError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py b/tests/ignite/metrics/regression/test_median_relative_absolute_error.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py
rename to tests/ignite/metrics/regression/test_median_relative_absolute_error.py
index a43c46c307e..b2574632c15 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py
+++ b/tests/ignite/metrics/regression/test_median_relative_absolute_error.py
@@ -5,9 +5,9 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import MedianRelativeAbsoluteError
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import MedianRelativeAbsoluteError
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_r2_score.py b/tests/ignite/metrics/regression/test_r2_score.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_r2_score.py
rename to tests/ignite/metrics/regression/test_r2_score.py
index 86113c4b532..62ad0e14938 100644
--- a/tests/ignite/contrib/metrics/regression/test_r2_score.py
+++ b/tests/ignite/metrics/regression/test_r2_score.py
@@ -6,9 +6,9 @@
from sklearn.metrics import r2_score
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics.regression import R2Score
def test_zero_sample():
diff --git a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py b/tests/ignite/metrics/regression/test_wave_hedges_distance.py
similarity index 99%
rename from tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py
rename to tests/ignite/metrics/regression/test_wave_hedges_distance.py
index bb615adb086..376dd70b8c2 100644
--- a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py
+++ b/tests/ignite/metrics/regression/test_wave_hedges_distance.py
@@ -5,8 +5,8 @@
import torch
import ignite.distributed as idist
-from ignite.contrib.metrics.regression import WaveHedgesDistance
from ignite.engine import Engine
+from ignite.metrics.regression import WaveHedgesDistance
def test_wrong_input_shapes():
diff --git a/tests/ignite/contrib/metrics/test_average_precision.py b/tests/ignite/metrics/test_average_precision.py
similarity index 99%
rename from tests/ignite/contrib/metrics/test_average_precision.py
rename to tests/ignite/metrics/test_average_precision.py
index 7a943ae855e..ae9a80a35d1 100644
--- a/tests/ignite/contrib/metrics/test_average_precision.py
+++ b/tests/ignite/metrics/test_average_precision.py
@@ -7,9 +7,9 @@
from sklearn.metrics import average_precision_score
import ignite.distributed as idist
-from ignite.contrib.metrics import AveragePrecision
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics import AveragePrecision
torch.manual_seed(12)
diff --git a/tests/ignite/contrib/metrics/test_cohen_kappa.py b/tests/ignite/metrics/test_cohen_kappa.py
similarity index 99%
rename from tests/ignite/contrib/metrics/test_cohen_kappa.py
rename to tests/ignite/metrics/test_cohen_kappa.py
index fa73a84cdfa..beb25585dc7 100644
--- a/tests/ignite/contrib/metrics/test_cohen_kappa.py
+++ b/tests/ignite/metrics/test_cohen_kappa.py
@@ -7,9 +7,9 @@
from sklearn.metrics import cohen_kappa_score
import ignite.distributed as idist
-from ignite.contrib.metrics import CohenKappa
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics import CohenKappa
torch.manual_seed(12)
diff --git a/tests/ignite/contrib/metrics/test_gpu_info.py b/tests/ignite/metrics/test_gpu_info.py
similarity index 97%
rename from tests/ignite/contrib/metrics/test_gpu_info.py
rename to tests/ignite/metrics/test_gpu_info.py
index cdfbc20fe5a..c2a790e6613 100644
--- a/tests/ignite/contrib/metrics/test_gpu_info.py
+++ b/tests/ignite/metrics/test_gpu_info.py
@@ -3,9 +3,10 @@
import pytest
import torch
-from ignite.contrib.metrics import GpuInfo
from ignite.engine import Engine, State
+from ignite.metrics import GpuInfo
+
def test_no_pynvml_package():
with patch.dict("sys.modules", {"pynvml.smi": None}):
@@ -93,7 +94,7 @@ def getInstance():
@pytest.fixture
def mock_gpu_is_available():
- with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda:
+ with patch("ignite.metrics.gpu_info.torch.cuda") as mock_cuda:
mock_cuda.is_available.return_value = True
yield mock_cuda
diff --git a/tests/ignite/contrib/metrics/test_precision_recall_curve.py b/tests/ignite/metrics/test_precision_recall_curve.py
similarity index 99%
rename from tests/ignite/contrib/metrics/test_precision_recall_curve.py
rename to tests/ignite/metrics/test_precision_recall_curve.py
index 1eaf8ddc8b3..bc7770e9e2b 100644
--- a/tests/ignite/contrib/metrics/test_precision_recall_curve.py
+++ b/tests/ignite/metrics/test_precision_recall_curve.py
@@ -9,9 +9,9 @@
from sklearn.metrics import precision_recall_curve
import ignite.distributed as idist
-from ignite.contrib.metrics.precision_recall_curve import PrecisionRecallCurve
from ignite.engine import Engine
from ignite.metrics.epoch_metric import EpochMetricWarning
+from ignite.metrics.precision_recall_curve import PrecisionRecallCurve
@pytest.fixture()
diff --git a/tests/ignite/contrib/metrics/test_roc_auc.py b/tests/ignite/metrics/test_roc_auc.py
similarity index 99%
rename from tests/ignite/contrib/metrics/test_roc_auc.py
rename to tests/ignite/metrics/test_roc_auc.py
index dcc14aaba30..1e60c480ca1 100644
--- a/tests/ignite/contrib/metrics/test_roc_auc.py
+++ b/tests/ignite/metrics/test_roc_auc.py
@@ -7,9 +7,9 @@
from sklearn.metrics import roc_auc_score
import ignite.distributed as idist
-from ignite.contrib.metrics import ROC_AUC
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
+from ignite.metrics import ROC_AUC
from ignite.metrics.epoch_metric import EpochMetricWarning
torch.manual_seed(12)
diff --git a/tests/ignite/contrib/metrics/test_roc_curve.py b/tests/ignite/metrics/test_roc_curve.py
similarity index 99%
rename from tests/ignite/contrib/metrics/test_roc_curve.py
rename to tests/ignite/metrics/test_roc_curve.py
index ba243b1712d..39198497a6f 100644
--- a/tests/ignite/contrib/metrics/test_roc_curve.py
+++ b/tests/ignite/metrics/test_roc_curve.py
@@ -7,10 +7,10 @@
from sklearn.metrics import roc_curve
from ignite import distributed as idist
-from ignite.contrib.metrics.roc_auc import RocCurve
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
+from ignite.metrics.roc_auc import RocCurve
def test_wrong_setup():