diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/404.html b/404.html new file mode 100644 index 000000000..bd5db3083 --- /dev/null +++ b/404.html @@ -0,0 +1,14 @@ + + + + + +Page Not Found | CyclOps + + + + + +

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

+ +
\ No newline at end of file
diff --git a/api/.buildinfo b/api/.buildinfo
new file mode 100644
index 000000000..969bee2be
--- /dev/null
+++ b/api/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 67b59008c58998f711ccce32ad07bc46
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/api/_modules/cyclops/data/aggregate.html b/api/_modules/cyclops/data/aggregate.html
new file mode 100644
index 000000000..e0be3dbb1
--- /dev/null
+++ b/api/_modules/cyclops/data/aggregate.html
@@ -0,0 +1,1167 @@
+cyclops.data.aggregate - cyclops documentation

Source code for cyclops.data.aggregate

+"""Aggregation functions."""
+
+import logging
+from collections import OrderedDict
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pandas as pd
+from sklearn.base import TransformerMixin
+
+from cyclops.data.clean import dropna_rows
+from cyclops.data.constants import ALL, FIRST, LAST
+from cyclops.data.df.vectorized import Vectorized
+from cyclops.data.impute import AggregatedImputer, numpy_2d_ffill
+from cyclops.data.utils import has_columns, is_timestamp_series
+from cyclops.utils.common import to_list, to_list_optional
+from cyclops.utils.log import setup_logging
+from cyclops.utils.profile import time_function
+
+
+# Logging.
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="INFO", logger=LOGGER)
+
+
+RESTRICT_TIMESTAMP = "restrict_timestamp"
+WINDOW_START_TIMESTAMP = "window_start_timestamp"
+WINDOW_STOP_TIMESTAMP = "window_stop_timestamp"
+START_TIMESTEP = "start_timestep"
+TIMESTEP = "timestep"
+
+
+
+[docs]
+class Aggregator(TransformerMixin):  # type: ignore
+    """Equal-spaced aggregation, or binning, of time-series data.
+
+    Computing aggregation metadata is expensive and should be done sparingly.
+
+    Attributes
+    ----------
+    aggfuncs: dict
+        Aggregation functions mapped from column to aggregation type.
+        Each value is either a function or a string, e.g., {col_name: MEAN}.
+        If a function, it should accept a series and return a single value.
+    timestamp_col: str
+        Name of the timestamp column in the data provided.
+    time_by: list of str
+        Name of columns by which to group to determine the bucket times.
+    agg_by: list of str
+        Name of columns by which to group to perform aggregation.
+    timestep_size: float
+        Time in hours for a single timestep, or bin.
+    window_duration: float or None
+        Time in hours for the aggregation window. If None, the latest timestamp
+        for each time_by group is used as the window stop time.
+    window_start_time: pd.DataFrame or None
+        An optionally provided window start time for each time_by group.
+    window_stop_time: pd.DataFrame or None
+        An optionally provided window stop time for each time_by group.
+    agg_meta_for: list of str or None
+        Columns for which to compute aggregation metadata.
+    window_times: pd.DataFrame or None
+        The start/stop time windows used to aggregate the data.
+    imputer: AggregatedImputer or None
+        An imputer to perform imputation during aggregation.
+    num_timesteps: int or None
+        The number of timesteps in the aggregation window.
+
+    Notes
+    -----
+    aggfuncs is a dictionary of aggregation functions mapped from column to
+    aggregation type. Each value is either a function or a string, e.g.,
+    {col_name: MEAN}. If a function, it should accept a series and return a
+    single value. If a string, it should be one of the following:
+
+    - ``mean``
+    - ``median``
+    - ``std``
+    - ``var``
+    - ``min``
+    - ``max``
+    - ``count``
+    - ``sum``
+
+    """
+
+[docs]
+    def __init__(
+        self,
+        aggfuncs: Dict[str, Union[str, Callable[[pd.Series], Any]]],
+        timestamp_col: str,
+        time_by: Union[str, List[str]],
+        agg_by: Union[str, List[str]],
+        timestep_size: Optional[int] = None,
+        window_duration: Optional[int] = None,
+        window_start_time: Optional[pd.DataFrame] = None,
+        window_stop_time: Optional[pd.DataFrame] = None,
+        imputer: Optional[AggregatedImputer] = None,
+        agg_meta_for: Optional[List[str]] = None,
+    ):
+        """Init."""
+        if agg_meta_for is not None:
+            LOGGER.warning("Calculation of aggregation metadata slows aggregation.")
+        self.aggfuncs = self._process_aggfuncs(aggfuncs)
+        self.timestamp_col = timestamp_col
+        self.time_by = to_list(time_by)
+        self.agg_by = to_list(agg_by)
+        self.agg_meta_for = to_list_optional(agg_meta_for)
+        self.timestep_size = timestep_size
+        self.window_duration = window_duration
+        self.window_start_time = window_start_time
+        self.window_stop_time = window_stop_time
+        self.window_times = pd.DataFrame()  # Calculated when given the data
+        self.imputer = imputer
+        # Parameter checking
+        if self.agg_meta_for is not None and not set(self.agg_meta_for).issubset(
+            set(self.aggfuncs),
+        ):
+            raise ValueError(
+                "Cannot compute meta for a column not being aggregated.",
+            )
+        if window_duration is not None and timestep_size is not None:
+            divided = window_duration / timestep_size
+            self.num_timesteps = int(divided)
+            if divided != int(divided):
+                raise ValueError("Window duration must be divisible by timestep size.")
+        elif timestep_size is not None:
+            self.num_timesteps = None  # type: ignore
+        else:
+            self.num_timesteps = 1
+
+
+    def _process_aggfuncs(
+        self,
+        aggfuncs: Dict[str, Union[str, Callable[[pd.Series], Any]]],
+    ) -> Dict[str, Any]:
+        """Process aggregation functions for respective columns.
+
+        Given a dict of values as functions or strings, convert a string to an
+        aggfunc if recognized. Otherwise, simply return the functions.
+
+        Returns
+        -------
+        dict
+            The processed aggregation function dictionary.
+
+        """
+        for _, aggfunc in aggfuncs.items():
+            if isinstance(aggfunc, str) or callable(aggfunc):
+                pass
+            else:
+                raise ValueError("Aggfunc must be a string or callable.")
+
+        return OrderedDict(aggfuncs)
+
+    def _check_start_stop_window_ts(self, window_time: pd.DataFrame) -> None:
+        """Check whether a window start/stop time has the correct format.
+
+        Parameters
+        ----------
+        window_time: pandas.DataFrame
+            Window start/stop time.
+
+        """
+        if not window_time.index.names == self.time_by:
+            raise ValueError(f"Window start/stop times must have index: {self.time_by}")
+        has_columns(
+            window_time,
+            [RESTRICT_TIMESTAMP],
+            exactly=True,
+            raise_error=True,
+        )
+
+    def _restrict_by_timestamp(self, data: pd.DataFrame) -> pd.DataFrame:
+        """Restrict events by the window start/stop times.
+
+        Parameters
+        ----------
+        data: pandas.DataFrame
+            Input data.
+
+        Returns
+        -------
+        pandas.DataFrame
+            The appropriately restricted data.
+
+        """
+        data = data.merge(self.window_times, on=self.time_by, how="left")
+        cond = (data[self.timestamp_col] >= data[WINDOW_START_TIMESTAMP]) & (
+            data[self.timestamp_col] < data[WINDOW_STOP_TIMESTAMP]
+        )
+        # Keep if no match was made (i.e., no restriction performed)
+        cond = cond | (data[self.timestamp_col].isnull())
+        return data[cond]
+
+    def _use_provided_window(
+        self,
+        window_time: pd.DataFrame,
+        default_time: pd.DataFrame,
+        warning_args: Tuple[str, str],
+    ) -> pd.DataFrame:
+        """Process a window start/stop time.
+
+        Parameters
+        ----------
+        window_time: pandas.DataFrame
+            The provided window start/stop time.
+        default_time: pandas.DataFrame
+            A default window start/stop time if the provided version is missing values.
+        warning_args: tuple
+            Tuple of strings used to format a warning message.
+
+        Returns
+        -------
+        pandas.DataFrame
+            The processed provided window start/stop time.
+
+        """
+        self._check_start_stop_window_ts(window_time)
+        index_missing = default_time.index.difference(window_time.index)
+        if len(index_missing) > 0:
+            LOGGER.warning(
+                (
+                    "Not all time_by groups have a specified window %s time. "
+                    "Defaulting missing to %s time."
+                ),
+                *warning_args,
+            )
+            # Default non-existent groups to the default (earliest/latest) time.
+            window_time = default_time.join(window_time)
+            inds = window_time[RESTRICT_TIMESTAMP].isna()
+            window_time.loc[inds, RESTRICT_TIMESTAMP] = window_time.loc[
+                inds, self.timestamp_col
+            ]
+            window_time = window_time.drop(self.timestamp_col, axis=1)
+
+        return window_time
+
+    def _compute_window_start(
+        self,
+        data: pd.DataFrame,
+        window_start_time: Optional[pd.DataFrame] = None,
+    ) -> pd.DataFrame:
+        """Compute the start timestamp for each time_by window.
+
+        Parameters
+        ----------
+        data: pandas.DataFrame
+            Data before aggregation.
+        window_start_time: pd.DataFrame, optional
+            An optionally provided window start time.
+
+        Returns
+        -------
+        pandas.DataFrame
+            Start timestamps for each time_by window.
+ + """ + # Take the earliest timestamp for each time_by group + earliest_time = ( + data[self.time_by + [self.timestamp_col]] + .groupby(self.time_by, sort=False) + .agg({self.timestamp_col: "min"}) + ) + if window_start_time is None: + # Use earliest times + earliest_time = earliest_time.rename( + {self.timestamp_col: RESTRICT_TIMESTAMP}, + axis=1, + ) + window_start_time = earliest_time + else: + # Use provided start - with earliest times acting as default + window_start_time = self._use_provided_window( + window_start_time, + earliest_time, + ("start", "earliest"), + ) + + self._check_start_stop_window_ts(window_start_time) + + return window_start_time + + def _compute_window_stop( + self, + data: pd.DataFrame, + window_start_time: pd.DataFrame, + window_stop_time: Optional[pd.DataFrame] = None, + ) -> pd.DataFrame: + """Compute the stop timestamp for each time_by window. + + Parameters + ---------- + data: pandas.DataFrame + Data before aggregation. + window_start_time: pd.DataFrame + The window start time, which is necessary to compute the + stop time when window_duration is set. + window_stop_time: pd.DataFrame, optional + An optionally provided window stop time. + + Returns + ------- + pandas.DataFrame + Stop timestamps for each time_by group. + + """ + # Use provided stop + if window_stop_time is not None and self.window_duration is not None: + raise ValueError( + "Cannot provide window_stop_time if window_duration was set.", + ) + if self.window_duration is not None: + # Use window duration to compute the stop times for each group + window_stop_time = window_start_time.copy() + window_stop_time[RESTRICT_TIMESTAMP] += pd.Timedelta( + hours=self.window_duration, + ) + else: + # Take the latest timestamp for each time_by group + latest_time = ( + data[self.time_by + [self.timestamp_col]] + .groupby(self.time_by, sort=False) + .agg({self.timestamp_col: "max"}) + ) + if window_stop_time is None: + # Use latest times + latest_time = latest_time.rename( + {self.timestamp_col: RESTRICT_TIMESTAMP}, + axis=1, + ) + window_stop_time = latest_time + else: + # Use provided stop - with latest times acting as default + window_stop_time = self._use_provided_window( + window_stop_time, + latest_time, + ("stop", "latest"), + ) + self._check_start_stop_window_ts(window_stop_time) + + return window_stop_time + + def _compute_window_times( + self, + data: pd.DataFrame, + ) -> pd.DataFrame: + """Compute the start/stop timestamps for each time_by window. + + Parameters + ---------- + data: pandas.DataFrame + Data before aggregation. + + Returns + ------- + pandas.DataFrame + The start/stop timestamps for each time_by window. + + """ + # Compute window start time + window_start_time = self._compute_window_start( + data, + window_start_time=self.window_start_time, + ) + # Compute window stop time + window_stop_time = self._compute_window_stop( + data, + window_start_time, + window_stop_time=self.window_stop_time, + ) + # Combine and compute additional information + window_start_time = window_start_time.rename( + {RESTRICT_TIMESTAMP: WINDOW_START_TIMESTAMP}, + axis=1, + ) + window_stop_time = window_stop_time.rename( + {RESTRICT_TIMESTAMP: WINDOW_STOP_TIMESTAMP}, + axis=1, + ) + + return window_start_time.join(window_stop_time) + + def _compute_timestep(self, group: pd.DataFrame) -> pd.DataFrame: + """Compute which timestep, or bin, each occurrence falls into. + + Parameters + ---------- + group: pandas.DataFrame + A time_by group. 
+
+        Returns
+        -------
+        pandas.DataFrame
+            The inputted group with an additional TIMESTEP column.
+
+        """
+        loc = tuple(group[self.time_by].values[0])
+        start = self.window_times.loc[loc][WINDOW_START_TIMESTAMP]
+        group[TIMESTEP] = (group[self.timestamp_col] - start) / pd.Timedelta(
+            hours=self.timestep_size,
+        )
+        group[TIMESTEP] = group[TIMESTEP].astype("int")
+
+        return group
+
+    def _compute_agg_meta(self, group: pd.DataFrame) -> pd.DataFrame:
+        """Compute the aggregation metadata for an agg_by group.
+
+        Parameters
+        ----------
+        group: pandas.DataFrame
+            An agg_by group.
+
+        Returns
+        -------
+        pandas.DataFrame
+            The aggregation metadata information.
+
+        """
+        # Note: .count() returns the number of non-null values in the Series.
+        meta = group.agg(
+            {
+                col: [lambda x: x.count(), len]
+                for col in self.agg_meta_for  # type: ignore
+            },
+            dropna=False,
+        )
+        keep = []
+        for col in self.agg_meta_for:  # type: ignore
+            meta[col + "_count"] = meta[(col, "len")]
+            meta[col + "_null_fraction"] = 1 - (
+                meta[(col, "<lambda_0>")] / meta[(col, "len")]
+            )
+            keep.extend([col + "_count", col + "_null_fraction"])
+        meta = meta[keep]
+        meta.columns = meta.columns.droplevel(1)
+
+        return meta
+
+    def _compute_aggregation(self, group: pd.DataFrame) -> pd.DataFrame:
+        """Compute the aggregation for an agg_by group by timestep.
+
+        Parameters
+        ----------
+        group: pandas.DataFrame
+            An agg_by group.
+
+        Returns
+        -------
+        pandas.DataFrame
+            The aggregated group.
+
+        """
+        group = group.groupby(TIMESTEP, sort=False, dropna=False)
+        # Compute aggregation meta
+        if self.agg_meta_for is not None:
+            agg_meta = self._compute_agg_meta(group)
+        else:
+            agg_meta = None
+
+        if self.imputer is not None and self.imputer.intra is not None:
+            group = self.imputer.intra(group)
+        group = group.agg(self.aggfuncs)
+        # Include aggregation meta
+        if agg_meta is not None:
+            group = group.join(agg_meta)
+
+        return group
+
+    def _aggregate(
+        self,
+        data: pd.DataFrame,
+        include_timestep_start: bool = True,
+    ) -> pd.DataFrame:
+        # Get the timestep according to the timestamp for each event
+        data_with_timesteps = data.groupby(
+            self.time_by,
+            sort=False,
+            group_keys=False,
+        ).apply(self._compute_timestep)
+        # Aggregate
+        has_intra_imputer = True
+        if self.imputer is None or self.imputer.intra is None:
+            has_intra_imputer = False
+        if self.agg_meta_for is None and not has_intra_imputer:
+            # EFFICIENT - Can perform if no imputation or metadata calculation is done
+            grouped = data_with_timesteps.groupby(self.agg_by + [TIMESTEP], sort=False)
+            aggregated = grouped.agg(self.aggfuncs)
+        else:
+            # INEFFICIENT - Perform with a custom function to allow added functionality
+            grouped = data_with_timesteps.groupby(self.agg_by, sort=False)
+            aggregated = grouped.apply(self._compute_aggregation)
+        if not include_timestep_start:
+            return aggregated
+        # Get the start timestamp for each timestep
+        aggregated = aggregated.reset_index().set_index(self.time_by)
+        aggregated = aggregated.join(self.window_times[WINDOW_START_TIMESTAMP])
+        aggregated[START_TIMESTEP] = aggregated[
+            WINDOW_START_TIMESTAMP
+        ] + pd.to_timedelta(
+            aggregated[TIMESTEP] * self.timestep_size,
+            unit="h",
+        )
+        aggregated = aggregated.drop(WINDOW_START_TIMESTAMP, axis=1)
+        aggregated = aggregated.reset_index()
+
+        return aggregated.set_index(self.agg_by + [TIMESTEP])
+
+    @time_function
+    def vectorize(self, aggregated: pd.DataFrame) -> Vectorized:
+        """Vectorize aggregated data.
+
+        Parameters
+        ----------
+        aggregated: pandas.DataFrame
+            Aggregated data.
+
+        Returns
+        -------
+        Vectorized
+            Vectorized aggregated data of shape:
+            (# of aggfuncs, *# of unique in each agg_by, window_duration/timestep_size)
+
+        """
+        if self.window_duration is None:
+            raise NotImplementedError(
+                "Cannot currently vectorize data aggregated with no window duration.",
+            )
+        if self.timestep_size is None:
+            raise NotImplementedError(
+                "Cannot currently vectorize data aggregated with no timestep size.",
+            )
+        num_timesteps = int(self.window_duration / self.timestep_size)
+        # Parameter checking
+        has_columns(aggregated, list(self.aggfuncs.keys()), raise_error=True)
+        if not aggregated.index.names == self.agg_by + [TIMESTEP]:
+            raise ValueError(f"Index must be: {self.agg_by + [TIMESTEP]}.")
+
+        # Reindex to add missing groups/timesteps
+        index = self.agg_by + [TIMESTEP]
+        aggregated = aggregated.reset_index().set_index(index)
+        idx = pd.MultiIndex.from_product(
+            [aggregated.index.levels[i] for i in range(len(self.agg_by))]
+            + [range(num_timesteps)],
+            names=index,
+        )
+        vectorized = aggregated.reindex(idx)
+        # Calculate new shape and indexes
+        shape = [
+            len(vectorized.index.levels[i]) for i in range(len(vectorized.index.levels))
+        ]
+        indexes = [list(self.aggfuncs.keys())]
+        indexes.extend([ind.values for ind in vectorized.index.levels])
+        # Reshape and vectorize
+        vectorized = np.stack(
+            [vectorized[aggfunc].values.reshape(shape) for aggfunc in self.aggfuncs],
+        )
+
+        return Vectorized(
+            data=vectorized,
+            indexes=indexes,
+            axis_names=["aggfuncs"] + self.agg_by + [TIMESTEP],
+        )
+
+[docs] + def fit( + self, + data: pd.DataFrame, + ) -> None: + """Fit the aggregator. + + Parameters + ---------- + data: pandas.DataFrame + Input data. + + """ + # Parameter checking + if not isinstance(data, pd.DataFrame): + raise ValueError("Data to aggregate must be a DataFrame.") + self.window_times = self._compute_window_times( + data, + )
+ + +
+[docs] + def transform( + self, + data: pd.DataFrame, + y: None = None, + include_timestep_start: bool = True, + ) -> pd.DataFrame: + """Transform the data by aggregating. + + Parameters + ---------- + data: pandas.DataFrame + Input data. + y: None + Placeholder for sklearn compatibility. + include_timestep_start: bool, default = True + Whether to include the window start timestamps for each timestep. + + Returns + ------- + pandas.DataFrame + The aggregated data. + + """ + has_columns( + data, + list(set([self.timestamp_col] + self.time_by + self.agg_by)), + raise_error=True, + ) + if has_columns(data, TIMESTEP): + raise ValueError(f"Input data cannot have a column called {TIMESTEP}.") + # Ensure the timestamp column is a timestamp. Drop null times (NaT). + is_timestamp_series(data[self.timestamp_col], raise_error=True) + data = dropna_rows(data, self.timestamp_col) + # Restrict the data according to the start/stop + data = self._restrict_by_timestamp(data) + grouped = data.groupby(self.agg_by, sort=False) + + if self.num_timesteps == 1: + return grouped.agg(self.aggfuncs) + if self.num_timesteps is None or self.num_timesteps > 1: + return self._aggregate(data, include_timestep_start=include_timestep_start) + + raise ValueError("num_timesteps must be greater than 0.")
+ + +
+[docs] + def fit_transform( + self, + data: pd.DataFrame, + ) -> pd.DataFrame: + """Fit the aggregator and transform the data by aggregating. + + Parameters + ---------- + data: pandas.DataFrame + Input data. + + Returns + ------- + pandas.DataFrame + The aggregated data. + + """ + self.fit(data) + + return self.transform(data)
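A minimal usage sketch of the class above. The events DataFrame and its column names (encounter_id, event_name, event_value, event_ts) are hypothetical, chosen only to illustrate the constructor parameters:

import pandas as pd

from cyclops.data.aggregate import Aggregator

events = pd.DataFrame(
    {
        "encounter_id": [1, 1, 1, 2],
        "event_name": ["hr", "hr", "hr", "hr"],
        "event_value": [80.0, 84.0, 90.0, 70.0],
        "event_ts": pd.to_datetime(
            ["2023-01-01 00:30", "2023-01-01 01:10", "2023-01-01 02:20", "2023-01-02 09:00"]
        ),
    }
)
# Bin events into 1-hour timesteps over a 24-hour window per encounter,
# taking the mean of values that fall into the same bin.
aggregator = Aggregator(
    aggfuncs={"event_value": "mean"},
    timestamp_col="event_ts",
    time_by="encounter_id",
    agg_by=["encounter_id", "event_name"],
    timestep_size=1,
    window_duration=24,
)
aggregated = aggregator.fit_transform(events)  # indexed by (encounter_id, event_name, timestep)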
+
+ + + +
+[docs]
+def tabular_as_aggregated(
+    tab: pd.DataFrame,
+    index: str,
+    var_name: str,
+    value_name: str,
+    strategy: str = ALL,
+    num_timesteps: Optional[int] = None,
+    sort: bool = True,
+) -> pd.DataFrame:
+    """Pose tabular (static, non-timeseries) data as timeseries data.
+
+    Parameters
+    ----------
+    tab: pd.DataFrame
+        Tabular data.
+    index: str
+        Index column name.
+    var_name: str
+        The name of the resultant column containing the original tabular column names.
+    value_name: str
+        The name of the resultant column containing the tabular values.
+    strategy: str
+        Strategy to fake aggregation. E.g., FIRST sets a first timestep to the value,
+        LAST sets the last timestep to the value, and ALL sets all timesteps to
+        the value.
+    num_timesteps: int, optional
+        The max number of timesteps in the aggregation. This is required by strategies
+        such as LAST and ALL.
+    sort: bool, default = True
+        Whether to sort the resultant index.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Tabular data processed as if it is aggregated temporal data.
+
+    """
+    supported = [FIRST, LAST, ALL]
+    if strategy not in supported:
+        raise ValueError(
+            f"Strategy not recognized. Must be in: {', '.join(supported)}.",
+        )
+    if num_timesteps is None and strategy in [LAST, ALL]:
+        raise ValueError("Must specify num_timesteps for this strategy.")
+    tab = tab.set_index(index)
+    tab = tab.melt(var_name=var_name, value_name=value_name, ignore_index=False)
+    tab = tab.reset_index()
+    # Set value in the first timestep
+    if strategy == FIRST:
+        tab[TIMESTEP] = 0
+    # Set value in the last timestep
+    elif strategy == LAST:
+        assert num_timesteps is not None
+        tab[TIMESTEP] = num_timesteps - 1
+    # Repeat value across all timesteps
+    elif strategy == ALL:
+        assert num_timesteps is not None
+        tab = pd.concat(
+            [t.assign(**{TIMESTEP: i}) for i, t in enumerate([tab] * num_timesteps)],
+        )
+    tab = tab.set_index([index, var_name, TIMESTEP])
+    if sort:
+        return tab.sort_index()
+
+    return tab
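A short sketch of how this helper might be used: static features are melted and repeated across timesteps so they can sit alongside aggregated temporal data. The static DataFrame and its column names are hypothetical:

import pandas as pd

from cyclops.data.aggregate import tabular_as_aggregated
from cyclops.data.constants import ALL

static = pd.DataFrame({"encounter_id": [1, 2], "age": [34, 72], "sex": [0, 1]})
# Repeat each static value across 3 timesteps, indexed by
# (encounter_id, variable name, timestep).
fake_temporal = tabular_as_aggregated(
    static,
    index="encounter_id",
    var_name="event_name",
    value_name="event_value",
    strategy=ALL,
    num_timesteps=3,
)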
+ + + +
+[docs] +def timestamp_ffill_agg( + timesteps: pd.Series, + num_timesteps: int, + val: float = 1, + fill_nan: Optional[float] = None, +) -> np.typing.ArrayLike: + """Perform single-value aggregation with fill forward functionality given timesteps. + + If a timestep is negative, it is treated as occurring before the regular window and + is "filled forward" through all the timesteps. + + If a timestep is between 0 and num_timesteps, it is bucketed accordingly and then + forward filled. + + The timesteps can be nan. + + Parameters + ---------- + timesteps: pandas.Series + A series of integer timesteps + num_timesteps: int + The total number of timesteps to consider in the aggregation window. + val: float, default = 1 + The value with which to fill. + fill_nan: float, optional + Optionally fill any remaining nan with a value. + + Returns + ------- + numpy.ndarray + The filled forward aggregated data in the form of a 2-dimensional array. + + """ + shape = (len(timesteps), num_timesteps) + arr = np.empty(shape) + arr[:, :] = np.NaN + before = (timesteps < 0).values + after = (timesteps >= 0).values + # Predict 1 from beginning to end + arr[before] = val + # Predict 1 in a specific timestep + rows = np.where(after)[0] + cols = timesteps[after].values.astype(int) + before_end = cols < num_timesteps + rows = rows[before_end] + cols = cols[before_end] + arr[rows, cols] = val + arr = numpy_2d_ffill(arr) + if fill_nan is not None: + arr = np.nan_to_num(arr, nan=fill_nan) + + return arr
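A small sketch of the fill-forward semantics described in the docstring above; the expected rows in the comments follow from those rules:

import numpy as np
import pandas as pd

from cyclops.data.aggregate import timestamp_ffill_agg

timesteps = pd.Series([-1.0, 2.0, np.nan])
arr = timestamp_ffill_agg(timesteps, num_timesteps=4, fill_nan=0)
# Row 0 (timestep < 0): filled from the start -> [1, 1, 1, 1]
# Row 1 (timestep 2): set at bin 2, then forward filled -> [0, 0, 1, 1]
# Row 2 (NaN timestep): no event -> [0, 0, 0, 0]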
+ +
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/data/features/medical_image.html b/api/_modules/cyclops/data/features/medical_image.html
new file mode 100644
index 000000000..7a2fdabe2
--- /dev/null
+++ b/api/_modules/cyclops/data/features/medical_image.html
@@ -0,0 +1,693 @@
+cyclops.data.features.medical_image - cyclops documentation

Source code for cyclops.data.features.medical_image

+"""Medical image feature."""
+
+import os
+import tempfile
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+import pyarrow as pa
+from datasets import config
+from datasets.download.streaming_download_manager import xopen
+from datasets.features import Image, features
+from datasets.utils.file_utils import is_local_path
+from datasets.utils.py_utils import string_to_dict
+
+from cyclops.utils.optional import import_optional_module
+
+
+if TYPE_CHECKING:
+    from monai.data.image_reader import ImageReader
+    from monai.data.image_writer import ITKWriter
+    from monai.transforms.compose import Compose
+    from monai.transforms.io.array import LoadImage
+    from monai.transforms.utility.array import ToNumpy
+else:
+    ImageReader = import_optional_module(
+        "monai.data.image_reader",
+        attribute="ImageReader",
+        error="warn",
+    )
+    ITKWriter = import_optional_module(
+        "monai.data.image_writer",
+        attribute="ITKWriter",
+        error="warn",
+    )
+    Compose = import_optional_module(
+        "monai.transforms.compose",
+        attribute="Compose",
+        error="warn",
+    )
+    LoadImage = import_optional_module(
+        "monai.transforms.io.array",
+        attribute="LoadImage",
+        error="warn",
+    )
+    ToNumpy = import_optional_module(
+        "monai.transforms.utility.array",
+        attribute="ToNumpy",
+        error="warn",
+    )
+_monai_available = all(
+    module is not None
+    for module in (
+        ImageReader,
+        ITKWriter,
+        Compose,
+        LoadImage,
+        ToNumpy,
+    )
+)
+_monai_unavailable_message = (
+    "The MONAI library is required to use the `MedicalImage` feature. "
+    "Please install it with `pip install monai`."
+)
+
+
+
+[docs] +@dataclass +class MedicalImage(Image): # type: ignore + """Medical image `Feature` to read medical image files. + + Parameters + ---------- + reader : Union[str, ImageReader], optional, default="ITKReader" + The MONAI image reader to use. + suffix : str, optional, default=".jpg" + The suffix to use when decoding bytes to image. + decode : bool, optional, default=True + Whether to decode the image. If False, the image will be returned as a + dictionary in the format `{"path": image_path, "bytes": image_bytes}`. + id : str, optional, default=None + The id of the feature. + + """ + + reader: Union[str, ImageReader] = "ITKReader" + suffix: str = ".jpg" # used when decoding/encoding bytes to image + + _loader = None + if _monai_available: + _loader = Compose( + [ + LoadImage( + reader=reader, + simple_keys=True, + dtype=None, + image_only=False, + ), + ToNumpy(), + ], + ) + + # Automatically constructed + dtype: ClassVar[str] = "dict" + pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) + _type: str = field(default="MedicalImage", init=False, repr=False) + +
+[docs] + def encode_example( + self, + value: Union[str, Dict[str, Any], npt.NDArray[Any]], + ) -> Dict[str, Any]: + """Encode example into a format for Arrow. + + Parameters + ---------- + value : Union[str, dict, np.ndarray] + Data passed as input to MedicalImage feature. + + Returns + ------- + dict + The encoded example. + + """ + if isinstance(value, list): + value = np.asarray(value) + + if isinstance(value, str): + return {"path": value, "bytes": None} + + if isinstance(value, np.ndarray): + return _encode_ndarray(value, image_format=self.suffix) + + if "array" in value and "metadata" in value: + output_ext_ = self.suffix + metadata_ = value["metadata"] + filename = metadata_.get("filename_or_obj", None) + if filename is not None and filename != "": + output_ext_ = os.path.splitext(filename)[1] + return _encode_ndarray( + value["array"], + metadata=metadata_, + image_format=output_ext_, + ) + if value.get("path") is not None and os.path.isfile(value["path"]): + # we set "bytes": None to not duplicate the data + # if they're already available locally + return {"bytes": None, "path": value.get("path")} + if value.get("bytes") is not None or value.get("path") is not None: + # store the image bytes, and path is used to infer the image format + # using the file extension + return {"bytes": value.get("bytes"), "path": value.get("path")} + + raise ValueError( + "An image sample should have one of 'path' or 'bytes' " + f"but they are missing or None in {value}.", + )
+ + +
+[docs] + def decode_example( + self, + value: Dict[str, Any], + token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None, + ) -> Dict[str, Any]: + """Decode an example from the serialized version to the feature type version. + + Parameters + ---------- + value : dict + The serialized example. + token_per_repo_id : dict, optional + To access and decode image files from private repositories on the Hub, + you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). + + Returns + ------- + dict + The deserialized example as a dictionary in the format: + `{"array": np.ndarray, "metadata": dict}`. + + """ + if not self.decode: + raise RuntimeError( + "Decoding is disabled for this feature. " + "Please use `MedicalImage(decode=True)` instead.", + ) + + if token_per_repo_id is None: + token_per_repo_id = {} + + path, bytes_ = value["path"], value["bytes"] + if bytes_ is None: + if path is None: + raise ValueError( + "An image should have one of 'path' or 'bytes' but both are " + f"None in {value}.", + ) + + if is_local_path(path): + if self._loader is None: + raise RuntimeError(_monai_unavailable_message) + image, metadata = self._loader(path) + else: + source_url = path.split("::")[-1] + try: + repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)[ + "repo_id" + ] + use_auth_token = token_per_repo_id.get(repo_id) + except ValueError: + use_auth_token = None + with xopen( + path, + "rb", + use_auth_token=use_auth_token, + ) as file_obj, BytesIO(file_obj.read()) as buffer: + image, metadata = self._read_file_from_bytes(buffer) + metadata["filename_or_obj"] = path + + else: + with BytesIO(bytes_) as buffer: + image, metadata = self._read_file_from_bytes(buffer) + + return {"array": image, "metadata": metadata}
+ + + def _read_file_from_bytes( + self, + buffer: BytesIO, + ) -> Tuple[npt.NDArray[Any], Dict[str, Any]]: + """Read an image from bytes. + + Parameters + ---------- + buffer : BytesIO + BytesIO object containing image data as bytes. + + Returns + ------- + Tuple[np.ndarray, dict] + Image as numpy array and metadata as dictionary. + + """ + if self._loader is None: + raise RuntimeError(_monai_unavailable_message) + + # XXX: Can we avoid writing to disk? + with tempfile.NamedTemporaryFile(mode="wb", suffix=self.suffix) as fp: + fp.write(buffer.getvalue()) + fp.flush() + image, metadata = self._loader(fp.name) + metadata["filename_or_obj"] = "" + return image, metadata
+ + + +def _encode_ndarray( + array: npt.NDArray[Any], + metadata: Optional[Dict[str, Any]] = None, + image_format: str = ".png", +) -> Dict[str, Any]: + """Encode a numpy array or torch tensor as bytes. + + Parameters + ---------- + array : numpy.ndarray + Numpy array to encode. + metadata : dict, optional, default=None + Metadata dictionary. + image_format : str, optional, default=".png" + Output image format. + + Returns + ------- + dict + Dictionary containing the image bytes and path. + + """ + if not _monai_available: + raise RuntimeError(_monai_unavailable_message) + + if not image_format.startswith("."): + image_format = "." + image_format + + # TODO: find a way to avoid writing to disk + # TODO: figure out output dtype + + with tempfile.NamedTemporaryFile(mode="wb", suffix=image_format) as temp_file: + writer = ITKWriter(output_dtype=np.uint8) + writer.set_data_array(data_array=array, channel_dim=-1, squeeze_end_dims=False) + writer.set_metadata(meta_dict=metadata, resample=True) + writer.write(temp_file.name) + + temp_file.flush() + + # read tmp file into bytes + with open(temp_file.name, "rb") as f_obj: + temp_file_bytes = f_obj.read() + + return {"path": None, "bytes": temp_file_bytes} + + +# add the `MedicalImage` feature to the `features` module namespace +features.MedicalImage = MedicalImage +
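A sketch of attaching the feature to a Hugging Face dataset. The file path is hypothetical, and MONAI must be installed for decoding to work:

from datasets import Dataset, Features

from cyclops.data.features.medical_image import MedicalImage

features = Features({"image": MedicalImage(suffix=".nii.gz")})
ds = Dataset.from_dict({"image": ["scans/chest_001.nii.gz"]}, features=features)
sample = ds[0]["image"]  # decodes to {"array": np.ndarray, "metadata": dict}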
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/data/slicer.html b/api/_modules/cyclops/data/slicer.html
new file mode 100644
index 000000000..5a2bd5d34
--- /dev/null
+++ b/api/_modules/cyclops/data/slicer.html
@@ -0,0 +1,1385 @@
+cyclops.data.slicer - cyclops documentation

Source code for cyclops.data.slicer

+"""Functions and classes for creating subsets of Hugging Face datasets."""
+
+import copy
+import datetime
+import itertools
+import json
+from dataclasses import dataclass, field
+from functools import partial
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
+
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+from datasets.formatting.formatting import LazyBatch
+from dateutil.parser import parse
+from pyarrow import ArrowInvalid
+
+
+
+[docs]
+@dataclass
+class SliceSpec:
+    """Specifications for creating slices of a dataset.
+
+    Parameters
+    ----------
+    spec_list : List[Union[Dict[str, Any], List[Dict[str, Any]]]], default=[{}]
+        A list of slice specifications. Each specification is a dictionary mapping
+        a column name to a slice specification for that column. A slice specification
+        is a dictionary containing one or more of the following keys:
+
+        - `value`: The exact value of a column to select. This can be a single value
+          or a list of values. If a list of values is provided, the slice selects rows
+          where the column value is in the list. Time strings are supported (e.g.
+          `"2021-01-01 00:00:00"`).
+        - `min_value`: The minimum value of a column to select. Specifying the
+          `min_inclusive` key indicates whether to include the minimum value in the
+          range. This works for numerical and datetime columns. Time strings are
+          supported. The default value is set to -inf, if `max_value` is specified and
+          `min_value` is not.
+        - `min_inclusive`: Boolean value to indicate whether to include the `min_value`
+          in the range. If True, the slice selects rows where the value is greater than
+          or equal to the `min_value`. Defaults to True.
+        - `max_value`: The maximum value of a column to select. This works for numerical
+          and datetime columns. Specifying the `max_inclusive` key indicates whether to
+          include the maximum value in the range. Time strings are supported.
+          The default value is set to `inf`, if `min_value` is specified and `max_value`
+          is not.
+        - `max_inclusive`: Boolean value to indicate whether to include the `max_value`
+          in the range. If True, the slice selects rows where the value is less than or
+          equal to the `max_value`. Defaults to True.
+        - `year`: A single (numerical or string) value or list of values for selecting
+          rows in a datetime column where the year matches the value(s). Defaults to
+          None.
+        - `month`: A single (numerical) value or list of values between 1 and 12 for
+          selecting rows in a datetime column where the month matches the value(s).
+          Defaults to None.
+        - `day`: A single (numerical) value or list of values between 1 and 31 for
+          selecting rows in a datetime column where the day matches the value(s).
+          Defaults to None.
+        - `hour`: A single (numerical) value or list of values between 0 and 23 for
+          selecting rows in a datetime column where the hour matches the value(s).
+          Defaults to None.
+        - `negate`: A boolean flag indicating whether to negate the slice. If True, the
+          slice selects rows where the feature value does not match the specification.
+          Defaults to False.
+        - `keep_nulls`: A boolean flag indicating whether to keep rows where the value
+          is null. If used in conjunction with `negate`, the slice selects rows where
+          the value is not null. Can be used on its own. Defaults to False.
+    intersections : List[Tuple[int]], int, optional, default=None
+        An indication of slices to intersect. If a list of tuples is provided, the
+        tuples should contain the indices of the slices to intersect. If an integer is
+        provided, it will be passed as the argument `r` in `itertools.combinations`,
+        and all combinations of `r` slices will be intersected. The intersections are
+        created _before_ the slices are registered.
+    include_overall : bool, default=True
+        Whether to include an `overall` slice that selects all examples.
+    validate : bool, default=True
+        Whether to validate the column names in the slice specifications.
+ column_names : List[str], optional, default=None + List of column names in the dataset. If provided and `validate` is True, it is + used to validate the column names in the slice specifications. + + + Attributes + ---------- + spec_list : List[Union[Dict[str, Any], List[Dict[str, Any]]]] + List of slice specifications. + include_overall : bool + Whether to include an `overall` slice that selects all examples. + validate : bool + Whether to validate the column names in the slice specifications. + column_names : List[str] + List of column names in the dataset. + _registry : Dict[str, Callable] + Dictionary mapping slice names to functions that create the slice. + + Examples + -------- + >>> from cyclops.data.slicer import SliceSpec + >>> slice_spec = SliceSpec( + ... spec_list=[ + ... {"feature_1": {"keep_nulls": False}}, + ... { + ... "feature_2": {"keep_nulls": False}, + ... "feature_3": {"keep_nulls": False}, + ... }, + ... {"feature_1": {"value": "value_1"}}, + ... {"feature_1": {"value": ["value_1", "value_2"]}}, + ... {"feature_1": {"value": "value_1", "negate": True, "keep_nulls": True}}, + ... {"feature_1": {"min_value": "2020-01-01", "max_value": "2020-12-31"}}, + ... { + ... "feature_1": { + ... "min_value": 5, + ... "max_value": 60, + ... "min_inclusive": False, + ... "max_inclusive": False, + ... } + ... }, + ... {"feature_1": {"year": [2020, 2021, 2022]}}, + ... {"feature_1": {"month": [6, 7, 8]}}, + ... {"feature_1": {"month": 6, "day": 1}}, + ... {"feature_1": {"contains": "value_1"}}, + ... {"feature_1": {"contains": ["value_1", "value_2"]}}, + ... { + ... "feature_1": {"value": "value_1"}, + ... "feature_2": { + ... "min_value": "2020-01-01", + ... "keep_nulls": False, + ... }, + ... "feature_3": {"year": ["2000", "2010", "2020"]}, + ... }, + ... ], + ... ) + >>> for slice_name, slice_func in slice_spec.slices(): + ... print(slice_name) + ... # do something with slice_func here (e.g. dataset.filter(slice_func)) + feature_1:non_null + feature_2:non_null&feature_3:non_null + feature_1:value_1 + feature_1:value_1, value_2 + !(feature_1:value_1) + feature_1:[2020-01-01 - 2020-12-31] + feature_1:(5 - 60) + feature_1:year=[2020, 2021, 2022] + feature_1:month=[6, 7, 8] + feature_1:month=6, day=1 + feature_1:contains value_1 + feature_1:contains ['value_1', 'value_2'] + feature_1:value_1&feature_2:[2020-01-01 - inf]&feature_3:year=['2000', '2010', '2020'] + overall + + >>> # a different way to create intersections/compound slices + >>> slice_spec = SliceSpec( + ... spec_list=[ + ... {"feature_1": {"keep_nulls": False}}, + ... {"feature_2": {"keep_nulls": False}}, + ... ], + ... include_overall=False, + ... intersections=2, + ... ) + >>> for slice_name, slice_func in slice_spec.slices(): + ... print(slice_name) + ... # do something with slice_func here (e.g. 
dataset.filter(slice_func)) + feature_1:non_null + feature_2:non_null + feature_1:non_null&feature_2:non_null + + """ # noqa: W505 + + spec_list: List[Dict[str, Dict[str, Any]]] = field( + default_factory=lambda: [{}], + init=True, + repr=True, + hash=True, + compare=True, + ) + intersections: Optional[Union[List[Tuple[int, ...]], int]] = None + validate: bool = True + include_overall: bool = True + column_names: Optional[List[str]] = None + + _registry: Dict[str, Callable[[Dict[str, Any]], List[bool]]] = field( + default_factory=dict, + init=False, + repr=False, + hash=False, + compare=False, + ) + + def __post_init__(self) -> None: + """Create and register slice functions out of the slice specifications.""" + self.spec_list = copy.deepcopy(self.spec_list) + if self.intersections is not None: + self._create_intersections() + for slice_spec in self.spec_list: + self._parse_and_register_slice_specs(slice_spec) + + if self.include_overall: + self._registry["overall"] = overall + +
+[docs] + def add_slice_spec(self, slice_spec: Dict[str, Dict[str, Any]]) -> None: + """Add slice specification to the list of slice specifications. + + Parameters + ---------- + slice_spec : Dict[str, Dict[str, Any]] + A dictionary mapping column names to dictionaries containing one or more of + the following keys: `value`, `min_value`, `max_value`, `year`, `month`, + `day`, `hour`, `negate`, `keep_nulls`. See :class:`SliceSpec` for more + details on the slice specification format. + + """ + self._parse_and_register_slice_specs(slice_spec=slice_spec) + self.spec_list.append(slice_spec)
+ + +
+[docs] + def get_slices( + self, + ) -> Dict[str, Callable[[Dict[str, Any]], List[bool]]]: + """Return the slice function registry.""" + return self._registry
+ + +
+[docs] + def slices(self) -> Generator[Tuple[str, Callable[..., Any]], None, None]: + """Return a generator of slice names and slice functions.""" + for registration_key, slice_function in self._registry.items(): + yield registration_key, slice_function
+ + + def _create_intersections(self) -> None: + """Create intersections of slices.""" + intersect_list = [] + if isinstance(self.intersections, list) and isinstance( + self.intersections[0], tuple + ): + for intersection in self.intersections: + intersect_dict = {} + for index in set(intersection): + intersect_dict.update(self.spec_list[index]) + intersect_list.append(intersect_dict) + elif isinstance(self.intersections, int): + combinations = itertools.combinations(self.spec_list, self.intersections) + for combination in combinations: + intersect_dict = {} + for slice_ in combination: + intersect_dict.update(slice_) + intersect_list.append(intersect_dict) + else: + raise ValueError( + "Expected `intersections` to be a list of tuples or an integer. " + f"Got {self.intersections} instead.", + ) + self.spec_list.extend(intersect_list) + + # remove duplicates + seen = set() + result = [] + + for spec in self.spec_list: + spec_str = json.dumps(spec, sort_keys=True) + if spec_str not in seen: + seen.add(spec_str) + result.append(spec) + + seen.clear() + self.spec_list = result + + def _parse_and_register_slice_specs( + self, + slice_spec: Dict[str, Dict[str, Any]], + ) -> None: + """Construct and register a slice functions from slice specifications.""" + if not isinstance(slice_spec, dict): + raise TypeError( + f"Expected `slice_spec` to be a dictionary. Got {type(slice_spec)}", + ) + + if len(slice_spec) == 0: # empty dictionary. Interpret as `overall` slice + registration_key = "overall" + slice_function = overall + elif len(slice_spec) == 1: # slice on a single feature + registration_key, slice_function = self._parse_single_spec_dict(slice_spec) + else: # compound slicing (bitwise AND of component slices) + registration_key = "" + slice_functions = [] + for column_name, spec in slice_spec.items(): + sub_registration_key, slice_function = self._parse_single_spec_dict( + {column_name: spec}, + ) + slice_functions.append(slice_function) + registration_key += f"{sub_registration_key}&" + registration_key = registration_key[:-1] # remove trailing ampersand + + slice_function = partial(compound_filter, slice_functions=slice_functions) + + self._registry[registration_key] = slice_function + + def _parse_single_spec_dict( + self, + slice_spec: Dict[str, Dict[str, Any]], + ) -> Tuple[str, Callable[..., List[bool]]]: + """Return the registration key and slice function for a single slice spec.""" + column_name, spec = next(iter(slice_spec.items())) + + # validate column name and spec + self._check_column_names(column_names=column_name) + if not isinstance(spec, dict): + raise TypeError( + f"Expected feature value to be a dictionary. Got {type(spec)} ", + ) + + if "value" in spec: # filter on exact value + substring: Union[Any, List[Any]] = spec["value"] + negated: bool = spec.get("negate", False) + + registration_key = f"{column_name}:{substring}" + if isinstance(substring, list): + # show at most 10 values in registration key. 
If more than 10, + # show first 5, ..., last 5 + if len(substring) > 10: + value_list_repr = ( + ", ".join(map(str, substring[:5])) + + ", ..., " + + ", ".join(map(str, substring[-5:])) + ) + else: + value_list_repr = ", ".join(map(str, substring)) + + registration_key = f"{column_name}:{value_list_repr}" + + slice_function = partial( + filter_value, + column_name=column_name, + value=substring, + negate=negated, + keep_nulls=spec.get("keep_nulls", False), + ) + elif "min_value" in spec or "max_value" in spec: + min_value = spec.get("min_value", -np.inf) + max_value = spec.get("max_value", np.inf) + min_inclusive = spec.get("min_inclusive", True) + max_inclusive = spec.get("max_inclusive", True) + negated = spec.get("negate", False) + + min_end = "[" if min_inclusive else "(" + max_end = "]" if max_inclusive else ")" + registration_key = ( + f"{column_name}:{min_end}{min_value} - {max_value}{max_end}" + ) + + slice_function = partial( + filter_range, + column_name=column_name, + min_value=min_value, + max_value=max_value, + min_inclusive=min_inclusive, + max_inclusive=max_inclusive, + negate=negated, + keep_nulls=spec.get("keep_nulls", False), + ) + elif any(k in spec for k in ("year", "month", "day", "hour")): + year = spec.get("year") + month = spec.get("month") + day = spec.get("day") + hour = spec.get("hour") + negated = spec.get("negate", False) + + # create registration key with year, month, day, hour if specified + registration_key = f"{column_name}:" + ", ".join( + [ + f"{k}={v}" + for k, v in zip( + ("year", "month", "day", "hour"), + (year, month, day, hour), + ) + if v is not None + ], + ) + + slice_function = partial( + filter_datetime, + column_name=column_name, + year=year, + month=month, + day=day, + hour=hour, + negate=negated, + keep_nulls=spec.get("keep_nulls", False), + ) + elif "contains" in spec: + substring = spec["contains"] + negated = spec.get("negate", False) + + registration_key = f"{column_name}:contains {substring}" + + slice_function = partial( + filter_string_contains, + column_name=column_name, + contains=substring, + negate=negated, + keep_nulls=spec.get("keep_nulls", False), + ) + elif "keep_nulls" in spec: + keep_nulls = spec["keep_nulls"] + negated = spec.get("negate", False) + + # keep_nulls=True and negate=True => filter_non-null(negate=False) + # keep_nulls=False and negate=True => filter_non-null(negate=True) + # keep_nulls=True and negate=False => filter_non-null(negate=True) + # keep_nulls=False and negate=False => filter_non-null(negate=False) + negated = keep_nulls ^ negated # XOR + + registration_key = f"{column_name}:non_null" + slice_function = partial( + filter_non_null, + column_names=column_name, + negate=negated, + ) + else: + raise ValueError( + "Expected the slice specification to contain `value`, `min_value`, " + "`max_value`, `contains`, `year`, `month`, `day`, `hour` or " + f"`keep_nulls`. Got {spec} instead.", + ) + + if negated: + registration_key = f"!({registration_key})" + + return registration_key, slice_function + + def _check_column_names(self, column_names: Union[str, List[str]]) -> None: + """Check that the given column names are valid.""" + if isinstance(column_names, list): + for column_name in column_names: + self._check_column_names(column_name) + + if isinstance(column_names, str): + if ( + self.validate + and self.column_names is not None + and column_names not in self.column_names + ): + raise KeyError( + f"Column name '{column_names}' is not in the dataset. 
" + f"Valid column names are: {self.column_names}", + ) + else: + raise TypeError( + "Expected `column_names` to be a string or list of strings." + f"Got {type(column_names)} instead.", + )
+ + + +# filter functions +
+[docs] +def overall(examples: Union[pa.Table, LazyBatch]) -> List[bool]: + """Return True for all examples. + + Parameters + ---------- + examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch + A batch of examples. + + Returns + ------- + List[bool] + A list of booleans containing `True` for all examples. + + """ + _check_examples(examples) + return [True] * ( + len(list(examples.values())[0]) + if isinstance(examples, LazyBatch) + else len(examples) + )
+ + + +
+[docs] +def filter_non_null( + examples: Union[pa.Table, LazyBatch], + column_names: Union[str, List[str]], + negate: bool = False, +) -> List[bool]: + """Return True for all examples where the feature/column is not null. + + Parameters + ---------- + examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch + A batch of examples to filter. + column_names : Union[str, List[str]] + The column name(s) on which to filter. + negate : bool, optional, default=False + If `True`, negate the filter, i.e. return `True` for all examples where + the value is null. + + Returns + ------- + List[bool] + A list of booleans containing `True` for all examples where the value is + not null. + + Notes + ----- + Floating-point NaN values will not be considered as null. + + """ + _check_examples(examples) + if not ( + isinstance(column_names, str) + or ( + isinstance(column_names, list) + and all(isinstance(key, str) for key in column_names) + ) + ): + raise ValueError( + "Expected `column_names` to be a string or list of strings. " + f"Got {column_names} of type {type(column_names)}", + ) + + if isinstance(column_names, str): + column_names = [column_names] + + mask = pc.invert(pc.is_null(examples[column_names[0]])) + for column_name in column_names[1:]: + mask = pc.and_not(mask, pc.is_null(examples[column_name])) + + if negate: + mask = pc.invert(mask) + + return mask.to_pylist() # type: ignore
+ + + +
+[docs] +def filter_value( + examples: Union[pa.Table, LazyBatch], + column_name: str, + value: Union[Any, List[Any]], + negate: bool = False, + keep_nulls: bool = False, +) -> List[bool]: + """Return True for all examples where the feature/column has the given value. + + Parameters + ---------- + examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch + A batch of examples to filter. + column_name : str + The column name on which to filter. + value : Union[Any, List[Any]] + The value or values to find. Exact match is performed. + negate : bool, optional, default=False + If `True`, return `True` for all examples where the column does not have + the given value. + keep_nulls : bool, optional, default=False + If `True`, return `True` for all examples in the column where the value is null. + + Returns + ------- + List[bool] + A list of booleans containing `True` for all examples where the feature + has the given value or values. + + """ + _check_examples(examples) + value_is_datetime = is_datetime(value) # only checks timestrings + + if not isinstance(value, list): + value = [value] + value_arr: pa.Array = pa.array(value) + + if value_is_datetime: + value_arr = pc.cast(value_arr, pa.timestamp("ns")) + + example_values = ( + pc.cast(examples[column_name], pa.timestamp("ns")) + if value_is_datetime + else examples[column_name] + ) + + mask = pc.is_in(example_values, value_arr) + + if negate: + mask = pc.invert(mask) + + nulls = pc.is_null(example_values) + mask = pc.or_(mask, nulls) if keep_nulls else pc.and_not(mask, nulls) + + return mask.to_pylist() # type: ignore
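A quick illustrative call on a small in-memory table; the column and values are hypothetical:

import pyarrow as pa

from cyclops.data.slicer import filter_value

tbl = pa.table({"unit": ["icu", "er", "icu"]})
filter_value(tbl, column_name="unit", value="icu")  # should give [True, False, True]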
+ + + +
+[docs] +def filter_range( + examples: Union[pa.Table, LazyBatch], + column_name: str, + min_value: float = -np.inf, + max_value: float = np.inf, + min_inclusive: bool = True, + max_inclusive: bool = True, + negate: bool = False, + keep_nulls: bool = False, +) -> List[bool]: + """Return True for all examples where the value is in the given range. + + Parameters + ---------- + examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch + A batch of examples to filter. + column_name : str + The column name on which to filter. + min_value : float, optional, default=-np.inf + The minimum value of the range. + max_value : float, optional, default=np.inf + The maximum value of the range. + min_inclusive : bool, optional, default=True + If `True`, include the minimum value in the range. + max_inclusive : bool, optional, default=True + If `True`, include the maximum value in the range. + negate : bool, optional, default=False + If `True`, return `True` for all examples in the column where the value is + not in the given range. + keep_nulls : bool, optional, default=False + If `True`, return `True` for all examples in the column where the value is null. + + Returns + ------- + List[bool] + A list of booleans containing `True` for all examples in the column where + the value is in the given range. + + Raises + ------ + ValueError + If `max_value` is less than `min_value` or if `min_value` and `max_value` + are equal and either `min_inclusive` or `max_inclusive` is False. + TypeError + If the column does not contain numeric or datetime values. + + """ + _check_examples(examples) + # handle datetime values + min_value, max_value, value_is_datetime = _maybe_convert_to_datetime( + min_value, + max_value, + ) + + if min_value > max_value: + raise ValueError( + "Expected `min_value` to be less than or equal to `max_value`, but got " + f"min_value={min_value} and max_value={max_value}.", + ) + if min_value == max_value and not (min_inclusive and max_inclusive): + raise ValueError( + "`min_value` and `max_value` are equal and either `min_inclusive` or " + "`max_inclusive` is False. This would result in an empty range.", + ) + + example_values = pa.array(examples[column_name]) + if value_is_datetime: + example_values = pc.cast(example_values, pa.timestamp("ns")) + min_value = np.repeat(min_value, len(example_values)) # type: ignore[assignment] + max_value = np.repeat(max_value, len(example_values)) # type: ignore[assignment] + + if not ( # column does not contain number or datetime values + pa.types.is_integer(example_values.type) + or pa.types.is_floating(example_values.type) + or pa.types.is_timestamp(example_values.type) + ): + raise TypeError( + "Expected feature to be numeric or datetime, but got " + f"{example_values.type}.", + ) + + ge = ( + pc.greater_equal(example_values, min_value) + if min_inclusive + else pc.greater(example_values, min_value) + ) + le = ( + pc.less_equal(example_values, max_value) + if max_inclusive + else pc.less(example_values, max_value) + ) + + mask = pc.and_(ge, le).fill_null(False) + + if negate: + mask = pc.invert(mask) + + nulls = pc.is_null(example_values) + mask = pc.or_(mask, nulls) if keep_nulls else pc.and_not(mask, nulls) + + return mask.to_pylist() # type: ignore
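A minimal sketch of a range filter over a numeric column, assuming a toy table; nulls are dropped unless `keep_nulls=True`:

import pyarrow as pa

from cyclops.data.slicer import filter_range

tbl = pa.table({"age": [34, 72, None]})
filter_range(tbl, column_name="age", min_value=65)  # should give [False, True, False]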
+ + + +
+[docs]
+def filter_datetime(
+    examples: Union[pa.Table, LazyBatch],
+    column_name: str,
+    year: Optional[Union[int, str, List[int], List[str]]] = None,
+    month: Optional[Union[int, List[int]]] = None,
+    day: Optional[Union[int, List[int]]] = None,
+    hour: Optional[Union[int, List[int]]] = None,
+    negate: bool = False,
+    keep_nulls: bool = False,
+) -> List[bool]:
+    """Return True for all examples where the datetime value matches the given datetime.
+
+    Parameters
+    ----------
+    examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch
+        A batch of examples to filter.
+    column_name : str
+        The column name on which to filter.
+    year : int, str, List[int], List[str], optional, default=None
+        The year to match. If string, it must be a valid year string (e.g. "2020").
+        If a list is provided, return `True` for all examples where the year matches
+        any of the values in the list.
+    month : int, List[int], optional, default=None
+        The month to match. If a list is provided, return `True` for all examples
+        where the month matches any of the values in the list.
+    day : int, List[int], optional, default=None
+        The day to match. If a list is provided, return `True` for all examples
+        where the day matches any of the values in the list.
+    hour : int, List[int], optional, default=None
+        The hour to match. If a list is provided, return `True` for all examples
+        where the hour matches any of the values in the list.
+    negate : bool, optional, default=False
+        If `True`, return `True` for all examples where the value does not match
+        the given datetime components.
+    keep_nulls : bool, optional, default=False
+        If `True`, return `True` for all examples that have a null value.
+
+    Returns
+    -------
+    List[bool]
+        A list of booleans containing `True` for all examples where the value of
+        a column matches the given datetime components.
+
+    Raises
+    ------
+    TypeError
+        If the column does not contain datetime values.
+
+    """
+    _check_examples(examples)
+    example_values = pa.array(examples[column_name])
+    try:
+        example_values = pc.cast(example_values, pa.timestamp("ns"))
+    except ArrowInvalid as exc:
+        raise TypeError(
+            "Expected datetime feature, but got feature of type "
+            f"{example_values.type}.",
+        ) from exc
+
+    def _apply_mask(
+        values: pa.Int64Array,
+        value_set: Union[int, str, List[int], List[str]],
+        mask: pa.BooleanArray,
+    ) -> pa.BooleanArray:
+        if isinstance(value_set, (str, int)):
+            value_set = [value_set]  # type: ignore[assignment]
+
+        return pc.and_(
+            mask,
+            pc.is_in(
+                values,
+                pa.array(np.asanyarray(value_set, dtype=int), type=pa.int64()),
+            ),
+        )
+
+    mask = pa.array([True] * len(example_values), type=pa.bool_())
+    if year is not None:
+        years = pc.year(example_values)
+        mask = _apply_mask(years, year, mask)
+    if month is not None:
+        months = pc.month(example_values)
+        mask = _apply_mask(months, month, mask)
+    if day is not None:
+        days = pc.day(example_values)
+        mask = _apply_mask(days, day, mask)
+    if hour is not None:
+        hours = pc.hour(example_values)
+        mask = _apply_mask(hours, hour, mask)
+
+    if negate:
+        mask = pc.invert(mask)
+
+    nulls = pc.is_null(example_values)
+    mask = pc.or_(mask, nulls) if keep_nulls else pc.and_not(mask, nulls)
+
+    return mask.to_pylist()  # type: ignore
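A small sketch of matching on a datetime component; the timestamp strings below are hypothetical and castable to timestamps:

import pyarrow as pa

from cyclops.data.slicer import filter_datetime

tbl = pa.table({"admit_ts": ["2020-01-05 10:00", "2021-03-02 08:30"]})
filter_datetime(tbl, column_name="admit_ts", year=2020)  # should give [True, False]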
+ + + +
+[docs]
+def filter_string_contains(
+    examples: Union[pa.Table, LazyBatch],
+    column_name: str,
+    contains: Union[str, List[str]],
+    negate: bool = False,
+    keep_nulls: bool = False,
+) -> List[bool]:
+    """Return True for all examples where the value contains the given substring.
+
+    Parameters
+    ----------
+    examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch
+        A batch of examples to filter.
+    column_name : str
+        The column name on which to filter.
+    contains : str, List[str]
+        The substring(s) to match. If a list is provided, return `True` for all
+        examples where the value contains any of the substrings in the list.
+    negate : bool, optional, default=False
+        If `True`, return `True` for all examples where the value does not contain
+        the given substring.
+    keep_nulls : bool, optional, default=False
+        If `True`, return `True` for all examples that have a null value.
+
+    Returns
+    -------
+    List[bool]
+        A list of booleans containing `True` for all examples where the value of
+        a column contains the given substring.
+
+    Raises
+    ------
+    TypeError
+        If the column does not contain string values or if the values in
+        `contains` are not strings.
+
+    """
+    _check_examples(examples)
+    # make sure the column has string type
+    example_values = pa.array(examples[column_name])
+    if not pa.types.is_string(example_values.type):
+        raise TypeError(
+            "Expected string feature, but got feature of type "
+            f"{example_values.type}.",
+        )
+
+    # get all the values that contain the given substring
+    mask = pa.array([False] * len(example_values), type=pa.bool_())
+    if isinstance(contains, str):
+        contains = [contains]
+
+    for substring in contains:
+        if not isinstance(substring, str):
+            raise TypeError(
+                f"Expected string value for `contains`, but got value of type "
+                f"{type(substring)}.",
+            )
+        mask = pc.or_(mask, pc.match_substring(example_values, substring))
+
+    if negate:
+        mask = pc.invert(mask)
+
+    nulls = pc.is_null(example_values)
+    mask = pc.or_(mask, nulls) if keep_nulls else pc.and_not(mask, nulls)
+
+    return mask.to_pylist()  # type: ignore
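+# Editor's note: a minimal usage sketch (assumed values); with a list, a row
+# matches if it contains any of the given substrings:
+# >>> import pyarrow as pa
+# >>> dx = pa.table({"dx": ["influenza", "common cold"]})
+# >>> filter_string_contains(dx, "dx", contains=["flu", "covid"])
+# [True, False]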
+ + + +
+[docs]
+def compound_filter(
+    examples: Union[pa.Table, LazyBatch],
+    slice_functions: List[Callable[..., List[bool]]],
+) -> List[bool]:
+    """Combine the result of multiple slices using bitwise AND.
+
+    Parameters
+    ----------
+    examples : pyarrow.Table, datasets.formatting.formatting.LazyBatch
+        A batch of examples to filter.
+    slice_functions : List[Callable[..., List[bool]]]
+        A list of functions to apply to the examples. The signature of each
+        function should be: `slice_function(examples, **kwargs)`. The result of
+        each function should be a list of booleans.
+
+    Returns
+    -------
+    List[bool]
+        A list of booleans containing `True` for all examples where each slice
+        function returns `True`.
+
+    """
+    _check_examples(examples)
+    mask: List[bool] = np.bitwise_and.reduce(
+        [slice_function(examples) for slice_function in slice_functions],
+    ).tolist()
+
+    return mask
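+# Editor's note: a sketch combining two of the slice functions above with
+# functools.partial (assumed columns/values); each partial takes the batch as
+# its only remaining positional argument:
+# >>> import pyarrow as pa
+# >>> from functools import partial
+# >>> batch = pa.table({"age": [20, 15], "dx": ["flu", "flu"]})
+# >>> compound_filter(
+# ...     batch,
+# ...     [
+# ...         partial(filter_range, column_name="age", min_value=18),
+# ...         partial(filter_string_contains, column_name="dx", contains="flu"),
+# ...     ],
+# ... )
+# [True, False]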
+
+
+
+# utility functions
+def _check_examples(examples: Union[pa.Table, LazyBatch]) -> None:
+    """Check the type of `examples`."""
+    if not isinstance(examples, (pa.Table, LazyBatch)):
+        raise TypeError(
+            "Expected `examples` to be an instance of pyarrow.Table or "
+            "datasets.formatting.formatting.LazyBatch, but got "
+            f"{type(examples)}",
+        )
+
+
+[docs] +def is_datetime( + value: Union[ + str, + datetime.datetime, + np.datetime64, + np.ndarray[Any, np.dtype[Any]], + List[Any], + Any, + ], +) -> bool: + """Check if the given value is a datetime. + + Parameters + ---------- + value : Union[str, datetime.datetime, np.datetime64, np.ndarray, List] + The value(s) to check. + + Returns + ------- + bool + True if the value is a datetime, False otherwise. + + """ + if isinstance(value, str): + try: + parse(value) + return True + except ValueError: + return False + if isinstance(value, (list, np.ndarray)): + return all((is_datetime(v) for v in value)) + if isinstance(value, (datetime.datetime, np.datetime64)): + return True + + return False
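+# Editor's note: a few illustrative checks (dateutil's parser accepts most
+# common timestamp strings, so unparseable text returns False):
+# >>> is_datetime("2021-01-01"), is_datetime("not a date"), is_datetime(3.14)
+# (True, False, False)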
+ + + +def _maybe_convert_to_datetime(min_value: Any, max_value: Any) -> Tuple[Any, Any, bool]: + """Convert datetime and infinity values to np.datetime64. + + Parameters + ---------- + min_value : Any + The minimum value. + max_value : Any + The maximum value. + + Returns + ------- + Tuple[Any, Any, bool] + The minimum and maximum values, and a boolean indicating whether the + values are datetime values. + + """ + if isinstance(min_value, datetime.date): + min_value = datetime.datetime.combine(min_value, datetime.time.min) + if isinstance(max_value, datetime.date): + max_value = datetime.datetime.combine(max_value, datetime.time.max) + + # convert datetime and infinity values to np.datetime64 + value_is_datetime = False + if is_datetime(min_value): + min_value = np.datetime64(min_value) + value_is_datetime = True + if max_value == np.inf: + max_value = pd.Timestamp.max.to_datetime64() + if is_datetime(max_value): + max_value = np.datetime64(max_value) + value_is_datetime = True + if min_value == -np.inf: + min_value = pd.Timestamp.min.to_datetime64() + + return min_value, max_value, value_is_datetime +
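+# Editor's note: an illustrative call. With a datetime-like lower bound and an
+# unbounded upper bound, the infinity is replaced by pandas' maximum timestamp:
+# >>> _maybe_convert_to_datetime("2021-01-01", np.inf)
+# (numpy.datetime64('2021-01-01'), numpy.datetime64('2262-04-11T23:47:16.854775807'), True)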
+ \ No newline at end of file diff --git a/api/_modules/cyclops/evaluate/evaluator.html b/api/_modules/cyclops/evaluate/evaluator.html new file mode 100644 index 000000000..9c8f221ec --- /dev/null +++ b/api/_modules/cyclops/evaluate/evaluator.html @@ -0,0 +1,702 @@
+cyclops.evaluate.evaluator - cyclops documentation

Source code for cyclops.evaluate.evaluator

+"""Evaluate one or more models on a dataset."""
+
+import logging
+import warnings
+from dataclasses import asdict
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union
+
+from datasets import Dataset, DatasetDict, config, load_dataset
+from datasets.splits import Split
+
+from cyclops.data.slicer import SliceSpec
+from cyclops.data.utils import set_decode
+from cyclops.evaluate.fairness.config import FairnessConfig
+from cyclops.evaluate.fairness.evaluator import evaluate_fairness
+from cyclops.evaluate.metrics.experimental.metric import Metric
+from cyclops.evaluate.metrics.experimental.metric_dict import MetricDict
+from cyclops.evaluate.metrics.experimental.utils.types import Array
+from cyclops.evaluate.utils import (
+    _format_column_names,
+    check_required_columns,
+    choose_split,
+    get_columns_as_array,
+)
+from cyclops.utils.log import setup_logging
+
+
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="WARN", logger=LOGGER)
+
+
+
+[docs] +def evaluate( + dataset: Union[str, Dataset, DatasetDict], + metrics: Union[Metric, Sequence[Metric], Dict[str, Metric], MetricDict], + target_columns: Union[str, List[str]], + prediction_columns: Union[str, List[str]], + ignore_columns: Optional[Union[str, List[str]]] = None, + slice_spec: Optional[SliceSpec] = None, + split: Optional[Union[str, Split]] = None, + batch_size: Optional[int] = config.DEFAULT_MAX_BATCH_SIZE, + raise_on_empty_slice: bool = False, + fairness_config: Optional[FairnessConfig] = None, + override_fairness_metrics: bool = True, + load_dataset_kwargs: Optional[Dict[str, Any]] = None, + array_lib: Literal["numpy", "torch", "cupy"] = "numpy", +) -> Dict[str, Any]: + """Evaluate one or more models on a dataset using one or more metrics. + + Parameters + ---------- + dataset : Union[str, Dataset, DatasetDict] + The dataset to evaluate on. If a string, the dataset will be loaded + using `datasets.load_dataset`. If `DatasetDict`, the `split` argument + must be specified. + metrics : Union[Metric, Sequence[Metric], Dict[str, Metric], MetricDict] + The metrics to compute. + target_columns : Union[str, List[str]] + The name of the column(s) containing the target values. A string value + indicates a single column. A list of strings indicates a multi-label + task - the target values will be the union of the columns. + prediction_columns : Union[str, List[str]] + The names of the prediction columns used to compute metrics. If a string, it + should be the name of a column in the dataset. If a list, it should be a list + of column names in the dataset. Lists allow for evaluating multiple models + on the same dataset. + ignore_columns : Union[str, List[str]], optional + The name of the column(s) to ignore while filtering the dataset and computing + metrics. This is useful if the dataset contains columns that are not needed + for computing metrics but may be expensive to keep in memory + (e.g. image columns). + slice_spec : SliceSpec, optional + The slice specification to use for computing metrics. If None, no slices + will be computed - the metrics will be computed on the entire dataset. + Note that this is not used for computing fairness metrics. + split : Union[str, Split], optional + The split of the dataset to use. If None and `dataset` is a string, a + split will be chosen based on the available splits in the dataset. The + first split that matches one of the following in order will be chosen: + ("test", "testing", "eval", "evaluation", "validation", "val", "valid", + "dev", "train", "training") + If `dataset` is a `DatasetDict`, this must be specified. + batch_size : int, optional + The batch size to use when computing metrics. If None or a negative + integer, the entire dataset will be loaded into memory and metrics + will be computed in one batch. + raise_on_empty_slice : bool, default=False + Whether to raise an error if a slice is empty. If False, a warning will + be logged and the metric values will be set to `NaN`. + fairness_config : Optional[FairnessConfig], optional + The configuration for computing fairness metrics. If None, no fairness + metrics will be computed. Before computing fairness metrics, the following + arguments in the configuration will be overridden by the arguments provided + to this function: `dataset`, `target_columns`, `prediction_columns`, + `remove_columns`, and `batch_size`. If `override_fairness_metrics` is True, + the metrics in the configuration will be overridden by the metrics provided + to this function. 
+ override_fairness_metrics : bool, optional, default=True + If True, the `metrics` argument in fairness_config will be overridden by + the `metrics` argument provided to this function. + load_dataset_kwargs : Dict[str, Any], optional + Keyword arguments to pass to `datasets.load_dataset`. Only used if + `dataset` is a string. + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Dict[str, Any] + A dictionary containing the results of the evaluation. + + Raises + ------ + ValueError + - If `dataset` is a `DatasetDict` and `split` is None. + + """ + dataset = _load_data(dataset, split, **(load_dataset_kwargs or {})) + metrics = _prepare_metrics(metrics) + + check_required_columns( + dataset.column_names, + target_columns, + prediction_columns, + ignore_columns, + ) + + if slice_spec is None: + slice_spec = SliceSpec() + + metric_results = _compute_metrics( + dataset=dataset, + metrics=metrics, + slice_spec=slice_spec, + target_columns=target_columns, + prediction_columns=prediction_columns, + ignore_columns=ignore_columns, + batch_size=batch_size, + raise_on_empty_slice=raise_on_empty_slice, + array_lib=array_lib, + ) + + results = {} + results.update(metric_results) + + if fairness_config is not None: + if override_fairness_metrics: + fairness_config.metrics = metrics + + fairness_config.dataset = dataset + fairness_config.target_columns = target_columns + fairness_config.prediction_columns = prediction_columns + fairness_config.batch_size = batch_size + fairness_config.remove_columns = ignore_columns + + fairness_results = evaluate_fairness( + **asdict(fairness_config), array_lib=array_lib + ) + results["fairness"] = fairness_results + + return results
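+# Editor's note: a minimal end-to-end sketch (assumed column names). The
+# "binary_accuracy" registry key mirrors the one declared in the accuracy
+# module later in this diff; experimental=True selects the experimental
+# metric registry that `evaluate` expects:
+# >>> from datasets import Dataset
+# >>> from cyclops.evaluate.metrics.factory import create_metric
+# >>> ds = Dataset.from_dict({"target": [0, 1, 1, 0], "preds": [0, 1, 0, 0]})
+# >>> results = evaluate(
+# ...     dataset=ds,
+# ...     metrics=create_metric(metric_name="binary_accuracy", experimental=True),
+# ...     target_columns="target",
+# ...     prediction_columns="preds",
+# ... )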
+ + + +def _load_data( + dataset: Union[str, Dataset, DatasetDict], + split: Optional[Union[str, Split]] = None, + **load_dataset_kwargs: Any, +) -> Dataset: + """Load data for evaluation.""" + if isinstance(dataset, str): + if split is None: + split = choose_split(dataset, **load_dataset_kwargs) + LOGGER.warning( + "Got `split=None` but `dataset` is a string. " + "Using `split=%s` instead.", + split, + ) + + if load_dataset_kwargs is None: + load_dataset_kwargs = {} + # remove `split` from `load_dataset_kwargs` if it's there + load_dataset_kwargs.pop("split", None) + + dataset_ = load_dataset(dataset, split=split, **load_dataset_kwargs) + assert isinstance( + dataset_, + Dataset, + ), f"Expected a `Dataset` but got {type(dataset_)}." + return dataset_ + if isinstance(dataset, DatasetDict): + if split is None: + split = choose_split(dataset) + LOGGER.warning( + "Got `split=None` but `dataset` is a DatasetDict or " + "IterableDatasetDict. Using `split=%s` instead.", + split, + ) + + if split == Split.ALL: + raise ValueError( + "Got `split=Split.ALL` but `dataset` is a DatasetDict. " + "Please specify a split name.", + ) + + return dataset[split] + if isinstance(dataset, Dataset): + return dataset + + raise TypeError( + f"Invalid type for `dataset`: {type(dataset)}. Expected one of: " + "string, Dataset, DatasetDict.", + ) + + +def _prepare_metrics( + metrics: Union[Metric, Sequence[Metric], Dict[str, Metric], MetricDict], +) -> MetricDict: + """Prepare metrics for evaluation.""" + # TODO [fcogidi]: wrap in BootstrappedMetric if computing confidence intervals + if isinstance(metrics, (Metric, Sequence, Dict)) and not isinstance( + metrics, + MetricDict, + ): + return MetricDict(metrics) # type: ignore[arg-type] + if isinstance(metrics, MetricDict): + return metrics + + raise TypeError( + f"Invalid type for `metrics`: {type(metrics)}. " + "Expected one of: Metric, Sequence[Metric], Dict[str, Metric], " + "MetricDict.", + ) + + +def _compute_metrics( + dataset: Dataset, + metrics: MetricDict, + slice_spec: SliceSpec, + target_columns: Union[str, List[str]], + prediction_columns: Union[str, List[str]], + ignore_columns: Optional[Union[str, List[str]]] = None, + batch_size: Optional[int] = config.DEFAULT_MAX_BATCH_SIZE, + raise_on_empty_slice: bool = False, + array_lib: Literal["numpy", "torch", "cupy"] = "numpy", +) -> Dict[str, Dict[str, Any]]: + """Compute metrics for a dataset.""" + target_columns = _format_column_names(target_columns) + prediction_columns = _format_column_names(prediction_columns) + + # temporarily stop decoding features to save memory + set_decode(dataset, False, exclude=target_columns + prediction_columns) + + with dataset.formatted_as("arrow", columns=target_columns + prediction_columns): + results: Dict[str, Dict[str, Any]] = {} + for slice_name, slice_fn in slice_spec.slices(): + sliced_dataset = dataset.remove_columns(ignore_columns or []).filter( + slice_fn, + batched=True, + batch_size=batch_size, + desc=f"Filter -> {slice_name}", + ) + + if len(sliced_dataset) == 0 and raise_on_empty_slice: + raise RuntimeError( + f"Slice {slice_name} is empty. Please check your slice " + f"configuration or the data.", + ) + + for prediction_column in prediction_columns: + if len(sliced_dataset) == 0: + warnings.warn( + "Got an empty dataset after applying the slice " + "%s. Metric values will be set to `None`." 
% slice_name, + RuntimeWarning, + stacklevel=1, + ) + metric_output: Dict[str, Array] = { + metric_name: float("NaN") # type: ignore + for metric_name in metrics # type: ignore + } + elif ( + batch_size is None or batch_size < 0 + ): # dataset.iter does not support getting all batches at once + targets = get_columns_as_array( + dataset=sliced_dataset, + columns=target_columns, + array_lib=array_lib, + ) + predictions = get_columns_as_array( + dataset=sliced_dataset, + columns=prediction_column, + array_lib=array_lib, + ) + metric_output = metrics(targets, predictions) + else: + for batch in sliced_dataset.iter(batch_size=batch_size): + targets = get_columns_as_array( + dataset=batch, columns=target_columns, array_lib=array_lib + ) + predictions = get_columns_as_array( + dataset=batch, + columns=prediction_column, + array_lib=array_lib, + ) + + # update the metric state + metrics.update(targets, predictions) + + metric_output = metrics.compute() + metrics.reset() + + model_name: str = "model_for_%s" % prediction_column + results.setdefault(model_name, {}) + results[model_name][slice_name] = metric_output + + set_decode(dataset, True) # restore decoding features + + return results +
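+# Editor's note: the batched branch above follows the standard stateful-metric
+# pattern -- update once per batch, compute once at the end, then reset. A
+# sketch, with hypothetical helpers `targets_of`/`preds_of` standing in for
+# get_columns_as_array:
+# >>> from cyclops.evaluate.metrics.factory import create_metric
+# >>> metrics = MetricDict(create_metric(metric_name="binary_accuracy", experimental=True))
+# >>> for batch in dataset.iter(batch_size=1000):
+# ...     metrics.update(targets_of(batch), preds_of(batch))
+# >>> result = metrics.compute()
+# >>> metrics.reset()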
+ \ No newline at end of file diff --git a/api/_modules/cyclops/evaluate/fairness/evaluator.html b/api/_modules/cyclops/evaluate/fairness/evaluator.html new file mode 100644 index 000000000..5f63d6983 --- /dev/null +++ b/api/_modules/cyclops/evaluate/fairness/evaluator.html @@ -0,0 +1,1375 @@
+cyclops.evaluate.fairness.evaluator - cyclops documentation

Source code for cyclops.evaluate.fairness.evaluator

+"""Fairness evaluator."""
+
+import inspect
+import itertools
+import logging
+import warnings
+from datetime import datetime
+from typing import Any, Callable, Dict, List, Literal, Optional, Union
+
+import numpy as np
+import pandas as pd
+from datasets import Dataset, config
+from datasets.features import Features
+
+from cyclops.data.slicer import SliceSpec, is_datetime
+from cyclops.data.utils import (
+    feature_is_datetime,
+    feature_is_numeric,
+    set_decode,
+)
+from cyclops.evaluate.metrics.experimental.functional.precision_recall_curve import (
+    _format_thresholds,
+    _validate_thresholds,
+)
+from cyclops.evaluate.metrics.experimental.metric import Metric, OperatorMetric
+from cyclops.evaluate.metrics.experimental.metric_dict import MetricDict
+from cyclops.evaluate.metrics.experimental.utils.types import Array
+from cyclops.evaluate.metrics.factory import create_metric
+from cyclops.evaluate.utils import (
+    _SUPPORTED_ARRAY_LIBS,
+    _format_column_names,
+    check_required_columns,
+    get_columns_as_array,
+)
+from cyclops.utils.log import setup_logging
+
+
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="WARN", logger=LOGGER)
+
+
+
+[docs] +def evaluate_fairness( # noqa: PLR0912 + metrics: Union[str, Callable[..., Any], Metric, MetricDict], + dataset: Dataset, + groups: Union[str, List[str]], + target_columns: Union[str, List[str]], + prediction_columns: Union[str, List[str]], + group_values: Optional[Dict[str, Any]] = None, + group_bins: Optional[Dict[str, Union[int, List[Any]]]] = None, + group_base_values: Optional[Dict[str, Any]] = None, + thresholds: Optional[Union[int, List[float]]] = None, + compute_optimal_threshold: bool = False, # expensive operation + remove_columns: Optional[Union[str, List[str]]] = None, + batch_size: Optional[int] = config.DEFAULT_MAX_BATCH_SIZE, + raise_on_empty_slice: bool = False, + metric_name: Optional[str] = None, + metric_kwargs: Optional[Dict[str, Any]] = None, + array_lib: Literal["torch", "numpy", "cupy"] = "numpy", +) -> Union[Dict[str, Dict[str, Any]], Dict[str, Dict[str, Dict[str, Any]]]]: + """Compute fairness indicators. + + This function computes fairness indicators for a dataset that includes + predictions and targets. + + Parameters + ---------- + metrics : Union[str, Callable[..., Any], Metric, MetricDict] + The metric or metrics to compute. If a string, it should be the name of a + metric provided by CyclOps. If a callable, it should be a function that + takes target, prediction, and optionally threshold/thresholds as arguments + and returns a dictionary of metric names and values. + dataset : Dataset + The dataset to compute the metrics on. + groups : Union[str, List[str]] + The group or groups to evaluate fairness on. If a string, it should be the + name of a column in the dataset. If a list, it should be a list of column + names in the dataset. Lists allow for evaluating fairness at the intersection + of multiple groups. + target_columns : Union[str, List[str]] + The target or targets columns used to compute metrics. If a string, it should + be the name of a column in the dataset. If a list, it should be a list of + column names in the dataset. Lists will be treated as multilabel targets. + prediction_columns : Union[str, List[str]] + The names of the prediction columns used to compute metrics. If a string, it + should be the name of a column in the dataset. If a list, it should be a list + of column names in the dataset. Lists allow for evaluating multiple models + on the same dataset. + group_values : Dict[str, Any], optional, default=None + The values to use for groups. If None, the values will be the unique values + in the group. This can be used to limit the number of groups that are + evaluated. + group_bins : Dict[str, Union[int, List[Any]]], optional, default=None + Bins to use for groups with continuous values. If int, an equal number of + bins will be created for the group. If list, the bins will be created from + the values in the list. If None, the bins will be created from the unique + values in the group, which may be very slow for large groups. + group_base_values : Dict[str, Any], optional, default=None + The base values to use for groups. This is used in the denominator when + computing parity across groups. If None, the base value will be the overall + metric value. + thresholds : Optional[Union[int, List[float]]], optional, default=None + The thresholds to use when computing metrics. If int, thresholds will be + created using np.linspace(0, 1, thresholds). If list, the values must be + between 0 and 1, and monotonic. If None, the default threshold value for the + metric will be used. 
+ compute_optimal_threshold : bool, optional, default=False + Whether to compute the optimal threshold for each metric. This is an + expensive operation, and should only be used when necessary. + remove_columns : Union[str, List[str]], optional, default=None + The name of the column(s) to remove from the dataset before filtering + and computing metrics. This is useful if the dataset contains columns + that are not needed for computing metrics but may be expensive to + keep in memory (e.g. image columns). + batch_size : int, optional, default=1000 + The batch size to use when computing metrics. This is used to control memory + usage when computing metrics on large datasets. For image datasets, this + value should be relatively small (e.g. 32) to avoid memory issues. + raise_on_empty_slice : bool, optional, default=False + Whether to raise an error if an empty slice is encountered when computing + metrics. If False, the metric values for the slice will be set to `NaN`. + metric_name : Optional[str], optional, default=None + The name of the metric. If None, the name of the metric will be used. + metric_kwargs : Optional[Dict[str, Any]], optional, default=None + Keyword arguments to use when creating the metric. Only used if metrics is a + string. + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Union[Dict[str, Dict[str, Any]], Dict[str, Dict[str, Dict[str, Any]]]] + A nested dictionary containing the metric values. The first level of the + dictionary is keyed by the prediction columns. The second level of the + dictionary is keyed by the metric names. The third level of the dictionary + is keyed by the slice names. If there is only one prediction column, the + first level of the dictionary will be omitted. + + Raises + ------ + ValueError + If the dataset does not contain the required columns. + TypeError + If the dataset is not a HuggingFace Dataset object or if the batch size is + not an integer. + RuntimeError + If an empty slice is encountered when computing metrics. 
+ + """ + # input validation and formatting + if not isinstance(dataset, Dataset): + raise TypeError( + "Expected `dataset` to be of type `Dataset`, but got " f"{type(dataset)}.", + ) + if array_lib not in _SUPPORTED_ARRAY_LIBS: + raise NotImplementedError(f"The array library `{array_lib}` is not supported.") + _validate_thresholds(thresholds) + + metrics_: Union[Callable[..., Any], MetricDict] = _format_metrics( + metrics, + metric_name, + **(metric_kwargs or {}), + ) + fmt_thresholds = _format_thresholds(thresholds, xp=_SUPPORTED_ARRAY_LIBS[array_lib]) + fmt_groups: List[str] = _format_column_names(groups) + fmt_target_columns: List[str] = _format_column_names(target_columns) + fmt_prediction_columns: List[str] = _format_column_names(prediction_columns) + + check_required_columns( + dataset.column_names, + fmt_groups, + fmt_target_columns, + fmt_prediction_columns, + list(group_base_values.keys()) if group_base_values is not None else None, + list(group_bins.keys()) if group_bins is not None else None, + list(group_values.keys()) if group_values is not None else None, + ) + + set_decode( + dataset, + decode=False, + exclude=fmt_target_columns + fmt_prediction_columns, + ) # don't decode columns that we don't need; pass dataset by reference + + with dataset.formatted_as( + "arrow", columns=fmt_groups + fmt_target_columns + fmt_prediction_columns + ): + unique_values: Dict[str, List[Any]] = _get_unique_values( + dataset=dataset, + groups=fmt_groups, + group_values=group_values, + ) + + if group_base_values is not None: + _validate_base_values( + base_values=group_base_values, + groups=fmt_groups, + unique_values=unique_values, + ) + # reorder keys to match order in `groups` + group_base_values = { + group: group_base_values[group] + for group in fmt_groups + if group in group_base_values + } + + if group_bins is None: + warn_too_many_unique_values(unique_values=unique_values) + else: + _validate_group_bins( + group_bins=group_bins, + groups=fmt_groups, + unique_values=unique_values, + ) + + group_bins = { + group: group_bins[group] for group in fmt_groups if group in group_bins + } # reorder keys to match order given in `groups` + + bins = _create_bins( + group_bins=group_bins, + dataset_features=dataset.features, + unique_values=unique_values, + ) + + if group_base_values is not None: # update the base values with bins + group_base_values = _update_base_values_with_bins( + base_values=group_base_values, + bins=bins, + ) + + unique_values.update(bins) # update unique values with bins + + slice_spec = _get_slice_spec( + groups=fmt_groups, + unique_values=unique_values, + column_names=dataset.column_names, + ) + + if group_base_values is not None: + # since we have base values, remove overall slice + slice_spec._registry.pop("overall", None) + + results: Dict[str, Dict[str, Dict[str, Any]]] = {} + + for slice_name, slice_fn in slice_spec.slices(): + sliced_dataset = dataset.remove_columns(remove_columns or []).filter( + slice_fn, + batched=True, + batch_size=batch_size, + desc=f"Filter -> {slice_name}", + ) + + if len(sliced_dataset) == 0 and raise_on_empty_slice: + raise RuntimeError( + f"Slice {slice_name} is empty. 
Please check your slice " + f"configuration or the data.", + ) + + for prediction_column in fmt_prediction_columns: + results.setdefault(prediction_column, {}) + results[prediction_column].setdefault(slice_name, {}).update( + {"Group Size": len(sliced_dataset)}, + ) + + pred_result = _get_metric_results_for_prediction_and_slice( + metrics=metrics_, + dataset=sliced_dataset, + target_columns=fmt_target_columns, + prediction_column=prediction_column, + slice_name=slice_name, + batch_size=batch_size, + metric_name=metric_name, + thresholds=fmt_thresholds, + array_lib=array_lib, + ) + # if metric_name does not exist, add it to the dictionary + # otherwise, update the dictionary for the metric_name + for key, slice_result in pred_result.items(): + results[prediction_column].setdefault(key, {}).update(slice_result) + + if compute_optimal_threshold: + # TODO: generate a comprehensive list of thresholds and compute + # the metric for each threshold. Next compute the parity metrics + # for each threshold and select the threshold that leads to + # the least disparity across all slices for each metric. + raise NotImplementedError( + "Computing optimal threshold is not yet implemented.", + ) + + set_decode(dataset, decode=True) # reset decode + + # compute parity metrics + if group_base_values is not None: + base_slice_name = _construct_base_slice_name(base_values=group_base_values) + parity_results = _compute_parity_metrics( + results=results, base_slice_name=base_slice_name, array_lib=array_lib + ) + else: + parity_results = _compute_parity_metrics( + results=results, base_slice_name="overall", array_lib=array_lib + ) + + # add parity metrics to the results + for pred_column, pred_results in parity_results.items(): + for slice_name, slice_results in pred_results.items(): + results[pred_column][slice_name].update(slice_results) + + if len(fmt_prediction_columns) == 1: + return results[fmt_prediction_columns[0]] + + return results
+ + + +def warn_too_many_unique_values( + unique_values: Union[List[Any], Dict[str, List[Any]]], + max_unique_values: int = 50, +) -> None: + """Warns if the number of unique values is greater than `max_unique_values`. + + Parameters + ---------- + unique_values : Union[List[Any], Dict[str, List[Any]]] + A list of unique values or a mapping from group names to lists of unique + values. + max_unique_values : int, default=50 + The maximum number of unique values to allow before warning. + + Raises + ------ + TypeError + If `unique_values` is not a list or a mapping. + + Warnings + -------- + If the number of unique values in any group is greater than `max_unique_values`. + + """ + if not (isinstance(max_unique_values, int) and max_unique_values > 0): + raise TypeError( + "`max_unique_values` must be a positive integer. Got " + f"{type(max_unique_values)}.", + ) + + msg = ( + "The number of unique values for the group is greater than " + "%s. This may take a long time to compute. " + "Consider binning the values into fewer groups." + ) + if isinstance(unique_values, list): + if len(unique_values) > max_unique_values: + LOGGER.warning(msg, max_unique_values) + return + if isinstance(unique_values, dict): + if any(len(values) > max_unique_values for values in unique_values.values()): + LOGGER.warning(msg, max_unique_values) + return + raise TypeError( + f"`unique_values` must be a list or a mapping. Got {type(unique_values)}.", + ) + + +def _format_metrics( + metrics: Union[str, Callable[..., Any], Metric, MetricDict], + metric_name: Optional[str] = None, + **metric_kwargs: Any, +) -> Union[Callable[..., Any], Metric, MetricDict]: + """Format the metrics argument. + + Parameters + ---------- + metrics : Union[str, Callable[..., Any], Metric, MetricDict] + The metrics to use for computing the metric results. + metric_name : str, optional, default=None + The name of the metric. This is only used if `metrics` is a callable. + **metric_kwargs : Any + Additional keyword arguments to pass when creating the metric. Only used + if `metrics` is a string. + + Returns + ------- + Union[Callable[..., Any], Metric, MetricDict] + The formatted metrics. + + Raises + ------ + TypeError + If `metrics` is not of type `str`, `Callable`, `Metric`, or `MetricDict`. + + """ + if isinstance(metrics, str): + metrics = create_metric(metric_name=metrics, experimental=True, **metric_kwargs) + if isinstance(metrics, Metric): + if metric_name is not None and isinstance(metrics, OperatorMetric): + # single metric created from arithmetic operation, with given name + return MetricDict({metric_name: metrics}) + return MetricDict(metrics) + if isinstance(metrics, MetricDict): + return metrics + if callable(metrics): + if metric_name is None: + LOGGER.warning( + "No metric name was specified. 
The metric name will be set to " + "the function name or 'Unnammed Metric' if the function does not " + "have a name.", + ) + return metrics + + raise TypeError( + f"Expected `metrics` to be of type `str`, `Metric`, `MetricDict`, or " + f"`Callable`, but got {type(metrics)}.", + ) + + +def _get_unique_values( + dataset: Dataset, + groups: List[str], + group_values: Optional[Dict[str, Any]], +) -> Dict[str, List[Any]]: + """Get the unique values for a group.""" + unique_values = {} + for group in groups: + column_unique_values = dataset.unique(group) + if group_values is not None and group in group_values: + udv = group_values[group] # user defined values + if not isinstance(udv, list): + udv = [udv] + + # check that the user defined values are in the unique values + if not set(udv).issubset(set(column_unique_values)): + raise ValueError( + f"User defined values {udv} for group {group} are not a subset of " + f"the unique values {column_unique_values}.", + ) + unique_values[group] = udv + else: + unique_values[group] = column_unique_values + return unique_values + + +def _validate_base_values( + base_values: Dict[str, Any], + groups: List[str], + unique_values: Dict[str, List[Any]], +) -> None: + """Check that the base values are valid. + + Parameters + ---------- + base_values : Dict[str, Any] + The base values for each group. + groups : List[str] + The groups to use for computing the metric results. + unique_values : Dict[str, List[Any]] + The unique values for each group. + + Raises + ------ + ValueError + If the base values are not defined for all groups or if the base values + are not part of the unique values for the group. + + """ + base_group_names = set(base_values.keys()) + group_names = set(groups) + if not base_group_names == group_names: + raise ValueError( + f"The base values must be defined for all groups. Got {base_group_names} " + f"but expected {group_names}.", + ) + + # base values for each group must be part of the unique values + # unless it a numeric or datetime type, then it can be any value + # in the range of the unique values + for group, base_value in base_values.items(): + if isinstance(base_value, (int, float, datetime)) or is_datetime(base_value): + continue + if base_value not in unique_values[group]: + raise ValueError( + f"The base value {base_value} for group {group} is not part of the " + f"unique values for the group. Got {unique_values[group]}.", + ) + + +def _validate_group_bins( + group_bins: Dict[str, Union[int, List[Any]]], + groups: List[str], + unique_values: Dict[str, List[Any]], +) -> None: + """Check that the group bins are valid. + + Parameters + ---------- + group_bins : Dict[str, Union[int, List[Any]]] + The bins for each group. + groups : List[str] + The groups to use for accessing fairness. + unique_values : Dict[str, List[Any]] + The unique values for each group. + + Raises + ------ + ValueError + If extra groups are defined in `group_bins` that are not in `groups` or + if the number of bins is less than 2 or greater than the number of unique + values for the group. + TypeError + If the bins for a group are not a list or an integer. + + """ + group_bin_names = set(group_bins.keys()) + group_names = set(groups) + if not group_bin_names.issubset(group_names): + raise ValueError( + "All groups defined in `group_bins` must be in `groups`. 
" + f"Found {group_bin_names - group_names} in `group_bins` but not in " + f"`groups`.", + ) + + for group, bins in group_bins.items(): + if not isinstance(bins, (list, int)): + raise TypeError( + f"The bins for {group} must be a list or an integer. " + f"Got {type(bins)}.", + ) + + if isinstance(bins, int) and not 2 <= bins < len(unique_values[group]): + raise ValueError( + f"The number of bins must be greater than or equal to 2 " + f"and less than the number of unique values for {group}. " + f"Got {bins} bins and {len(unique_values[group])} unique values.", + ) + + if isinstance(bins, list) and len(bins) < 2: + raise ValueError( + f"The number of bin values must be greater than or equal to 2. " + f"Got {len(bins)}.", + ) + + +def _create_bins( + group_bins: Dict[str, Union[int, List[Any]]], + dataset_features: Features, + unique_values: Dict[str, List[Any]], +) -> Dict[str, pd.IntervalIndex]: + """Create the bins for numeric and datetime features. + + Parameters + ---------- + group_bins : Dict[str, Union[int, List[Any]]] + The user-defined bins for each group. + dataset_features : Features + The features of the dataset. + unique_values : Dict[str, List[Any]] + The unique values for each group. + + Returns + ------- + Dict[str, pandas.IntervalIndex] + The bins for each group. + + Raises + ------ + ValueError + If the feature for any group is not numeric or datetime. + + """ + breaks = {} + for group, bins in group_bins.items(): + group_feature = dataset_features[group] + column_is_datetime = feature_is_datetime(group_feature) + if not (feature_is_numeric(group_feature) or column_is_datetime): + raise ValueError( + f"Column {group} in the must have a numeric or datetime dtype. " + f"Got {group_feature.dtype}.", + ) + + if isinstance(bins, list): + # make sure it is monotonic + if not all(bins[i] <= bins[i + 1] for i in range(len(bins) - 1)): + bins = sorted(bins) # noqa: PLW2901 + + # convert timestring values to datetime + if column_is_datetime: + bins = pd.to_datetime(bins).values # noqa: PLW2901 + + cut_data = pd.Series( + unique_values[group], + dtype="datetime64[ns]" if column_is_datetime else None, + ).to_numpy() + out = pd.cut(cut_data, bins, duplicates="drop") + + intervals = out.categories + + # add -inf and inf to the left and right ends + left_end = pd.Interval( + left=pd.Timestamp.min if column_is_datetime else -np.inf, + right=intervals[0].left, + ) + right_end = pd.Interval( + left=intervals[-1].right, + right=pd.Timestamp.max if column_is_datetime else np.inf, + ) + + lefts = ( + [left_end.left] + + [interval.left for interval in intervals] + + [right_end.left] + ) + rights = ( + [left_end.right] + + [interval.right for interval in intervals] + + [right_end.right] + ) + + breaks[group] = pd.IntervalIndex.from_arrays(lefts, rights) + + return breaks + + +def _update_base_values_with_bins( + base_values: Dict[str, Any], + bins: Dict[str, pd.IntervalIndex], +) -> Dict[str, Any]: + """Update the base values with the corresponding interval. + + Parameters + ---------- + base_values : Dict[str, Any] + The base values for each group. + bins : Dict[str, pandas.IntervalIndex] + The bins for each group. + + Returns + ------- + Dict[str, Any] + The base values with the corresponding interval for datetime and numeric + columns. 
+ + """ + for group, bin_values in bins.items(): + base_value = base_values[group] + + # find the interval that contains the base value + for interval in bin_values: + if isinstance(interval.left, pd.Timestamp): + base_value = pd.to_datetime(base_value) + if interval.left <= base_value <= interval.right: + base_values[group] = interval + break + + return base_values + + +def _get_slice_spec( + groups: List[str], + unique_values: Dict[str, List[Any]], + column_names: List[str], +) -> SliceSpec: + """Create the slice specifications for computing the metrics. + + Parameters + ---------- + groups : List[str] + The groups (columns) to slice on. + unique_values : Dict[str, List[Any]] + The unique values for each group. + column_names : List[str] + The names of the columns in the dataset. + + Returns + ------- + SliceSpec + The slice specifications for computing the metrics. + + """ + slices = [] + + group_combinations = list(itertools.product(*unique_values.values())) + + for combination in group_combinations: + slice_dict = {} + for group, value in zip(groups, combination): + if isinstance(value, pd.Interval): + slice_dict[group] = { + "min_value": -np.inf + if value.left == pd.Timestamp.min + else value.left, + "max_value": np.inf + if value.right == pd.Timestamp.max + else value.right, + "min_inclusive": value.closed_left, + "max_inclusive": value.closed_right, + "keep_nulls": False, + } + else: + slice_dict[group] = {"value": value, "keep_nulls": False} + slices.append(slice_dict) + + return SliceSpec(slices, validate=True, column_names=column_names) + + +def _compute_metrics( # noqa: C901, PLR0912 + metrics: Union[Callable[..., Any], MetricDict], + dataset: Dataset, + target_columns: List[str], + prediction_column: str, + threshold: Optional[float] = None, + batch_size: Optional[int] = config.DEFAULT_MAX_BATCH_SIZE, + metric_name: Optional[str] = None, + array_lib: Literal["torch", "numpy", "cupy"] = "numpy", +) -> Dict[str, Any]: + """Compute the metrics for the dataset. + + Parameters + ---------- + metrics : Union[Callable, MetricDict] + The metrics to compute. + dataset : Dataset + The dataset to compute the metrics on. + target_columns : Union[str, List[str]] + The target columns. + prediction_column : str + The prediction column. + threshold : Union[float, List[float]], optional, default=None + The threshold to use for the metrics. + batch_size : int + The batch size to use for the computation. + metric_name : Optional[str] + The name of the metric to compute. + array_lib : {"torch", "numpy, "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Dict[str, Any] + The computed metrics. + + """ + empty_dataset_msg = ( + "Encountered empty dataset while computing metrics. " + "The metric values will be set to `None`." + ) + if isinstance(metrics, MetricDict): + if threshold is not None: + # set the threshold for each metric in the collection + for name, metric in metrics.items(): + if isinstance(metric, Metric) and hasattr(metric, "threshold"): + metric.threshold = threshold + elif isinstance(metric, OperatorMetric): + if hasattr(metric.metric_a, "threshold") and hasattr( + metric.metric_b, + "threshold", + ): + metric.metric_a.threshold = threshold + metric.metric_b.threshold = threshold # type: ignore[union-attr] + else: + LOGGER.warning( + "Metric %s does not have a threshold attribute. 
" + "Skipping setting the threshold.", + name, + ) + + if len(dataset) == 0: + warnings.warn(empty_dataset_msg, RuntimeWarning, stacklevel=1) + results: Dict[str, Any] = { + metric_name: float("NaN") + for metric_name in metrics # type: ignore[attr-defined] + } + elif ( + batch_size is None or batch_size <= 0 + ): # dataset.iter does not support getting all rows + targets = get_columns_as_array( + dataset=dataset, columns=target_columns, array_lib=array_lib + ) + predictions = get_columns_as_array( + dataset=dataset, columns=prediction_column, array_lib=array_lib + ) + results = metrics(targets, predictions) + else: + for batch in dataset.iter(batch_size=batch_size): + targets = get_columns_as_array( + dataset=batch, columns=target_columns, array_lib=array_lib + ) + predictions = get_columns_as_array( + dataset=batch, columns=prediction_column, array_lib=array_lib + ) + + metrics.update(targets, predictions) + + results = metrics.compute() + + metrics.reset() + + return results + if callable(metrics): + if metric_name is None: + metric_name = getattr(metrics, "__name__", "Unnamed Metric") + + if len(dataset) == 0: + warnings.warn(empty_dataset_msg, RuntimeWarning, stacklevel=1) + return {metric_name.title(): float("NaN")} + + targets = get_columns_as_array( + dataset=dataset, columns=target_columns, array_lib=array_lib + ) + predictions = get_columns_as_array( + dataset=dataset, columns=prediction_column, array_lib=array_lib + ) + + # check if the callable can take thresholds as an argument + if threshold is not None: + if "threshold" in inspect.signature(metrics).parameters: + output = metrics(targets, predictions, threshold=threshold) + else: + LOGGER.warning( + "The `metrics` argument is a callable that does not take a " + "`threshold` or `thresholds` argument. The `threshold` argument " + "will be ignored.", + ) + output = metrics(targets, predictions) + else: + output = metrics(targets, predictions) + + return {metric_name.title(): output} + + raise TypeError( + "The `metrics` argument must be a string, a Metric, a MetricDict, " + f"or a callable. Got {type(metrics)}.", + ) + + +def _get_metric_results_for_prediction_and_slice( + metrics: Union[Callable[..., Any], MetricDict], + dataset: Dataset, + target_columns: List[str], + prediction_column: str, + slice_name: str, + batch_size: Optional[int] = config.DEFAULT_MAX_BATCH_SIZE, + metric_name: Optional[str] = None, + thresholds: Optional[Array] = None, + array_lib: Literal["torch", "numpy", "cupy"] = "numpy", +) -> Dict[str, Dict[str, Any]]: + """Compute metrics for a slice of a dataset. + + Parameters + ---------- + metrics : Union[Callable, MetricDict] + The metrics to compute. + dataset : Dataset + The dataset to compute the metrics on. + target_columns : Union[str, List[str]] + The target columns. + prediction_column : str + The prediction column. + slice_name : str + The name of the slice. + batch_size : int + The batch size to use for the computation. + metric_name : Optional[str] + The name of the metric to compute. + thresholds : Optional[Array] + The thresholds to use for the metrics. + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Dict[str, Dict[str, Any]] + The computed metrics. 
+ + """ + if thresholds is None: + metric_output = _compute_metrics( + metrics=metrics, + dataset=dataset, + target_columns=target_columns, + prediction_column=prediction_column, + batch_size=batch_size, + metric_name=metric_name, + array_lib=array_lib, + ) + + # result format -> {slice_name: {metric_name: metric_value}} + return {slice_name: metric_output} + + results: Dict[str, Dict[str, Any]] = {} + for threshold in thresholds: # type: ignore[attr-defined] + metric_output = _compute_metrics( + metrics=metrics, + dataset=dataset, + target_columns=target_columns, + prediction_column=prediction_column, + batch_size=batch_size, + threshold=threshold, + metric_name=metric_name, + array_lib=array_lib, + ) + + # result format -> {slice_name: {metric_name@threshold: metric_value}} + for key, value in metric_output.items(): + results.setdefault(slice_name, {}).update({f"{key}@{threshold}": value}) + return results + + +def _construct_base_slice_name(base_values: Dict[str, Any]) -> str: + """Construct the slice name for the base group. + + Parameters + ---------- + base_values : Dict[str, Any] + A dictionary mapping the group name to the base value. + + Returns + ------- + base_slice_name : str + A string representing the slice name for the base group. + + """ + base_slice_name = "" + for group, base_value in base_values.items(): + if isinstance(base_value, pd.Interval): + min_value = ( + -np.inf if base_value.left == pd.Timestamp.min else base_value.left + ) + max_value = ( + np.inf if base_value.right == pd.Timestamp.max else base_value.right + ) + min_end = "[" if base_value.closed_left else "(" + max_end = "]" if base_value.closed_right else ")" + base_slice_name += f"{group}:{min_end}{min_value} - {max_value}{max_end}&" + else: + base_slice_name += f"{group}:{base_value}&" + return base_slice_name[:-1] + + +def _compute_parity_metrics( + results: Dict[str, Dict[str, Dict[str, Any]]], + base_slice_name: str, + array_lib: Literal["numpy", "torch", "cupy"], +) -> Dict[str, Dict[str, Dict[str, Any]]]: + """Compute the parity metrics for each group and threshold if specified. + + Parameters + ---------- + results : Dict[str, Dict[str, Dict[str, Any]]] + A dictionary mapping the prediction column to the metrics dictionary. + base_slice_name : str + The name of the base slice. + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Dict[str, Dict[str, Dict[str, Any]]] + A dictionary mapping the prediction column to the metrics dictionary. 
+ + """ + xp = _SUPPORTED_ARRAY_LIBS[array_lib] + parity_results: Dict[str, Dict[str, Dict[str, Any]]] = {} + + for key, prediction_result in results.items(): + parity_results[key] = {} + for slice_name, slice_result in prediction_result.items(): + for metric_name, metric_value in slice_result.items(): + if metric_name == "Group Size": + continue + + # add 'Parity' to the metric name before @threshold, if specified + metric_name_parts = metric_name.split("@") + parity_metric_name = f"{metric_name_parts[0]} Parity" + if len(metric_name_parts) > 1: + parity_metric_name += f"@{metric_name_parts[1]}" + + numerator = metric_value + denominator = prediction_result[base_slice_name][metric_name] + # value is NaN if either the numerator or denominator is NaN + if xp.all(xp.isnan(numerator)) or xp.all(xp.isnan(denominator)): + parity_metric_value = xp.nan + else: + parity_metric_value = xp.divide(numerator, denominator) + + parity_results[key].setdefault(slice_name, {}).update( + {parity_metric_name: parity_metric_value}, + ) + + return parity_results +
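+# Editor's note: a minimal fairness-evaluation sketch (assumed column names;
+# "binary_accuracy" is assumed to be a valid key in the experimental metric
+# registry, mirroring the accuracy module in this diff). With a single
+# prediction column, the outer level of the result dict is omitted, so results
+# map slice names to metric, parity, and group-size values:
+# >>> from datasets import Dataset
+# >>> ds = Dataset.from_dict(
+# ...     {"gender": ["F", "M", "F", "M"], "target": [0, 1, 1, 0], "preds": [0, 1, 0, 0]}
+# ... )
+# >>> results = evaluate_fairness(
+# ...     metrics="binary_accuracy",
+# ...     dataset=ds,
+# ...     groups="gender",
+# ...     target_columns="target",
+# ...     prediction_columns="preds",
+# ... )
+# An illustrative check of the base-slice naming used by the parity step (a
+# right-closed pandas Interval renders as "(lo - hi]"):
+# >>> _construct_base_slice_name({"gender": "F", "age": pd.Interval(18, 65)})
+# 'gender:F&age:(18 - 65]'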
+ \ No newline at end of file diff --git a/api/_modules/cyclops/evaluate/metrics/accuracy.html b/api/_modules/cyclops/evaluate/metrics/accuracy.html new file mode 100644 index 000000000..20ca41840 --- /dev/null +++ b/api/_modules/cyclops/evaluate/metrics/accuracy.html @@ -0,0 +1,758 @@
+cyclops.evaluate.metrics.accuracy - cyclops documentation

Source code for cyclops.evaluate.metrics.accuracy

+"""Classes for computing accuracy metrics."""
+
+from typing import Literal, Optional, Type, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.accuracy import _accuracy_reduce
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.stat_scores import (
+    BinaryStatScores,
+    MulticlassStatScores,
+    MultilabelStatScores,
+)
+from cyclops.evaluate.metrics.utils import _check_average_arg
+
+
+
+[docs] +class BinaryAccuracy(BinaryStatScores, registry_key="binary_accuracy"): + """Compute accuracy score for binary classification tasks. + + Parameters + ---------- + pos_label : int, default=1 + The label of the positive class. Can be 0 or 1. + threshold : float, default=0.5 + The threshold value for converting probability or logit scores to + binary. A sigmoid function is first applied to logits to convert them + to probabilities. + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryAccuracy + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> metric = BinaryAccuracy() + >>> metric(target, preds) + 0.75 + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [1, 0, 1, 0]] + >>> preds = [[0, 1, 1, 1], [1, 0, 1, 0]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.875 + + """ + + name = "Accuracy Score" + + def __init__( + self, + threshold: float = 0.5, + pos_label: int = 1, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(threshold=threshold, pos_label=pos_label) + self.zero_division = zero_division + + def compute(self) -> float: # type: ignore[override] + """Compute the accuracy score from the state.""" + tp, fp, tn, fn = self._final_state() + score = _accuracy_reduce( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + task_type="binary", + average=None, + zero_division=self.zero_division, + ) + return cast(float, score)
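+# Editor's note: per the docstring above, a sigmoid is applied to logits
+# before thresholding, so raw scores also work (illustrative values):
+# >>> metric = BinaryAccuracy(threshold=0.5)
+# >>> metric([0, 1], [-2.0, 3.0])  # sigmoid(-2) ~ 0.12 -> 0, sigmoid(3) ~ 0.95 -> 1
+# 1.0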
+ + + +
+[docs] +class MulticlassAccuracy(MulticlassStatScores, registry_key="multiclass_accuracy"): + """Compute the accuracy score for multiclass classification problems. + + Parameters + ---------- + num_classes : int + Number of classes in the dataset. + top_k : int, default=None + Number of highest probability predictions or logits to consider when + computing the accuracy score. + average : Literal["micro", "macro", "weighted", None], default=None + If not None, this determines the type of averaging performed on the data: + + - ``micro``: Calculate metrics globally. + - ``macro``: Calculate metrics for each class, and find their unweighted + mean. This does not take class imbalance into account. + - ``weighted``: Calculate metrics for each class, and find their average, + weighted by support (the number of true instances for each class). + This alters ``macro`` to account for class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassAccuracy + >>> target = [0, 1, 2, 2, 2] + >>> preds = [0, 0, 2, 2, 1] + >>> metric = MulticlassAccuracy(num_classes=3) + >>> metric(target, preds) + array([1. , 0. , 0.66666667]) + >>> metric.reset_state() + >>> target = [[0, 1, 2], [2, 1, 0]] + >>> preds = [ + ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.6, 0.2]], + ... [[0.1, 0.8, 0.1], [0.05, 0.95, 0], [0.2, 0.6, 0.2]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0., 1., 0.]) + + """ + + name = "Accuracy Score" + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, top_k=top_k, classwise=True) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the accuracy score from the state.""" + tp, fp, tn, fn = self._final_state() + return _accuracy_reduce( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + task_type="multiclass", + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs] +class MultilabelAccuracy(MultilabelStatScores, registry_key="multilabel_accuracy"): + """Compute the accuracy score for multilabel-indicator targets. + + Parameters + ---------- + num_labels : int + Number of labels in the multilabel classification task. + threshold : float, default=0.5 + Threshold value for binarizing the output of the classifier. + top_k : int, optional, default=None + The number of highest probability or logit predictions considered + to find the correct label. Only works when ``preds`` contains + probabilities/logits. + average : Literal['micro', 'macro', 'weighted', None], default=None + If None, return the accuracy score per label, otherwise this determines + the type of averaging performed on the data: + + - ``micro``: Calculate metrics globally. + - ``macro``: Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average, weighted by support (the number of true instances for + each label). + zero_division : Literal['warn', 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MultilabelAccuracy + >>> target = [[0, 1, 1], [1, 0, 0]] + >>> preds = [[0, 1, 0], [1, 0, 1]] + >>> metric = MultilabelAccuracy(num_labels=3) + >>> metric(target, preds) + array([1., 1., 0.]) + >>> metric.reset_state() + >>> target = [[[0, 1, 1], [1, 0, 0]], [[1, 0, 0], [0, 1, 1]]] + >>> preds = [[[0.05, 0.95, 0], [0.1, 0.8, 0.1]], [[0.1, 0.8, 0.1], [0.05, 0.95, 0]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.5, 0.5, 0.5]) + + """ + + name = "Accuracy Score" + + def __init__( + self, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + labelwise=True, + ) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the accuracy score from the state.""" + tp, fp, tn, fn = self._final_state() + return _accuracy_reduce( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + task_type="multilabel", + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs]
+class Accuracy(Metric, registry_key="accuracy", force_register=True):
+    """Compute accuracy score for different classification tasks.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        The type of task for the input data. One of 'binary', 'multiclass'
+        or 'multilabel'.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the accuracy will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the accuracy score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Sets the value to return when there is a zero division. If set to ``warn``,
+        this acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import Accuracy
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0, 1, 1, 1]
+    >>> metric = Accuracy(task="binary")
+    >>> metric(target, preds)
+    0.75
+    >>> metric.reset_state()
+    >>> target = [[0, 0, 1, 1], [1, 1, 0, 0]]
+    >>> preds = [[0.05, 0.95, 0, 0], [0.1, 0.8, 0.1, 0]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.5
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import Accuracy
+    >>> target = [0, 1, 2, 2, 2]
+    >>> preds = [0, 0, 2, 2, 1]
+    >>> metric = Accuracy(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([1.        , 0.        , 0.66666667])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 2], [2, 1, 0]]
+    >>> preds = [
+    ...     [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.6, 0.2]],
+    ...     [[0.1, 0.8, 0.1], [0.05, 0.95, 0], [0.2, 0.6, 0.2]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0., 1., 0.])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import Accuracy
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0, 1, 0], [1, 0, 1]]
+    >>> metric = Accuracy(task="multilabel", num_labels=3)
+    >>> metric(target, preds)
+    array([1., 1., 0.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 1], [1, 0, 0]], [[1, 0, 0], [0, 1, 1]]]
+    >>> preds = [[[0.05, 0.95, 0], [0.1, 0.8, 0.1]], [[0.1, 0.8, 0.1], [0.05, 0.95, 0]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.5, 0.5, 0.5])
+
+    """
+
+    name: str = "Accuracy Score"
+
+    def __new__(  # type: ignore # mypy expects a subclass of Accuracy
+        cls: Type[Metric],
+        task: Literal["binary", "multiclass", "multilabel"],
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> Metric:
+        """Create a task-specific instance of the metric."""
+        if task == "binary":
+            return BinaryAccuracy(
+                threshold=threshold,
+                pos_label=pos_label,
+                zero_division=zero_division,
+            )
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be specified for multiclass classification."
+            return MulticlassAccuracy(
+                num_classes=num_classes,
+                top_k=top_k,
+                average=average,
+                zero_division=zero_division,
+            )
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be specified for multilabel classification."
+            return MultilabelAccuracy(
+                num_labels=num_labels,
+                threshold=threshold,
+                average=average,
+                zero_division=zero_division,
+            )
+        raise ValueError(
+            f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+            " or 'multilabel'",
+        )
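Because ``__new__`` dispatches on ``task``, constructing ``Accuracy`` yields one of the task-specific classes above rather than an ``Accuracy`` instance; a minimal sketch:

>>> from cyclops.evaluate.metrics import Accuracy
>>> type(Accuracy(task="binary")).__name__
'BinaryAccuracy'
>>> type(Accuracy(task="multiclass", num_classes=3)).__name__
'MulticlassAccuracy'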
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/auroc.html b/api/_modules/cyclops/evaluate/metrics/auroc.html
new file mode 100644
index 000000000..e2dd3572b
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/auroc.html
@@ -0,0 +1,753 @@
+cyclops.evaluate.metrics.auroc - cyclops documentation

Source code for cyclops.evaluate.metrics.auroc

+"""Classes for computing area under the ROC curve."""
+
+from typing import List, Literal, Optional, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.auroc import (
+    _binary_auroc_compute,
+    _multiclass_auroc_compute,
+    _multilabel_auroc_compute,
+)
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.precision_recall_curve import (
+    BinaryPrecisionRecallCurve,
+    MulticlassPrecisionRecallCurve,
+    MultilabelPrecisionRecallCurve,
+)
+from cyclops.evaluate.metrics.utils import _check_average_arg
+
+
+
+[docs]
+class BinaryAUROC(BinaryPrecisionRecallCurve, registry_key="binary_auroc"):
+    """Compute the area under the ROC curve for binary classification tasks.
+
+    Parameters
+    ----------
+    max_fpr : float, default=None
+        The maximum value of the false positive rate. If not None, then
+        the partial AUROC in the range [0, max_fpr] is returned.
+    thresholds : Union[int, List[float], numpy.ndarray], default=None
+        Thresholds used for binarizing the values of ``preds``.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import BinaryAUROC
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0.1, 0.4, 0.35, 0.8]
+    >>> metric = BinaryAUROC()
+    >>> metric(target, preds)
+    0.75
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0], [1, 0, 1]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.7, 0.2, 0.1]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.6111111111111112
+
+    """
+
+    name: str = "ROC AUC Score"
+
+    def __init__(
+        self,
+        max_fpr: Optional[float] = None,
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+        pos_label: int = 1,
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(thresholds=thresholds, pos_label=pos_label)
+        self.max_fpr = max_fpr
+
+    def compute(self) -> float:  # type: ignore # super().compute() returns Tuple
+        """Compute the area under the ROC curve from the state variables."""
+        if self.thresholds is None:
+            state = (
+                np.concatenate(self.target, axis=0),  # type: ignore[attr-defined]
+                np.concatenate(self.preds, axis=0),  # type: ignore[attr-defined]
+            )
+        else:
+            state = self.confmat  # type: ignore[attr-defined]
+
+        return _binary_auroc_compute(
+            state,
+            thresholds=self.thresholds,
+            max_fpr=self.max_fpr,
+            pos_label=self.pos_label,
+        )
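As the ``compute`` branch above shows, ``thresholds=None`` accumulates the raw targets and scores (exact, but memory grows with the data), whereas an integer updates a fixed confusion-matrix state (approximate, constant memory). A construction sketch:

>>> exact = BinaryAUROC(thresholds=None)  # stores every score; exact result
>>> binned = BinaryAUROC(thresholds=100)  # 100 fixed bins; constant memory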
+ + + +
+[docs] +class MulticlassAUROC(MulticlassPrecisionRecallCurve, registry_key="multiclass_auroc"): + """Compute the area under the ROC curve for multiclass classification tasks. + + Parameters + ---------- + num_classes : int + Number of classes. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + average : Literal["macro", "weighted"], default=None + If ``None``, then the scores for each class are returned. Otherwise, + this determines the type of averaging performed on the scores. One of: + + - `macro`: Calculate metrics for each class, and find their unweighted + mean. This does not take class imbalance into account. + - `weighted`: Calculate metrics for each class, and find their average, + weighted by support (the number of true instances for each class). + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassAUROC + >>> target = [0, 1, 2, 0] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.89, 0.06], + ... [0.05, 0.01, 0.94], + ... [0.9, 0.05, 0.05], + ... ] + >>> metric = MulticlassAUROC(num_classes=3) + >>> metric(target, preds) + array([1., 1., 1.]) + >>> metric.reset_state() + >>> target = [[0, 1, 0], [1, 0, 1]] + >>> preds = [ + ... [[0.1, 0.9, 0.0], [0.7, 0.2, 0.1], [0.2, 0.3, 0.5]], + ... [[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.2, 0.3, 0.5]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.5 , 0.22222222, 0. ]) + + """ + + name: str = "ROC AUC Score" + + def __init__( + self, + num_classes: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + average: Optional[Literal["macro", "weighted"]] = None, + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, thresholds=thresholds) + _check_average_arg(average) + self.average = average + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type:ignore[override] + """Compute the area under the ROC curve from the state variables.""" + if self.thresholds is None: + state = ( + np.concatenate(self.target, axis=0), # type: ignore[attr-defined] + np.concatenate(self.preds, axis=0), # type: ignore[attr-defined] + ) + else: + state = self.confmat # type: ignore[attr-defined] + + return _multiclass_auroc_compute( + state=state, + num_classes=self.num_classes, + thresholds=self.thresholds, + average=self.average, + )
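Averaging collapses the per-class scores shown above; since every class scores 1.0 in the first doctest, ``average="macro"`` should return their unweighted mean:

>>> metric = MulticlassAUROC(num_classes=3, average="macro")
>>> metric(
...     [0, 1, 2, 0],
...     [[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94], [0.9, 0.05, 0.05]],
... )
1.0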
+ + + +
+[docs]
+class MultilabelAUROC(MultilabelPrecisionRecallCurve, registry_key="multilabel_auroc"):
+    """Compute the area under the ROC curve for multilabel classification tasks.
+
+    Parameters
+    ----------
+    num_labels : int
+        The number of labels in the multilabel classification task.
+    thresholds : Union[int, List[float], numpy.ndarray], default=None
+        Thresholds used for binarizing the values of ``preds``.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+    average : Literal["micro", "macro", "weighted"], default=None
+        If ``None``, then the scores for each label are returned. Otherwise,
+        this determines the type of averaging performed on the scores. One of:
+
+        - `micro`: Calculate metrics globally.
+        - `macro`: Calculate metrics for each label, and find their unweighted
+          mean. This does not take label imbalance into account.
+        - `weighted`: Calculate metrics for each label, and find their average,
+          weighted by support (the number of true instances for each label).
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelAUROC
+    >>> target = [[0, 1], [1, 1], [1, 0]]
+    >>> preds = [[0.9, 0.05], [0.05, 0.89], [0.05, 0.01]]
+    >>> metric = MultilabelAUROC(num_labels=2)
+    >>> metric(target, preds)
+    array([1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 0]], [[1, 1], [1, 0]]]
+    >>> preds = [[[0.9, 0.05], [0.05, 0.89]], [[0.05, 0.89], [0.05, 0.01]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([1.   , 0.625])
+
+    """
+
+    name: str = "ROC AUC Score"
+
+    def __init__(
+        self,
+        num_labels: int,
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+        average: Optional[Literal["micro", "macro", "weighted"]] = None,
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(num_labels=num_labels, thresholds=thresholds)
+        _check_average_arg(average)
+        self.average = average
+
+    def compute(self) -> Union[float, npt.NDArray[np.float_]]:  # type: ignore[override]
+        """Compute the area under the ROC curve from the state variables."""
+        if self.thresholds is None:
+            state = (
+                np.concatenate(self.target, axis=0),  # type: ignore[attr-defined]
+                np.concatenate(self.preds, axis=0),  # type: ignore[attr-defined]
+            )
+        else:
+            state = self.confmat  # type: ignore[attr-defined]
+
+        return _multilabel_auroc_compute(
+            state=state,
+            num_labels=self.num_labels,
+            thresholds=self.thresholds,
+            average=self.average,
+        )
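Note that the ``average`` options differ by task: ``MulticlassAUROC`` accepts only ``macro`` and ``weighted``, while the multilabel variant also supports ``micro``, which pools the label dimension into a single score. A construction sketch:

>>> metric = MultilabelAUROC(num_labels=2, average="micro")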
+ + + +
+[docs]
+class AUROC(Metric, registry_key="auroc", force_register=True):
+    """Compute the area under the ROC curve for different classification tasks.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        Task type. One of ``binary``, ``multiclass``, ``multilabel``.
+    max_fpr : float, default=None
+        The maximum value of the false positive rate. If not None, a partial AUC
+        in the range [0, max_fpr] is returned. Only used for binary classification.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for binarizing the values of ``preds``.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+    num_classes : int, default=None
+        Number of classes. This parameter is required for the ``multiclass`` task.
+    num_labels : int, default=None
+        Number of labels. This parameter is required for the ``multilabel`` task.
+    average : Literal["micro", "macro", "weighted"], default=None
+        If not None, apply the method to compute the average area under the
+        ROC curve. Only applicable for the ``multiclass`` and ``multilabel``
+        tasks. One of:
+
+        - ``micro``: Calculate metrics globally.
+        - ``macro``: Calculate metrics for each label, and find their unweighted
+          mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label, and find their average,
+          weighted by support (accounting for label imbalance).
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import AUROC
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0.1, 0.4, 0.35, 0.8]
+    >>> metric = AUROC(task="binary")
+    >>> metric(target, preds)
+    0.75
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0], [1, 0, 1]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.7, 0.2, 0.1]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.6111111111111112
+
+    >>> # (multiclass)
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [
+    ...     [0.9, 0.05, 0.05],
+    ...     [0.05, 0.89, 0.06],
+    ...     [0.05, 0.01, 0.94],
+    ...     [0.9, 0.05, 0.05],
+    ... ]
+    >>> metric = AUROC(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([1., 1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0], [1, 0, 1]]
+    >>> preds = [
+    ...     [[0.1, 0.9, 0.0], [0.7, 0.2, 0.1], [0.2, 0.3, 0.5]],
+    ...     [[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.2, 0.3, 0.5]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.5       , 0.22222222, 0.        ])
+
+    >>> # (multilabel)
+    >>> target = [[0, 1], [1, 1], [1, 0]]
+    >>> preds = [[0.9, 0.05], [0.05, 0.89], [0.05, 0.01]]
+    >>> metric = AUROC(task="multilabel", num_labels=2)
+    >>> metric(target, preds)
+    array([1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 0]], [[1, 1], [1, 0]]]
+    >>> preds = [[[0.9, 0.05], [0.05, 0.89]], [[0.05, 0.89], [0.05, 0.01]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([1.   , 0.625])
+
+    """
+
+    name: str = "ROC AUC Score"
+
+    def __new__(  # type: ignore # mypy expects a subclass of AUROC
+        cls,
+        task: Literal["binary", "multiclass", "multilabel"],
+        max_fpr: Optional[float] = None,
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+        num_classes: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Optional[Literal["micro", "macro", "weighted"]] = None,
+    ) -> Metric:
+        """Create a task-specific instance of the AUROC metric."""
+        if task == "binary":
+            return BinaryAUROC(max_fpr=max_fpr, thresholds=thresholds)
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be a positive integer."
+            return MulticlassAUROC(
+                num_classes=num_classes,
+                thresholds=thresholds,
+                average=average,  # type: ignore
+            )
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be a positive integer."
+            return MultilabelAUROC(
+                num_labels=num_labels,
+                thresholds=thresholds,
+                average=average,
+            )
+        raise ValueError(
+            "Expected argument `task` to be either 'binary', 'multiclass' or "
+            f"'multilabel', but got {task}",
+        )
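A dispatch sketch mirroring the ``__new__`` branches above; note that ``max_fpr`` is forwarded only in the binary case:

>>> from cyclops.evaluate.metrics import AUROC
>>> metric = AUROC(task="binary", max_fpr=0.5)
>>> type(metric).__name__, metric.max_fpr
('BinaryAUROC', 0.5)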
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/average_precision.html b/api/_modules/cyclops/evaluate/metrics/average_precision.html
new file mode 100644
index 000000000..79f09977b
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/average_precision.html
@@ -0,0 +1,526 @@
+cyclops.evaluate.metrics.average_precision - cyclops documentation

Source code for cyclops.evaluate.metrics.average_precision

+"""Classes for computing area under the Average Precision (AUPRC)."""
+
+from typing import List, Literal, Optional, Type, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.average_precision import (
+    _binary_average_precision_compute,
+)
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.precision_recall_curve import (
+    BinaryPrecisionRecallCurve,
+)
+
+
+
+[docs] +class BinaryAveragePrecision( + BinaryPrecisionRecallCurve, + registry_key="binary_average_precision", +): + """Compute average precision for binary input. + + Parameters + ---------- + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for computing the precision and recall scores. + If int, then the number of thresholds to use. + If list or numpy.ndarray, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + pos_label : int + The label of the positive class. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryAveragePrecision + >>> target = [0, 1, 0, 1] + >>> preds = [0.1, 0.4, 0.35, 0.8] + >>> metric = BinaryAveragePrecision(thresholds=3) + >>> metric(target, preds) + 0.75 + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [1, 1, 0, 0]] + >>> preds = [[0.1, 0.4, 0.35, 0.8], [0.6, 0.3, 0.1, 0.7]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.5833333333333333 + + """ + + name: str = "Average Precision" + + def compute( # type: ignore[override] + self, + ) -> float: + """Compute the average precision score from the state.""" + if self.thresholds is None: + state = ( + np.concatenate(self.target, axis=0), # type: ignore[attr-defined] + np.concatenate(self.preds, axis=0), # type: ignore[attr-defined] + ) + else: + state = self.confmat # type: ignore[attr-defined] + + return _binary_average_precision_compute( + state, + self.thresholds, + self.pos_label, + )
+
+
+
+class AveragePrecision(
+    Metric,
+    registry_key="average_precision",
+    force_register=True,
+):
+    """Compute the average precision score for different classification tasks.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        The task for which the average precision score is computed.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the precision and recall scores. If int,
+        then the number of thresholds to use. If list or array, then the
+        thresholds to use. If None, then the thresholds are automatically
+        determined by the unique values in ``preds``.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, optional
+        The number of classes in the dataset. Required if ``task`` is
+        ``"multiclass"``.
+    num_labels : int, optional
+        The number of labels in the dataset. Required if ``task`` is
+        ``"multilabel"``.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import AveragePrecision
+    >>> target = [1, 1, 1, 0]
+    >>> preds = [0.6, 0.2, 0.3, 0.8]
+    >>> metric = AveragePrecision(task="binary", thresholds=None)
+    >>> metric(target, preds)
+    0.6388888888888888
+    >>> metric.reset_state()
+    >>> target = [[1, 0, 1, 1], [0, 0, 0, 1]]
+    >>> preds = [[0.5, 0.4, 0.1, 0.3], [0.9, 0.6, 0.45, 0.8]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.48214285714285715
+
+    """
+
+    name: str = "Average Precision"
+
+    def __new__(  # type: ignore # mypy expects a subclass of AveragePrecision
+        cls: Type[Metric],
+        task: Literal["binary", "multiclass", "multilabel"],
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        num_labels: Optional[int] = None,
+    ) -> Metric:
+        """Create a task-specific instance of the average precision metric."""
+        if task == "binary":
+            return BinaryAveragePrecision(
+                thresholds=thresholds,
+                pos_label=pos_label,
+            )
+        if task == "multiclass":
+            raise NotImplementedError("Multiclass average precision is not implemented.")
+        if task == "multilabel":
+            raise NotImplementedError("Multilabel average precision is not implemented.")
+
+        raise ValueError(
+            "Expected argument `task` to be either 'binary', 'multiclass' or "
+            f"'multilabel', but got {task}",
+        )
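Only the binary task is implemented at this point; the ``multiclass`` and ``multilabel`` branches raise ``NotImplementedError``. A minimal dispatch sketch:

>>> from cyclops.evaluate.metrics import AveragePrecision
>>> type(AveragePrecision(task="binary")).__name__
'BinaryAveragePrecision'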
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/f_beta.html b/api/_modules/cyclops/evaluate/metrics/f_beta.html
new file mode 100644
index 000000000..dc4f4a39c
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/f_beta.html
@@ -0,0 +1,1113 @@
+cyclops.evaluate.metrics.f_beta - cyclops documentation

Source code for cyclops.evaluate.metrics.f_beta

+"""Classes for computing the F-beta score."""
+
+from typing import Literal, Optional, Type, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.f_beta import _check_beta, _fbeta_reduce
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.stat_scores import (
+    BinaryStatScores,
+    MulticlassStatScores,
+    MultilabelStatScores,
+)
+from cyclops.evaluate.metrics.utils import _check_average_arg
+
+
+
+[docs] +class BinaryFbetaScore(BinaryStatScores, registry_key="binary_fbeta_score"): + """Compute the F-beta score for binary classification tasks. + + Parameters + ---------- + beta : float + Weight of precision in harmonic mean. + pos_label : int, default=1 + The positive class label. One of [0, 1]. + threshold : float, default=0.5 + Threshold value for converting probabilities and logits to binary. + Logits will be converted to probabilities using the sigmoid function. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there are no true positives or true negatives. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryFbetaScore + >>> target = [0, 1, 1, 0] + >>> preds = [0, 1, 0, 0] + >>> metric = BinaryFbetaScore(beta=0.5) + >>> metric(target, preds) + 0.8333333333333334 + >>> metric.reset_state() + >>> target = [[1, 0, 1, 0], [1, 0, 0, 1]] + >>> preds = [[0.2, 0.8, 0.3, 0.4], [0.6, 0.3, 0.1, 0.5]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.625 + + """ + + name: str = "F-beta Score" + + def __init__( + self, + beta: float, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(threshold=threshold, pos_label=pos_label) + + _check_beta(beta=beta) + + self.beta = beta + self.zero_division = zero_division + + def compute(self) -> float: # type: ignore[override] + """Compute the metric from the state.""" + tp, fp, _, fn = self._final_state() + f_score = _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=self.beta, + average=None, + zero_division=self.zero_division, + ) + return cast(float, f_score)
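Since F-beta is (1 + beta^2) * precision * recall / (beta^2 * precision + recall), beta < 1 weights precision more heavily and beta > 1 weights recall. A hand-computed check on the first doctest's inputs (precision 1.0, recall 0.5): F2 = 5 * 0.5 / 4.5.

>>> metric = BinaryFbetaScore(beta=2.0)
>>> metric([0, 1, 1, 0], [0, 1, 0, 0])
0.5555555555555556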
+ + + +
+[docs] +class MulticlassFbetaScore(MulticlassStatScores, registry_key="multiclass_fbeta_score"): + """Compute the F-beta score for multiclass classification tasks. + + Parameters + ---------- + beta : float + Weight of precision in harmonic mean. + num_classes : int + The number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take class imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class + imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassFbetaScore + >>> target = [0, 1, 2, 0] + >>> preds = [0, 2, 1, 0] + >>> metric = MulticlassFbetaScore(beta=0.5, num_classes=3) + >>> metric(target, preds) + array([1., 0., 0.]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.90909091, 0. , 0. ]) + + """ + + name: str = "F-beta Score" + + def __init__( + self, + beta: float, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, top_k=top_k, classwise=True) + + _check_beta(beta=beta) + _check_average_arg(average=average) + + self.beta = beta + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the metric from the state.""" + tp, fp, _, fn = self._final_state() + return _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=self.beta, + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs] +class MultilabelFbetaScore(MultilabelStatScores, registry_key="multilabel_fbeta_score"): + """Compute the F-beta score for multilabel classification tasks. + + Parameters + ---------- + beta : float + Weight of precision in harmonic mean. + num_labels : int + Number of labels for the task. + threshold : float, default=0.5 + Threshold for deciding the positive class if predictions are logits + or probability scores. Logits will be converted to probabilities using + the sigmoid function. + top_k : int, optional + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each label. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MultilabelFbetaScore + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.8, 0.2]] + >>> metric = MultilabelFbetaScore(beta=0.5, num_labels=2) + >>> metric(target, preds) + array([1. , 0.83333333]) + >>> metric.reset_state() + >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]] + >>> preds = [[[0, 1], [1, 0]], [[1, 1], [1, 0]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([1. , 0.90909091]) + + """ + + name: str = "F-beta Score" + + def __init__( + self, + beta: float, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + labelwise=True, + ) + + _check_beta(beta=beta) + _check_average_arg(average=average) + + self.beta = beta + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the metric from the state.""" + tp, fp, _, fn = self._final_state() + return _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=self.beta, + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs]
+class FbetaScore(Metric, registry_key="fbeta_score", force_register=True):
+    """Compute the F-beta score for different types of classification tasks.
+
+    Parameters
+    ----------
+    beta : float
+        Weight of precision in harmonic mean.
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label/class imbalance into
+          account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import FbetaScore
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0.1, 0.8, 0.4, 0.3]
+    >>> metric = FbetaScore(beta=0.5, task="binary")
+    >>> metric(target, preds)
+    0.8333333333333334
+    >>> metric.reset_state()
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.9090909090909091
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import FbetaScore
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [[0.1, 0.8, 0.1], [0.1, 0.1, 0.8], [0.1, 0.1, 0.8], [0.8, 0.1, 0.1]]
+    >>> metric = FbetaScore(beta=0.5, task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([0.83333333, 0.        , 0.55555556])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0], [0, 0, 1]]
+    >>> preds = [
+    ...     [[0.1, 0.8, 0.1], [0.1, 0.1, 0.8], [0.8, 0.1, 0.1]],
+    ...     [[0.1, 0.1, 0.8], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.83333333, 0.5       , 0.        ])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import FbetaScore
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> metric = FbetaScore(beta=0.5, task="multilabel", num_labels=2)
+    >>> metric(target, preds)
+    array([1.        , 0.83333333])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]]
+    >>> preds = [[[0, 1], [1, 0]], [[1, 1], [1, 0]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([1.        , 0.90909091])
+
+    """
+
+    name: str = "F-beta Score"
+
+    def __new__(  # type: ignore # mypy expects a subclass of FbetaScore
+        cls: Type[Metric],
+        beta: float,
+        task: Literal["binary", "multiclass", "multilabel"],
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> Metric:
+        """Create a task-specific FbetaScore instance."""
+        if task == "binary":
+            return BinaryFbetaScore(
+                beta=beta,
+                threshold=threshold,
+                pos_label=pos_label,
+                zero_division=zero_division,
+            )
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be specified for multiclass classification."
+            return MulticlassFbetaScore(
+                beta=beta,
+                num_classes=num_classes,
+                top_k=top_k,
+                average=average,
+                zero_division=zero_division,
+            )
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be specified for multilabel classification."
+            return MultilabelFbetaScore(
+                beta=beta,
+                num_labels=num_labels,
+                threshold=threshold,
+                average=average,
+                zero_division=zero_division,
+            )
+        raise ValueError(
+            f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+            " or 'multilabel'",
+        )
+ + + +
+[docs] +class BinaryF1Score(BinaryFbetaScore, registry_key="binary_f1_score"): + """Compute the F1 score for binary classification tasks. + + Parameters + ---------- + pos_label: int, default=1 + The label of the positive class. + threshold : float, default=0.5 + Threshold value for binarizing predictions in form of logits or + probability scores. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryF1Score + >>> target = [0, 1, 1, 0] + >>> preds = [0.1, 0.8, 0.4, 0.3] + >>> metric = BinaryF1Score() + >>> metric(target, preds) + 0.6666666666666666 + >>> metric.reset_state() + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.8, 0.2]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.8 + + """ + + name: str = "F1 Score" + + def __init__( + self, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + beta=1.0, + threshold=threshold, + pos_label=pos_label, + zero_division=zero_division, + )
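As the ``__init__`` above shows, F1 is simply F-beta with ``beta=1.0``, so the two metrics agree on any input; a quick equivalence sketch:

>>> from cyclops.evaluate.metrics import BinaryF1Score, BinaryFbetaScore
>>> target, preds = [0, 1, 1, 0], [0.1, 0.8, 0.4, 0.3]
>>> BinaryF1Score()(target, preds) == BinaryFbetaScore(beta=1.0)(target, preds)
True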
+ + + +
+[docs]
+class MulticlassF1Score(MulticlassFbetaScore, registry_key="multiclass_f1_score"):
+    """Compute the F1 score for multiclass classification tasks.
+
+    Parameters
+    ----------
+    num_classes : int
+        Number of classes in the dataset.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each class. Otherwise, use one of
+        the following options to compute the average score:
+
+        - ``micro``: Calculate metric globally.
+        - ``macro``: Calculate metric for each class, and find their
+          unweighted mean. This does not take class imbalance into account.
+        - ``weighted``: Calculate metric for each class, and find their
+          average weighted by the support (the number of true instances
+          for each class). This alters "macro" to account for class
+          imbalance. It can result in an F-score that is not between
+          precision and recall.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MulticlassF1Score
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [[0.1, 0.6, 0.3], [0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.95, 0.05, 0]]
+    >>> metric = MulticlassF1Score(num_classes=3)
+    >>> metric(target, preds)
+    array([0.66666667, 0.5       , 0.        ])
+    >>> metric.reset_state()
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[[0.1, 0.9, 0], [0.05, 0.95, 0]], [[0.1, 0.8, 0.1], [0.05, 0.95, 0]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.        , 0.85714286, 0.        ])
+
+    """
+
+    name: str = "F1 Score"
+
+    def __init__(
+        self,
+        num_classes: int,
+        top_k: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the Metric."""
+        super().__init__(
+            beta=1.0,
+            num_classes=num_classes,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+ + + +
+[docs]
+class MultilabelF1Score(MultilabelFbetaScore, registry_key="multilabel_f1_score"):
+    """Compute the F1 score for multilabel classification tasks.
+
+    Parameters
+    ----------
+    num_labels : int
+        Number of labels for the task.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label. Otherwise, use one of
+        the following options to compute the average score:
+
+        - ``micro``: Calculate metric globally from the total count of true
+          positives and false positives.
+        - ``macro``: Calculate metric for each label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metric for each label, and find their
+          average weighted by the support (the number of true instances
+          for each label). This alters "macro" to account for label imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelF1Score
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.2]]
+    >>> metric = MultilabelF1Score(num_labels=3)
+    >>> metric(target, preds)
+    array([0., 1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 0], [1, 0, 1]], [[0, 1, 1], [1, 0, 0]]]
+    >>> preds = [
+    ...     [[0.1, 0.7, 0.2], [0.2, 0.8, 0.3]],
+    ...     [[0.5, 0.9, 0.0], [0.3, 0.4, 0.2]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0. , 0.8, 0. ])
+
+    """
+
+    name: str = "F1 Score"
+
+    def __init__(
+        self,
+        num_labels: int,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the Metric."""
+        super().__init__(
+            beta=1.0,
+            num_labels=num_labels,
+            threshold=threshold,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+ + + +
+[docs]
+class F1Score(FbetaScore, registry_key="f1_score", force_register=True):
+    """Compute the F1 score for different types of classification tasks.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label/class imbalance into
+          account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import F1Score
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0.1, 0.8, 0.4, 0.3]
+    >>> metric = F1Score(task="binary")
+    >>> metric(target, preds)
+    0.6666666666666666
+    >>> metric.reset_state()
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.8
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import F1Score
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [[0.1, 0.6, 0.3], [0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.95, 0.05, 0]]
+    >>> metric = F1Score(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([0.66666667, 0.5       , 0.        ])
+    >>> metric.reset_state()
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[[0.1, 0.9, 0], [0.05, 0.95, 0]], [[0.1, 0.8, 0.1], [0.05, 0.95, 0]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.        , 0.85714286, 0.        ])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import F1Score
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.2]]
+    >>> metric = F1Score(task="multilabel", num_labels=3)
+    >>> metric(target, preds)
+    array([0., 1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 0], [1, 0, 1]], [[0, 1, 1], [1, 0, 0]]]
+    >>> preds = [
+    ...     [[0.1, 0.7, 0.2], [0.2, 0.8, 0.3]],
+    ...     [[0.5, 0.9, 0.0], [0.3, 0.4, 0.2]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0. , 0.8, 0. ])
+
+    """
+
+    name: str = "F1 Score"
+
+    def __new__(  # type: ignore # mypy expects a subclass of F1Score
+        cls: Type[Metric],
+        task: Literal["binary", "multiclass", "multilabel"],
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> Metric:
+        """Create a task-specific F1 score instance."""
+        if task == "binary":
+            return BinaryF1Score(
+                threshold=threshold,
+                pos_label=pos_label,
+                zero_division=zero_division,
+            )
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be specified for multiclass classification."
+            return MulticlassF1Score(
+                num_classes=num_classes,
+                top_k=top_k,
+                average=average,
+                zero_division=zero_division,
+            )
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be specified for multilabel classification."
+            return MultilabelF1Score(
+                num_labels=num_labels,
+                threshold=threshold,
+                average=average,
+                zero_division=zero_division,
+            )
+        raise ValueError(
+            f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+            " or 'multilabel'",
+        )
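``F1Score`` follows the same ``__new__`` dispatch pattern as ``Accuracy`` and ``FbetaScore``; a minimal sketch:

>>> from cyclops.evaluate.metrics import F1Score
>>> type(F1Score(task="multilabel", num_labels=3)).__name__
'MultilabelF1Score'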
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/accuracy.html b/api/_modules/cyclops/evaluate/metrics/functional/accuracy.html
new file mode 100644
index 000000000..2776c015b
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/accuracy.html
@@ -0,0 +1,869 @@
+cyclops.evaluate.metrics.functional.accuracy - cyclops documentation

Source code for cyclops.evaluate.metrics.functional.accuracy

+"""Functions for computing accuracy scores for classification tasks."""
+
+from typing import Literal, Optional, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics._classification import _prf_divide
+
+from cyclops.evaluate.metrics.functional.stat_scores import (
+    _binary_stat_scores_args_check,
+    _binary_stat_scores_format,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_update,
+)
+from cyclops.evaluate.metrics.utils import (
+    _check_average_arg,
+    _get_value_if_singleton_array,
+)
+
+
+def _accuracy_reduce(
+    tp: Union[npt.NDArray[np.int_], np.int_],
+    fp: Union[npt.NDArray[np.int_], np.int_],
+    tn: Union[npt.NDArray[np.int_], np.int_],
+    fn: Union[npt.NDArray[np.int_], np.int_],
+    task_type: Literal["binary", "multiclass", "multilabel"],
+    average: Literal["micro", "macro", "weighted", None],
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute accuracy score per class or sample and apply average.
+
+    Parameters
+    ----------
+    tp : numpy.ndarray or int
+        The number of true positives.
+    fp : numpy.ndarray or int
+        The number of false positives.
+    tn : numpy.ndarray or int
+        The number of true negatives.
+    fn : numpy.ndarray or int
+        The number of false negatives.
+    task_type : Literal["binary", "multiclass", "multilabel"]
+        The type of task for the input data. One of 'binary', 'multiclass'
+        or 'multilabel'.
+    average : Literal["micro", "macro", "weighted", None]
+        The type of averaging to apply to the accuracy scores. One of
+        'micro', 'macro', 'weighted' or None.
+    zero_division : Literal["warn", 0, 1]
+        Sets the value to return when there is a zero division. If set to "warn",
+        this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    accuracy : numpy.ndarray or float
+        The average accuracy score if 'average' is not None, otherwise the
+        accuracy score per class.
+
+    """
+    if average == "micro":
+        tp = np.array(np.sum(tp))
+        fn = np.array(np.sum(fn))
+        numerator: Union[npt.NDArray[np.int_], np.int_] = tp
+        denominator: Union[npt.NDArray[np.int_], np.int_] = tp + fn
+        if task_type == "multilabel":
+            fp = np.array(np.sum(fp))
+            tn = np.array(np.sum(tn))
+            numerator = tp + tn
+            denominator = tp + fp + fn + tn
+    elif task_type in ["binary", "multilabel"]:
+        numerator = tp + tn
+        denominator = tp + fp + fn + tn
+    else:
+        numerator = tp
+        denominator = tp + fn
+
+    score = _prf_divide(
+        np.expand_dims(numerator, axis=0) if numerator.ndim == 0 else numerator,
+        np.expand_dims(denominator, axis=0) if denominator.ndim == 0 else denominator,
+        metric="accuracy",
+        modifier="true",
+        average=average,
+        warn_for=("accuracy",),
+        zero_division=zero_division,
+    )
+
+    if average in ["macro", "weighted"]:
+        weights = None
+        if average == "weighted":
+            weights = tp + fn
+
+        if weights is not None and np.sum(weights) == 0:
+            return (
+                np.zeros_like(score, dtype=np.float64)
+                if zero_division in ["warn", 0]
+                else np.ones_like(score, dtype=np.float64)
+            )
+
+        avg_score: Union[float, npt.NDArray[np.float_]] = np.average(
+            score,
+            weights=weights,
+        )
+        return avg_score
+
+    ret_value = _get_value_if_singleton_array(score)
+    return cast(Union[float, npt.NDArray[np.float_]], ret_value)
+
+
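To make the two averaging paths in ``_accuracy_reduce`` concrete, a hand-worked numpy sketch using the per-class counts from the ``multiclass_accuracy`` doctest below (tp = [1, 0, 2], fn = [0, 1, 1]):

>>> import numpy as np
>>> tp, fn = np.array([1, 0, 2]), np.array([0, 1, 1])
>>> float((tp / (tp + fn)).mean())  # macro: average the per-class scores
0.5555555555555555
>>> float(tp.sum() / (tp + fn).sum())  # micro: pool the counts, then divide
0.6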
+
+[docs] +def binary_accuracy( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute accuracy score for binary classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated targets (predictions) as returned by a classifier. + pos_label : int, default=1 + The label of the positive class. Can be 0 or 1. + threshold : float, default=0.5 + The threshold value for converting probability or logit scores to + binary. A sigmoid function is first applied to logits to convert them + to probabilities. + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Returns + ------- + acc_score : float + The accuracy score. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_accuracy + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> binary_accuracy(target, preds) + 0.75 + >>> target = [0, 1, 0, 1] + >>> preds = [0.1, 0.9, 0.8, 0.4] + >>> binary_accuracy(target, preds, threshold=0.5) + 0.5 + + """ + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + target, preds = _binary_stat_scores_format( + target, + preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, tn, fn = _binary_stat_scores_update(target, preds, pos_label=pos_label) + acc_score = _accuracy_reduce( + tp, + fp, + tn, + fn, + task_type="binary", + average=None, + zero_division=zero_division, + ) + return cast(float, acc_score)
+ + + +
+[docs] +def multiclass_accuracy( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the accuracy score for multiclass classification problems. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated targets (predictions) as returned by a classifier. + num_classes : int + Number of classes in the dataset. + top_k : int, default=None + Number of highest probability predictions or logits to consider when + computing the accuracy score. + average : Literal["micro", "macro", "weighted", None], default=None + If not None, this determines the type of averaging performed on the data: + + - ``micro``: Calculate metrics globally by counting the total + true positives, false negatives, false positives and true + negatives. + - ``macro``: Calculate metrics for each class, and find their + unweighted mean. This does not take class imbalance into account. + - ``weighted``: Calculate metrics for each class, and find their + average, weighted by support (the number of true instances for + each class). This alters ``macro`` to account for class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The average accuracy score as a float if ``average`` is not None, + otherwise a numpy array of accuracy scores per class/label. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_accuracy + >>> target = [0, 1, 2, 2, 2] + >>> preds = [0, 0, 2, 2, 1] + >>> multiclass_accuracy(target, preds, num_classes=3) + array([1. , 0. , 0.66666667]) + >>> multiclass_accuracy(target, preds, num_classes=3, average="micro") + 0.6 + >>> multiclass_accuracy(target, preds, num_classes=3, average="macro") + 0.5555555555555555 + >>> multiclass_accuracy(target, preds, num_classes=3, average="weighted") + 0.6 + + """ + _check_average_arg(average) + + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=num_classes, + top_k=top_k, + ) + + tp, fp, tn, fn = _multiclass_stat_scores_update(target, preds, num_classes) + + return cast( + float, + _accuracy_reduce( + tp, + fp, + tn, + fn, + task_type="multiclass", + average=average, + zero_division=zero_division, + ), + )
+ + + +
+[docs]
+def multilabel_accuracy(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_labels: int,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    average: Literal["micro", "macro", "weighted", None] = None,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the accuracy score for multilabel-indicator targets.
+
+    Parameters
+    ----------
+    target : array-like of shape (num_samples, num_labels)
+        Ground truth (correct) target values.
+    preds : array-like of shape (num_samples, num_labels)
+        Estimated targets as returned by a classifier.
+    num_labels : int
+        Number of labels in the multilabel classification task.
+    threshold : float, default=0.5
+        Threshold value for binarizing the output of the classifier.
+    top_k : int, optional, default=None
+        The number of highest probability or logit predictions considered
+        to find the correct label. Only works when ``preds`` contains
+        probabilities/logits.
+    average : Literal['micro', 'macro', 'weighted', None], default=None
+        If None, return the accuracy score per label, otherwise this determines
+        the type of averaging performed on the data:
+
+        - ``micro``: Calculate metrics globally by counting the total
+          true positives, false negatives, true negatives and false positives.
+        - ``macro``: Calculate metrics for each label, and find their unweighted
+          mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label, and find their
+          average, weighted by support (the number of true instances for
+          each label).
+    zero_division : Literal['warn', 0, 1], default="warn"
+        Sets the value to return when there is a zero division. If set to ``warn``,
+        this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    float or numpy.ndarray
+        The average accuracy score as a float if ``average`` is not None,
+        otherwise a numpy array of accuracy scores per label.
+
+    Raises
+    ------
+    ValueError
+        If ``average`` is not one of ``micro``, ``macro``, ``weighted``,
+        or ``None``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import multilabel_accuracy
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0, 1, 0], [1, 0, 1]]
+    >>> multilabel_accuracy(target, preds, num_labels=3, average=None)
+    array([1., 1., 0.])
+    >>> multilabel_accuracy(target, preds, num_labels=3, average="micro")
+    0.6666666666666666
+
+    """
+    _check_average_arg(average)
+
+    target, preds = _multilabel_stat_scores_format(
+        target,
+        preds,
+        num_labels=num_labels,
+        threshold=threshold,
+        top_k=top_k,
+    )
+
+    tp, fp, tn, fn = _multilabel_stat_scores_update(target, preds, num_labels)
+
+    return _accuracy_reduce(
+        tp,
+        fp,
+        tn,
+        fn,
+        task_type="multilabel",
+        average=average,
+        zero_division=zero_division,
+    )
+
+
+
+def accuracy(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    pos_label: int = 1,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Literal["micro", "macro", "weighted", None] = None,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute accuracy score for different classification tasks.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated targets (predictions) as returned by a classifier.
+    task : Literal["binary", "multiclass", "multilabel"]
+        The type of task for the input data. One of 'binary', 'multiclass'
+        or 'multilabel'.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the accuracy will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the accuracy score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives, false positives, true negatives and false negatives.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Sets the value to return when there is a zero division. If set to ``warn``,
+        this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    accuracy_score : float or numpy.ndarray
+        The average accuracy score as a float if ``average`` is not None,
+        otherwise a numpy array of accuracy scores per class/label.
+
+    Raises
+    ------
+    ValueError
+        If ``task`` is not one of ``binary``, ``multiclass`` or ``multilabel``.
+    AssertionError
+        If ``task`` is ``multiclass`` and ``num_classes`` is not provided or is
+        not a positive integer.
+    AssertionError
+        If ``task`` is ``multilabel`` and ``num_labels`` is not provided or is
+        not a positive integer.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import accuracy
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0, 1, 1, 1]
+    >>> accuracy(target, preds, task="binary")
+    0.75
+
+    >>> # (multiclass)
+    >>> target = [0, 1, 2, 2, 2]
+    >>> preds = [0, 0, 2, 2, 1]
+    >>> accuracy(target, preds, task="multiclass", num_classes=3, average="micro")
+    0.6
+
+    >>> # (multilabel)
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0, 1, 0], [1, 0, 1]]
+    >>> accuracy(target, preds, task="multilabel", num_labels=3, average="micro")
+    0.6666666666666666
+
+    """
+    if task == "binary":
+        accuracy_score: Union[float, npt.NDArray[np.float_]] = binary_accuracy(
+            target,
+            preds,
+            pos_label=pos_label,
+            threshold=threshold,
+            zero_division=zero_division,
+        )
+    elif task == "multiclass":
+        assert (
+            isinstance(num_classes, int) and num_classes > 0
+        ), "Number of classes must be specified for multiclass classification."
+        accuracy_score = multiclass_accuracy(
+            target,
+            preds,
+            num_classes=num_classes,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+    elif task == "multilabel":
+        assert (
+            isinstance(num_labels, int) and num_labels > 0
+        ), "Number of labels must be specified for multilabel classification."
+        accuracy_score = multilabel_accuracy(
+            target,
+            preds,
+            num_labels=num_labels,
+            threshold=threshold,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+    else:
+        raise ValueError(
+            f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+            " or 'multilabel'",
+        )
+    return accuracy_score
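+
+# Editor's sketch (not part of the library) checking the averaging options
+# described above against the multiclass example with target=[0, 1, 2, 2, 2]
+# and preds=[0, 0, 2, 2, 1]; per-class accuracy here equals tp / (tp + fn).
+import numpy as np
+
+tp = np.array([1, 0, 2])  # correct predictions per class
+fn = np.array([0, 1, 1])  # missed instances per class
+per_class = tp / (tp + fn)
+print(per_class)                               # [1.0, 0.0, 0.667] -- average=None
+print(per_class.mean())                        # 0.5556 -- average="macro"
+print(tp.sum() / (tp.sum() + fn.sum()))        # 0.6 -- average="micro"
+print(np.average(per_class, weights=tp + fn))  # 0.6 -- average="weighted"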
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/auroc.html b/api/_modules/cyclops/evaluate/metrics/functional/auroc.html
new file mode 100644
index 000000000..f40b72e42
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/auroc.html
@@ -0,0 +1,996 @@
+ cyclops.evaluate.metrics.functional.auroc - cyclops documentation

Source code for cyclops.evaluate.metrics.functional.auroc

+"""Functions for computing the area under the ROC curve (AUROC)."""
+
+import logging
+from typing import Any, List, Literal, Optional, Tuple, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics import auc
+
+from cyclops.evaluate.metrics.functional.precision_recall_curve import (
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_update,
+    _format_thresholds,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_update,
+)
+from cyclops.evaluate.metrics.functional.roc import (
+    _binary_roc_compute,
+    _multiclass_roc_compute,
+    _multilabel_roc_compute,
+)
+from cyclops.evaluate.metrics.utils import _check_thresholds
+from cyclops.utils.log import setup_logging
+
+
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="WARN", logger=LOGGER)
+
+
+def _reduce_auroc(
+    fpr: Union[npt.NDArray[Any], List[npt.NDArray[Any]]],
+    tpr: Union[npt.NDArray[Any], List[npt.NDArray[Any]]],
+    average: Optional[Literal["macro", "weighted"]] = None,
+    weights: Optional[npt.NDArray[Any]] = None,
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the area under the ROC curve and apply ``average`` method.
+
+    Parameters
+    ----------
+    fpr : numpy.ndarray or list of numpy.ndarray
+        False positive rate.
+    tpr : numpy.ndarray or list of numpy.ndarray
+        True positive rate.
+    average : Literal["macro", "weighted"], default=None
+        If not None, apply the method to compute the average area under the
+        ROC curve.
+    weights : numpy.ndarray, default=None
+        Sample weights.
+
+    Returns
+    -------
+    auroc : float or numpy.ndarray
+        Area under the ROC curve. If ``average`` is None, ``auroc`` is a
+        numpy array of scores per class; otherwise, it is a float.
+
+    Raises
+    ------
+    ValueError
+        If ``average`` is not one of ``macro`` or ``weighted`` or if
+        ``average`` is ``weighted`` and ``weights`` is None.
+
+    """
+    result = [
+        auc(x, y) for x, y in zip(fpr, tpr)
+    ]  # without the loop: np.trapz(tpr, fpr, axis=1) * direction
+    result = np.stack(result)
+
+    if average is not None:
+        if np.isnan(result).any():
+            LOGGER.warning(
+                "AUROC for one or more classes was `nan`. "
+                "Ignoring these classes in %s-average",
+                average,
+            )
+        idx = ~np.isnan(result)
+
+        if average == "macro":
+            result = result[idx].mean()
+        elif average == "weighted" and weights is not None:
+            weights = np.divide(
+                weights[idx],
+                weights[idx].sum(),
+                out=np.zeros_like(weights[idx], dtype=np.float64),  # match the masked shape
+                where=weights[idx].sum() != 0,
+            )
+            result = (result[idx] * weights).sum()
+        else:
+            raise ValueError(
+                "Received an incompatible combination of inputs for the reduction.",
+            )
+
+    return cast(Union[float, npt.NDArray[np.float_]], result)
+
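+# Editor's sketch (not library code) of the weighted reduction above: `nan`
+# class scores are masked out and the surviving weights are renormalized to
+# sum to one before averaging.
+_demo_scores = np.array([0.8, np.nan, 0.6])
+_demo_weights = np.array([2.0, 1.0, 2.0])
+_demo_idx = ~np.isnan(_demo_scores)
+_demo_w = _demo_weights[_demo_idx] / _demo_weights[_demo_idx].sum()
+assert np.isclose((_demo_scores[_demo_idx] * _demo_w).sum(), 0.7)
+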
+
+def _binary_auroc_compute(
+    state: Union[Tuple[npt.NDArray[Any], npt.NDArray[Any]], npt.NDArray[Any]],
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+    max_fpr: Optional[float] = None,
+    pos_label: int = 1,
+) -> float:
+    """Compute the area under the ROC curve for binary classification tasks.
+
+    Parameters
+    ----------
+    state : Union[Tuple[numpy.ndarray, numpy.ndarray], numpy.ndarray]
+        If ``state`` is a tuple, then it must be a tuple of two numpy arrays
+        ``(target, preds)``. If ``state`` is a numpy array, then it is a multi-
+        threshold confusion matrix.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used for binarizing the predictions. If None,
+        then the thresholds are automatically determined by the unique values
+        in ``preds``.
+    max_fpr : float, default=None
+        The maximum value of the false positive rate. If not ``None``, the
+        standardized partial area under the curve over the range
+        [0, max_fpr] is returned; otherwise, the full area under the ROC
+        curve is computed.
+    pos_label : int, default=1
+        The label of the positive class.
+
+    Returns
+    -------
+    auroc : float
+        Area under the ROC curve.
+
+    """
+    fpr, tpr, _ = _binary_roc_compute(state, thresholds=thresholds, pos_label=pos_label)
+
+    if max_fpr is None or max_fpr == 1:
+        roc_auc = auc(x=fpr, y=tpr)
+        return cast(float, roc_auc)
+
+    # Add a single point at max_fpr by linear interpolation
+    stop: int = np.searchsorted(fpr, max_fpr, "right")  # type: ignore[assignment]
+    x_interp = [fpr[stop - 1], fpr[stop]]
+    y_interp = [tpr[stop - 1], tpr[stop]]
+    tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
+    fpr = np.append(fpr[:stop], max_fpr)
+    partial_auc = auc(fpr, tpr)
+
+    # standardize result to be 0.5 if non-discriminant and 1 if maximal
+    min_area = 0.5 * max_fpr**2
+    max_area = max_fpr
+
+    roc_auc = 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
+    return cast(float, roc_auc)
+
+
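+# Editor's sketch (not library code) of the McClish standardization used
+# above: a chance-level classifier has partial area max_fpr**2 / 2 over
+# [0, max_fpr] and a perfect one has max_fpr, so those endpoints are mapped
+# to 0.5 and 1.0 respectively. The helper name below is hypothetical.
+def _standardize_partial_auc_demo(partial_auc: float, max_fpr: float) -> float:
+    min_area = 0.5 * max_fpr**2
+    max_area = max_fpr
+    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
+
+
+assert _standardize_partial_auc_demo(0.125, 0.5) == 0.5  # chance level
+assert _standardize_partial_auc_demo(0.5, 0.5) == 1.0  # maximal
+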
+
+[docs] +def binary_auroc( + target: npt.ArrayLike, + preds: npt.ArrayLike, + max_fpr: Optional[float] = None, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + pos_label: int = 1, +) -> float: + """Compute the area under the ROC curve for binary classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated probabilities or decision function. If the values in ``preds`` + are not in the range [0, 1], then they will be transformed to this range + via a sigmoid function. + max_fpr : float, default=None + The maximum value of the false positive rate. If not None, then + the partial AUCROC in the range [0, max_fpr] is returned. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + pos_label : int, default=1 + The label of the positive class. + + Returns + ------- + auroc : float + Area under the ROC curve. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_auroc + >>> target = [1, 0, 0, 1] + >>> preds = [0.1, 0.9, 0.4, 0.6] + >>> binary_auroc(target, preds, thresholds=5) + 0.25 + + """ + _check_thresholds(thresholds) + + if max_fpr is not None: + if not isinstance(max_fpr, (int, float)): + raise ValueError( + "Expected argument ``max_fpr`` to be a float or integer, but got" + f" {max_fpr}", + ) + if max_fpr <= 0 or max_fpr > 1: + raise ValueError( + "Expected argument ``max_fpr`` to be in the range (0, 1], but got" + f" {max_fpr}", + ) + + target, preds = _binary_precision_recall_curve_format( + target, + preds, + pos_label=pos_label, + ) + thresholds = _format_thresholds(thresholds) + + state = _binary_precision_recall_curve_update(target, preds, thresholds) + + return _binary_auroc_compute( + state, + thresholds=thresholds, + max_fpr=max_fpr, + pos_label=pos_label, + )
+ + + +def _multiclass_auroc_compute( + state: Union[ + Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], + npt.NDArray[np.int_], + ], + num_classes: int, + thresholds: Optional[npt.NDArray[np.float_]] = None, + average: Optional[Literal["macro", "weighted"]] = None, +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the area under the ROC curve for multiclass classification tasks. + + Parameters + ---------- + state : Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]] + If ``state`` is a numpy array, then it is a multi-threshold confusion + matrix. If ``state`` is a tuple, then it must be a tuple of two numpy + arrays ``(target, preds)``. + num_classes : int + Number of classes. + thresholds : numpy.ndarray, default=None + Thresholds used for computing binarizing the predictions. If None, + then the thresholds are automatically determined by the unique values + in ``preds``. + average : Literal["macro", "weighted"], default=None + If ``None``, then the scores for each class are returned. Otherwise, + this determines the type of averaging performed on the scores. + + Returns + ------- + auroc : Union[float, numpy.ndarray] + Area under the ROC curve. If ``average`` is ``None``, then a numpy array + of shape (num_classes,) is returned, otherwise a float is returned. + + """ + fpr, tpr, _ = _multiclass_roc_compute(state, num_classes, thresholds=thresholds) + return _reduce_auroc( + fpr, + tpr, + average=average, + weights=np.bincount(state[0], minlength=num_classes) + if thresholds is None + else state[1][:, 1, :].sum(-1), + ) + + +
+[docs] +def multiclass_auroc( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + average: Optional[Literal["macro", "weighted"]] = None, +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the area under the ROC curve for multiclass classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated probabilities or decision function. If the values in ``preds`` + are not in the range [0, 1], then they will be transformed to this range + via a softmax function. + num_classes : int + Number of classes. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + average : Literal["macro", "weighted"], default=None + If ``None``, then the scores for each class are returned. Otherwise, + this determines the type of averaging performed on the scores. One of + + - `macro`: Calculate metrics for each class, and find their unweighted + mean. This does not take class imbalance into account. + - `weighted`: Calculate metrics for each class, and find their average, + weighted by support (the number of true instances for each class). + + Returns + ------- + auroc : Union[float, numpy.ndarray] + Area under the ROC curve. If ``average`` is ``None``, then a numpy array + of shape (num_classes,) is returned, otherwise a float is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_auroc + >>> target = [1, 0, 2, 0] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.05, 0.9], + ... [0.9, 0.05, 0.05], + ... ] + >>> multiclass_auroc(target, preds, num_classes=3, thresholds=5, average=None) + array([0.5 , 0.33333333, 1. ]) + + """ + _check_thresholds(thresholds) + + if average is not None and average not in ("macro", "weighted"): + raise ValueError( + "Expected argument `average` to be one of ('macro', 'weighted', None)," + f" but got {average}", + ) + + target, preds = _multiclass_precision_recall_curve_format( + target, + preds, + num_classes=num_classes, + ) + thresholds = _format_thresholds(thresholds) + + state = _multiclass_precision_recall_curve_update( + target, + preds, + num_classes=num_classes, + thresholds=thresholds, + ) + + return _multiclass_auroc_compute( + state, + num_classes, + thresholds=thresholds, + average=average, + )
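+
+# Editor's cross-check (an assumption, not library code): the per-class scores
+# in the example above follow a one-vs-rest decomposition, and agree with
+# scikit-learn's roc_auc_score using multi_class="ovr" on the same inputs.
+from sklearn.metrics import roc_auc_score
+
+_demo_target = [1, 0, 2, 0]
+_demo_preds = [
+    [0.9, 0.05, 0.05],
+    [0.05, 0.9, 0.05],
+    [0.05, 0.05, 0.9],
+    [0.9, 0.05, 0.05],
+]
+print(roc_auc_score(_demo_target, _demo_preds, multi_class="ovr", average="macro"))
+# ~0.611, the mean of the per-class scores [0.5, 0.333..., 1.0]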
+ + + +def _multilabel_auroc_compute( + state: Union[ + Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], + npt.NDArray[np.int_], + ], + num_labels: int, + thresholds: Optional[npt.NDArray[np.float_]] = None, + average: Optional[Literal["micro", "macro", "weighted"]] = None, +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the area under the ROC curve for multilabel classification tasks. + + Parameters + ---------- + state : Union[Tuple[numpy.ndarray, numpy.ndarray], numpy.ndarray] + If ``state`` is a numpy array, then it is a multi-threshold confusion + matrix. If ``state`` is a tuple, then it must be a tuple of two numpy + arrays ``(target, preds)``. + num_labels : int + Number of labels. + thresholds : numpy.ndarray, default=None + Thresholds used for computing binarizing the predictions. If None, + then the thresholds are automatically determined by the unique values + in ``preds``. + average : Literal["micro", "macro", "weighted"], default=None + If ``None``, then the scores for each label are returned. Otherwise, + this determines the type of averaging performed on the scores. One of + + - `micro`: Calculate metrics globally. + - `macro`: Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + - `weighted`: Calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + + Returns + ------- + float or numpy.ndarray + Area under the ROC curve. If ``average`` is ``None``, then a numpy array + of shape (num_labels,) is returned, otherwise a float is returned. + + """ + if average == "micro": + if isinstance(state, np.ndarray) and thresholds is not None: + return _binary_auroc_compute(state.sum(1), thresholds, max_fpr=None) + + target = state[0].flatten() + preds = state[1].flatten() + return _binary_auroc_compute((target, preds), thresholds, max_fpr=None) + fpr, tpr, _ = _multilabel_roc_compute(state, num_labels, thresholds=thresholds) + return _reduce_auroc( + fpr, + tpr, + average=average, + weights=(state[0] == 1).sum(axis=0).astype(np.float64) + if thresholds is None + else state[1][:, 1, :].sum(axis=-1), + ) + + +
+[docs] +def multilabel_auroc( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + average: Optional[Literal["micro", "macro", "weighted"]] = None, +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the area under the ROC curve for multilabel classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated probabilities or decision function. If the values in ``preds`` + are not in the range [0, 1], then they will be transformed to this range + via a softmax function. + num_labels : int + Number of labels. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + average : Literal["micro", "macro", "weighted"], default=None + If ``None``, then the scores for each label are returned. Otherwise, + this determines the type of averaging performed on the scores. One of + + - `micro`: Calculate metrics globally by counting the total true + positives, false negatives and false positives. + - `macro`: Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + - `weighted``: Calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + + Returns + ------- + float or numpy.ndarray + Area under the ROC curve. If ``average`` is ``None``, then a numpy array + of shape (num_labels,) is returned, otherwise a float is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_auroc + >>> target = [[0, 1, 0], [0, 1, 1], [1, 0, 1]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.9], [0.8, 0.2, 0.3]] + >>> multilabel_auroc(target, preds, num_labels=3, thresholds=5, average=None) + array([1. , 0.75, 0.25]) + + """ + _check_thresholds(thresholds) + + if average is not None and average not in ("micro", "macro", "weighted"): + raise ValueError( + "Expected argument `average` to be one of ('micro', 'macro', 'weighted'" + f" , None), but got {average}", + ) + + target, preds = _multilabel_precision_recall_curve_format( + target, + preds, + num_labels=num_labels, + ) + thresholds = _format_thresholds(thresholds) + + state = _multilabel_precision_recall_curve_update( + target, + preds, + num_labels=num_labels, + thresholds=thresholds, + ) + + return _multilabel_auroc_compute( + state, + num_labels, + thresholds=thresholds, + average=average, + )
+ + + +
+[docs]
+def auroc(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    max_fpr: Optional[float] = None,
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Optional[Literal["micro", "macro", "weighted"]] = None,
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the area under the ROC curve for different tasks.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated probabilities or decision function. If ``preds`` is not in the
+        range [0, 1], a sigmoid function is applied to transform it to the range
+        [0, 1].
+    task : Literal["binary", "multiclass", "multilabel"]
+        Task type. One of ``binary``, ``multiclass``, ``multilabel``.
+    max_fpr : float, default=None
+        The maximum value of the false positive rate. If not None, a partial AUC
+        in the range [0, max_fpr] is returned. Only used for binary classification.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for binarizing the values of ``preds``.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the unique
+        values in ``preds``.
+    num_classes : int, default=None
+        Number of classes. This parameter is required for the ``multiclass`` task.
+    num_labels : int, default=None
+        Number of labels. This parameter is required for the ``multilabel`` task.
+    average : Literal["micro", "macro", "weighted"], default=None
+        If not None, apply the method to compute the average area under the
+        ROC curve. Only applicable for the ``multiclass`` and ``multilabel``
+        tasks. One of:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives, false negatives and false positives.
+        - ``macro``: Calculate metrics for each label, and find their unweighted
+          mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label, and find their
+          average, weighted by support (accounting for label imbalance).
+
+    Returns
+    -------
+    auroc_score : float or numpy.ndarray
+        Area under the ROC curve. If ``average`` is None or task is ``binary``,
+        ``auroc`` is a float. Otherwise, ``auroc`` is a numpy array.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import auroc
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0.1, 0.35, 0.4, 0.8]
+    >>> auroc(target, preds, task="binary")
+    0.75
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics.functional import auroc
+    >>> target = [0, 1, 2, 0, 1, 2]
+    >>> preds = [
+    ...     [0.1, 0.6, 0.3],
+    ...     [0.05, 0.95, 0],
+    ...     [0.5, 0.3, 0.2],
+    ...     [0.1, 0.6, 0.3],
+    ...     [0.05, 0.95, 0],
+    ...     [0.5, 0.3, 0.2],
+    ... ]
+    >>> auroc(target, preds, task="multiclass", num_classes=3, average=None)
+    array([0.5, 1. , 0.5])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics.functional import auroc
+    >>> target = [[0, 1], [1, 1], [0, 0], [1, 0]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2], [0.4, 0.6], [0.2, 0.8]]
+    >>> auroc(target, preds, task="multilabel", num_labels=2, average=None)
+    array([0.25, 0.5 ])
+
+    """
+    if task == "binary":
+        return binary_auroc(target, preds, max_fpr=max_fpr, thresholds=thresholds)
+    if task == "multiclass":
+        assert (
+            isinstance(num_classes, int) and num_classes > 0
+        ), "Number of classes must be a positive integer."
+        return multiclass_auroc(
+            target,
+            preds,
+            num_classes,
+            thresholds=thresholds,
+            average=average,  # type: ignore[arg-type]
+        )
+    if task == "multilabel":
+        assert (
+            isinstance(num_labels, int) and num_labels > 0
+        ), "Number of labels must be a positive integer."
+        return multilabel_auroc(
+            target,
+            preds,
+            num_labels,
+            thresholds=thresholds,
+            average=average,
+        )
+
+    raise ValueError(
+        "Expected argument `task` to be either 'binary', 'multiclass' or "
+        f"'multilabel', but got {task}",
+    )
+ +
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/average_precision.html b/api/_modules/cyclops/evaluate/metrics/functional/average_precision.html
new file mode 100644
index 000000000..109263cbd
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/average_precision.html
@@ -0,0 +1,557 @@
+ cyclops.evaluate.metrics.functional.average_precision - cyclops documentation

Source code for cyclops.evaluate.metrics.functional.average_precision

+"""Functions for computing average precision (AUPRC) for classification tasks."""
+
+from typing import Literal, Optional, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.precision_recall_curve import (
+    _binary_precision_recall_curve_compute,
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_update,
+)
+
+
+def _binary_average_precision_compute(
+    state: Union[
+        Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]],
+        npt.NDArray[np.int_],
+    ],
+    thresholds: Optional[npt.NDArray[np.float_]],
+    pos_label: Optional[int] = None,
+) -> float:
+    """Compute average precision for binary classification task.
+
+    Parameters
+    ----------
+    state : Tuple or numpy.ndarray
+        State from which the precision-recall curve can be computed. Can be
+        either a tuple of (target, preds) or a multi-threshold confusion matrix.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores. If not None,
+        must be a 1D numpy array of floats in the [0, 1] range and monotonically
+        increasing.
+    pos_label : int
+        The label of the positive class.
+
+    Returns
+    -------
+    float
+        The average precision score.
+
+
+    """
+    precision, recall, _ = _binary_precision_recall_curve_compute(
+        state,
+        thresholds,
+        pos_label,
+    )
+    return -np.sum(np.diff(recall) * np.array(precision)[:-1])  # type: ignore
+
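+# Editor's worked check (not library code) of the rectangle sum above:
+# AP = sum_n (R_n - R_{n-1}) * P_n; recall decreases along the returned
+# curve, hence the leading minus sign on np.diff(recall).
+_demo_precision = np.array([0.5, 2 / 3, 1.0, 1.0])  # hypothetical curve points
+_demo_recall = np.array([1.0, 1.0, 0.5, 0.0])  # non-increasing recall
+assert np.isclose(
+    -np.sum(np.diff(_demo_recall) * _demo_precision[:-1]),
+    5 / 6,
+)
+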
+
+
+[docs]
+def binary_average_precision(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    thresholds: Optional[npt.NDArray[np.float_]],
+    pos_label: int = 1,
+) -> float:
+    """Compute average precision for binary classification task.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Target values.
+    preds : npt.ArrayLike
+        Predicted values.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores. If not None,
+        must be a 1D numpy array of floats in the [0, 1] range and monotonically
+        increasing.
+    pos_label : int
+        The label of the positive class.
+
+    Returns
+    -------
+    float
+        The average precision score.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import binary_average_precision
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0, 0.5, 0.7, 0.8]
+    >>> binary_average_precision(target, preds, thresholds=None)
+    0.5833333333333333
+
+    """
+    target, preds = _binary_precision_recall_curve_format(target, preds, pos_label)
+    state = _binary_precision_recall_curve_update(target, preds)
+    return _binary_average_precision_compute(state, thresholds, pos_label)
+ + + +
+[docs]
+def average_precision(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+    pos_label: int = 1,
+    num_classes: Optional[int] = None,
+    top_k: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Literal["micro", "macro", "weighted", None] = None,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[npt.NDArray[np.float_], float]:
+    """Compute average precision for classification tasks.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Target values.
+    preds : npt.ArrayLike
+        Predicted values.
+    task : {"binary", "multiclass", "multilabel"}
+        The task type.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used for computing the precision and recall scores.
+    pos_label : int
+        The label of the positive class.
+    num_classes : int
+        The number of classes.
+    top_k : int
+        The number of top predictions to consider for multilabel classification.
+    num_labels : int
+        The number of labels for multilabel classification.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If None, return the average precision for each class, otherwise return
+        the averaged score. Average options are:
+
+        - ``micro``: Calculate metrics globally by counting the total
+          true positives, false negatives, false positives and true negatives.
+        - ``macro``: Calculate metrics for each label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label, and find their
+          average, weighted by support (the number of true instances for
+          each label).
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Sets the value to return when there is a zero division. If set to ``warn``,
+        this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    float or numpy.ndarray
+        The average precision score(s).
+
+    Raises
+    ------
+    ValueError
+        If ``task`` is not one of ``binary``, ``multiclass`` or ``multilabel``.
+    NotImplementedError
+        If ``task`` is ``multiclass`` or ``multilabel``, which are not yet
+        supported.
+
+    """
+    if task == "binary":
+        return binary_average_precision(target, preds, thresholds, pos_label)
+    if task == "multiclass":
+        raise NotImplementedError("Multiclass average precision is not implemented.")
+    if task == "multilabel":
+        raise NotImplementedError("Multilabel average precision is not implemented.")
+
+    raise ValueError(
+        "Expected argument `task` to be either 'binary', 'multiclass' or "
+        f"'multilabel', but got {task}",
+    )
+ +
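+
+# Editor's cross-check (an assumption, not library code): the binary result in
+# the example above matches scikit-learn's step-function average precision.
+from sklearn.metrics import average_precision_score
+
+print(average_precision_score([0, 1, 1, 0], [0, 0.5, 0.7, 0.8]))  # 0.5833...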
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/f_beta.html b/api/_modules/cyclops/evaluate/metrics/functional/f_beta.html
new file mode 100644
index 000000000..be40bc779
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/f_beta.html
@@ -0,0 +1,1185 @@
+ cyclops.evaluate.metrics.functional.f_beta - cyclops documentation

Source code for cyclops.evaluate.metrics.functional.f_beta

+"""Functions for computing F-beta and F1 scores for different input types."""
+
+from typing import Literal, Optional, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics._classification import _prf_divide
+
+from cyclops.evaluate.metrics.functional.stat_scores import (
+    _binary_stat_scores_args_check,
+    _binary_stat_scores_format,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_update,
+)
+from cyclops.evaluate.metrics.utils import (
+    _check_average_arg,
+    _get_value_if_singleton_array,
+)
+
+
+def _fbeta_reduce(
+    tp: Union[npt.NDArray[np.int_], np.int_],
+    fp: Union[npt.NDArray[np.int_], np.int_],
+    fn: Union[npt.NDArray[np.int_], np.int_],
+    beta: float,
+    average: Literal["micro", "macro", "weighted", None],
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the F-beta score, a generalization of F-measure.
+
+    Parameters
+    ----------
+    tp : numpy.ndarray
+        True positives per class.
+    fp : numpy.ndarray
+        False positives per class.
+    fn : numpy.ndarray
+        False negatives per class.
+    beta : float
+        Weight of precision in harmonic mean (beta < 1 lends more weight to
+        precision, beta > 1 favors recall).
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metric globally from the total count of true
+            positives, false positives and false negatives.
+        - ``macro``: Calculate metric for each label, and find their
+            unweighted mean. This does not take label/class imbalance
+            into account.
+        - ``weighted``: Calculate metric for each label/class, and find their
+            average weighted by the support (the number of true instances
+            for each label/class). This alters "macro" to account for
+            label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    result : float or numpy.ndarray
+        F-beta score or array of scores if ``average=None``.
+
+    Raises
+    ------
+    ValueError
+        If ``beta`` is less than 0.
+
+    """
+    _check_beta(beta=beta)
+
+    beta2 = beta**2
+
+    numerator = (1 + beta2) * tp
+    denominator = (1 + beta2) * tp + beta2 * fn + fp
+
+    if average == "micro":
+        numerator = np.array(np.sum(numerator))
+        denominator = np.array(np.sum(denominator))
+
+    score = _prf_divide(
+        np.expand_dims(numerator, axis=0) if numerator.ndim == 0 else numerator,
+        np.expand_dims(denominator, axis=0) if denominator.ndim == 0 else denominator,
+        metric="f-score",
+        modifier="true nor predicted",
+        average=average,
+        warn_for="f-score",
+        zero_division=zero_division,
+    )
+
+    if average == "weighted":
+        weights = tp + fn
+        if np.sum(weights) == 0:
+            result = np.ones_like(score, dtype=np.float64)
+            if zero_division in ["warn", 0]:
+                result = np.zeros_like(score, dtype=np.float64)
+            return result
+    else:
+        weights = None
+
+    if average is not None and score.ndim != 0 and len(score) > 1:
+        result = np.average(score, weights=weights)
+    else:
+        result = _get_value_if_singleton_array(score)  # type: ignore[assignment]
+
+    return result
+
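+# Editor's sanity check (not library code) of the closed form used above:
+# F_beta = (1 + beta**2) * tp / ((1 + beta**2) * tp + beta**2 * fn + fp),
+# which reduces to F1 = 2 * tp / (2 * tp + fn + fp) when beta == 1.
+_demo_tp, _demo_fp, _demo_fn = 8, 2, 4
+assert 2 * _demo_tp / (2 * _demo_tp + _demo_fn + _demo_fp) == 16 / 22
+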
+
+def _check_beta(beta: float) -> None:
+    """Check the ``beta`` argument for F-beta metrics."""
+    if beta < 0:
+        raise ValueError("beta should be >=0 in the F-beta score")
+
+
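+# Editor's illustration (not library code) of how ``beta`` trades precision
+# against recall: with precision=0.9 and recall=0.5, a small beta tracks
+# precision while a large beta tracks recall.
+_demo_p, _demo_r = 0.9, 0.5
+for _demo_beta in (0.5, 1.0, 2.0):
+    _b2 = _demo_beta**2
+    print(_demo_beta, (1 + _b2) * _demo_p * _demo_r / (_b2 * _demo_p + _demo_r))
+# 0.5 -> 0.776..., 1.0 -> 0.643..., 2.0 -> 0.549...
+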
+
+[docs] +def binary_fbeta_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + beta: float, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute the F-beta score for binary classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + beta : float + Weight of precision in harmonic mean. + pos_label : int, default=1 + The positive class label. One of [0, 1]. + threshold : float, default=0.5 + Threshold value for converting probabilities and logits to binary. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there are no true positives or true negatives. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + float + The binary F-beta score. + + Raises + ------ + ValueError + beta is less than 0. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_fbeta_score + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> binary_fbeta_score(target, preds, beta=0.5) + 0.7142857142857143 + + """ + _check_beta(beta=beta) + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + target, preds = _binary_stat_scores_format( + target=target, + preds=preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, _, fn = _binary_stat_scores_update( + target=target, + preds=preds, + pos_label=pos_label, + ) + + f_score = _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=beta, + average=None, + zero_division=zero_division, + ) + + return cast(float, f_score)
+ + + +
+[docs] +def multiclass_fbeta_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + beta: float, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the F-beta score for multiclass data. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + beta : float + Weight of precision in harmonic mean. + num_classes : int + The number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives, false positives and false negatives. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The multiclass F-beta score. If ``average`` is ``None``, a numpy array + of shape (num_classes,) is returned. + + Raises + ------ + ValueError + ``average`` is not one of ``micro``, ``macro``, ``weighted``, or ``None``, + or ``beta`` is less than 0. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_fbeta_score + >>> target = [0, 1, 2, 0] + >>> preds = [0, 2, 1, 0] + >>> multiclass_fbeta_score(target, preds, beta=0.5, num_classes=3) + array([1., 0., 0.]) + + """ + _check_beta(beta=beta) + _check_average_arg(average=average) + + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=num_classes, + top_k=top_k, + ) + + tp, fp, _, fn = _multiclass_stat_scores_update( + target=target, + preds=preds, + num_classes=num_classes, + ) + + return _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=beta, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def multilabel_fbeta_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + beta: float, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the F-beta score for multilabel data. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + beta : float + Weight of precision in harmonic mean. + num_labels : int + Number of labels for the task. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each label. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives, false positives and false negatives. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label + imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The multilabel F-beta score. If ``average`` is ``None``, a numpy array + of shape (num_labels,) is returned. + + Raises + ------ + ValueError + ``average`` is not one of ``micro``, ``macro``, ``weighted``, or ``None``, + or ``beta`` is less than 0. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_fbeta_score + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.8, 0.2]] + >>> multilabel_fbeta_score(target, preds, beta=0.5, num_labels=2) + array([1. , 0.83333333]) + + """ + _check_beta(beta=beta) + _check_average_arg(average=average) + + target, preds = _multilabel_stat_scores_format( + target, + preds, + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + ) + + tp, fp, _, fn = _multilabel_stat_scores_update( + target=target, + preds=preds, + num_labels=num_labels, + ) + + return _fbeta_reduce( + tp=tp, + fp=fp, + fn=fn, + beta=beta, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs]
+def fbeta_score(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    beta: float,
+    task: Literal["binary", "multiclass", "multilabel"],
+    pos_label: int = 1,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Literal["micro", "macro", "weighted", None] = None,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the F-beta score for binary, multiclass, or multilabel data.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated targets as returned by a classifier.
+    beta : float
+        Weight of precision in harmonic mean.
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives, false positives and false negatives.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label/class imbalance into
+          account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    score: float or numpy.ndarray
+        The F-beta score. If ``average`` is not ``None`` and ``task`` is not
+        ``binary``, a numpy array of shape (num_classes,) is returned.
+
+    Raises
+    ------
+    ValueError
+        If ``task`` is not one of ``binary``, ``multiclass``, or
+        ``multilabel``.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import fbeta_score
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0.1, 0.8, 0.4, 0.3]
+    >>> fbeta_score(target, preds, beta=0.5, task="binary")
+    0.8333333333333334
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics.functional import fbeta_score
+    >>> target = [0, 1, 2, 2]
+    >>> preds = [1, 2, 2, 0]
+    >>> fbeta_score(target, preds, beta=0.5, task="multiclass", num_classes=3)
+    array([0. , 0. , 0.5])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics.functional import fbeta_score
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> fbeta_score(target, preds, beta=0.5, task="multilabel", num_labels=2)
+    array([1.        , 0.83333333])
+
+    """
+    if task == "binary":
+        return binary_fbeta_score(
+            target,
+            preds,
+            beta,
+            pos_label=pos_label,
+            threshold=threshold,
+            zero_division=zero_division,
+        )
+    if task == "multiclass":
+        assert (
+            isinstance(num_classes, int) and num_classes > 0
+        ), "Number of classes must be specified for multiclass classification."
+        return multiclass_fbeta_score(
+            target,
+            preds,
+            beta,
+            num_classes,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+    if task == "multilabel":
+        assert (
+            isinstance(num_labels, int) and num_labels > 0
+        ), "Number of labels must be specified for multilabel classification."
+        return multilabel_fbeta_score(
+            target,
+            preds,
+            beta,
+            num_labels,
+            threshold=threshold,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+
+    raise ValueError(
+        f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+        " or 'multilabel'",
+    )
+ + + +
+[docs] +def binary_f1_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute the F1 score for binary classification tasks. + + Parameters + ---------- + target : ArrayLike + Ground truth (correct) target values. + preds : ArrayLike + Predictions as returned by a classifier. + pos_label: int, default=1 + The label of the positive class. + threshold : float, default=0.5 + Threshold value for binarizing predictions in form of logits or + probability scores. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float + The F1 score. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_f1_score + >>> target = [0, 1, 1, 0] + >>> preds = [0.1, 0.8, 0.4, 0.3] + >>> binary_f1_score(target, preds) + 0.6666666666666666 + + """ + return binary_fbeta_score( + target, + preds, + beta=1.0, + pos_label=pos_label, + threshold=threshold, + zero_division=zero_division, + )
+ + + +
+[docs] +def multiclass_f1_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the F1 score for multiclass classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_classes : int + Number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each class. Otherwise, use one of + the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives, false positives and false negatives. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take class imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class + imbalance. It can result in an F-score that is not between + precision and recall. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The F1 score. If ``average`` is ``None``, a numpy.ndarray of shape + (``num_classes``,) is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_f1_score + >>> target = [0, 1, 2, 0] + >>> preds = [1, 1, 1, 0] + >>> multiclass_f1_score(target, preds, num_classes=3) + array([0.66666667, 0.5 , 0. ]) + + """ + return multiclass_fbeta_score( + target, + preds, + beta=1.0, + num_classes=num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def multilabel_f1_score( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute the F1 score for multilabel classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_labels : int + Number of labels for the task. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each label. Otherwise, use one of + the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives, false positives and false negatives. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The F1 score. If ``average`` is ``None``, a numpy.ndarray of shape + (``num_labels``,) is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_f1_score + >>> target = [[0, 1, 1], [1, 0, 0]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.2]] + >>> multilabel_f1_score(target, preds, num_labels=3) + array([0., 1., 1.]) + + """ + return multilabel_fbeta_score( + target, + preds, + beta=1.0, + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs]
+def f1_score(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    pos_label: int = 1,
+    num_classes: Optional[int] = None,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Literal["micro", "macro", "weighted", None] = None,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Compute the F1 score for binary, multiclass, or multilabel data.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated targets as returned by a classifier.
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives, false positives and false negatives.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label/class imbalance into
+          account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    float or numpy.ndarray
+        The F1 score. If ``average`` is ``None`` and ``task`` is not ``binary``,
+        a numpy.ndarray of shape (``num_classes`` or ``num_labels``,) is returned.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import f1_score
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0.1, 0.9, 0.8, 0.2]
+    >>> f1_score(target, preds, task="binary")
+    0.5
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics.functional import f1_score
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.9, 0.1, 0]]
+    >>> f1_score(target, preds, task="multiclass", num_classes=3)
+    array([0.66666667, 0.66666667, 1.        ])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics.functional import f1_score
+    >>> target = [[0, 1, 1], [1, 0, 0]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.2]]
+    >>> f1_score(target, preds, task="multilabel", num_labels=3)
+    array([0., 1., 1.])
+
+    """
+    return fbeta_score(
+        target,
+        preds,
+        1.0,
+        task,
+        pos_label=pos_label,
+        num_classes=num_classes,
+        threshold=threshold,
+        top_k=top_k,
+        num_labels=num_labels,
+        average=average,
+        zero_division=zero_division,
+    )
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/precision_recall.html b/api/_modules/cyclops/evaluate/metrics/functional/precision_recall.html
new file mode 100644
index 000000000..1aff6b0b2
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/precision_recall.html
@@ -0,0 +1,1220 @@

Source code for cyclops.evaluate.metrics.functional.precision_recall

+"""Functions for computing precision and recall scores on different input types."""
+
+from typing import Literal, Optional, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics._classification import _prf_divide
+
+from cyclops.evaluate.metrics.functional.stat_scores import (
+    _binary_stat_scores_args_check,
+    _binary_stat_scores_format,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_update,
+)
+from cyclops.evaluate.metrics.utils import (
+    _check_average_arg,
+    _get_value_if_singleton_array,
+)
+
+
+def _precision_recall_reduce(
+    tp: Union[npt.NDArray[np.int_], np.int_],
+    fp: Union[npt.NDArray[np.int_], np.int_],
+    fn: Union[npt.NDArray[np.int_], np.int_],
+    metric: Literal["precision", "recall"],
+    average: Literal["micro", "macro", "weighted", None],
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[npt.NDArray[np.float_], float]:
+    """Compute precision or recall scores and apply specified average.
+
+    Parameters
+    ----------
+    tp : numpy.ndarray
+        True positives.
+    fp : numpy.ndarray
+        False positives.
+    fn : numpy.ndarray
+        False negatives.
+    metric : Literal["precision", "recall"]
+        Metric to compute.
+    average : Literal["micro", "macro", "weighted", None]
+        Average to apply. If None, return scores for each class.
+    zero_division : Literal["warn", 0, 1]
+        Value to return when there is a zero division (zero denominator).
+        If set to "warn", this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    scores : numpy.ndarray or float
+        Precision or recall scores. If ``average`` is None, return scores for
+        each class as a numpy.ndarray. Otherwise, return the average as a
+        float.
+
+    """
+    numerator = tp
+    different_metric = fp if metric == "precision" else fn
+    denominator = numerator + different_metric
+
+    if average == "micro":
+        numerator = np.array(np.sum(numerator))
+        denominator = np.array(np.sum(denominator))
+
+    modifier = "predicted" if metric == "precision" else "true"
+
+    score = _prf_divide(
+        np.expand_dims(numerator, axis=0) if numerator.ndim == 0 else numerator,
+        np.expand_dims(denominator, axis=0) if denominator.ndim == 0 else denominator,
+        metric,
+        modifier,
+        average,
+        warn_for=metric,
+        zero_division=zero_division,
+    )
+
+    if average == "weighted":
+        weights = tp + fn
+        if np.sum(weights) == 0:
+            result = np.ones_like(score, dtype=np.float64)
+            if zero_division in ["warn", 0]:
+                result = np.zeros_like(score)
+            return result
+    else:
+        weights = None
+
+    if average is not None and score.ndim != 0 and len(score) > 1:
+        result = np.average(score, weights=weights)
+    else:
+        result = _get_value_if_singleton_array(score)  # type: ignore[assignment]
+
+    return result
+
+
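+# Editor's sketch (illustrative only) of the reductions above, on hand-picked
+# per-class counts for three classes; these names are not part of the module.
+_tp = np.array([2, 0, 1])
+_fp = np.array([1, 1, 0])
+_fn = np.array([0, 2, 1])
+_per_class = _tp / (_tp + _fp)  # array([0.667, 0., 1.])
+_macro = _per_class.mean()  # unweighted mean of per-class scores
+_micro = _tp.sum() / (_tp.sum() + _fp.sum())  # pool the counts, then divide
+_weighted = np.average(_per_class, weights=_tp + _fn)  # support-weighted mean
+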
+
+[docs] +def binary_precision( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute precision score for binary classification. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + pos_label : int, default=1 + The label of the positive class. + threshold : float, default=0.5 + Threshold for deciding the positive class. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float + Precision score. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_precision + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> binary_precision(target, preds) + 0.6666666666666666 + >>> target = [0, 1, 0, 1, 0, 1] + >>> preds = [0.11, 0.22, 0.84, 0.73, 0.33, 0.92] + >>> binary_precision(target, preds) + 0.6666666666666666 + + """ + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + target, preds = _binary_stat_scores_format( + target, + preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, _, fn = _binary_stat_scores_update(target, preds, pos_label=pos_label) + + precision_score = _precision_recall_reduce( + tp, + fp, + fn, + metric="precision", + average=None, + zero_division=zero_division, + ) + + return cast(float, precision_score)
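+
+# Editor's sketch (illustrative only): with probabilistic predictions, the
+# `threshold` argument decides which scores count as positive (assumed here
+# to be scores at or above the threshold).
+_target = [0, 1, 0, 1, 0, 1]
+_preds = [0.11, 0.22, 0.84, 0.73, 0.33, 0.92]
+binary_precision(_target, _preds, threshold=0.5)  # 2 tp, 1 fp -> 0.666...
+binary_precision(_target, _preds, threshold=0.8)  # 1 tp, 1 fp -> 0.5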
+ + + +
+[docs] +def multiclass_precision( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute precision score for multiclass classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_classes : int + Number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the precision score for each class. Otherwise, + use one of the following options to compute the average precision score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false positives. + - ``macro``: Calculate metric for each class, and find their unweighted + mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each class, and find their average + weighted by the support (the number of true instances for each class). + This alters "macro" to account for class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + precision : float or numpy.ndarray + Precision score. If ``average`` is None, return a numpy.ndarray of + precision scores for each class. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_precision + >>> target = [0, 1, 2, 0] + >>> preds = [0, 2, 1, 0] + >>> multiclass_precision(target, preds, num_classes=3) + array([1., 0., 0.]) + + """ + _check_average_arg(average) + + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=num_classes, + top_k=top_k, + ) + + tp, fp, _, fn = _multiclass_stat_scores_update(target, preds, num_classes) + + return _precision_recall_reduce( + tp, + fp, + fn, + metric="precision", + average=average, + zero_division=zero_division, + )
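+
+# Editor's sketch (illustrative only): the doctest's per-class scores reduced
+# with each `average` option.
+_target = [0, 1, 2, 0]
+_preds = [0, 2, 1, 0]
+multiclass_precision(_target, _preds, num_classes=3)  # array([1., 0., 0.])
+multiclass_precision(_target, _preds, num_classes=3, average="macro")  # 1/3
+multiclass_precision(_target, _preds, num_classes=3, average="micro")  # 2 tp / 4 predicted -> 0.5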
+ + + +
+[docs] +def multilabel_precision( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute precision score for multilabel classification tasks. + + The input is expected to be an array-like of shape (N, L), where N is the + number of samples and L is the number of labels. The input is expected to + be a binary array-like, where 1 indicates the presence of a label and 0 + indicates its absence. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_labels : int + Number of labels for the task. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the precision score for each label. Otherwise, + use one of the following options to compute the average precision score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false positives. + - ``macro``: Calculate metric for each label, and find their unweighted + mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their average + weighted by the support (the number of true instances for each label). + This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + precision: float or numpy.ndarray + Precision score. If ``average`` is None, return a numpy.ndarray of + precision scores for each label. + + Raises + ------ + ValueError + If average is not one of ``micro``, ``macro``, ``weighted``, + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_precision + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.2, 0.8]] + >>> multilabel_precision(target, preds, num_labels=2) + array([0., 1.]) + + """ + _check_average_arg(average) + + target, preds = _multilabel_stat_scores_format( + target, + preds, + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + ) + + tp, fp, _, fn = _multilabel_stat_scores_update(target, preds, num_labels) + + return _precision_recall_reduce( + tp, + fp, + fn, + metric="precision", + average=average, + zero_division=zero_division, + )
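+
+# Editor's sketch (illustrative only): per-label scores versus the pooled
+# micro average for the doctest data.
+_target = [[0, 1], [1, 1]]
+_preds = [[0.1, 0.9], [0.2, 0.8]]
+multilabel_precision(_target, _preds, num_labels=2)  # array([0., 1.]); label 0 has no predicted positives
+multilabel_precision(_target, _preds, num_labels=2, average="micro")  # pooled 2 tp, 0 fp -> 1.0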
+ + + +
+[docs] +def precision( + target: npt.ArrayLike, + preds: npt.ArrayLike, + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute precision score for different classification tasks. + + Precision is the ratio of correctly predicted positive observations to the + total predicted positive observations. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + task : Literal["binary", "multiclass", "multilabel"] + Task type. + pos_label : int + Label of the positive class. Only used for binary classification. + num_classes : Optional[int] + Number of classes. Only used for multiclass classification. + threshold : float + Threshold for positive class predictions. Default is 0.5. + top_k : Optional[int] + Number of highest probability or logits predictions to consider when + computing multiclass or multilabel metrics. Default is None. + num_labels : Optional[int] + Number of labels. Only used for multilabel classification. + average : Literal["micro", "macro", "weighted", None] + Average to apply. If None, return scores for each class. Default is + None. One of: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false positives. + - ``macro``: Calculate metrics for each label/class, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average weighted by support (the number of true instances for + each label). This alters ``macro`` to account for label imbalance. + zero_division : Literal["warn", 0, 1] + Value to return when there is a zero division. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + precision_score : numpy.ndarray or float + Precision score. If ``average`` is not None or task is ``binary``, + return a float. Otherwise, return a numpy.ndarray of precision scores + for each class/label. + + Raises + ------ + ValueError + If task is not one of ``binary``, ``multiclass`` or ``multilabel``. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics.functional import precision + >>> target = [0, 1, 1, 0] + >>> preds = [0.1, 0.9, 0.8, 0.3] + >>> precision(target, preds, task="binary") + 1.0 + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics.functional import precision + >>> target = [0, 1, 2, 0, 1, 2] + >>> preds = [ + ... [0.1, 0.6, 0.3], + ... [0.05, 0.95, 0], + ... [0.1, 0.8, 0.1], + ... [0.5, 0.3, 0.2], + ... [0.2, 0.5, 0.3], + ... [0.2, 0.2, 0.6], + ... 
] + >>> precision(target, preds, task="multiclass", num_classes=3, average="macro") + 0.8333333333333334 + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics.functional import precision + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.2, 0.8]] + >>> precision(target, preds, task="multilabel", num_labels=2, average="macro") + 0.5 + + """ + if task == "binary": + return binary_precision( + target, + preds, + pos_label=pos_label, + threshold=threshold, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return multiclass_precision( + target, + preds, + num_classes=num_classes, + average=average, + top_k=top_k, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return multilabel_precision( + target, + preds, + num_labels=num_labels, + threshold=threshold, + average=average, + top_k=top_k, + zero_division=zero_division, + ) + + raise ValueError( + f"Task '{task}' not supported, expected 'binary', 'multiclass' or " + f"'multilabel'.", + )
+ + + +def binary_recall( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute recall score for binary classification. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + pos_label : int, default=1 + Label of the positive class. + threshold : float, default=0.5 + Threshold for deciding the positive class. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float + Recall score. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_recall + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 0] + >>> binary_recall(target, preds) + 0.5 + + """ + target, preds = _binary_stat_scores_format( + target, + preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, _, fn = _binary_stat_scores_update(target, preds, pos_label=pos_label) + + recall_score = _precision_recall_reduce( + tp, + fp, + fn, + metric="recall", + average=None, + zero_division=zero_division, + ) + + return cast(float, recall_score) + + +
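+# Editor's sketch (illustrative only): recall spelled out as tp / (tp + fn)
+# for the doctest above.
+_target = [0, 1, 0, 1]
+_preds = [0, 1, 1, 0]
+_tp = sum(t == 1 and p == 1 for t, p in zip(_target, _preds))  # 1
+_fn = sum(t == 1 and p == 0 for t, p in zip(_target, _preds))  # 1
+assert _tp / (_tp + _fn) == binary_recall(_target, _preds) == 0.5
+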
+[docs] +def multiclass_recall( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute recall score for multiclass classification. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_classes : int + Number of classes. + top_k : Optional[int] + If given, and predictions are probabilities/logits, the recall will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None] + Average to apply. If None, return scores for each class. Default is + None. One of: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false negatives. + - ``macro``: Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average weighted by support (the number of true instances for each label). + This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1] + Value to return when there are no true positives or true negatives. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + Recall score. If ``average`` is None, return a numpy.ndarray of + recall scores for each class. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_recall + >>> target = [0, 1, 2, 0, 1, 2] + >>> preds = [ + ... [0.4, 0.1, 0.5], + ... [0.1, 0.8, 0.1], + ... [0.2, 0.2, 0.6], + ... [0.5, 0.3, 0.2], + ... [0.2, 0.5, 0.3], + ... [0.2, 0.2, 0.6], + ... ] + >>> multiclass_recall(target, preds, num_classes=3, average="macro") + 0.8333333333333334 + + """ + _check_average_arg(average) + + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=num_classes, + top_k=top_k, + ) + + tp, fp, _, fn = _multiclass_stat_scores_update(target, preds, num_classes) + + return _precision_recall_reduce( + tp, + fp, + fn, + metric="recall", + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def multilabel_recall( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute recall score for multilabel classification tasks. + + The input is expected to be an array-like of shape (N, L), where N is the + number of samples and L is the number of labels. The input is expected to + be a binary array-like, where 1 indicates the presence of a label and 0 + indicates its absence. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_labels : int + Number of labels in the dataset. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional, default=None + If given, and predictions are probabilities/logits, the top k scores + will be converted to 1s and the rest will be converted to 0s. Otherwise, + the threshold will be used to convert scores to 0s and 1s. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the recall score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false negatives. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + Recall score. If ``average`` is None, return a numpy.ndarray of + recall scores for each label. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_recall + >>> target = [[0, 1, 0], [1, 0, 1]] + >>> preds = [[0, 0, 1], [1, 0, 1]] + >>> multilabel_recall(target, preds, num_labels=3) + array([1., 0., 1.]) + + """ + _check_average_arg(average) + + target, preds = _multilabel_stat_scores_format( + target, + preds, + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + ) + + tp, fp, _, fn = _multilabel_stat_scores_update(target, preds, num_labels) + + return _precision_recall_reduce( + tp, + fp, + fn, + metric="recall", + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def recall( + target: npt.ArrayLike, + preds: npt.ArrayLike, + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[npt.NDArray[np.float_], float]: + """Compute recall score for different classification tasks. + + Recall is the ratio tp / (tp + fn) where tp is the number of true positives + and fn the number of false negatives. The recall is intuitively the ability + of the classifier to find all the positive samples. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + task : Literal["binary", "multiclass", "multilabel"] + Task type. + pos_label : int + Label of the positive class. Only used for binary classification. + num_classes : Optional[int] + Number of classes. Only used for multiclass classification. + threshold : float, default=0.5 + Threshold for positive class predictions. + top_k : Optional[int] + Number of highest probability or logits predictions to consider when + computing multiclass or multilabel metrics. Default is None. + num_labels : Optional[int] + Number of labels. Only used for multilabel classification. + average : Literal["micro", "macro", "weighted", None], default=None + Average to apply. If None, return scores for each class. One of: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false negatives. + - ``macro``: Calculate metrics for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average weighted by support (the number of true instances for + each label). This alters ``macro`` to account for label imbalance. + zero_division : Literal["warn", 0, 1] + Value to return when there is a zero division. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + recall_score : float or numpy.ndarray + Recall score. If ``average`` is not None or ``task`` is ``binary``, + return a float. Otherwise, return a numpy.ndarray of recall scores + for each class/label. + + Raises + ------ + ValueError + If ``task`` is not one of ``binary``, ``multiclass`` or ``multilabel``. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics.functional import recall + >>> target = [0, 1, 1, 0, 1] + >>> preds = [0.4, 0.2, 0.0, 0.6, 0.9] + >>> recall(target, preds, task="binary") + 0.3333333333333333 + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics.functional import recall + >>> target = [1, 1, 2, 0, 2, 2] + >>> preds = [1, 2, 2, 0, 2, 0] + >>> recall(target, preds, task="multiclass", num_classes=3) + array([1. 
, 0.5 , 0.66666667]) + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics.functional import recall + >>> target = [[1, 0, 1], [0, 1, 0]] + >>> preds = [[0.4, 0.2, 0.0], [0.6, 0.9, 0.1]] + >>> recall(target, preds, task="multilabel", num_labels=3) + array([0., 1., 0.]) + + """ + if task == "binary": + return binary_recall( + target, + preds, + pos_label=pos_label, + threshold=threshold, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return multiclass_recall( + target, + preds, + num_classes=num_classes, + average=average, + top_k=top_k, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return multilabel_recall( + target, + preds, + num_labels=num_labels, + threshold=threshold, + average=average, + top_k=top_k, + zero_division=zero_division, + ) + + raise ValueError( + f"Task '{task}' not supported, expected 'binary', 'multiclass' or " + f"'multilabel'.", + )
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/precision_recall_curve.html b/api/_modules/cyclops/evaluate/metrics/functional/precision_recall_curve.html
new file mode 100644
index 000000000..cdae16824
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/precision_recall_curve.html
@@ -0,0 +1,1475 @@

Source code for cyclops.evaluate.metrics.functional.precision_recall_curve

+"""Functions for computing the precision-recall curve for different input types."""
+
+from typing import Any, List, Literal, NamedTuple, Optional, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+import scipy as sp
+from sklearn.metrics._ranking import _binary_clf_curve
+from sklearn.preprocessing import label_binarize
+
+from cyclops.evaluate.metrics.utils import (
+    _check_thresholds,
+    common_input_checks_and_format,
+    sigmoid,
+)
+
+
+class PRCurve(NamedTuple):
+    """Named tuple with Precision-Recall curve (Precision, Recall and thresholds)."""
+
+    precision: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+    recall: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+    thresholds: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+
+
+def _format_thresholds(
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+) -> Optional[npt.NDArray[np.float_]]:
+    """Format thresholds to be a 1D numpy array of floats."""
+    if isinstance(thresholds, int):
+        thresholds = np.linspace(0, 1, thresholds)
+    elif isinstance(thresholds, list):
+        thresholds = np.array(thresholds)
+
+    return thresholds
+
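+# Editor's examples (illustrative only) of the accepted formats:
+#   _format_thresholds(5)          -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])
+#   _format_thresholds([0.1, 0.9]) -> array([0.1, 0.9])
+#   _format_thresholds(None)       -> None
+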
+
+def _ovr_multi_threshold_confusion_matrix(
+    target: npt.NDArray[np.int_],
+    preds: npt.NDArray[np.int_],
+    num_classes: int,
+    num_thresholds: int,
+) -> npt.NDArray[np.int_]:
+    """Compute multi-threshold confusion matrix for one-vs-rest classification."""
+    pred_sum = np.count_nonzero(preds, axis=0)
+    target_sum = np.count_nonzero(target, axis=0)
+
+    tp = np.count_nonzero(np.multiply(preds, target), axis=0)
+    fp = pred_sum - tp
+    fn = target_sum - tp
+    tn = target.shape[0] - tp - fp - fn
+
+    return np.array([tn, fp, fn, tp]).T.reshape(num_thresholds, num_classes, 2, 2)
+
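+# Editor's shape sketch (illustrative only): with N samples, C classes and T
+# thresholds, broadcasting (N, C, 1) targets against (N, C, T) thresholded
+# predictions yields one 2x2 confusion matrix per (threshold, class) pair.
+_target = np.random.randint(0, 2, size=(8, 3, 1))
+_preds = np.random.randint(0, 2, size=(8, 3, 4))
+assert _ovr_multi_threshold_confusion_matrix(_target, _preds, 3, 4).shape == (4, 3, 2, 2)
+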
+
+def _precision_recall_curve_compute_from_confmat(
+    confmat: npt.NDArray[np.int_],
+    thresholds: npt.NDArray[np.float_],
+) -> Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]]:
+    """Compute precision-recall curve from a multi-threshold confusion matrix."""
+    tps = confmat[..., 1, 1]
+    fps = confmat[..., 0, 1]
+    fns = confmat[..., 1, 0]
+
+    precision = np.divide(
+        tps,
+        tps + fps,
+        out=np.zeros_like(tps, dtype=np.float64),
+        where=(tps + fps) != 0,
+    )
+    recall = np.divide(
+        tps,
+        tps + fns,
+        out=np.zeros_like(tps, dtype=np.float64),
+        where=(tps + fns) != 0,
+    )
+
+    sort_idx = np.argsort(thresholds)
+    thresholds = thresholds[sort_idx]
+    precision = precision[sort_idx]
+    recall = recall[sort_idx]  # in descending order
+
+    return precision, recall, thresholds
+
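+# Editor's micro-example (illustrative only) of the guarded division above:
+# thresholds with no positive predictions yield precision 0 instead of NaN.
+_tps, _fps = np.array([2, 0]), np.array([1, 0])
+np.divide(_tps, _tps + _fps, out=np.zeros_like(_tps, dtype=np.float64), where=(_tps + _fps) != 0)
+# -> array([0.66666667, 0.        ])
+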
+
+def _binary_precision_recall_curve_format(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    pos_label: int,
+) -> Tuple[npt.NDArray[Any], npt.NDArray[Any]]:
+    """Check and format binary precision-recall curve input/data.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated probabilities or non-thresholded output of decision function.
+        A sigmoid function is applied if ``preds`` are not in [0, 1].
+    pos_label : int
+        Label of the positive class.
+
+    Returns
+    -------
+    target : numpy.ndarray
+        Ground truth (correct) target values as a numpy array.
+    preds : numpy.ndarray
+        Estimated probabilities or non-thresholded output of decision function
+        as a numpy array.
+
+    Raises
+    ------
+    ValueError
+        If ``target`` is not binary, with only 1 and 0 as values; If ``target`` and
+        ``preds`` are not of the same shape; If ``preds`` is not continuous.
+
+    """
+    target, preds, type_target, type_preds = common_input_checks_and_format(
+        target,
+        preds,
+    )
+
+    if pos_label not in [0, 1]:
+        raise ValueError(f"Positive label must be 0 or 1, got {pos_label}.")
+
+    if type_preds == "continuous-multioutput":
+        assert preds.shape[-1] == 2, (
+            "The argument `preds` must either be a 1D array or a 2D array with "
+            f"exactly 2 columns, got an array with shape: {preds.shape}."
+        )
+        preds = preds[
+            ...,
+            pos_label,
+        ]  # keep only the probabilities for the positive class
+        type_preds = "continuous"
+
+    if preds.shape != target.shape:
+        raise ValueError(
+            "The arguments `preds` and `target` should have the same shape. "
+            f"Got {preds.shape} and {target.shape}.",
+        )
+
+    if type_target != "binary" or type_preds != "continuous":
+        raise ValueError(
+            "Expected argument `target` to be binary and `preds` to be an array of"
+            f" floats with probability/logit scores, got {type_target} and"
+            f" {type_preds} respectively.",
+        )
+
+    if not np.all(np.isin(target, [0, 1])):
+        raise ValueError(
+            "Expected argument `target` to be an array of 0s and 1s, but got "
+            f"array with values {np.unique(target)}",
+        )
+
+    if not np.all(np.logical_and(preds >= 0.0, preds <= 1.0)):
+        preds = sigmoid(preds)
+
+    return target, preds
+
+
+def _binary_precision_recall_curve_update(
+    target: npt.NDArray[Any],
+    preds: npt.NDArray[Any],
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+) -> Union[Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], npt.NDArray[np.int_]]:
+    """Compute the state from which the precision-recall curve can be computed.
+
+    Parameters
+    ----------
+    target : numpy.ndarray
+        Binary target values.
+    preds : numpy.ndarray
+        Predicted probabilities.
+    thresholds : Optional[numpy.ndarray]
+        Thresholds used for computing the precision and recall scores.
+
+    Returns
+    -------
+    (target, preds): Tuple[numpy.ndarray, numpy.ndarray]
+        Target and predicted probabilities, if ``thresholds`` is None.
+    confmat : numpy.ndarray
+        Multi-threshold confusion matrix, if ``thresholds`` is not None.
+
+    """
+    if thresholds is None:
+        return target, preds
+
+    # compute multi-threshold confusion matrix
+    len_t = len(thresholds)
+    preds_t = (
+        np.expand_dims(preds, axis=-1) >= np.expand_dims(thresholds, axis=0)
+    ).astype(np.int64)
+
+    tp = np.sum((target == preds_t.T) & (target == 1), axis=1)
+    fp = np.sum((target != preds_t.T) & (target == 0), axis=1)
+    tn = np.sum((target == preds_t.T) & (target == 0), axis=1)
+    fn = np.sum((target != preds_t.T) & (target == 1), axis=1)
+
+    confmat: npt.NDArray[np.int_] = np.stack([tn, fp, fn, tp], axis=1).reshape(
+        len_t,
+        2,
+        2,
+    )
+
+    return confmat
+
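+# Editor's sketch (illustrative only): two thresholds over four predictions
+# produce a (2, 2, 2) stack of per-threshold confusion matrices, each laid
+# out as [[tn, fp], [fn, tp]].
+_state = _binary_precision_recall_curve_update(
+    np.array([0, 0, 1, 1]),
+    np.array([0.1, 0.4, 0.35, 0.8]),
+    thresholds=np.array([0.25, 0.75]),
+)
+assert _state.shape == (2, 2, 2)
+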
+
+def _binary_precision_recall_curve_compute(
+    state: Union[
+        Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]],
+        npt.NDArray[np.int_],
+    ],
+    thresholds: Optional[npt.NDArray[np.float_]],
+    pos_label: Optional[int] = None,
+) -> Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]]:
+    """Compute precision-recall curve from a state.
+
+    Parameters
+    ----------
+    state : Tuple or numpy.ndarray
+        State from which the precision-recall curve can be computed. Can be
+        either a tuple of (target, preds) or a multi-threshold confusion matrix.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores. If not None,
+        must be a 1D numpy array of floats in the [0, 1] range and monotonically
+        increasing.
+    pos_label : int
+        The label of the positive class.
+
+    Returns
+    -------
+    precision : numpy.ndarray
+        Precision scores such that element i is the precision of predictions
+        with score >= thresholds[i].
+    recall : numpy.ndarray
+        Recall scores in descending order.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores.
+
+    Raises
+    ------
+    ValueError
+        If ``thresholds`` is None.
+
+    """
+    if isinstance(state, np.ndarray):
+        precision, recall, thresholds = _precision_recall_curve_compute_from_confmat(
+            state,
+            thresholds,  # type: ignore[arg-type]
+        )
+    else:
+        fps, tps, thresholds = _binary_clf_curve(
+            state[0],
+            state[1],
+            pos_label=pos_label,
+            sample_weight=None,
+        )
+
+        precision = np.divide(
+            tps,
+            tps + fps,
+            out=np.zeros_like(tps, dtype=np.float64),
+            where=(tps + fps) != 0,
+        )
+        recall = np.divide(
+            tps,
+            tps[-1],
+            out=np.zeros_like(tps, dtype=np.float64),
+            where=tps[-1] != 0,
+        )
+
+        # stop when full recall attained
+        # and reverse the outputs so recall is decreasing
+        last_ind = tps.searchsorted(tps[-1], side="right")
+        sliced = slice(last_ind, None, -1)
+
+        precision = np.hstack((precision[sliced], 1))
+        recall = np.hstack((recall[sliced], 0))
+        thresholds = thresholds[sliced]  # type: ignore[index]
+
+    return precision, recall, thresholds
+
+
+def binary_precision_recall_curve(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+    pos_label: int = 1,
+) -> PRCurve:
+    """Compute precision-recall curve for binary input.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Binary target values.
+    preds : npt.ArrayLike
+        Predicted probabilities or output of a decision function. If ``preds``
+        are logits, they will be transformed to probabilities via the sigmoid
+        function.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the precision and recall scores.
+        If int, then the number of thresholds to use.
+        If list or numpy.ndarray, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+    pos_label : int
+        The label of the positive class.
+
+    Returns
+    -------
+    PRCurve
+       A named tuple containing the precision (element i is the precision of predictions
+       with score >= thresholds[i]), recall (scores in descending order)
+       and thresholds used to compute the precision-recall curve.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import binary_precision_recall_curve
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0.1, 0.4, 0.35, 0.8]
+    >>> precision, recall, thresholds = binary_precision_recall_curve(
+    ...     target, preds, thresholds=5
+    ... )
+    >>> precision
+    array([0.5       , 0.66666667, 1.        , 1.        , 0.        ])
+    >>> recall
+    array([1. , 1. , 0.5, 0.5, 0. ])
+    >>> thresholds
+    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
+
+    """
+    _check_thresholds(thresholds)
+
+    target, preds = _binary_precision_recall_curve_format(
+        target,
+        preds,
+        pos_label=pos_label,
+    )
+    thresholds = _format_thresholds(thresholds)
+
+    state = _binary_precision_recall_curve_update(target, preds, thresholds)
+    precision_, recall_, thresholds_ = _binary_precision_recall_curve_compute(
+        state,
+        thresholds,
+        pos_label=pos_label,
+    )
+
+    return PRCurve(precision_, recall_, thresholds_)
+
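+# Editor's companion to the doctest above (illustrative only): with
+# ``thresholds=None`` the cut points come from the unique prediction scores
+# rather than an evenly spaced grid, e.g.
+#   binary_precision_recall_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
+# returns thresholds array([0.1 , 0.35, 0.4 , 0.8 ]).
+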
+
+def _multiclass_precision_recall_curve_format(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_classes: int,
+) -> Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]]:
+    """Check and format the input for the multiclass precision-recall curve.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        The target values.
+    preds : npt.ArrayLike
+        The predicted probabilities or output of a decision function. If
+        ``preds`` is not in the [0, 1] range, it will be transformed into this
+        range via the softmax function.
+    num_classes : int
+        Number of classes.
+
+    Returns
+    -------
+    target : numpy.ndarray
+        The target values as a numpy array.
+    preds : numpy.ndarray
+        The predicted probabilities as a numpy array.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores as a numpy array.
+
+    Raises
+    ------
+    ValueError
+        If ``target`` is not a 1D array of integers or contains values outside the
+        range [0, num_classes) or does not have one more dimension than ``preds``;
+        if ``preds`` is not a 2D array of floats or does not have the same number
+        of classes as ``num_classes``; if ``preds`` and ``target`` do not have the
+        same number of samples.
+
+    """
+    formatted = common_input_checks_and_format(target, preds)
+    target_: npt.NDArray[np.int_] = formatted[0]
+    preds_: npt.NDArray[np.float_] = formatted[1]
+    type_target: str = formatted[2]
+    type_preds: str = formatted[3]
+
+    if preds_.ndim != target_.ndim + 1:
+        raise ValueError(
+            "Expected argument `preds` to have one more dimension than argument "
+            f"`target`, but got {preds_.ndim} and {target_.ndim} respectively",
+        )
+
+    if type_target not in ["binary", "multiclass"]:
+        raise ValueError(
+            "Expected argument `target` to be an array of integers with "
+            f"shape (N,) but got {type_target}",
+        )
+
+    if type_target == "binary" and not num_classes > 2:
+        raise ValueError(
+            "Expected `target` to be a multiclass target, but got a binary target",
+        )
+
+    if type_preds != "continuous-multioutput":
+        raise ValueError(
+            "Expected argument `preds` to be `preds` to be an array of floats"
+            f" with probability/logit scores but got {type_preds}",
+        )
+
+    if preds_.shape[-1] != num_classes:
+        raise ValueError(
+            "Expected argument `preds` to have the same number of classes as "
+            f"argument `num_classes`, but got {preds_.shape[-1]} and {num_classes} "
+            "respectively",
+        )
+
+    if preds_.shape[0] != target_.shape[0]:
+        raise ValueError(
+            "Expected argument `preds` to have the same number of samples as "
+            f"argument `target`, but got {preds_.shape[0]} and {target_.shape[0]} "
+            "respectively",
+        )
+
+    num_implied_classes = len(np.unique(target_))
+    if num_implied_classes > num_classes:
+        raise ValueError(
+            "Detected more unique values in `target` than `num_classes`. Expected only "
+            f"{num_classes} but found {num_implied_classes} in `target`.",
+        )
+
+    if not np.all(np.logical_and(preds_ >= 0.0, preds_ <= 1.0)):
+        preds_ = sp.special.softmax(preds_, axis=1)  # logit to probability
+
+    if not np.allclose(1, preds_.sum(axis=1)):
+        raise ValueError(
+            "``preds`` need to be probabilities for multiclass problems"
+            " i.e. they should sum up to 1.0 over classes",
+        )
+
+    return target_, preds_
+
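+# Editor's sketch (illustrative only): logits are softmaxed before the
+# row-sum check, so passing logits or their softmaxed probabilities is
+# expected to be equivalent here.
+_logits = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.2]])
+_t1, _p1 = _multiclass_precision_recall_curve_format([0, 1], _logits, num_classes=3)
+_t2, _p2 = _multiclass_precision_recall_curve_format(
+    [0, 1], sp.special.softmax(_logits, axis=1), num_classes=3
+)
+assert np.allclose(_p1, _p2)
+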
+
+def _multiclass_precision_recall_curve_update(
+    target: npt.NDArray[np.int_],
+    preds: npt.NDArray[np.float_],
+    num_classes: int,
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+) -> Union[Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], npt.NDArray[np.int_]]:
+    """Update the state of the multiclass precision-recall curve.
+
+    Parameters
+    ----------
+    target : numpy.ndarray
+        Binary target values.
+    preds : numpy.ndarray
+        Predicted probabilities.
+    num_classes : int
+        Number of classes.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used for computing the precision and recall scores.
+
+    Returns
+    -------
+    (target, preds) : Tuple[numpy.ndarray, numpy.ndarray]
+        The target and predicted probabilities, if ``thresholds`` is None.
+    state : numpy.ndarray
+        The state of the multiclass precision-recall curve, if ``thresholds``
+        is not None.
+
+    """
+    if thresholds is None:
+        return target, preds
+
+    # one-vs-all multi-threshold confusion matrix
+    len_t = len(thresholds)
+    preds_t = (
+        np.expand_dims(preds, axis=-1)
+        >= np.expand_dims(np.expand_dims(thresholds, axis=0), axis=0)
+    ).astype(np.int64)
+
+    target_t = np.expand_dims(
+        label_binarize(target, classes=np.arange(num_classes)),
+        axis=-1,
+    )
+
+    return _ovr_multi_threshold_confusion_matrix(
+        target_t,
+        preds_t,
+        num_classes=num_classes,
+        num_thresholds=len_t,
+    )
+
+
+def _multiclass_precision_recall_curve_compute(
+    state: Union[
+        Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]],
+        npt.NDArray[np.int_],
+    ],
+    thresholds: npt.NDArray[np.float_],
+    num_classes: int,
+) -> Union[
+    Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]],
+    Tuple[
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+    ],
+]:
+    """Compute the multiclass precision-recall curve.
+
+    Parameters
+    ----------
+    state : numpy.ndarray
+        The state of the multiclass precision-recall curve. If ``thresholds`` is
+        None, then ``state`` is a tuple of the target and predicted probabilities.
+        Otherwise, ``state`` is the one-vs-all multi-threshold confusion matrix.
+    thresholds : numpy.ndarray
+        Thresholds used for computing the precision and recall scores.
+    num_classes : int
+        Number of classes.
+
+    Returns
+    -------
+    precision : numpy.ndarray or list of numpy.ndarray
+        Precision scores where element i is the precision score corresponding to the
+        threshold i. If state is a tuple of the target and predicted probabilities,
+        then precision is a list of arrays, where each array corresponds to the
+        precision scores for a class.
+    recall : numpy.ndarray or list of numpy.ndarray
+        Recall scores where element `i` is the recall score corresponding to the
+        threshold  `i`. If state is a tuple of the target and predicted probabilities,
+        then recall is a list of arrays, where each array corresponds to the recall
+        scores for a class.
+    thresholds : numpy.ndarray or list of numpy.ndarray
+        Thresholds used for computing the precision and recall scores.
+
+    """
+    if isinstance(state, np.ndarray):
+        precision, recall, thresholds = _precision_recall_curve_compute_from_confmat(
+            state,
+            thresholds,
+        )
+
+        precision = np.hstack((precision.T, np.ones((num_classes, 1))))
+        recall = np.hstack((recall.T, np.zeros((num_classes, 1))))
+
+        return precision, recall, thresholds
+
+    precision_list, recall_list, thresholds_list = [], [], []
+    for i in range(num_classes):
+        (
+            precision_i,
+            recall_i,
+            thresholds_i,
+        ) = _binary_precision_recall_curve_compute(
+            (state[0], state[1][:, i]),
+            thresholds=None,
+            pos_label=i,
+        )
+
+        precision_list.append(precision_i)
+        recall_list.append(recall_i)
+        thresholds_list.append(thresholds_i)
+
+    return precision_list, recall_list, thresholds_list
+
+
+
+[docs] +def multiclass_precision_recall_curve( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, +) -> PRCurve: + """Compute the precision-recall curve for multiclass problems. + + Parameters + ---------- + target : ArrayLike + Ground truth (correct) target values. + preds : ArrayLike + Estimated probabilities or decision function. If ``preds`` is a logit, it + will be converted to a probability using the softmax function. + num_classes : int + The number of classes in the dataset. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for computing the precision and recall scores. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + + Returns + ------- + PRCurve + A named tuple containing the precision, recall, and thresholds. + Precision and recall are arrays where element i is the precision and + recall score corresponding to threshold i. If state is a tuple of the + target and predicted probabilities, then precision and recall are lists + of arrays, where each array corresponds to the precision and recall + scores for a class. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import ( + ... multiclass_precision_recall_curve, + ... ) + >>> target = [0, 1, 2, 2] + >>> preds = [[0.1, 0.6, 0.3], [0.05, 0.95, 0], [0.5, 0.3, 0.2], [0.3, 0.4, 0.3]] + >>> precision, recall, thresholds = multiclass_precision_recall_curve( + ... target, preds, num_classes=3, thresholds=5 + ... ) + >>> precision + array([[0.25, 0. , 0. , 0. , 0. , 1. ], + [0.25, 0.25, 0.5 , 1. , 0. , 1. ], + [0.5 , 0.5 , 0. , 0. , 0. , 1. ]]) + >>> recall + array([[1. , 0. , 0. , 0. , 0. , 0. ], + [1. , 1. , 1. , 1. , 0. , 0. ], + [1. , 0.5, 0. , 0. , 0. , 0. ]]) + >>> thresholds + array([0. , 0.25, 0.5 , 0.75, 1. ]) + + """ + _check_thresholds(thresholds) + + target, preds = _multiclass_precision_recall_curve_format( + target, + preds, + num_classes=num_classes, + ) + + thresholds = _format_thresholds(thresholds) + + state = _multiclass_precision_recall_curve_update( + target, + preds, + num_classes=num_classes, + thresholds=thresholds, + ) + + precision_, recall_, thresholds_ = _multiclass_precision_recall_curve_compute( + state, + thresholds, # type: ignore + num_classes, + ) + return PRCurve(precision_, recall_, thresholds_)
+ + + +def _multilabel_precision_recall_curve_format( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, +) -> Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]]: + """Check and format the multilabel precision-recall curve input/data. + + Parameters + ---------- + target : npt.ArrayLike + The target values. + preds : npt.ArrayLike + Predicted probabilities or output of a decision function. If the + values are not in [0, 1], then they are converted into probabilities + by applying the sigmoid function. + num_labels : int + The number of labels in the dataset. + + Returns + ------- + target : numpy.ndarray + The target values as a numpy array. + preds : numpy.ndarray + The predicted probabilities as a numpy array. + + Raises + ------ + ValueError + If ``target`` is not in multilabel-indicator format. + ValueError + If ``preds`` does not contain float values. + ValueError + If ``num_labels`` does not match up with the number of columns in ``preds``. + ValueError + If the number of columns in ``preds`` is not the same as the number of + columns in ``target``. + + """ + target, preds, type_target, type_preds = common_input_checks_and_format( + target, + preds, + ) + + # allow single-sample inputs + if type_preds in ["continuous", "binary"] and type_target == "binary": + preds = np.expand_dims(preds, axis=0) + type_preds = ( + "continuous-multioutput" + if type_preds == "continuous" + else "multilabel-indicator" + ) + if type_target == "binary": + target = np.expand_dims(target, axis=0) + type_target = "multilabel-indicator" + + # validate input types + if type_target != "multilabel-indicator": + raise ValueError( + "Expected argument `target` to be a multilabel indicator array, but got " + f"{type_target}", + ) + + if type_preds != "continuous-multioutput": + raise ValueError( + "Expected argument `preds` to be an array of floats with" + f" probabilities/logit scores, but got {type_preds}", + ) + + if num_labels != preds.shape[1]: + raise ValueError( + "Expected `num_labels` to be equal to the number of columns in `preds`, " + f"but got {num_labels} and {preds.shape[1]}", + ) + + if target.shape[1] != preds.shape[1]: + raise ValueError( + "Number of columns in `target` and `preds` must be the same. " + f"Got {target.shape[1]} and {preds.shape[1]}.", + ) + + if not np.all(np.logical_and(preds >= 0.0, preds <= 1.0)): + preds = sigmoid(preds) + + return target, preds + + +def _multilabel_precision_recall_curve_update( + target: npt.NDArray[np.int_], + preds: npt.NDArray[np.float_], + num_labels: int, + thresholds: Optional[npt.NDArray[np.float_]] = None, +) -> Union[Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], npt.NDArray[np.int_]]: + """Update the multilabel precision-recall curve state. + + Parameters + ---------- + target : numpy.ndarray + The target values. + preds : numpy.ndarray + Predicted probabilities or output of a decision function. + num_labels : int + The number of labels in the dataset. + thresholds : numpy.ndarray + Thresholds used for computing the precision and recall scores. + + Returns + ------- + (target, preds) : Tuple[numpy.ndarray, numpy.ndarray] + The target and predicted values, if ``thresholds`` is None. 
+ state : numpy.ndarray + One-vs-rest multi-threshold confusion matrix, if ``thresholds`` is not None. + + """ + if thresholds is None: + return target, preds + + # one-vs-all multi-threshold confusion matrix + len_t = len(thresholds) + preds_t = ( + np.expand_dims(preds, axis=-1) >= np.expand_dims(thresholds, axis=0) + ).astype(np.int_) + + target_t = np.expand_dims(target, axis=-1) + + return _ovr_multi_threshold_confusion_matrix(target_t, preds_t, num_labels, len_t) + + +def _multilabel_precision_recall_curve_compute( + state: Union[ + Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]], + npt.NDArray[np.int_], + ], + thresholds: npt.NDArray[np.float_], + num_labels: int, +) -> Union[ + Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]], + Tuple[ + List[npt.NDArray[np.float_]], + List[npt.NDArray[np.float_]], + List[npt.NDArray[np.float_]], + ], +]: + """Compute the precision-recall curve for multilabel data. + + Parameters + ---------- + state : Tuple[numpy.ndarray, numpy.ndarray] or numpy.ndarray + The target and predicted values, if ``thresholds`` is None. Otherwise, + the one-vs-rest multi-threshold confusion matrix. + thresholds : numpy.ndarray + Thresholds used for computing the precision and recall scores. + num_labels : int + Number of labels. + + Returns + ------- + precision : numpy.ndarray or List[numpy.ndarray] + Precision values for each label. + recall : numpy.ndarray or List[numpy.ndarray] + Recall values for each label. + thresholds : numpy.ndarray or List[numpy.ndarray] + If ``thresholds`` is None, then thresholds is a list of arrays, one for + each label. Otherwise, thresholds is a single array with shape + (len(``thresholds``,). + + """ + if isinstance(state, np.ndarray): + precision, recall, thresholds = _precision_recall_curve_compute_from_confmat( + state, + thresholds, + ) + + precision = np.hstack((precision.T, np.ones((num_labels, 1)))) + recall = np.hstack((recall.T, np.zeros((num_labels, 1)))) + + return precision, recall, thresholds + + precision_list, recall_list, thresholds_list = [], [], [] + for i in range(num_labels): + target = state[0][:, i] + preds = state[1][:, i] + ( + precision_i, + recall_i, + thresholds_i, + ) = _binary_precision_recall_curve_compute( + (target, preds), + thresholds=None, + pos_label=1, + ) + + precision_list.append(precision_i) + recall_list.append(recall_i) + thresholds_list.append(thresholds_i) + + return precision_list, recall_list, thresholds_list + + +
+[docs] +def multilabel_precision_recall_curve( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, +) -> PRCurve: + """Compute the precision-recall curve for multilabel input. + + Parameters + ---------- + target : npt.ArrayLike + The target values. + preds : npt.ArrayLike + Predicted probabilities or output of a decision function. If the + values are not in [0, 1], then they are converted into that range + by applying the sigmoid function. + num_labels : int + The number of labels in the dataset. + thresholds : numpy.ndarray + Thresholds used for computing the precision and recall scores. + If int, then the number of thresholds to use. + If list of floats, then the thresholds to use. + If None, then the thresholds are computed automatically from the unique + values in ``preds``. + + Returns + ------- + PRCurve + A named tuple with the following: + - ``precision``: numpy.ndarray or List[numpy.ndarray]. + Precision values for each label. If ``thresholds`` is None, then + precision is a list of arrays, one for each label. Otherwise, + precision is a single array with shape + (``num_labels``, len(``thresholds``)). + - ``recall``: numpy.ndarray or List[numpy.ndarray]. + Recall values for each label. If ``thresholds`` is None, then + recall is a list of arrays, one for each label. Otherwise, + recall is a single array with shape (``num_labels``, len(``thresholds``)). + - ``thresholds``: numpy.ndarray or List[numpy.ndarray]. + If ``thresholds`` is None, then thresholds is a list of arrays, one for + each label. Otherwise, thresholds is a single array with shape + (len(``thresholds``,). + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import ( + ... multilabel_precision_recall_curve, + ... ) + >>> target = [[1, 1, 0], [0, 1, 0]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.95, 0.35]] + >>> precision, recall, thresholds = multilabel_precision_recall_curve( + ... target, preds, num_labels=3, thresholds=5 + ... ) + >>> precision + array([[0.5, 0. , 0. , 0. , 0. , 1. ], + [1. , 1. , 1. , 1. , 0. , 1. ], + [0. , 0. , 0. , 0. , 0. , 1. ]]) + >>> recall + array([[1., 0., 0., 0., 0., 0.], + [1., 1., 1., 1., 0., 0.], + [0., 0., 0., 0., 0., 0.]]) + >>> thresholds + array([0. , 0.25, 0.5 , 0.75, 1. ]) + + """ + _check_thresholds(thresholds) + + target, preds = _multilabel_precision_recall_curve_format( + target, + preds, + num_labels=num_labels, + ) + + thresholds = _format_thresholds(thresholds) + + state = _multilabel_precision_recall_curve_update( + target, + preds, + num_labels=num_labels, + thresholds=thresholds, + ) + + precision_, recall_, thresholds_ = _multilabel_precision_recall_curve_compute( + state, + thresholds, # type: ignore + num_labels, + ) + return PRCurve(precision_, recall_, thresholds_)
+ + + +
+[docs]
+def precision_recall_curve(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+    pos_label: int = 1,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+) -> PRCurve:
+    """Compute the precision-recall curve for different tasks/input types.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth (correct) target values.
+    preds : npt.ArrayLike
+        Estimated probabilities or non-thresholded output of decision function.
+    task : Literal["binary", "multiclass", "multilabel"]
+        The task for which the precision-recall curve is computed.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the precision and recall scores. If int,
+        then the number of thresholds to use. If list or array, then the
+        thresholds to use. If None, then the thresholds are automatically
+        determined by the unique values in ``preds``.
+    pos_label : int, default=1
+        The label of the positive class.
+    num_classes : int, optional
+        The number of classes in the dataset. Required if ``task`` is ``"multiclass"``.
+    num_labels : int, optional
+        The number of labels in the dataset. Required if ``task`` is ``"multilabel"``.
+
+    Returns
+    -------
+    PRCurve
+        A named tuple with the following:
+        - ``precision``: numpy.ndarray or List[numpy.ndarray].
+            The precision scores where ``precision[i]`` is the precision score for
+            ``scores >= thresholds[i]``. If ``task`` is 'multiclass' or 'multilabel',
+            then ``precision`` is a list of numpy arrays, where ``precision[i]`` is the
+            precision scores for class or label ``i``.
+        - ``recall``: numpy.ndarray or List[numpy.ndarray].
+            The recall scores where ``recall[i]`` is the recall score for ``scores >=
+            thresholds[i]``. If ``task`` is 'multiclass' or 'multilabel', then
+            ``recall`` is a list of numpy arrays, where ``recall[i]`` is the recall
+            scores for class or label ``i``.
+        - ``thresholds``: numpy.ndarray or List[numpy.ndarray].
+            Thresholds used for computing the precision and recall scores.
+
+    Raises
+    ------
+    ValueError
+        If ``task`` is not one of 'binary', 'multiclass' or 'multilabel'.
+    AssertionError
+        If ``task`` is ``multiclass`` and ``num_classes`` is not provided.
+    AssertionError
+        If ``task`` is ``multilabel`` and ``num_labels`` is not provided.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import precision_recall_curve
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0.1, 0.4, 0.35, 0.8]
+    >>> precision, recall, thresholds = precision_recall_curve(target, preds, "binary")
+    >>> precision
+    array([0.5       , 0.66666667, 0.5       , 1.        , 1.        ])
+    >>> recall
+    array([1. , 1. , 0.5, 0.5, 0. ])
+    >>> thresholds
+    array([0.1 , 0.35, 0.4 , 0.8 ])
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics.functional import precision_recall_curve
+    >>> target = [0, 1, 2, 2]
+    >>> preds = [[0.1, 0.6, 0.3], [0.05, 0.95, 0], [0.5, 0.3, 0.2], [0.3, 0.4, 0.3]]
+    >>> precision, recall, thresholds = precision_recall_curve(
+    ...     target, preds, task="multiclass", num_classes=3
+    ... )
+    >>> [prec.tolist() for prec in precision]
+    [[0.25, 0.3333333333333333, 0.0, 0.0, 1.0], [0.25, 0.3333333333333333, 0.5, 1.0, 1.0], [0.5, 0.6666666666666666, 0.5, 1.0]]
+    >>> [rec.tolist() for rec in recall]
+    [[1.0, 1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.5, 0.0]]
+    >>> thresholds
+    [array([0.05, 0.1 , 0.3 , 0.5 ]), array([0.3 , 0.4 , 0.6 , 0.95]), array([0. , 0.2, 0.3])]
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics.functional import precision_recall_curve
+    >>> target = [[1, 1, 0], [0, 1, 0]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.95, 0.35]]
+    >>> precision, recall, thresholds = precision_recall_curve(
+    ...     target, preds, "multilabel", num_labels=3
+    ... )
+    >>> precision
+    [array([0.5, 1. , 1. ]), array([1., 1., 1.]), array([0., 0., 1.])]
+    >>> recall
+    [array([1., 1., 0.]), array([1. , 0.5, 0. ]), array([0., 0., 0.])]
+    >>> thresholds
+    [array([0.05, 0.1 ]), array([0.9 , 0.95]), array([0.35, 0.8 ])]
+
+    """  # noqa: W505
+    if task == "binary":
+        return binary_precision_recall_curve(
+            target,
+            preds,
+            thresholds=thresholds,
+            pos_label=pos_label,
+        )
+    if task == "multiclass":
+        assert (
+            isinstance(num_classes, int) and num_classes > 0
+        ), "Number of classes must be a positive integer."
+
+        return multiclass_precision_recall_curve(
+            target,
+            preds,
+            num_classes=num_classes,
+            thresholds=thresholds,
+        )
+    if task == "multilabel":
+        assert (
+            isinstance(num_labels, int) and num_labels > 0
+        ), "Number of labels must be a positive integer."
+
+        return multilabel_precision_recall_curve(
+            target,
+            preds,
+            num_labels=num_labels,
+            thresholds=thresholds,
+        )
+
+    raise ValueError(
+        "Expected argument `task` to be either 'binary', 'multiclass' or "
+        f"'multilabel', but got {task}",
+    )
+ +
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/roc.html b/api/_modules/cyclops/evaluate/metrics/functional/roc.html
new file mode 100644
index 000000000..f99711902
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/roc.html
@@ -0,0 +1,1006 @@
+cyclops.evaluate.metrics.functional.roc - cyclops documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+
+ +
+ +
+
+

Source code for cyclops.evaluate.metrics.functional.roc

+"""Functions for computing the receiver operating characteristic (ROC) curve."""
+
+import logging
+from typing import Any, List, Literal, NamedTuple, Optional, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics._ranking import _binary_clf_curve
+
+from cyclops.evaluate.metrics.functional.precision_recall_curve import (
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_update,
+    _format_thresholds,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_update,
+)
+from cyclops.evaluate.metrics.utils import _check_thresholds
+from cyclops.utils.log import setup_logging
+
+
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="WARN", logger=LOGGER)
+
+
+class ROCCurve(NamedTuple):
+    """Named tuple to store ROC curve (FPR, TPR and thresholds)."""
+
+    fpr: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+    tpr: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+    thresholds: Union[npt.NDArray[np.float_], List[npt.NDArray[np.float_]]]
+
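+# Like any NamedTuple, ROCCurve supports both attribute access (curve.fpr)
+# and tuple unpacking: fpr, tpr, thresholds = curve.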
+
+def _roc_compute_from_confmat(
+    confmat: npt.NDArray[Any],
+    thresholds: npt.NDArray[np.float_],
+) -> Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]]:
+    """Compute the ROC curve from a multi-threshold confusion matrix.
+
+    Parameters
+    ----------
+    confmat : numpy.ndarray
+        A multi-threshold confusion matrix of size (num_thresholds, 2, 2) or
+        (num_thresholds, num_classes, 2, 2).
+    thresholds : numpy.ndarray of floats
+        Thresholds used to binarize the predicted probabilities.
+
+    Returns
+    -------
+    fpr : numpy.ndarray
+        False positive rate.
+    tpr : numpy.ndarray
+        True positive rate.
+    thresholds : numpy.ndarray
+        Thresholds used to compute fpr and tpr.
+
+    """
+    tps = confmat[..., 1, 1]
+    fns = confmat[..., 1, 0]
+    fps = confmat[..., 0, 1]
+    tns = confmat[..., 0, 0]
+
+    tpr = np.divide(
+        tps,
+        tps + fns,
+        out=np.zeros_like(tps, dtype=np.float64),
+        where=(tps + fns) != 0,
+    )
+    fpr = np.divide(
+        fps,
+        fps + tns,
+        out=np.zeros_like(fps, dtype=np.float64),
+        where=(fps + tns) != 0,
+    )
+
+    # reverse order of arrays
+    tpr = np.flip(tpr, axis=0)
+    fpr = np.flip(fpr, axis=0)
+    thresholds = np.flip(thresholds, axis=0)
+
+    return fpr, tpr, thresholds
+
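+# Worked sketch (not library code) of the expected confmat layout: one 2x2
+# matrix per threshold, arranged [[tn, fp], [fn, tp]].  For target
+# [0, 0, 1, 1], preds [0.1, 0.4, 0.6, 0.9] and thresholds [0.0, 0.5, 1.0]:
+#
+#     confmat = np.array([[[0, 2], [0, 2]],    # t=0.0: everything positive
+#                         [[2, 0], [0, 2]],    # t=0.5: perfect split
+#                         [[2, 0], [2, 0]]])   # t=1.0: everything negative
+#
+# _roc_compute_from_confmat(confmat, np.array([0.0, 0.5, 1.0])) then gives
+# fpr [0., 0., 1.], tpr [0., 1., 1.] and thresholds [1. , 0.5, 0. ] (flipped
+# so the strictest threshold comes first).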
+
+def _binary_roc_compute(
+    state: Union[Tuple[npt.NDArray[Any], npt.NDArray[Any]], npt.NDArray[Any]],
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+    pos_label: int = 1,
+) -> Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]]:
+    """Compute the ROC curve for binary classification.
+
+    Parameters
+    ----------
+    state : tuple of numpy.ndarray or numpy.ndarray
+        If ``thresholds`` is not None, ``state`` is a multi-threshold confusion
+        matrix. If ``thresholds`` is None, ``state`` is a tuple of the target
+        and predicted probabilities.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used to binarize the predicted probabilities. If None,
+        the unique values of the predicted probabilities are used as
+        thresholds.
+    pos_label : int, optional
+        The label of the positive class.
+
+    Returns
+    -------
+    fpr : numpy.ndarray
+        False positive rate.
+    tpr : numpy.ndarray
+        True positive rate.
+    thresholds : numpy.ndarray
+        Thresholds used to compute fpr and tpr.
+
+    """
+    if isinstance(state, np.ndarray) and thresholds is not None:
+        fpr, tpr, thresholds = _roc_compute_from_confmat(state, thresholds)
+    else:
+        fps, tps, thresholds = _binary_clf_curve(
+            y_true=state[0],
+            y_score=state[1],
+            pos_label=pos_label,
+            sample_weight=None,
+        )
+
+        # start the curve at (0, 0)
+        fps = np.hstack((0, fps))
+        tps = np.hstack((0, tps))
+        thresholds = np.hstack((1, thresholds))  # type: ignore[arg-type]
+
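+        # fps[-1] and tps[-1] are the cumulative counts at the loosest
+        # threshold, i.e. the total numbers of negative and positive samples
+        # in `target`, respectively.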
+        if fps[-1] <= 0:
+            LOGGER.warning(
+                "No negative samples in `target`, false positive value should be "
+                "meaningless. Returning zero array in false positive score",
+            )
+            fpr = np.zeros_like(thresholds, dtype=np.float64)
+        else:
+            fpr = fps / fps[-1]
+
+        if tps[-1] <= 0:
+            LOGGER.warning(
+                "No positive samples in `target`, true positive value should be"
+                " meaningless. Returning zero array in true positive score",
+            )
+            tpr = np.zeros_like(thresholds, dtype=np.float64)
+        else:
+            tpr = tps / tps[-1]
+
+    return fpr, tpr, thresholds
+
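+# Illustrative sketch (not library code) of the two `state` forms accepted by
+# _binary_roc_compute above: without precomputed thresholds, pass the raw
+# target/score arrays,
+#
+#     fpr, tpr, thr = _binary_roc_compute(
+#         (np.array([0, 1, 1]), np.array([0.2, 0.7, 0.9]))
+#     )
+#
+# with thresholds, pass the multi-threshold confusion matrix produced by the
+# update step, together with the same thresholds array.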
+
+
+[docs]
+def binary_roc_curve(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+    pos_label: int = 1,
+) -> ROCCurve:
+    """Compute the ROC curve for binary classification tasks.
+
+    Parameters
+    ----------
+    target : ArrayLike
+        Ground truth (correct) target values.
+    preds : ArrayLike
+        Estimated probabilities or decision function. If ``preds`` is not in
+        the range [0, 1], a sigmoid function is applied to transform it to
+        the range [0, 1].
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the false positive and true positive rates.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+    pos_label : int, optional
+        The label of the positive class.
+
+    Returns
+    -------
+    ROCCurve
+        A named tuple containing the false positive rate, true positive rate,
+        and thresholds used to compute the ROC curve.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import binary_roc_curve
+    >>> target = [1, 0, 1, 0]
+    >>> preds = [0.9, 0.2, 0.8, 0.3]
+    >>> fpr, tpr, thresholds = binary_roc_curve(target, preds, thresholds=5)
+    >>> fpr
+    array([0. , 0. , 0. , 0.5, 1. ])
+    >>> tpr
+    array([0., 1., 1., 1., 1.])
+    >>> thresholds
+    array([1.  , 0.75, 0.5 , 0.25, 0.  ])
+
+    """
+    _check_thresholds(thresholds)
+
+    target, preds = _binary_precision_recall_curve_format(
+        target,
+        preds,
+        pos_label=pos_label,
+    )
+    thresholds = _format_thresholds(thresholds)
+
+    state = _binary_precision_recall_curve_update(target, preds, thresholds)
+    fpr, tpr, thresholds = _binary_roc_compute(state, thresholds, pos_label)
+
+    return ROCCurve(fpr, tpr, thresholds)
+
+
+
+def _multiclass_roc_compute(
+    state: Union[
+        Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]],
+        npt.NDArray[np.int_],
+    ],
+    num_classes: int,
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+) -> Union[
+    Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]],
+    Tuple[
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+    ],
+]:
+    """Compute the ROC curve for multiclass classification tasks.
+
+    Parameters
+    ----------
+    state : numpy.ndarray or tuple of numpy.ndarray
+        If ``thresholds`` is not None, ``state`` is a multi-threshold confusion
+        matrix. If ``thresholds`` is None, ``state`` is a tuple of (target, preds).
+    num_classes : int
+        Number of classes.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used for binarizing the predicted probabilities. If not
+        None, must be a 1D numpy array of floats in the [0, 1] range and
+        monotonically increasing.
+
+    Returns
+    -------
+    fpr : numpy.ndarray or list of numpy.ndarray
+        False positive rate. If ``thresholds`` is not None, ``fpr`` is an array
+        of shape (``num_classes``, len(``thresholds``)). Otherwise, ``fpr`` is
+        a list of 1d numpy arrays, one for each class.
+    tpr : numpy.ndarray or list of numpy.ndarray
+        True positive rate. If ``thresholds`` is not None, ``tpr`` is an array
+        of shape (``num_classes``, len(``thresholds``)). Otherwise, ``tpr`` is
+        a list of 1d numpy arrays, one for each class.
+    thresholds : numpy.ndarray or list of numpy.ndarray
+        Thresholds used to compute fpr and tpr. If ``thresholds`` is not None,
+        thresholds is a 1d numpy array. Otherwise, thresholds is a list of
+        1d numpy arrays, one for each class.
+
+    """
+    if isinstance(state, np.ndarray) and thresholds is not None:
+        fpr, tpr, thresholds = _roc_compute_from_confmat(state, thresholds)
+
+        tpr = tpr.T
+        fpr = fpr.T
+
+        return fpr, tpr, thresholds
+
+    fpr_list, tpr_list, thresholds_list = [], [], []
+    for i in range(num_classes):
+        res = _binary_roc_compute(
+            (state[0], state[1][:, i]),
+            thresholds=None,
+            pos_label=i,
+        )
+        fpr_list.append(res[0])
+        tpr_list.append(res[1])
+        thresholds_list.append(res[2])
+
+    return fpr_list, tpr_list, thresholds_list
+
+
+[docs]
+def multiclass_roc_curve(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_classes: int,
+    thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+) -> ROCCurve:
+    """Compute the ROC curve for multiclass classification tasks.
+
+    Parameters
+    ----------
+    target : ArrayLike
+        Ground truth (correct) target values.
+    preds : ArrayLike
+        Estimated probabilities or decision function. If ``preds`` is not in
+        the range [0, 1], a softmax function is applied to transform it to
+        the range [0, 1].
+    num_classes : int
+        Number of classes.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for binarizing the predicted probabilities.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+
+    Returns
+    -------
+    ROCCurve
+        A named tuple containing the false positive rate, true positive rate,
+        and thresholds used to compute the ROC curve. If ``thresholds`` is not
+        None, ``fpr`` and ``tpr`` are arrays of shape
+        (``num_classes``, len(``thresholds``)) and ``thresholds`` is a 1d array;
+        otherwise, all three are lists of 1d numpy arrays, one for each class.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import multiclass_roc_curve
+    >>> target = [1, 0, 2, 0]
+    >>> preds = [
+    ...     [0.9, 0.05, 0.05],
+    ...     [0.05, 0.9, 0.05],
+    ...     [0.05, 0.05, 0.9],
+    ...     [0.9, 0.05, 0.05],
+    ... ]
+    >>> fpr, tpr, thresholds = multiclass_roc_curve(
+    ...     target, preds, num_classes=3, thresholds=5
+    ... )
+    >>> fpr
+    array([[0.        , 0.5       , 0.5       , 0.5       , 1.        ],
+           [0.        , 0.33333333, 0.33333333, 0.33333333, 1.        ],
+           [0.        , 0.        , 0.        , 0.        , 1.        ]])
+    >>> tpr
+    array([[0. , 0.5, 0.5, 0.5, 1. ],
+           [0. , 0. , 0. , 0. , 1. ],
+           [0. , 1. , 1. , 1. , 1. ]])
+    >>> thresholds
+    array([1.  , 0.75, 0.5 , 0.25, 0.  ])
+
+    """
+    _check_thresholds(thresholds)
+    target, preds = _multiclass_precision_recall_curve_format(
+        target,
+        preds,
+        num_classes=num_classes,
+    )
+    thresholds = _format_thresholds(thresholds)
+
+    state = _multiclass_precision_recall_curve_update(
+        target,
+        preds,
+        num_classes=num_classes,
+        thresholds=thresholds,
+    )
+    fpr_, tpr_, thresholds_ = _multiclass_roc_compute(state, num_classes, thresholds)
+
+    return ROCCurve(fpr=fpr_, tpr=tpr_, thresholds=thresholds_)
+
+
+
+def _multilabel_roc_compute(
+    state: Union[
+        Tuple[npt.NDArray[np.int_], npt.NDArray[np.float_]],
+        npt.NDArray[np.int_],
+    ],
+    num_labels: int,
+    thresholds: Optional[npt.NDArray[np.float_]] = None,
+) -> Union[
+    Tuple[npt.NDArray[np.float_], npt.NDArray[np.float_], npt.NDArray[np.float_]],
+    Tuple[
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+        List[npt.NDArray[np.float_]],
+    ],
+]:
+    """Compute the ROC curve for multilabel classification tasks.
+
+    Parameters
+    ----------
+    state : numpy.ndarray or tuple of numpy.ndarray
+        If ``thresholds`` is not None, ``state`` is a multi-threshold confusion
+        matrix. Otherwise, ``state`` is a tuple of (target, preds).
+    num_labels : int
+        Number of labels.
+    thresholds : numpy.ndarray, default=None
+        Thresholds used for binarizing the predicted probabilities. If not
+        None, must be a 1D numpy array of floats in the [0, 1] range and
+        monotonically increasing.
+
+    Returns
+    -------
+    fpr : numpy.ndarray or list of numpy.ndarray
+        False positive rate. If ``thresholds`` is not None, ``fpr`` is an array
+        of shape (``num_labels``, len(``thresholds``)). Otherwise, ``fpr`` is
+        a list of 1d numpy arrays, one for each label.
+    tpr : numpy.ndarray or list of numpy.ndarray
+        True positive rate. If ``thresholds`` is not None, ``tpr`` is an array
+        of shape (``num_labels``, len(``thresholds``)). Otherwise, ``tpr`` is
+        a list of 1d numpy arrays, one for each label.
+    thresholds : numpy.ndarray or list of numpy.ndarray
+        Thresholds used to compute fpr and tpr. If ``thresholds`` is not None,
+        thresholds is a 1d numpy array. Otherwise, thresholds is a list of
+        1d numpy arrays, one for each label.
+
+    """
+    if isinstance(state, np.ndarray) and thresholds is not None:
+        fpr, tpr, thresholds = _roc_compute_from_confmat(state, thresholds)
+
+        tpr = tpr.T
+        fpr = fpr.T
+
+        return fpr, tpr, thresholds
+
+    fpr_list, tpr_list, thresholds_list = [], [], []
+    for i in range(num_labels):
+        res = _binary_roc_compute(
+            (state[0][:, i], state[1][:, i]),
+            thresholds=None,
+            pos_label=i,
+        )
+        fpr_list.append(res[0])
+        tpr_list.append(res[1])
+        thresholds_list.append(res[2])
+
+    return fpr_list, tpr_list, thresholds_list
+
+
+[docs] +def multilabel_roc_curve( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, +) -> ROCCurve: + """Compute the ROC curve for multilabel classification tasks. + + Parameters + ---------- + target : ArrayLike + Ground truth (correct) target values. + preds : ArrayLike + Estimated probabilities or decision function. If ``preds`` is not in + the range [0, 1], a sigmoid function is applied to transform it to + the range [0, 1]. + num_labels : int + The number of labels in the dataset. + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + + Returns + ------- + ROCCurve + A named tuple containing the false positive rate, true positive rate, + and thresholds used to compute the ROC curve. If ``threshold`` is not None, + ``fpr``, ``tpr`` and ``thresholds`` are 1d numpy arrays, else they are lists + of 1d numpy arrays, one for each label. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_roc_curve + >>> target = [[0, 1, 0], [0, 1, 1], [1, 0, 1]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.1, 0.9], [0.8, 0.2, 0.3]] + >>> fpr, tpr, thresholds = multilabel_roc_curve( + ... target, preds, num_labels=3, thresholds=5 + ... ) + >>> fpr + array([[0., 0., 0., 0., 1.], + [0., 0., 0., 0., 1.], + [0., 1., 1., 1., 1.]]) + >>> tpr + array([[0. , 1. , 1. , 1. , 1. ], + [0. , 0.5, 0.5, 0.5, 1. ], + [0. , 0.5, 0.5, 1. , 1. ]]) + >>> thresholds + array([1. , 0.75, 0.5 , 0.25, 0. ]) + + """ + _check_thresholds(thresholds) + target, preds = _multilabel_precision_recall_curve_format( + target, + preds, + num_labels=num_labels, + ) + thresholds = _format_thresholds(thresholds) + + state = _multilabel_precision_recall_curve_update( + target, + preds, + num_labels=num_labels, + thresholds=thresholds, + ) + fpr_, tpr_, thresholds_ = _multilabel_roc_compute(state, num_labels, thresholds) + + return ROCCurve(fpr=fpr_, tpr=tpr_, thresholds=thresholds_)
+ + + +
+[docs] +def roc_curve( + target: npt.ArrayLike, + preds: npt.ArrayLike, + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + pos_label: int = 1, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, +) -> ROCCurve: + """Compute the ROC curve for different tasks/input types. + + Parameters + ---------- + target : ArrayLike + Ground truth (correct) target values. + preds : ArrayLike + Estimated probabilities or non-thresholded output of decision function. + If ``task`` is ``multiclass`` and the values in ``preds`` are not + probabilities, they will be converted to probabilities using the softmax + function. If ``task`` is ``multilabel`` and the values in ``preds`` are + not probabilities, they will be converted to probabilities using the + sigmoid function. + task : Literal["binary", "multiclass", "multilabel"] + The type of task for the input data. One of 'binary', 'multiclass' + or 'multilabel'. + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for computing the ROC curve. Can be one of: + + - None: use the unique values of ``preds`` as thresholds + - int: generate ``thresholds`` number of evenly spaced values between + 0 and 1 as thresholds. + - list of floats: use the values in the list as thresholds. The list + of values should be monotonically increasing. The list will be + converted into a numpy array. + - numpy.ndarray of floats: use the values in the array as thresholds. + The array should be 1d and monotonically increasing. + pos_label : int, default=1 + The label of the positive class. + num_classes : int, optional + The number of classes in the dataset. Required for multiclass tasks. + num_labels : int, optional + The number of labels in the dataset. Required for multilabel tasks. + + Returns + ------- + ROCCurve + A named tuple containing the false positive rate, true positive rate, + and thresholds used to compute the ROC curve. If ``threshold`` is not None, + ``fpr``, ``tpr`` and ``thresholds`` are 1d numpy arrays, else they are lists + of 1d numpy arrays, one for each label. + + Raises + ------ + ValueError + If ``task`` is not one of 'binary', 'multiclass' or 'multilabel'. + AssertionError + If ``task`` is ``multiclass`` and ``num_classes`` is not provided. + AssertionError + If ``task`` is ``multilabel`` and ``num_labels`` is not provided. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics.functional import roc_curve + >>> target = [0, 0, 1, 1] + >>> preds = [0.1, 0.4, 0.35, 0.8] + >>> fpr, tpr, thresholds = roc_curve(target, preds, task="binary") + >>> fpr + array([0. , 0. , 0.5, 0.5, 1. ]) + >>> tpr + array([0. , 0.5, 0.5, 1. , 1. ]) + >>> thresholds + array([1. , 0.8 , 0.4 , 0.35, 0.1 ]) + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics.functional import roc_curve + >>> target = [0, 1, 2] + >>> preds = [[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.02, 0.03, 0.95]] + >>> fpr, tpr, thresholds = roc_curve( + ... target, preds, task="multiclass", num_classes=3 + ... ) + >>> fpr + [array([0. , 0. , 0.5, 1. ]), array([0. , 0. , 0.5, 1. ]), array([0. , 0. , 0.5, 1. ])] + >>> tpr + [array([0., 1., 1., 1.]), array([0., 1., 1., 1.]), array([0., 1., 1., 1.])] + >>> thresholds + [array([1. , 0.9 , 0.05, 0.02]), array([1. , 0.89, 0.05, 0.03]), array([1. 
, 0.95, 0.06, 0.05])] + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics.functional import roc_curve + >>> target = [[1, 1], [0, 1], [1, 0]] + >>> preds = [[0.9, 0.8], [0.2, 0.7], [0.8, 0.3]] + >>> fpr, tpr, thresholds = roc_curve(target, preds, task="multilabel", num_labels=2) + >>> fpr + [array([0. , 0.5, 1. , 1. ]), array([0., 0., 0., 1.])] + >>> tpr + [array([0., 0., 0., 1.]), array([0. , 0.5, 1. , 1. ])] + >>> thresholds + [array([1. , 0.9, 0.8, 0.2]), array([1. , 0.8, 0.7, 0.3])] + + """ # noqa: W505 + _check_thresholds(thresholds) + if task == "binary": + return binary_roc_curve(target, preds, thresholds, pos_label=pos_label) + if task == "multiclass": + assert isinstance( + num_classes, + int, + ), "Number of classes must be a positive integer." + return multiclass_roc_curve(target, preds, num_classes, thresholds) + if task == "multilabel": + assert isinstance( + num_labels, + int, + ), "Number of labels must be a positive integer." + return multilabel_roc_curve(target, preds, num_labels, thresholds) + + raise ValueError( + "Expected argument `task` to be either 'binary', 'multiclass' or " + f"'multilabel', but got {task}", + )
+ +
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/sensitivity.html b/api/_modules/cyclops/evaluate/metrics/functional/sensitivity.html
new file mode 100644
index 000000000..f701a91ae
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/sensitivity.html
@@ -0,0 +1,715 @@
+cyclops.evaluate.metrics.functional.sensitivity - cyclops documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+
+ +
+ +
+
+

Source code for cyclops.evaluate.metrics.functional.sensitivity

+"""Functions for computing sensitivity scores on different input types."""
+
+from typing import Literal, Optional, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.precision_recall import (
+    binary_recall,
+    multiclass_recall,
+    multilabel_recall,
+    recall,
+)
+
+
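+# Sensitivity is another name for recall, so each public function below
+# simply forwards its arguments to the corresponding recall function, e.g.
+# binary_sensitivity(target, preds) == binary_recall(target, preds).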
+
+[docs]
+def binary_sensitivity(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    pos_label: int = 1,
+    threshold: float = 0.5,
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> float:
+    """Compute sensitivity score for binary classification problems.
+
+    Sensitivity is the recall of the positive class in a binary classification
+    problem.
+
+    Parameters
+    ----------
+    target : ArrayLike
+        Ground truth (correct) target values.
+    preds : ArrayLike
+        Predictions as returned by a classifier.
+    pos_label : int, default=1
+        Label of the positive class.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    float
+        Sensitivity score.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import binary_sensitivity
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0, 1, 1, 0]
+    >>> binary_sensitivity(target, preds)
+    0.5
+
+    """
+    return binary_recall(
+        target,
+        preds,
+        pos_label=pos_label,
+        threshold=threshold,
+        zero_division=zero_division,
+    )
+ + + +
+[docs] +def multiclass_sensitivity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute sensitivity score for multiclass classification problems. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_classes : int + Total number of classes in the dataset. + top_k : Optional[int] + If given, and predictions are probabilities/logits, the sensitivity will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + Average to apply. If None, return scores for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false negatives. + - ``macro``: Calculate metrics for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average weighted by support (the number of true instances for each label). + This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there are no true positives or true negatives. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + Sensitivity score. If ``average`` is None, return a numpy.ndarray of + sensitivity scores for each class. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_sensitivity + >>> target = [0, 1, 2, 0, 1, 2] + >>> preds = [ + ... [0.4, 0.1, 0.5], + ... [0.1, 0.8, 0.1], + ... [0.2, 0.2, 0.6], + ... [0.5, 0.3, 0.2], + ... [0.2, 0.5, 0.3], + ... [0.2, 0.2, 0.6], + ... ] + >>> multiclass_sensitivity(target, preds, num_classes=3, average="macro") + 0.8333333333333334 + + """ + return multiclass_recall( + target, + preds, + num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def multilabel_sensitivity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute sensitivity score for multilabel classification tasks. + + The input is expected to be an array-like of shape (N, L), where N is the + number of samples and L is the number of labels. The input is expected to + be a binary array-like, where 1 indicates the presence of a label and 0 + indicates its absence. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + num_labels : int + Number of labels in the dataset. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional, default=None + If given, and predictions are probabilities/logits, the top k scores + will be converted to 1s and the rest will be converted to 0s. Otherwise, + the threshold will be used to convert scores to 0s and 1s. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the sensitivity score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false negatives. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + Sensitivity score. If ``average`` is None, return a numpy.ndarray of + sensitivity scores for each label. + + Raises + ------ + ValueError + If ``average`` is not one of ``micro``, ``macro``, ``weighted`` + or ``None``. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_sensitivity + >>> target = [[1, 0, 1], [0, 0, 0], [0, 1, 1], [1, 1, 1]] + >>> preds = [ + ... [0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05], + ... ] + >>> multilabel_sensitivity(target, preds, num_labels=3) + array([0.5 , 1. , 0.33333333]) + + """ + return multilabel_recall( + target, + preds, + num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def sensitivity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute sensitivity score for different classification tasks. + + Sensitivity is the ratio tp / (tp + fn) where tp is the number of true positives + and fn the number of false negatives. The sensitivity is intuitively the ability + of the classifier to find all the positive samples. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Predictions as returned by a classifier. + task : Literal["binary", "multiclass", "multilabel"] + Task type. + pos_label : int + Label of the positive class. Only used for binary classification. + num_classes : Optional[int] + Number of classes. Only used for multiclass classification. + threshold : float, default=0.5 + Threshold for positive class predictions. + top_k : Optional[int] + Number of highest probability or logits predictions to consider when + computing multiclass or multilabel metrics. Default is None. + num_labels : Optional[int] + Number of labels. Only used for multilabel classification. + average : Literal["micro", "macro", "weighted", None], default=None + Average to apply. If None, return scores for each class/label. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false negatives. + - ``macro``: Calculate metrics for each class/label, and find their + unweighted mean. This does not take class/label imbalance into account. + - ``weighted``: Calculate metrics for each class/label, and find + their average weighted by support (the number of true instances for each + class/label). This alters ``macro`` to account for class/label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there are no true positives or true negatives. + If set to ``warn``, this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + Sensitivity score. If ``average`` is not None or ``task`` is ``binary``, + return a float. Otherwise, return a numpy.ndarray of sensitivity scores + for each class/label. + + Raises + ------ + ValueError + If ``task`` is not one of ``binary``, ``multiclass`` or ``multilabel``. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics.functional import sensitivity + >>> target = [0, 1, 1, 0, 1] + >>> preds = [0.4, 0.2, 0.0, 0.6, 0.9] + >>> sensitivity(target, preds, task="binary") + 0.3333333333333333 + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics.functional import sensitivity + >>> target = [1, 1, 2, 0, 2, 2] + >>> preds = [1, 2, 2, 0, 2, 0] + >>> sensitivity(target, preds, task="multiclass", num_classes=3) + array([1. 
, 0.5 , 0.66666667]) + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics.functional import sensitivity + >>> target = [[1, 0, 1], [0, 1, 0]] + >>> preds = [[0.4, 0.2, 0.0], [0.6, 0.9, 0.1]] + >>> sensitivity(target, preds, task="multilabel", num_labels=3) + array([0., 1., 0.]) + + """ + return recall( + target, + preds, + task, + pos_label=pos_label, + num_classes=num_classes, + threshold=threshold, + top_k=top_k, + num_labels=num_labels, + average=average, + zero_division=zero_division, + )
+ +
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/specificity.html b/api/_modules/cyclops/evaluate/metrics/functional/specificity.html
new file mode 100644
index 000000000..f989d3b0d
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/specificity.html
@@ -0,0 +1,859 @@
+cyclops.evaluate.metrics.functional.specificity - cyclops documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+
+ +
+ +
+
+

Source code for cyclops.evaluate.metrics.functional.specificity

+"""Functions to compute the specificity metric."""
+
+from typing import Literal, Optional, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+from sklearn.metrics._classification import _prf_divide
+
+from cyclops.evaluate.metrics.functional.stat_scores import (
+    _binary_stat_scores_args_check,
+    _binary_stat_scores_format,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_update,
+)
+from cyclops.evaluate.metrics.utils import (
+    _check_average_arg,
+    _get_value_if_singleton_array,
+)
+
+
+def _specificity_reduce(
+    tp: Union[npt.NDArray[np.int_], np.int_],
+    fp: Union[npt.NDArray[np.int_], np.int_],
+    tn: Union[npt.NDArray[np.int_], np.int_],
+    fn: Union[npt.NDArray[np.int_], np.int_],
+    average: Literal["micro", "macro", "weighted", None],
+    zero_division: Literal["warn", 0, 1] = "warn",
+) -> Union[float, npt.NDArray[np.float_]]:
+    """Reduce specificity.
+
+    Parameters
+    ----------
+    tp : numpy.ndarray or int
+        True positives.
+    fp : numpy.ndarray or int
+        False positives.
+    tn : numpy.ndarray or int
+        True negatives.
+    fn : numpy.ndarray or int
+        False negatives.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If None, return the specificity for each class, otherwise return the
+        average specificity. Average options are:
+
+        - ``micro``: Calculate metrics globally by counting the total
+            true positives, false negatives, false positives and true negatives.
+        - ``macro``: Calculate metrics for each label, and find their
+            unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label, and find their
+            average, weighted by support (the number of true instances for
+            each label).
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Sets the value to return when there is a zero division. If set to ``warn``,
+        this acts as 0, but warnings are also raised.
+
+    Returns
+    -------
+    specificity : float or numpy.ndarray (if average is None).
+
+    """
+    numerator = tn
+    denominator = tn + fp
+
+    if average == "micro":
+        numerator = np.array(np.sum(numerator))
+        denominator = np.array(np.sum(denominator))
+
+    score = _prf_divide(
+        np.expand_dims(numerator, axis=0) if numerator.ndim == 0 else numerator,
+        np.expand_dims(denominator, axis=0) if denominator.ndim == 0 else denominator,
+        metric="specificity",
+        modifier="score",
+        average=average,
+        warn_for=("specificity",),
+        zero_division=zero_division,
+    )
+
+    weights = tp + fn if average == "weighted" else None
+
+    if weights is not None and np.sum(weights) == 0:
+        result = np.ones_like(score, dtype=np.float64)
+        if zero_division in ["warn", 0]:
+            result = np.zeros_like(score, dtype=np.float64)
+        return result
+
+    if average is not None and score.ndim != 0 and len(score) > 1:
+        result = np.average(score, weights=weights)
+    else:
+        result = _get_value_if_singleton_array(score)  # type: ignore[assignment]
+
+    return result
+
+
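+# Worked sketch (not library code) of the reduction above, where
+# specificity = tn / (tn + fp).  With two classes, tn = [2, 9] and fp = [2, 1]:
+#
+#     average=None    -> [2/4, 9/10] = [0.5, 0.9]
+#     average="macro" -> mean([0.5, 0.9]) = 0.7
+#     average="micro" -> (2 + 9) / (4 + 10) = 11/14 ~= 0.786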
+
+[docs] +def binary_specificity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", +) -> float: + """Compute specificity for binary classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated targets (predictions) as returned by a classifier. + pos_label : int, default=1 + The label to use for the positive class. + threshold : float, default=0.5 + The threshold to use for converting the predictions to binary + values. Logits will be converted to probabilities using the sigmoid + function. + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Returns + ------- + float + The specificity score. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_specificity + >>> target = [0, 1, 1, 0, 1] + >>> preds = [0.1, 0.9, 0.8, 0.5, 0.4] + >>> binary_specificity(target, preds) + 0.5 + + """ + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + target, preds = _binary_stat_scores_format( + target, + preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, tn, fn = _binary_stat_scores_update(target, preds, pos_label=pos_label) + + score = _specificity_reduce( + tp, + fp, + tn, + fn, + average=None, + zero_division=zero_division, + ) + + return cast(float, score)
+ + + +
+[docs] +def multiclass_specificity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute specificity for multiclass classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated targets (predictions) as returned by a classifier. + num_classes : int + The number of classes in the dataset. + top_k : int, optional + Number of highest probability or logit score predictions considered + to find the correct label. Only works when ``preds`` contain + probabilities/logits. + average : Literal["micro", "macro", "weighted", None], default=None + If None, return the specificity for each class, otherwise return the + average specificity. Average options are: + + - ``micro``: Calculate metrics globally by counting the total true + positives, false negatives, false positives and true negatives. + - ``macro``: Calculate metrics for each class, and find their unweighted + mean. This does not take class imbalance into account. + - ``weighted``: Calculate metrics for each class, and find their + average, weighted by support (the number of true instances for each + label). + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The specificity score. If ``average`` is None, a numpy.ndarray of + shape (``num_classes``,) is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multiclass_specificity + >>> target = [0, 1, 2, 0, 1, 2] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.05, 0.9], + ... ] + >>> multiclass_specificity(target, preds, num_classes=3) + array([1. , 0.75, 1. ]) + + """ + _check_average_arg(average) + + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=num_classes, + top_k=top_k, + ) + + tp, fp, tn, fn = _multiclass_stat_scores_update(target, preds, num_classes) + + return _specificity_reduce( + tp, + fp, + tn, + fn, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def multilabel_specificity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute specificity for multilabel classification tasks. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth (correct) target values. + preds : npt.ArrayLike + Estimated targets (predictions) as returned by a classifier. + num_labels : int + The number of labels in the dataset. + threshold : float, default=0.5 + The threshold value for converting probability or logit scores to + binary. A sigmoid function is first applied to logits to convert them + to probabilities. + top_k : int, optional + Number of highest probability or logit score predictions considered + to find the correct label. Only works when ``preds`` contains + probabilities/logits. + average : Literal["micro", "macro", "weighted", None], default=None + If None, return the specificity for each class, otherwise return the + average specificity. Average options are: + + - ``micro``: Calculate metrics globally by counting the total + true positives, false negatives, false positives and true + negatives. + - ``macro``: Calculate metrics for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their + average, weighted by support (the number of true instances for + each label). + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Returns + ------- + float or numpy.ndarray + The specificity score. If ``average`` is None, a numpy.ndarray of + shape (``num_labels``,) is returned. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import multilabel_specificity + >>> target = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... ] + >>> multilabel_specificity(target, preds, num_labels=3) + array([0.5, 0. , 0.5]) + + """ + _check_average_arg(average) + + target, preds = _multilabel_stat_scores_format( + target, + preds, + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + ) + + tp, fp, tn, fn = _multilabel_stat_scores_update(target, preds, num_labels) + + return _specificity_reduce( + tp, + fp, + tn, + fn, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs] +def specificity( + target: npt.ArrayLike, + preds: npt.ArrayLike, + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", +) -> Union[float, npt.NDArray[np.float_]]: + """Compute specificity score for different classification tasks. + + The specificity is the ratio of true negatives to the sum of true negatives and + false positives. It is also the recall of the negative class. + + Parameters + ---------- + target : ArrayLike + Ground truth (correct) target values. + preds : ArrayLike + Estimated targets as returned by a classifier. + task : Literal["binary", "multiclass", "multilabel"] + Type of classification task. + pos_label : int, default=1 + Label to consider as positive for binary classification tasks. + num_classes : int + Number of classes for the task. Required if ``task`` is ``"multiclass"``. + threshold : float, default=0.5 + Threshold for deciding the positive class. Only used if ``task`` is + ``"binary"`` or ``"multilabel"``. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``. + num_labels : int + Number of labels for the task. Required if ``task`` is ``"multilabel"``. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each label/class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metrics globally by counting the total true + positives, false positives, false negatives and true negatives. + - ``macro``: Calculate metrics for each class/label, and find their + unweighted mean. This does not take label/class imbalance into + account. + - ``weighted``: Calculate metrics for each label/class, and find + their average weighted by support (the number of true instances + for each label/class). This alters ``macro`` to account for + label/class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Returns + ------- + score : float or numpy.ndarray + The specificity score. If ``average`` is ``None`` and ``task`` is not + ``binary``, a numpy.ndarray of shape (``num_classes`` or ``num_labels``,) + is returned. + + Raises + ------ + ValueError + If ``task`` is not one of ``binary``, ``multiclass``, or ``multilabel``. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics.functional import specificity + >>> target = [0, 1, 1, 0, 1] + >>> preds = [0.9, 0.05, 0.05, 0.35, 0.05] + >>> specificity(target, preds, task="binary") + 0.5 + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics.functional import specificity + >>> target = [0, 1, 2, 0, 1] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... ] + >>> specificity(target, preds, task="multiclass", num_classes=3) + array([1. , 0.66666667, 1. ]) + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics.functional import specificity + >>> target = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]] + >>> preds = [ + ... 
[0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... ] + >>> specificity(target, preds, task="multilabel", num_labels=3) + array([0.5, 0. , 0.5]) + + """ + if task == "binary": + return binary_specificity( + target, + preds, + pos_label=pos_label, + threshold=threshold, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return multiclass_specificity( + target, + preds, + num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return multilabel_specificity( + target, + preds, + num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + + raise ValueError( + f"Task {task} is not supported, expected one of 'binary', 'multiclass'" + " or 'multilabel'", + )
+ +
+
+
+ +
+ +
+
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/functional/stat_scores.html b/api/_modules/cyclops/evaluate/metrics/functional/stat_scores.html
new file mode 100644
index 000000000..7892d8453
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/functional/stat_scores.html
@@ -0,0 +1,1248 @@
+cyclops.evaluate.metrics.functional.stat_scores - cyclops documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+
+ +
+ +
+
+

Source code for cyclops.evaluate.metrics.functional.stat_scores

+"""Functions for computing stat scores for different types of inputs.
+
+The stat scores are the number of true positives, false positives, true negatives, and
+false negatives. Binary, multiclass and multilabel data are supported, including logits
+and probabilities.
+
+"""
+
+from typing import Literal, Optional, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+import scipy as sp
+from sklearn.metrics import multilabel_confusion_matrix
+from sklearn.preprocessing import label_binarize
+
+from cyclops.evaluate.metrics.utils import (
+    check_topk,
+    common_input_checks_and_format,
+    select_topk,
+    sigmoid,
+)
+
+
+def _stat_scores_compute(
+    tp: Union[npt.NDArray[np.int_], np.int_],
+    fp: Union[npt.NDArray[np.int_], np.int_],
+    tn: Union[npt.NDArray[np.int_], np.int_],
+    fn: Union[npt.NDArray[np.int_], np.int_],
+    classwise: Optional[bool] = True,
+) -> npt.NDArray[np.int_]:
+    """Compute true positives, false positives, true negatives and false negatives.
+
+    Concatenates the results in a single array, along with the support.
+
+    Parameters
+    ----------
+    tp : numpy.ndarray or numpy.int_
+        True positives.
+    fp : numpy.ndarray or numpy.int_
+        False positives.
+    tn : numpy.ndarray or numpy.int_
+        True negatives.
+    fn : numpy.ndarray or numpy.int_
+        False negatives.
+    classwise : bool, default=True
+        If True, compute the stat scores for each class separately. Otherwise,
+        compute the stat scores for the whole array.
+
+    Returns
+    -------
+    The stat scores.
+
+    """
+    if not classwise:
+        tp = tp.sum()
+        fp = fp.sum()
+        tn = tn.sum()
+        fn = fn.sum()
+
+    if tp.ndim == 1 and tp.size == 1:  # 1D array with 1 element
+        stats = [tp, fp, tn, fn, tp + fn]
+    else:
+        stats = [
+            np.expand_dims(tp, axis=-1),
+            np.expand_dims(fp, axis=-1),
+            np.expand_dims(tn, axis=-1),
+            np.expand_dims(fn, axis=-1),
+            np.expand_dims(tp, axis=-1) + np.expand_dims(fn, axis=-1),  # support
+        ]
+
+    output: npt.NDArray[np.int_] = np.concatenate(stats, axis=-1)
+
+    return output
+
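+# Illustrative sketch (not library code) of the classwise layout produced by
+# _stat_scores_compute above: one row per class with columns
+# [tp, fp, tn, fn, support], where support = tp + fn.  For
+# tp=[1, 2], fp=[0, 1], tn=[3, 2], fn=[0, 1] the result is
+#
+#     array([[1, 0, 3, 0, 1],
+#            [2, 1, 2, 1, 3]])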
+
+def _stat_scores_from_confmat(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    labels: Optional[npt.ArrayLike] = None,
+) -> Tuple[
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+]:
+    """Compute true positives, false positives, true negatives and false negatives.
+
+    Parameters
+    ----------
+    target : numpy.ndarray
+        Ground truth.
+    preds : numpy.ndarray
+        Predictions.
+    labels : numpy.ndarray, default=None
+        The set of labels to include.
+
+    Returns
+    -------
+    Tuple of true positives, false positives, true negatives and false negatives.
+
+    """
+    confmat = multilabel_confusion_matrix(
+        target,
+        preds,
+        labels=labels,
+    )  # shape: (n_classes, 2, 2)
+
+    tn = confmat[:, 0, 0]  # shape: (n_classes,)
+    fn = confmat[:, 1, 0]
+    tp = confmat[:, 1, 1]
+    fp = confmat[:, 0, 1]
+
+    return (
+        tp.astype(np.int_),
+        fp.astype(np.int_),
+        tn.astype(np.int_),
+        fn.astype(np.int_),
+    )
+
+
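The indexing above follows scikit-learn's convention: ``multilabel_confusion_matrix`` returns one 2x2 matrix per class, laid out as [[tn, fp], [fn, tp]]. A quick standalone check:

    from sklearn.metrics import multilabel_confusion_matrix

    confmat = multilabel_confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0], labels=[1])
    print(confmat[0])  # [[2 0]
                       #  [1 1]]  -> tn=2, fp=0, fn=1, tp=1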
+def _binary_stat_scores_args_check(threshold: float, pos_label: int) -> None:
+    """Check the arguments for binary stat scores.
+
+    Parameters
+    ----------
+    threshold : float
+        Threshold for converting logits and probability predictions to
+        binary values {0, 1}.
+    pos_label : int
+        The positive label to report.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If the threshold is not in [0, 1] or if the pos_label is not 0 or 1.
+
+    """
+    if not 0.0 <= threshold <= 1.0:
+        raise ValueError(f"Threshold must be in [0, 1], got {threshold}.")
+
+    if pos_label not in [0, 1]:
+        raise ValueError(f"Positive label must be 0 or 1, got {pos_label}.")
+
+
+def _binary_stat_scores_format(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    threshold: float,
+    pos_label: int,
+) -> Tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]:
+    """Format the input for computing binary stat scores.
+
+    Checks that ``target`` and ``preds`` are binary and have the same shape.
+    If ``preds`` is in continuous form, it is binarized using the given threshold.
+    Logits are converted to probabilities using the sigmoid function.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    threshold : float
+        Threshold for converting logits and probability predictions to
+        binary values {0, 1}.
+    pos_label : int
+        The positive label to report.
+
+    Returns
+    -------
+    Tuple[numpy.ndarray, numpy.ndarray]
+        The formatted target and preds as numpy.ndarray.
+
+    Raises
+    ------
+    ValueError
+        If ``target`` is not binary, or ``preds`` is neither binary nor
+        continuous.
+    ValueError
+        If ``target`` or a label-array ``preds`` contains values other than
+        0 and 1.
+
+    """
+    target, preds, type_target, type_preds = common_input_checks_and_format(
+        target,
+        preds,
+    )
+
+    if type_target != "binary":
+        raise ValueError(f"The argument `target` must be binary, got {type_target}")
+
+    if type_preds == "continuous-multioutput":
+        assert preds.shape[-1] == 2, (
+            "The argument `preds` must either be a 1D array or a 2D array with "
+            f"exactly 2 columns, got an array with shape: {preds.shape}."
+        )
+        preds = preds[
+            ...,
+            pos_label,
+        ]  # keep only the probabilities for the positive class
+        type_preds = "continuous"
+
+    if type_preds not in ["binary", "continuous"]:
+        raise ValueError(
+            f"The arguments `preds` must be binary or continuous, got {type_preds}",
+        )
+
+    # check that `target` contains only binary values
+    unique_values = np.unique(target)
+    check = any((unique_values != 0) & (unique_values != 1))
+    if check:
+        raise ValueError(
+            f"Detected the following values in `target`: {unique_values} but"
+            f" expected only the following values {[0,1]}.",
+        )
+
+    # If preds is label array, also check that it only contains [0,1] values
+    if type_preds != "continuous":
+        unique_values = np.unique(preds)
+        if any((unique_values != 0) & (unique_values != 1)):
+            raise ValueError(
+                f"Detected the following values in `preds`: {unique_values} but"
+                f" expected only [0,1] values since `preds` is a label array.",
+            )
+
+    if type_preds == "continuous":
+        if not np.all(np.logical_and(preds >= 0.0, preds <= 1.0)):
+            preds = sigmoid(preds)  # convert logits to probabilities
+
+        preds = preds >= threshold  # binarize the predictions
+
+    return target.astype(np.int_), preds.astype(np.int_)
+
+
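In short, any predictions outside [0, 1] are treated as logits: they pass through a sigmoid before thresholding. A standalone numpy sketch of that path (the code above uses the module's own ``sigmoid`` helper):

    import numpy as np

    preds = np.array([-2.0, 0.3, 1.5])  # values outside [0, 1] are logits
    probs = 1.0 / (1.0 + np.exp(-preds))  # sigmoid
    print((probs >= 0.5).astype(int))  # [0 1 1]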
+def _binary_stat_scores_update(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    pos_label: int = 1,
+) -> Tuple[
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+]:
+    """Compute the statistics for binary inputs.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    pos_label : int, default=1
+        The positive label to report. Can be either 0, 1.
+
+    Returns
+    -------
+    Tuple[Union[numpy.ndarray, numpy.int_], Union[numpy.ndarray, numpy.int_],
+    Union[numpy.ndarray, numpy.int_], Union[numpy.ndarray, numpy.int_]]
+        The true positives, false positives, true negatives and false negatives.
+
+    Raises
+    ------
+    ValueError
+        If the target and preds are not numeric.
+
+    """
+    return _stat_scores_from_confmat(target, preds, labels=[pos_label])
+
+
+
+[docs] +def binary_stat_scores( + target: npt.ArrayLike, + preds: npt.ArrayLike, + pos_label: int = 1, + threshold: float = 0.5, +) -> npt.NDArray[np.int_]: + """Compute the stat scores for binary inputs. + + Parameters + ---------- + target : npt.ArrayLike + Ground truth. + preds : npt.ArrayLike + Predictions. + pos_label : int, default=1 + The label to use for the positive class. + threshold : float, default=0.5 + The threshold to use for converting the predictions to binary + values. Logits will be converted to probabilities using the sigmoid + function. + + Returns + ------- + numpy.ndarray + The true positives, false positives, true negatives and false negatives + and support in that order. + + Raises + ------ + ValueError + If the threshold is not in [0, 1] or if the pos_label is not 0 or 1. + + Examples + -------- + >>> from cyclops.evaluate.metrics.functional import binary_stat_scores + >>> target = [0, 1, 1, 0] + >>> preds = [0, 1, 0, 0] + >>> binary_stat_scores(target, preds) + array([1, 0, 2, 1, 2]) + + """ + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + target, preds = _binary_stat_scores_format( + target=target, + preds=preds, + threshold=threshold, + pos_label=pos_label, + ) + + tp, fp, tn, fn = _binary_stat_scores_update( + target=target, + preds=preds, + pos_label=pos_label, + ) + + return _stat_scores_compute(tp=tp, fp=fp, tn=tn, fn=fn, classwise=True)
+
+
+
+def _multiclass_stat_scores_format(  # noqa: C901
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_classes: int,
+    top_k: Optional[int] = 1,
+) -> Tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]:
+    """Format the target and preds for multiclass inputs.
+
+    Checks that the target and preds are of the same length and that the target
+    and preds are of the correct shape. Converts the target and preds to the
+    correct type.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    num_classes : int
+        The total number of classes for the problem.
+    top_k : int
+        The number of top predictions to consider when computing the statistics.
+        Defaults to 1.
+
+    Returns
+    -------
+    Tuple[numpy.ndarray, numpy.ndarray]
+        The formatted target and preds.
+
+    Raises
+    ------
+    ValueError
+        If the target is not in binary or multiclass format.
+    ValueError
+        If more unique values are detected in `target` than `num_classes`.
+    ValueError
+        If the predictions are not in multiclass or continuous-multioutput
+        (logits or probabilities) format.
+    ValueError
+        If more unique values are detected in `preds` than `num_classes`.
+    ValueError
+        If `preds` contains probabilities that do not sum to 1 over classes.
+
+    """
+    # convert target and preds to numpy arrays
+    formatted = common_input_checks_and_format(target, preds)
+    target: npt.NDArray[np.int_] = formatted[0]  # type: ignore[no-redef]
+    preds: npt.NDArray[np.int_] = formatted[1]  # type: ignore[no-redef]
+    type_target: str = formatted[2]
+    type_preds: str = formatted[3]
+
+    # check the target
+    if type_target not in ["binary", "multiclass"]:
+        raise ValueError(
+            f"The argument `target` must be binary or multiclass, got {type_target}.",
+        )
+
+    num_implied_classes = len(np.unique(target))
+    if num_implied_classes > num_classes:
+        raise ValueError(
+            "Detected more unique values in `target` than `num_classes`. Expected only "
+            f"{num_classes} but found {num_implied_classes} in `target`.",
+        )
+
+    # check the preds
+    if type_preds == "binary" and num_classes > 2:
+        type_preds = "multiclass"
+    if type_preds == "continuous" and target.size != 1:  # type: ignore[union-attr]
+        raise ValueError(
+            "Expected a single element in `target` when `preds` is an array of "
+            f"continuous values, but found {target.size} elements in `target`.",  # type: ignore[union-attr] # noqa: E501
+        )
+    if type_preds not in ["multiclass", "continuous-multioutput", "continuous"]:
+        raise ValueError(
+            f"The argument `preds` must be multiclass or continuous multioutput, "
+            f"got {type_preds}.",
+        )
+
+    if type_preds == "multiclass":
+        num_implied_classes = len(np.unique(preds))
+        if num_implied_classes > num_classes:
+            raise ValueError(
+                "Detected more unique values in `preds` than `num_classes`. Expected "
+                f"only {num_classes} but found {num_implied_classes} in `preds`.",
+            )
+
+    # check top_k
+    if top_k is not None:
+        check_topk(top_k, type_preds, type_target, num_classes)
+
+    # handle probabilities and logits
+    if type_preds in ["continuous-multioutput", "continuous"]:
+        if not np.all(np.logical_and(preds >= 0.0, preds <= 1.0)):  # type: ignore
+            preds = sp.special.softmax(
+                preds,
+                axis=-1,
+            )  # convert logits to probabilities
+
+        if not np.allclose(1, preds.sum(axis=-1)):  # type: ignore[union-attr]
+            raise ValueError(
+                "``preds`` need to be probabilities for multiclass problems"
+                " i.e. they should sum up to 1.0 over classes",
+            )
+
+    # convert `preds` and `target` to multilabel-indicator format
+    preds = select_topk(preds, top_k or 1)
+    target = label_binarize(target, classes=np.arange(num_classes))
+
+    if type_preds == "continuous":
+        # target shape is (1, num_classes), remove the first dimension
+        target = target.squeeze(0)  # type: ignore[union-attr]
+
+    return target.astype(np.int_), preds.astype(np.int_)  # type: ignore[union-attr]
+
+
+def _multiclass_stat_scores_update(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_classes: int,
+) -> Tuple[
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+]:
+    """Update the stat scores for multiclass inputs.
+
+    Parameters
+    ----------
+    target : numpy.ndarray
+        Ground truth.
+    preds : numpy.ndarray
+        Predictions.
+    num_classes : int
+        The total number of classes for the problem.
+
+    Returns
+    -------
+    Tuple[Union[numpy.ndarray, numpy.int_], Union[numpy.ndarray, numpy.int_],
+    Union[numpy.ndarray, numpy.int_], Union[numpy.ndarray, numpy.int_]]
+        The true positives, false positives, true negatives and false negatives.
+
+    Raises
+    ------
+    ValueError
+        If the input target and preds are not numeric.
+
+    """
+    return _stat_scores_from_confmat(target, preds, labels=np.arange(num_classes))
+
+
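The top-k conversion above turns each probability row into a k-hot indicator. A hedged numpy equivalent of what ``select_topk`` does for ``top_k=2`` (the real helper lives in ``cyclops.evaluate.metrics.utils``):

    import numpy as np

    probs = np.array([[0.2, 0.5, 0.3]])
    top_k = 2
    idx = np.argsort(probs, axis=-1)[..., -top_k:]  # indices of the k largest entries
    k_hot = np.zeros_like(probs, dtype=int)
    np.put_along_axis(k_hot, idx, 1, axis=-1)
    print(k_hot)  # [[0 1 1]]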
+[docs]
+def multiclass_stat_scores(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_classes: int,
+    top_k: Optional[int] = None,
+    classwise: Optional[bool] = True,
+) -> npt.NDArray[np.int_]:
+    """Compute stat scores for multiclass targets.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        The ground truth values.
+    preds : npt.ArrayLike
+        The predictions. If determined to be in continuous format, will be
+        converted to multiclass using the ``top_k`` parameter.
+    num_classes : int
+        The total number of classes for the problem.
+    top_k : Optional[int], default=None
+        The number of top predictions to consider when computing the
+        stat scores. If ``None``, it is assumed to be 1.
+    classwise : bool, default=True
+        Whether to return the stat scores for each class or sum over all
+        classes.
+
+    Returns
+    -------
+    numpy.ndarray
+        The number of true positives, false positives, true negatives, false
+        negatives and support. If ``classwise`` is ``True``, the shape is
+        ``(num_classes, 5)``. Otherwise, the shape is ``(5,)``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import multiclass_stat_scores
+    >>> target = [0, 1, 2, 2, 2]
+    >>> preds = [0, 2, 1, 2, 0]
+    >>> multiclass_stat_scores(target, preds, num_classes=3)
+    array([[1, 1, 3, 0, 1],
+           [0, 1, 3, 1, 1],
+           [1, 1, 1, 2, 3]])
+
+    """
+    target, preds = _multiclass_stat_scores_format(
+        target=target,
+        preds=preds,
+        num_classes=num_classes,
+        top_k=top_k,
+    )
+
+    tp, fp, tn, fn = _multiclass_stat_scores_update(
+        target=target,
+        preds=preds,
+        num_classes=num_classes,
+    )
+
+    return _stat_scores_compute(tp=tp, fp=fp, tn=tn, fn=fn, classwise=classwise)
+
+
+
+def _multilabel_stat_scores_format(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_labels: int,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+) -> Tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]:
+    """Format the target and preds for multilabel inputs.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    num_labels : int
+        The total number of labels for the problem.
+    threshold : float, default=0.5
+        Threshold value for binarizing the predictions.
+    top_k : int, default=None
+        The number of top predictions to consider when computing the statistics.
+
+    Returns
+    -------
+    Tuple[numpy.ndarray, numpy.ndarray]
+        The formatted target and preds.
+
+    Raises
+    ------
+    ValueError
+        If the target is not in multilabel format.
+    ValueError
+        If the predictions are not in multilabel or continuous-multioutput
+        (probabilities or logits) format.
+    ValueError
+        If the number of labels implied by the predictions is inconsistent with
+        ``num_labels``.
+
+    """
+    target, preds, type_target, type_preds = common_input_checks_and_format(
+        target,
+        preds,
+    )
+
+    # allow single-sample inputs
+    if type_preds in ["continuous", "binary"] and type_target == "binary":
+        preds = np.expand_dims(preds, axis=0)
+        type_preds = (
+            "continuous-multioutput"
+            if type_preds == "continuous"
+            else "multilabel-indicator"
+        )
+    if type_target == "binary":
+        target = np.expand_dims(target, axis=0)
+        type_target = "multilabel-indicator"
+
+    # validate input type
+    if not type_target == "multilabel-indicator":
+        raise ValueError(
+            f"The argument `target` must be multilabel-indicator, got {type_target}.",
+        )
+
+    if type_preds not in ["multilabel-indicator", "continuous-multioutput"]:
+        raise ValueError(
+            f"The argument `preds` must be multilabel-indicator, or continuous "
+            f"multioutput, got {type_preds}.",
+        )
+
+    implied_num_labels = preds.shape[1]
+    if implied_num_labels != num_labels:
+        raise ValueError(
+            f"Detected {implied_num_labels} labels in `preds` but expected "
+            f"{num_labels}.",
+        )
+
+    if top_k is not None:
+        check_topk(top_k, type_preds, type_target, num_labels)
+
+    if type_preds == "continuous-multioutput" and not np.all(
+        np.logical_and(preds >= 0.0, preds <= 1.0),
+    ):
+        preds = sigmoid(preds)
+
+    if type_preds == "continuous-multioutput":
+        preds = select_topk(preds, top_k) if top_k is not None else preds >= threshold
+
+    return target.astype(np.int_), preds.astype(np.int_)
+
+
+def _multilabel_stat_scores_update(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_labels: int,
+) -> Tuple[
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+    Union[npt.NDArray[np.int_], np.int_],
+]:
+    """Update the stat scores for multilabel inputs.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    num_labels : int
+        The total number of labels for the problem.
+
+    Returns
+    -------
+    numpy.ndarray
+        The number of true positives, false positives, true negatives and false
+        negatives.
+
+    Raises
+    ------
+    ValueError
+        If the input target and preds are not numeric.
+
+    """
+    return _stat_scores_from_confmat(target, preds, labels=np.arange(num_labels))
+
+
+[docs]
+def multilabel_stat_scores(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    num_labels: int,
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    labelwise: Optional[bool] = False,
+) -> npt.NDArray[np.int_]:
+    """Compute the stat scores for multilabel inputs.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    num_labels : int
+        The total number of labels for the problem.
+    threshold : float, default=0.5
+        Threshold value for binarizing predictions that are probabilities or
+        logits. A sigmoid function is applied if the predictions are logits.
+    top_k : int, default=None
+        The number of top predictions to consider when computing the statistics.
+    labelwise : bool, default=False
+        Whether to return the stat scores for each label or sum over all labels.
+
+    Returns
+    -------
+    numpy.ndarray
+        The number of true positives, false positives, true negatives and false
+        negatives and the support. The shape of the array is ``(num_labels, 5)``
+        if ``labelwise=True`` and ``(5,)`` otherwise.
+
+    Raises
+    ------
+    ValueError
+        If ``threshold`` is not between ``0`` and ``1``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics.functional import multilabel_stat_scores
+    >>> target = [[0, 1, 1], [1, 0, 1]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]]
+    >>> multilabel_stat_scores(target, preds, num_labels=3)
+    array([4, 0, 2, 0, 4])
+
+    """
+    _binary_stat_scores_args_check(threshold=threshold, pos_label=1)
+
+    target, preds = _multilabel_stat_scores_format(
+        target=target,
+        preds=preds,
+        num_labels=num_labels,
+        threshold=threshold,
+        top_k=top_k,
+    )
+
+    tp, fp, tn, fn = _multilabel_stat_scores_update(
+        target=target,
+        preds=preds,
+        num_labels=num_labels,
+    )
+
+    return _stat_scores_compute(tp=tp, fp=fp, tn=tn, fn=fn, classwise=labelwise)
+ + + +
+[docs]
+def stat_scores(
+    target: npt.ArrayLike,
+    preds: npt.ArrayLike,
+    task: Literal["binary", "multiclass", "multilabel"],
+    pos_label: int = 1,
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    classwise: Optional[bool] = True,
+    top_k: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    labelwise: Optional[bool] = False,
+) -> npt.NDArray[np.int_]:
+    """Compute stat scores for binary, multiclass or multilabel problems.
+
+    This function acts as an entry point to the specialized functions for each
+    task.
+
+    Parameters
+    ----------
+    target : npt.ArrayLike
+        Ground truth.
+    preds : npt.ArrayLike
+        Predictions.
+    task : Literal["binary", "multiclass", "multilabel"]
+        The task type. Can be either ``binary``, ``multiclass`` or
+        ``multilabel``.
+    pos_label : int, default=1
+        The positive label to report. Only used for binary tasks.
+    threshold : float, default=0.5
+        The threshold to use for binarizing the predictions if logits or
+        probabilities are provided. If logits are provided, a sigmoid function
+        is applied prior to binarization. Used for binary and multilabel tasks.
+    num_classes : int
+        The number of classes for the problem. Required for multiclass tasks.
+    classwise : bool, default=True
+        Whether to return the stat scores for each class or sum over all
+        classes. Only used for multiclass tasks.
+    top_k : int, default=None
+        The number of top predictions to consider when computing the statistics.
+        If ``None``, ``top_k`` is set to 1. Used for multiclass and multilabel
+        tasks.
+    num_labels : int
+        The number of labels. Only used for multilabel tasks.
+    labelwise : bool, default=False
+        Whether to compute the stat scores labelwise. Only used for multilabel
+        tasks.
+
+    Returns
+    -------
+    scores : numpy.ndarray
+        The stat scores - true positives, false positives, true negatives,
+        false negatives and support. For binary tasks, the shape is (5,).
+        For multiclass tasks, the shape is (n_classes, 5) if ``classwise`` is
+        True, otherwise (5,). For multilabel tasks, the shape is (n_labels, 5)
+        if ``labelwise`` is True, otherwise (5,).
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics.functional import stat_scores
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0, 1, 0, 0]
+    >>> stat_scores(target, preds, task="binary")
+    array([1, 0, 2, 1, 2])
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics.functional import stat_scores
+    >>> target = [0, 1, 2, 2, 2]
+    >>> preds = [0, 2, 1, 2, 0]
+    >>> stat_scores(target, preds, task="multiclass", num_classes=3)
+    array([[1, 1, 3, 0, 1],
+           [0, 1, 3, 1, 1],
+           [1, 1, 1, 2, 3]])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics.functional import stat_scores
+    >>> target = [[0, 1, 1], [1, 0, 1]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]]
+    >>> stat_scores(target, preds, task="multilabel", num_labels=3)
+    array([4, 0, 2, 0, 4])
+
+    """
+    if task == "binary":
+        scores = binary_stat_scores(
+            target,
+            preds,
+            pos_label=pos_label,
+            threshold=threshold,
+        )
+    elif task == "multiclass":
+        assert (
+            isinstance(num_classes, int) and num_classes > 0
+        ), "Number of classes must be a positive integer."
+        scores = multiclass_stat_scores(
+            target,
+            preds,
+            num_classes,
+            classwise=classwise,
+            top_k=top_k,
+        )
+    elif task == "multilabel":
+        assert (
+            isinstance(num_labels, int) and num_labels > 0
+        ), "Number of labels must be a positive integer."
+ scores = multilabel_stat_scores( + target, + preds, + num_labels, + labelwise=labelwise, + threshold=threshold, + top_k=top_k, + ) + else: + raise ValueError( + f"Unsupported task: {task}, expected one of 'binary', 'multiclass' or " + f"'multilabel'.", + ) + + return scores
+ +
+ \ No newline at end of file diff --git a/api/_modules/cyclops/evaluate/metrics/precision_recall.html b/api/_modules/cyclops/evaluate/metrics/precision_recall.html new file mode 100644 index 000000000..93095c95d --- /dev/null +++ b/api/_modules/cyclops/evaluate/metrics/precision_recall.html @@ -0,0 +1,1126 @@ +cyclops.evaluate.metrics.precision_recall - cyclops documentation

Source code for cyclops.evaluate.metrics.precision_recall

+"""Classes for computing precision and recall metrics."""
+
+from typing import Literal, Optional, Type, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.precision_recall import (
+    _precision_recall_reduce,
+)
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.stat_scores import (
+    BinaryStatScores,
+    MulticlassStatScores,
+    MultilabelStatScores,
+)
+from cyclops.evaluate.metrics.utils import _check_average_arg
+
+
+
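All of the classes in this module reduce accumulated stat scores to a score: precision = tp / (tp + fp) and recall = tp / (tp + fn), with ``zero_division`` deciding the 0/0 case. A minimal sketch of that reduction (the real logic lives in ``_precision_recall_reduce``):

    import numpy as np

    tp, fp, fn = np.array([2.0]), np.array([1.0]), np.array([0.0])
    print(tp / (tp + fp))  # precision: [0.66666667]
    print(tp / (tp + fn))  # recall: [1.]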
+[docs] +class BinaryPrecision(BinaryStatScores, registry_key="binary_precision"): + """Compute the precision score for binary classification tasks. + + Parameters + ---------- + pos_label : int, default=1 + The label of the positive class. + threshold : float, default=0.5 + Threshold for deciding the positive class. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryPrecision + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> metric = BinaryPrecision() + >>> metric(target, preds) + 0.6666666666666666 + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]] + >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.6666666666666666 + + """ + + name: str = "Precision Score" + + def __init__( + self, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(threshold=threshold, pos_label=pos_label) + self.zero_division = zero_division + + def compute(self) -> float: # type: ignore[override] + """Compute the precision score from the state.""" + tp, fp, _, fn = self._final_state() + score = _precision_recall_reduce( + tp=tp, + fp=fp, + fn=fn, + metric="precision", + average=None, + zero_division=self.zero_division, + ) + return cast(float, score)
+ + + +
+[docs] +class MulticlassPrecision(MulticlassStatScores, registry_key="multiclass_precision"): + """Compute the precision score for multiclass classification tasks. + + Parameters + ---------- + num_classes : int + Number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the score for each class. Otherwise, use one of the + following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false positives. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take class imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassPrecision + >>> target = [0, 1, 2, 0] + >>> preds = [0, 2, 1, 0] + >>> metric = MulticlassPrecision(num_classes=3, average=None) + >>> metric(target, preds) + array([1., 0., 0.]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([1., 0., 0.]) + + """ + + name: str = "Precision Score" + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, top_k=top_k, classwise=True) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[npt.NDArray[np.float_], float]: # type: ignore[override] + """Compute the precision score from the state.""" + tp, fp, _, fn = self._final_state() + return _precision_recall_reduce( + tp=tp, + fp=fp, + fn=fn, + metric="precision", + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs] +class MultilabelPrecision(MultilabelStatScores, registry_key="multilabel_precision"): + """Compute the precision score for multilabel classification tasks. + + Parameters + ---------- + num_labels : int + Number of labels for the task. + threshold : float, default=0.5 + Threshold for deciding the positive class. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the precision score for each label. Otherwise, + use one of the following options to compute the average precision score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false positives. + - ``macro``: Calculate metric for each label, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each label, and find their + average weighted by the support (the number of true instances + for each label). This alters "macro" to account for label imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MultilabelPrecision + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.2, 0.8]] + >>> metric = MultilabelPrecision(num_labels=2, average=None) + >>> metric(target, preds) + array([0., 1.]) + >>> metric.reset_state() + >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]] + >>> preds = [[[0.1, 0.7], [0.2, 0.8]], [[0.5, 0.9], [0.3, 0.4]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([1., 1.]) + + """ + + name: str = "Precision Score" + + def __init__( + self, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + labelwise=True, + ) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[npt.NDArray[np.float_], float]: # type: ignore[override] + """Compute the precision score from the state.""" + tp, fp, _, fn = self._final_state() + return _precision_recall_reduce( + tp=tp, + fp=fp, + fn=fn, + metric="precision", + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs] +class Precision(Metric, registry_key="precision", force_register=True): + """Compute the precision score for different types of classification tasks. + + This metric can be used for binary, multiclass, and multilabel classification + tasks. It creates the appropriate metric based on the ``task`` parameter. + + Parameters + ---------- + task : Literal["binary", "multiclass", "multilabel"] + Type of classification task. + pos_label : int, default=1 + Label to consider as positive for binary classification tasks. + num_classes : int, default=None + Number of classes for the task. Required if ``task`` is ``"multiclass"``. + threshold : float, default=0.5 + Threshold for deciding the positive class. Only used if ``task`` is + ``"binary"`` or ``"multilabel"``. + top_k : int, optional + If given, and predictions are probabilities/logits, the precision will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``. + num_labels : int, default=None + Number of labels for the task. Required if ``task`` is ``"multilabel"``. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the precision score for each label/class. Otherwise, + use one of the following options to compute the average precision score: + + - ``micro``: Calculate metrics globally by counting the total true + positives and false positives. + - ``macro``: Calculate metrics for each class/label, and find their + unweighted mean. This does not take label/class imbalance into + account. + - ``weighted``: Calculate metrics for each label/class, and find + their average weighted by support (the number of true instances + for each label/class). This alters ``macro`` to account for + label/class imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + (binary) + >>> from cyclops.evaluate.metrics import Precision + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 1] + >>> metric = Precision(task="binary") + >>> metric(target, preds) + 0.6666666666666666 + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]] + >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.6666666666666666 + + (multiclass) + >>> from cyclops.evaluate.metrics import Precision + >>> target = [0, 1, 2, 0] + >>> preds = [0, 2, 1, 0] + >>> metric = Precision(task="multiclass", num_classes=3) + >>> metric(target, preds) + array([1., 0., 0.]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([1., 0., 0.]) + + (multilabel) + >>> from cyclops.evaluate.metrics import Precision + >>> target = [[0, 1], [1, 1]] + >>> preds = [[0.1, 0.9], [0.2, 0.8]] + >>> metric = Precision(task="multilabel", num_labels=2) + >>> metric.update_state(target, preds) + >>> metric.compute() + array([0., 1.]) + >>> metric.reset_state() + >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]] + >>> preds = [[[0.1, 0.7], [0.2, 0.8]], [[0.5, 0.9], [0.3, 0.4]]] + >>> for t, p in zip(target, preds): + ... 
metric.update_state(t, p) + >>> metric.compute() + array([1., 1.]) + + """ + + name: str = "Precision Score" + + def __new__( # type: ignore # mypy expects a subclass of Precision + cls: Type[Metric], + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> Metric: + """Create a task-specific precision metric.""" + if task == "binary": + return BinaryPrecision( + threshold=threshold, + pos_label=pos_label, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return MulticlassPrecision( + num_classes=num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return MultilabelPrecision( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + raise ValueError( + f"Task '{task}' not supported, expected 'binary', 'multiclass' or " + f"'multilabel'.", + )
+ + + +
+[docs] +class BinaryRecall(BinaryStatScores, registry_key="binary_recall"): + """Computes recall score for binary classification. + + Parameters + ---------- + pos_label : int, default=1 + Label of the positive class. + threshold : float, default=0.5 + Threshold for deciding the positive class. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryRecall + >>> target = [0, 1, 0, 1] + >>> preds = [0, 1, 1, 0] + >>> metric = BinaryRecall() + >>> metric(target, preds) + 0.5 + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]] + >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + 0.5 + + """ + + name: str = "Recall Score" + + def __init__( + self, + pos_label: int = 1, + threshold: float = 0.5, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(threshold=threshold, pos_label=pos_label) + self.zero_division = zero_division + + def compute(self) -> float: # type: ignore[override] + """Compute the recall score from the state.""" + tp, fp, _, fn = self._final_state() + score = _precision_recall_reduce( + tp=tp, + fp=fp, + fn=fn, + metric="recall", + average=None, + zero_division=self.zero_division, + ) + return cast(float, score)
+ + + +
+[docs] +class MulticlassRecall(MulticlassStatScores, registry_key="multiclass_recall"): + """Compute the recall score for multiclass classification tasks. + + Parameters + ---------- + num_classes : int + Number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the recall will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the recall score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false negatives. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class + imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassRecall + >>> target = [0, 1, 2, 0] + >>> preds = [2, 0, 2, 1] + >>> metric = MulticlassRecall(num_classes=3) + >>> metric(target, preds) + array([0., 0., 1.]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.66666667, 0. , 0. ]) + + """ + + name: str = "Recall Score" + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, top_k=top_k, classwise=True) + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[npt.NDArray[np.float_], float]: # type: ignore[override] + """Compute the recall score from the state.""" + tp, fp, _, fn = self._final_state() + return _precision_recall_reduce( + tp=tp, + fp=fp, + fn=fn, + metric="recall", + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs]
+class MultilabelRecall(MultilabelStatScores, registry_key="multilabel_recall"):
+    """Compute the recall score for multilabel classification tasks.
+
+    Parameters
+    ----------
+    num_labels : int
+        Number of labels in the dataset.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the recall will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metric globally from the total count of true
+          positives and false negatives.
+        - ``macro``: Calculate metric for each label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metric for each label, and find their
+          average weighted by the support (the number of true instances
+          for each label). This alters "macro" to account for label
+          imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelRecall
+    >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]]
+    >>> metric = MultilabelRecall(num_labels=4)
+    >>> metric(target, preds)
+    array([0., 1., 1., 0.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 0, 1], [0, 0, 1, 1]], [[0, 1, 0, 1], [0, 0, 1, 1]]]
+    >>> preds = [
+    ...     [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]],
+    ...     [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0., 1., 1., 0.])
+
+    """
+
+    name: str = "Recall Score"
+
+    def __init__(
+        self,
+        num_labels: int,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(
+            num_labels=num_labels,
+            threshold=threshold,
+            top_k=top_k,
+            labelwise=True,
+        )
+        self.average = average
+        self.zero_division = zero_division
+
+    def compute(self) -> Union[npt.NDArray[np.float_], float]:  # type: ignore[override]
+        """Compute the recall score from the state."""
+        tp, fp, _, fn = self._final_state()
+        return _precision_recall_reduce(
+            tp=tp,
+            fp=fp,
+            fn=fn,
+            metric="recall",
+            average=self.average,
+            zero_division=self.zero_division,
+        )
+ + + +
+[docs]
+class Recall(Metric, registry_key="recall", force_register=True):
+    """Compute the recall score for different types of classification tasks.
+
+    This metric can be used for binary, multiclass, and multilabel classification
+    tasks. It creates the appropriate class based on the ``task`` parameter.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the recall will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the recall score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives and false negatives.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import Recall
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0, 1, 1, 1]
+    >>> metric = Recall(task="binary")
+    >>> metric(target, preds)
+    1.0
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.5
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import Recall
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [0, 2, 1, 0]
+    >>> metric = Recall(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([1., 0., 0.])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]]
+    >>> preds = [
+    ...     [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]],
+    ...     [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.66666667, 0.        , 0.        ])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import Recall
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.2, 0.8]]
+    >>> metric = Recall(task="multilabel", num_labels=2)
+    >>> metric(target, preds)
+    array([0., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]]
+    >>> preds = [[[0.1, 0.7], [0.2, 0.8]], [[0.5, 0.9], [0.3, 0.4]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.33333333, 1. 
]) + + """ + + name: str = "Recall Score" + + def __new__( # type: ignore # mypy expects a subclass of Recall + cls: Type[Metric], + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> Metric: + """Create a task-specific metric for computing the recall score.""" + if task == "binary": + return BinaryRecall( + threshold=threshold, + pos_label=pos_label, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return MulticlassRecall( + num_classes=num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return MultilabelRecall( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + raise ValueError( + f"Task '{task}' not supported, expected 'binary', 'multiclass' or " + f"'multilabel'.", + )
+ +
+ \ No newline at end of file diff --git a/api/_modules/cyclops/evaluate/metrics/precision_recall_curve.html b/api/_modules/cyclops/evaluate/metrics/precision_recall_curve.html new file mode 100644 index 000000000..ad56484f5 --- /dev/null +++ b/api/_modules/cyclops/evaluate/metrics/precision_recall_curve.html @@ -0,0 +1,981 @@ +cyclops.evaluate.metrics.precision_recall_curve - cyclops documentation

Source code for cyclops.evaluate.metrics.precision_recall_curve

+"""Classes for computing precision-recall curves."""
+
+from typing import Any, List, Literal, Optional, Type, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.precision_recall_curve import (  # type: ignore # noqa: E501
+    PRCurve,
+    _binary_precision_recall_curve_compute,
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_update,
+    _check_thresholds,
+    _format_thresholds,
+    _multiclass_precision_recall_curve_compute,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_compute,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_update,
+)
+from cyclops.evaluate.metrics.metric import Metric
+
+
+
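Supplying ``thresholds`` trades exactness for constant memory: instead of storing every target/prediction pair, the metric keeps one 2x2 confusion matrix per threshold. A standalone sketch of one binary update under that scheme (the indexing convention here is illustrative, not necessarily the library's internal layout):

    import numpy as np

    thresholds = np.linspace(0, 1, 3)  # [0., 0.5, 1.]
    target = np.array([0, 1, 0, 1])
    preds = np.array([0.1, 0.4, 0.35, 0.8])

    confmat = np.zeros((len(thresholds), 2, 2), dtype=np.int_)
    for i, t in enumerate(thresholds):
        binarized = (preds >= t).astype(int)
        for y_true, y_pred in zip(target, binarized):
            confmat[i, y_true, y_pred] += 1
    print(confmat[1])  # counts at threshold 0.5: [[2 0]
                       #                           [1 1]]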
+[docs] +class BinaryPrecisionRecallCurve(Metric, registry_key="binary_precision_recall_curve"): + """Compute precision-recall curve for binary input. + + Parameters + ---------- + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for computing the precision and recall scores. + If int, then the number of thresholds to use. + If list or numpy.ndarray, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + pos_label : int + The label of the positive class. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryPrecisionRecallCurve + >>> target = [0, 1, 0, 1] + >>> preds = [0.1, 0.4, 0.35, 0.8] + >>> metric = BinaryPrecisionRecallCurve(thresholds=3) + >>> metric(target, preds) + PRCurve(precision=array([0.5, 1. , 0. ]), recall=array([1. , 0.5, 0. ]), thresholds=array([0. , 0.5, 1. ])) + >>> metric.reset_state() + >>> target = [[0, 1, 0, 1], [1, 1, 0, 0]] + >>> preds = [[0.1, 0.4, 0.35, 0.8], [0.6, 0.3, 0.1, 0.7]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + PRCurve(precision=array([0.5 , 0.66666667, 0. ]), recall=array([1. , 0.5, 0. ]), thresholds=array([0. , 0.5, 1. ])) + + """ + + name: str = "Precision-Recall Curve" + + def __init__( + self, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + pos_label: int = 1, + ) -> None: + """Initialize the metric.""" + super().__init__() + _check_thresholds(thresholds) + thresholds = _format_thresholds(thresholds) + if thresholds is None: + self.add_state("preds", default=[]) + self.add_state("target", default=[]) + else: + self.add_state( + "confmat", + default=np.zeros((len(thresholds), 2, 2), dtype=np.int_), + ) + self.thresholds = thresholds + self.pos_label = pos_label + + def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None: + """Update the state of the metric. + + The state is either a list of targets and predictions (if ``thresholds`` is + ``None``) or a confusion matrix. + + """ + target, preds = _binary_precision_recall_curve_format( + target=target, + preds=preds, + pos_label=self.pos_label, + ) + state = _binary_precision_recall_curve_update( + target=target, + preds=preds, + thresholds=self.thresholds, + ) + + if isinstance(state, np.ndarray): + self.confmat += state # type: ignore[attr-defined] + else: + self.target.append(state[0]) # type: ignore[attr-defined] + self.preds.append(state[1]) # type: ignore[attr-defined] + + def compute( + self, + ) -> PRCurve: + """Compute the precision-recall curve from the state.""" + if self.thresholds is None: + state = ( + np.concatenate(self.target, axis=0), # type: ignore[attr-defined] + np.concatenate(self.preds, axis=0), # type: ignore[attr-defined] + ) + else: + state = self.confmat # type: ignore[attr-defined] + + precision, recall, thresholds = _binary_precision_recall_curve_compute( + state=state, + thresholds=self.thresholds, + pos_label=self.pos_label, + ) + return PRCurve(precision, recall, thresholds) + + def __setattr__(self, name: str, value: Any) -> None: + """Set the attribute ``name`` to ``value``. + + This is defined for the case where `thresholds` is modified and the state + needs to be updated. For example, if thresholds was `None` and is later + set to a list or integer, we need to add the state "confmat" and remove + the states "preds" and "target" + + Parameters + ---------- + name : str + The name of the attribute to set. 
+ value : Any + The value to set the attribute to. + + """ + if name == "thresholds" and "thresholds" in self.__dict__: + _check_thresholds(thresholds=value) + value = _format_thresholds(thresholds=value) + self.reset_state() + if self.thresholds is None and value is not None: + self.__dict__["thresholds"] = value + self.add_state( + "confmat", + default=np.zeros((len(value), 2, 2), dtype=np.int_), + ) + del self.__dict__["preds"] + del self.__dict__["target"] + elif self.thresholds is not None and value is None: + self.__dict__["thresholds"] = value + self.add_state("preds", default=[]) + self.add_state("target", default=[]) + del self.__dict__["confmat"] + else: + self.__dict__["thresholds"] = value + return + + super().__setattr__(name, value)
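A usage note on the ``__setattr__`` hook above: reassigning ``thresholds`` after construction resets the metric and swaps the backing state, e.g. (a sketch, assuming the class behaves as the code above reads):

    metric = BinaryPrecisionRecallCurve(thresholds=None)  # state: preds/target lists
    metric.thresholds = 3  # state becomes a (3, 2, 2) confusion matrix; counts reset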
+ + + +
+[docs] +class MulticlassPrecisionRecallCurve( + Metric, + registry_key="multiclass_precision_recall_curve", +): + """Compute the precision-recall curve for multiclass problems. + + Parameters + ---------- + num_classes : int + The number of classes in the dataset. + thresholds : Union[int, List[float], numpy.ndarray], default=None + Thresholds used for computing the precision and recall scores. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassPrecisionRecallCurve + >>> target = [0, 1, 2, 0] + >>> preds = [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0], [0.5, 0.3, 0.2], [0.2, 0.5, 0.3]] + >>> metric = MulticlassPrecisionRecallCurve(num_classes=3, thresholds=3) + >>> metric(target, preds) + PRCurve(precision=array([[0.5 , 0. , 0. , 1. ], + [0.25 , 0.33333333, 0. , 1. ], + [0.25 , 0. , 0. , 1. ]]), recall=array([[1., 0., 0., 0.], + [1., 1., 0., 0.], + [1., 0., 0., 0.]]), thresholds=array([0. , 0.5, 1. ])) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [1, 2, 0, 1]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0], [0.5, 0.3, 0.2], [0.2, 0.5, 0.3]], + ... [[0.3, 0.2, 0.5], [0.1, 0.7, 0.2], [0.6, 0.1, 0.3], [0.1, 0.8, 0.1]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + PRCurve(precision=array([[0.375, 0.5 , 0. , 1. ], + [0.375, 0.4 , 0. , 1. ], + [0.25 , 0. , 0. , 1. ]]), recall=array([[1. , 0.33333333, 0. , 0. ], + [1. , 0.66666667, 0. , 0. ], + [1. , 0. , 0. , 0. ]]), thresholds=array([0. , 0.5, 1. ])) + + """ + + name: str = "Precision-Recall Curve" + + def __init__( + self, + num_classes: int, + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + ) -> None: + """Initialize the metric.""" + super().__init__() + _check_thresholds(thresholds) + + thresholds = _format_thresholds(thresholds) + if thresholds is None: + self.add_state("preds", default=[]) + self.add_state("target", default=[]) + else: + self.add_state( + "confmat", + default=np.zeros((len(thresholds), num_classes, 2, 2), dtype=np.int_), + ) + self.thresholds = thresholds + self.num_classes = num_classes + + def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None: + """Update the state of the metric. + + The state is either a list of targets and predictions (if ``thresholds`` is + ``None``) or a confusion matrix. 
+ + """ + target, preds = _multiclass_precision_recall_curve_format( + target=target, + preds=preds, + num_classes=self.num_classes, + ) + state = _multiclass_precision_recall_curve_update( + target=target, + preds=preds, + thresholds=self.thresholds, + num_classes=self.num_classes, + ) + + if isinstance(state, np.ndarray): + self.confmat += state # type: ignore[attr-defined] + else: + self.target.append(state[0]) # type: ignore[attr-defined] + self.preds.append(state[1]) # type: ignore[attr-defined] + + def compute( + self, + ) -> PRCurve: + """Compute the precision-recall curve from the state.""" + if self.thresholds is None: + state = ( + np.concatenate(self.target, axis=0), # type: ignore[attr-defined] + np.concatenate(self.preds, axis=0), # type: ignore[attr-defined] + ) + else: + state = self.confmat # type: ignore[attr-defined] + + precision, recall, thresholds = _multiclass_precision_recall_curve_compute( + state=state, + thresholds=self.thresholds, # type: ignore[arg-type] + num_classes=self.num_classes, + ) + return PRCurve(precision, recall, thresholds) + + def __setattr__(self, name: str, value: Any) -> None: + """Set the attribute ``name`` to ``value``. + + This is defined for the case where `thresholds` is modified and the state + needs to be updated. For example, if thresholds was `None` and is later + set to a list or integer, we need to add the state "confmat" and remove + the states "preds" and "target" + + Parameters + ---------- + name : str + The name of the attribute to set. + value : Any + The value to set the attribute to. + + """ + if name == "thresholds" and "thresholds" in self.__dict__: + _check_thresholds(thresholds=value) + value = _format_thresholds(thresholds=value) + self.reset_state() + if self.thresholds is None and value is not None: + self.__dict__["thresholds"] = value + self.add_state( + "confmat", + default=np.zeros((len(value), 2, 2), dtype=np.int_), + ) + del self.__dict__["preds"] + del self.__dict__["target"] + elif self.thresholds is not None and value is None: + self.__dict__["thresholds"] = value + self.add_state("preds", default=[]) + self.add_state("target", default=[]) + del self.__dict__["confmat"] + else: + self.__dict__["thresholds"] = value + return + + super().__setattr__(name, value)
+ + + +
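The ``__setattr__`` hook above lets the binning strategy be changed after construction. A minimal sketch of that behaviour (an editorial example, not from the library docs; it assumes ``MulticlassPrecisionRecallCurve`` is importable from ``cyclops.evaluate.metrics`` as in the docstring examples):

from cyclops.evaluate.metrics import MulticlassPrecisionRecallCurve

# With thresholds=None the state is a pair of growing lists of targets/preds.
metric = MulticlassPrecisionRecallCurve(num_classes=3, thresholds=None)
metric.update_state([0, 1, 2], [[0.7, 0.2, 0.1], [0.2, 0.7, 0.1], [0.1, 0.2, 0.7]])

# Reassigning ``thresholds`` resets the state and swaps the "preds"/"target"
# lists for a per-threshold confusion-matrix accumulator ("confmat").
metric.thresholds = 5
assert hasattr(metric, "confmat") and not hasattr(metric, "preds")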
+[docs]
+class MultilabelPrecisionRecallCurve(
+    Metric,
+    registry_key="multilabel_precision_recall_curve",
+):
+    """Compute the precision-recall curve for multilabel problems.
+
+    Parameters
+    ----------
+    num_labels : int
+        The number of labels in the dataset.
+    thresholds : int, list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the precision and recall scores.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelPrecisionRecallCurve
+    >>> target = [[0, 1], [1, 0]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> metric = MultilabelPrecisionRecallCurve(num_labels=2, thresholds=3)
+    >>> metric(target, preds)
+    PRCurve(precision=array([[0.5, 1. , 0. , 1. ],
+           [0.5, 1. , 0. , 1. ]]), recall=array([[1., 1., 0., 0.],
+           [1., 1., 0., 0.]]), thresholds=array([0. , 0.5, 1. ]))
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 0]], [[1, 0], [0, 1]]]
+    >>> preds = [[[0.1, 0.9], [0.8, 0.2]], [[0.2, 0.8], [0.7, 0.3]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    PRCurve(precision=array([[0.5, 0.5, 0. , 1. ],
+           [0.5, 0.5, 0. , 1. ]]), recall=array([[1. , 0.5, 0. , 0. ],
+           [1. , 0.5, 0. , 0. ]]), thresholds=array([0. , 0.5, 1. ]))
+
+    """
+
+    name: str = "Precision-Recall Curve"
+
+    def __init__(
+        self,
+        num_labels: int,
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__()
+
+        _check_thresholds(thresholds)
+        thresholds = _format_thresholds(thresholds)
+        if thresholds is None:
+            self.add_state("preds", default=[])
+            self.add_state("target", default=[])
+        else:
+            self.add_state(
+                "confmat",
+                default=np.zeros((len(thresholds), num_labels, 2, 2), dtype=np.int_),
+            )
+        self.thresholds = thresholds
+        self.num_labels = num_labels
+
+    def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None:
+        """Update the state of the metric.
+
+        The state is either a list of targets and predictions (if ``thresholds`` is
+        ``None``) or a confusion matrix.
+
+        """
+        target, preds = _multilabel_precision_recall_curve_format(
+            target=target,
+            preds=preds,
+            num_labels=self.num_labels,
+        )
+        state = _multilabel_precision_recall_curve_update(
+            target,
+            preds,
+            num_labels=self.num_labels,
+            thresholds=self.thresholds,
+        )
+
+        if isinstance(state, np.ndarray):
+            self.confmat += state  # type: ignore[attr-defined]
+        else:
+            self.target.append(state[0])  # type: ignore[attr-defined]
+            self.preds.append(state[1])  # type: ignore[attr-defined]
+
+    def compute(
+        self,
+    ) -> PRCurve:
+        """Compute the precision-recall curve from the state."""
+        if self.thresholds is None:
+            state = (
+                np.concatenate(self.target, axis=0),  # type: ignore[attr-defined]
+                np.concatenate(self.preds, axis=0),  # type: ignore[attr-defined]
+            )
+        else:
+            state = self.confmat  # type: ignore[attr-defined]
+
+        precision, recall, thresholds = _multilabel_precision_recall_curve_compute(
+            state,
+            thresholds=self.thresholds,  # type: ignore[arg-type]
+            num_labels=self.num_labels,
+        )
+        return PRCurve(precision, recall, thresholds)
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        """Set the attribute ``name`` to ``value``.
+
+        This is defined for the case where `thresholds` is modified and the state
+        needs to be updated.
For example, if thresholds was `None` and is later
+        set to a list or integer, we need to add the state "confmat" and remove
+        the states "preds" and "target".
+
+        Parameters
+        ----------
+        name : str
+            The name of the attribute to set.
+        value : Any
+            The value to set the attribute to.
+
+        """
+        if name == "thresholds" and "thresholds" in self.__dict__:
+            _check_thresholds(thresholds=value)
+            value = _format_thresholds(thresholds=value)
+            self.reset_state()
+            if self.thresholds is None and value is not None:
+                self.__dict__["thresholds"] = value
+                self.add_state(
+                    "confmat",
+                    # NOTE: the state must match the shape used in __init__,
+                    # i.e. one 2x2 confusion matrix per threshold per label.
+                    default=np.zeros(
+                        (len(value), self.num_labels, 2, 2),
+                        dtype=np.int_,
+                    ),
+                )
+                del self.__dict__["preds"]
+                del self.__dict__["target"]
+            elif self.thresholds is not None and value is None:
+                self.__dict__["thresholds"] = value
+                self.add_state("preds", default=[])
+                self.add_state("target", default=[])
+                del self.__dict__["confmat"]
+            else:
+                self.__dict__["thresholds"] = value
+            return
+
+        super().__setattr__(name, value)
+ + + +# ruff: noqa: W505 +
+[docs]
+class PrecisionRecallCurve(
+    Metric,
+    registry_key="precision_recall_curve",
+    force_register=True,
+):
+    """Compute the precision-recall curve for different classification tasks.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        The task for which the precision-recall curve is computed.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the precision and recall scores. If int,
+        then the number of thresholds to use. If list or array, then the
+        thresholds to use. If None, then the thresholds are automatically
+        determined by the unique values in ``preds``.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, optional
+        The number of classes in the dataset. Required if ``task`` is
+        ``"multiclass"``.
+    num_labels : int, optional
+        The number of labels in the dataset. Required if ``task`` is
+        ``"multilabel"``.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import PrecisionRecallCurve
+    >>> target = [1, 1, 1, 0]
+    >>> preds = [0.6, 0.2, 0.3, 0.8]
+    >>> metric = PrecisionRecallCurve(task="binary", thresholds=None)
+    >>> metric(target, preds)
+    PRCurve(precision=array([0.75      , 0.66666667, 0.5       , 0.        , 1.        ]), recall=array([1.        , 0.66666667, 0.33333333, 0.        , 0.        ]), thresholds=array([0.2, 0.3, 0.6, 0.8]))
+    >>> metric.reset_state()
+    >>> target = [[1, 0, 1, 1], [0, 0, 0, 1]]
+    >>> preds = [[0.5, 0.4, 0.1, 0.3], [0.9, 0.6, 0.45, 0.8]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    PRCurve(precision=array([0.5       , 0.42857143, 0.33333333, 0.4       , 0.5       ,
+           0.33333333, 0.5       , 0.        , 1.        ]), recall=array([1.  , 0.75, 0.5 , 0.5 , 0.5 , 0.25, 0.25, 0.  , 0.  ]), thresholds=array([0.1 , 0.3 , 0.4 , 0.45, 0.5 , 0.6 , 0.8 , 0.9 ]))
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import PrecisionRecallCurve
+    >>> target = [0, 1, 2, 2]
+    >>> preds = [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.2, 0.2, 0.6]]
+    >>> metric = PrecisionRecallCurve(task="multiclass", num_classes=3, thresholds=3)
+    >>> metric(target, preds)
+    PRCurve(precision=array([[0.25, 0.  , 0.  , 1.  ],
+           [0.25, 0.5 , 0.  , 1.  ],
+           [0.5 , 1.  , 0.  , 1.  ]]), recall=array([[1., 0., 0., 0.],
+           [1., 1., 0., 0.],
+           [1., 1., 0., 0.]]), thresholds=array([0. , 0.5, 1. ]))
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 2, 2], [1, 2, 0, 1]]
+    >>> preds = [
+    ...     [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.2, 0.2, 0.6]],
+    ...     [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.2, 0.2, 0.6]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    PRCurve(precision=array([[0.25 , 0.   , 0.   , 1.   ],
+           [0.375, 0.5  , 0.   , 1.   ],
+           [0.375, 0.5  , 0.   , 1.   ]]), recall=array([[1.        , 0.        , 0.        , 0.        ],
+           [1.        , 0.66666667, 0.        , 0.        ],
+           [1.        , 0.66666667, 0.        , 0.        ]]), thresholds=array([0. , 0.5, 1. ]))
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import PrecisionRecallCurve
+    >>> target = [[0, 1], [1, 0]]
+    >>> preds = [[0.1, 0.9], [0.8, 0.2]]
+    >>> metric = PrecisionRecallCurve(task="multilabel", num_labels=2, thresholds=3)
+    >>> metric(target, preds)
+    PRCurve(precision=array([[0.5, 1. , 0. , 1. ],
+           [0.5, 1. , 0. , 1. ]]), recall=array([[1., 1., 0., 0.],
+           [1., 1., 0., 0.]]), thresholds=array([0. , 0.5, 1. 
])) + >>> metric.reset_state() + >>> target = [[[0, 1], [1, 0]], [[1, 0], [0, 1]]] + >>> preds = [[[0.1, 0.9], [0.8, 0.2]], [[0.1, 0.9], [0.8, 0.2]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + PRCurve(precision=array([[0.5, 0.5, 0. , 1. ], + [0.5, 0.5, 0. , 1. ]]), recall=array([[1. , 0.5, 0. , 0. ], + [1. , 0.5, 0. , 0. ]]), thresholds=array([0. , 0.5, 1. ])) + + """ + + name: str = "Precision-Recall Curve" + + def __new__( # type: ignore # mypy expects a subclass of PrecisionRecallCurve + cls: Type[Metric], + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None, + pos_label: int = 1, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + ) -> Metric: + """Create a task-specific instance of the precision-recall curve metric.""" + if task == "binary": + return BinaryPrecisionRecallCurve( + thresholds=thresholds, + pos_label=pos_label, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be a positive integer." + return MulticlassPrecisionRecallCurve( + num_classes=num_classes, + thresholds=thresholds, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be a positive integer." + return MultilabelPrecisionRecallCurve( + num_labels=num_labels, + thresholds=thresholds, + ) + raise ValueError( + "Expected argument `task` to be either 'binary', 'multiclass' or " + f"'multilabel', but got {task}", + )
+ +
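Because ``__new__`` returns a task-specific subclass, the wrapper class acts purely as a dispatcher; a short usage sketch (assuming the subclasses are re-exported from ``cyclops.evaluate.metrics`` as in the docstring examples):

from cyclops.evaluate.metrics import (
    BinaryPrecisionRecallCurve,
    PrecisionRecallCurve,
)

# The wrapper never instantiates itself; it hands back the subclass.
metric = PrecisionRecallCurve(task="binary", thresholds=None)
assert isinstance(metric, BinaryPrecisionRecallCurve)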
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/roc.html b/api/_modules/cyclops/evaluate/metrics/roc.html
new file mode 100644
index 000000000..09d59b291
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/roc.html
@@ -0,0 +1,728 @@
+cyclops.evaluate.metrics.roc - cyclops documentation

Source code for cyclops.evaluate.metrics.roc

+"""Classes for computing ROC metrics."""
+
+from typing import List, Literal, Optional, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.roc import (
+    ROCCurve as ROCCurveData,
+)
+from cyclops.evaluate.metrics.functional.roc import (
+    _binary_roc_compute,
+    _multiclass_roc_compute,
+    _multilabel_roc_compute,
+)
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.precision_recall_curve import (
+    BinaryPrecisionRecallCurve,
+    MulticlassPrecisionRecallCurve,
+    MultilabelPrecisionRecallCurve,
+)
+
+
+
+[docs]
+class BinaryROCCurve(BinaryPrecisionRecallCurve, registry_key="binary_roc_curve"):
+    """Compute the ROC curve for binary classification tasks.
+
+    Parameters
+    ----------
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for computing the ROC curve.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+    pos_label : int, default=1
+        The label of the positive class.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import BinaryROCCurve
+    >>> target = [0, 0, 1, 1]
+    >>> preds = [0.1, 0.4, 0.35, 0.8]
+    >>> metric = BinaryROCCurve()
+    >>> metric(target, preds)
+    ROCCurve(fpr=array([0. , 0. , 0.5, 0.5, 1. ]), tpr=array([0. , 0.5, 0.5, 1. , 1. ]), thresholds=array([1.  , 0.8 , 0.4 , 0.35, 0.1 ]))
+    >>> metric.reset_state()
+    >>> target = [[1, 1, 0, 0], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.2, 0.3, 0.4], [0.6, 0.5, 0.4, 0.3]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    ROCCurve(fpr=array([0.  , 0.25, 0.5 , 0.75, 1.  , 1.  , 1.  ]), tpr=array([0.  , 0.  , 0.  , 0.25, 0.5 , 0.75, 1.  ]), thresholds=array([1. , 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]))
+
+    """  # noqa: W505
+
+    name: str = "ROC Curve"
+
+    def compute(  # type: ignore
+        self,
+    ) -> ROCCurveData:
+        """Compute the ROC curve from the state variables."""
+        if self.thresholds is None:
+            state = (
+                np.concatenate(self.target, axis=0),  # type: ignore[attr-defined]
+                np.concatenate(self.preds, axis=0),  # type: ignore[attr-defined]
+            )
+        else:
+            state = self.confmat  # type: ignore[attr-defined]
+        fpr_, tpr_, thresholds_ = _binary_roc_compute(
+            state, thresholds=self.thresholds, pos_label=self.pos_label
+        )
+
+        return ROCCurveData(fpr_, tpr_, thresholds_)
+ + + +
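For intuition about the thresholded code path: with fixed thresholds the state is a stack of per-threshold 2x2 confusion matrices, and each ROC point is tpr = tp / (tp + fn) against fpr = fp / (fp + tn). A hypothetical NumPy helper illustrating that reduction (the [[tn, fp], [fn, tp]] layout is an assumption made for this sketch, not taken from the library):

import numpy as np

def roc_points_from_confmats(confmat: np.ndarray):
    # confmat: shape (num_thresholds, 2, 2), assumed laid out as
    # [[tn, fp], [fn, tp]] at each threshold.
    tn, fp = confmat[:, 0, 0], confmat[:, 0, 1]
    fn, tp = confmat[:, 1, 0], confmat[:, 1, 1]
    tpr = tp / np.clip(tp + fn, 1, None)  # true positive rate (sensitivity)
    fpr = fp / np.clip(fp + tn, 1, None)  # false positive rate
    return fpr, tpr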
+[docs]
+class MulticlassROCCurve(
+    MulticlassPrecisionRecallCurve,
+    registry_key="multiclass_roc_curve",
+):
+    """Compute the ROC curve for multiclass classification tasks.
+
+    If ``preds`` is not in the range [0, 1], a softmax function is applied
+    to transform it to the range [0, 1].
+
+    Parameters
+    ----------
+    num_classes : int
+        Number of classes.
+    thresholds : int or list of floats or numpy.ndarray of floats, default=None
+        Thresholds used for binarizing the predicted probabilities.
+        If int, then the number of thresholds to use.
+        If list or array, then the thresholds to use.
+        If None, then the thresholds are automatically determined by the
+        unique values in ``preds``.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MulticlassROCCurve
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.9, 0.1, 0]]
+    >>> metric = MulticlassROCCurve(num_classes=3, thresholds=4)
+    >>> metric(target, preds)
+    ROCCurve(fpr=array([[0.        , 0.        , 0.        , 1.        ],
+           [0.        , 0.33333333, 0.33333333, 1.        ],
+           [0.        , 0.        , 0.        , 1.        ]]), tpr=array([[0. , 0.5, 0.5, 1. ],
+           [0. , 1. , 1. , 1. ],
+           [0. , 0. , 1. , 1. ]]), thresholds=array([1.        , 0.66666667, 0.33333333, 0.        ]))
+    >>> metric.reset_state()
+    >>> target = [[1, 1, 0, 0], [0, 0, 1, 1]]
+    >>> preds = [
+    ...     [[0.1, 0.2, 0.7], [0.5, 0.4, 0.1], [0.2, 0.3, 0.5], [0.8, 0.1, 0.1]],
+    ...     [[0.1, 0.2, 0.7], [0.5, 0.4, 0.1], [0.2, 0.3, 0.5], [0.8, 0.1, 0.1]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    ROCCurve(fpr=array([[0.  , 0.25, 0.5 , 1.  ],
+           [0.  , 0.  , 0.25, 1.  ],
+           [0.  , 0.25, 0.5 , 1.  ]]), tpr=array([[0.  , 0.25, 0.5 , 1.  ],
+           [0.  , 0.  , 0.25, 1.  ],
+           [0.  , 0.  , 0.  , 0.  ]]), thresholds=array([1.        , 0.66666667, 0.33333333, 0.        ]))
+
+    """  # noqa: W505
+
+    name: str = "ROC Curve"
+
+    def compute(  # type: ignore
+        self,
+    ) -> ROCCurveData:
+        """Compute the ROC curve from the state variables."""
+        if self.thresholds is None:
+            state = (
+                np.concatenate(self.target, axis=0),  # type: ignore[attr-defined]
+                np.concatenate(self.preds, axis=0),  # type: ignore[attr-defined]
+            )
+        else:
+            state = self.confmat  # type: ignore[attr-defined]
+        fpr_, tpr_, thresholds_ = _multiclass_roc_compute(
+            state, thresholds=self.thresholds, num_classes=self.num_classes
+        )
+
+        return ROCCurveData(fpr_, tpr_, thresholds_)
+ + + +
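As a reminder of the standard construction the class relies on (a sketch, not the library's internal code): the multiclass curve is built one-vs-rest, pairing binary targets ``target == k`` with the class-k column of the scores for each class k.

import numpy as np

target = np.array([0, 1, 2, 0])
preds = np.array(
    [[0.05, 0.95, 0.0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6], [0.9, 0.1, 0.0]]
)

k = 1
binary_target = (target == k).astype(int)  # one-vs-rest targets for class k
scores = preds[:, k]                       # per-sample scores for class k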
+[docs] +class MultilabelROCCurve( + MultilabelPrecisionRecallCurve, + registry_key="multilabel_roc_curve", +): + """Compute the ROC curve for multilabel classification tasks. + + Parameters + ---------- + num_labels : int + The number of labels in the dataset. + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for binarizing the values of ``preds``. + If int, then the number of thresholds to use. + If list or array, then the thresholds to use. + If None, then the thresholds are automatically determined by the + unique values in ``preds``. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MultilabelROCCurve + >>> target = [[1, 1, 0], [0, 1, 0]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] + >>> metric = MultilabelROCCurve(num_labels=3, thresholds=4) + >>> metric(target, preds) + ROCCurve(fpr=array([[0. , 0. , 0. , 1. ], + [0. , 0. , 0. , 0. ], + [0. , 0.5, 0.5, 1. ]]), tpr=array([[0., 0., 0., 1.], + [0., 1., 1., 1.], + [0., 0., 0., 0.]]), thresholds=array([1. , 0.66666667, 0.33333333, 0. ])) + >>> metric.reset_state() + >>> target = [[[1, 1, 0], [0, 1, 0]], [[1, 1, 0], [0, 1, 0]]] + >>> preds = [[[0.1, 0.9, 0.8], [0.05, 0.95, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + ROCCurve(fpr=array([[0. , 0. , 0. , 1. ], + [0. , 0. , 0. , 0. ], + [0. , 0.5, 0.5, 1. ]]), tpr=array([[0., 0., 0., 1.], + [0., 1., 1., 1.], + [0., 0., 0., 0.]]), thresholds=array([1. , 0.66666667, 0.33333333, 0. ])) + + """ # noqa: W505 + + name: str = "ROC Curve" + + def compute( # type: ignore + self, + ) -> ROCCurveData: + """Compute the ROC curve from the state variables.""" + if self.thresholds is None: + state = ( + np.concatenate(self.target, axis=0), # type: ignore[attr-defined] + np.concatenate(self.preds, axis=0), # type: ignore[attr-defined] + ) + else: + state = self.confmat # type: ignore[attr-defined] + + fpr_, tpr_, thresholds_ = _multilabel_roc_compute( + state=state, + num_labels=self.num_labels, + thresholds=self.thresholds, + ) + return ROCCurveData(fpr_, tpr_, thresholds_)
+ + + +
+[docs] +class ROCCurve(Metric, registry_key="roc_curve", force_register=True): + """Compute the ROC curve for different types of classification tasks. + + Parameters + ---------- + task : Literal["binary", "multiclass", "multilabel"] + The type of task for the input data. One of 'binary', 'multiclass' + or 'multilabel'. + thresholds : int or list of floats or numpy.ndarray of floats, default=None + Thresholds used for computing the ROC curve. Can be one of: + + - None: use the unique values of ``preds`` as thresholds + - int: generate ``thresholds`` number of evenly spaced values between + 0 and 1 as thresholds. + - list of floats: use the values in the list as thresholds. The list + of values should be monotonically increasing. The list will be + converted into a numpy array. + - numpy.ndarray of floats: use the values in the array as thresholds. + The array should be 1d and monotonically increasing. + pos_label : int, default=1 + Label to consider as positive for binary classification tasks. + num_classes : int, optional + The number of classes in the dataset. Required if ``task`` is + ``"multiclass"``. + num_labels : int, optional + The number of labels in the dataset. Required if ``task`` is + ``"multilabel"``. + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics import ROCCurve + >>> target = [0, 0, 1, 1] + >>> preds = [0.1, 0.4, 0.35, 0.8] + >>> metric = ROCCurve(task="binary", thresholds=None) + >>> metric(target, preds) + ROCCurve(fpr=array([0. , 0. , 0.5, 0.5, 1. ]), tpr=array([0. , 0.5, 0.5, 1. , 1. ]), thresholds=array([1. , 0.8 , 0.4 , 0.35, 0.1 ])) + >>> metric.reset_state() + >>> target = [[1, 1, 0, 0], [0, 0, 1, 1]] + >>> preds = [[0.1, 0.2, 0.3, 0.4], [0.6, 0.5, 0.4, 0.3]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + ROCCurve(fpr=array([0. , 0.25, 0.5 , 0.75, 1. , 1. , 1. ]), tpr=array([0. , 0. , 0. , 0.25, 0.5 , 0.75, 1. ]), thresholds=array([1. , 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])) + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics import ROCCurve + >>> target = [1, 2, 0] + >>> preds = [[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6]] + >>> metric = ROCCurve(task="multiclass", num_classes=3, thresholds=4) + >>> metric(target, preds) + ROCCurve(fpr=array([[0. , 0. , 0. , 1. ], + [0. , 0.5, 0.5, 1. ], + [0. , 0. , 0.5, 1. ]]), tpr=array([[0., 0., 0., 1.], + [0., 1., 1., 1.], + [0., 0., 0., 1.]]), thresholds=array([1. , 0.66666667, 0.33333333, 0. ])) + >>> metric.reset_state() + >>> target = [1, 2] + >>> preds = [[[0.05, 0.75, 0.2]], [[0.1, 0.8, 0.1]]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + ROCCurve(fpr=array([[0., 0., 0., 1.], + [0., 1., 1., 1.], + [0., 0., 0., 1.]]), tpr=array([[0., 0., 0., 0.], + [0., 1., 1., 1.], + [0., 0., 0., 1.]]), thresholds=array([1. , 0.66666667, 0.33333333, 0. ])) + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics import ROCCurve + >>> target = [[1, 1, 0], [0, 1, 0]] + >>> preds = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] + >>> metric = ROCCurve(task="multilabel", num_labels=3, thresholds=4) + >>> metric(target, preds) + ROCCurve(fpr=array([[0. , 0. , 0. , 1. ], + [0. , 0. , 0. , 0. ], + [0. , 0.5, 0.5, 1. ]]), tpr=array([[0., 0., 0., 1.], + [0., 1., 1., 1.], + [0., 0., 0., 0.]]), thresholds=array([1. , 0.66666667, 0.33333333, 0. 
]))
+    >>> metric.reset_state()
+    >>> target = [[[1, 1, 0], [0, 1, 0]], [[1, 1, 0], [0, 1, 0]]]
+    >>> preds = [[[0.1, 0.9, 0.8], [0.05, 0.95, 0]], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    ROCCurve(fpr=array([[0. , 0. , 0. , 1. ],
+           [0. , 0. , 0. , 0. ],
+           [0. , 0.5, 0.5, 1. ]]), tpr=array([[0., 0., 0., 1.],
+           [0., 1., 1., 1.],
+           [0., 0., 0., 0.]]), thresholds=array([1.        , 0.66666667, 0.33333333, 0.        ]))
+
+    """  # noqa: W505
+
+    name: str = "ROC Curve"
+
+    def __new__(  # type: ignore # mypy expects a subclass of ROCCurve
+        cls,
+        task: Literal["binary", "multiclass", "multilabel"],
+        thresholds: Optional[Union[int, List[float], npt.NDArray[np.float_]]] = None,
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        num_labels: Optional[int] = None,
+    ) -> Metric:
+        """Create a task-specific instance of the ROC curve metric."""
+        if task == "binary":
+            return BinaryROCCurve(thresholds=thresholds, pos_label=pos_label)
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be a positive integer."
+            return MulticlassROCCurve(num_classes=num_classes, thresholds=thresholds)
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be a positive integer."
+            return MultilabelROCCurve(num_labels=num_labels, thresholds=thresholds)
+        raise ValueError(
+            "Expected argument `task` to be either 'binary', 'multiclass' or "
+            f"'multilabel', but got {task}",
+        )
+ +
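The ``thresholds`` options listed above are normalised before use; below is a sketch consistent with the documented semantics (the library's actual helper is ``_format_thresholds`` in the functional module; this standalone version is illustrative only):

import numpy as np

def format_thresholds(thresholds):
    if thresholds is None:
        return None  # thresholds inferred later from unique values of preds
    if isinstance(thresholds, int):
        # int -> that many evenly spaced thresholds in [0, 1]
        return np.linspace(0.0, 1.0, thresholds)
    # list/ndarray -> used as given (expected 1d, monotonically increasing)
    return np.asarray(thresholds, dtype=np.float64)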
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/sensitivity.html b/api/_modules/cyclops/evaluate/metrics/sensitivity.html
new file mode 100644
index 000000000..4b28ba805
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/sensitivity.html
@@ -0,0 +1,722 @@
+cyclops.evaluate.metrics.sensitivity - cyclops documentation

Source code for cyclops.evaluate.metrics.sensitivity

+"""Classes for computing sensitivity metrics."""
+
+from typing import Literal, Optional
+
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.precision_recall import (
+    BinaryRecall,
+    MulticlassRecall,
+    MultilabelRecall,
+)
+
+
+
+[docs]
+class BinarySensitivity(BinaryRecall, registry_key="binary_sensitivity"):
+    """Compute the sensitivity score for binary classification tasks.
+
+    Parameters
+    ----------
+    pos_label : int, default=1
+        Label of the positive class.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import BinarySensitivity
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0, 1, 1, 0]
+    >>> metric = BinarySensitivity()
+    >>> metric(target, preds)
+    0.5
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.5
+
+    """
+
+    name: str = "Sensitivity Score"
+
+    def __init__(
+        self,
+        pos_label: int = 1,
+        threshold: float = 0.5,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(
+            pos_label=pos_label,
+            threshold=threshold,
+            zero_division=zero_division,
+        )
+ + + +
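Since ``BinarySensitivity`` only subclasses ``BinaryRecall`` and forwards its arguments, the two metrics agree exactly. A quick sketch (it assumes ``BinaryRecall`` is re-exported from ``cyclops.evaluate.metrics``; otherwise import it from ``cyclops.evaluate.metrics.precision_recall``):

from cyclops.evaluate.metrics import BinaryRecall, BinarySensitivity

target = [0, 1, 0, 1]
preds = [0, 1, 1, 0]

# Sensitivity is the recall of the positive class, so the scores are identical.
assert BinarySensitivity()(target, preds) == BinaryRecall()(target, preds)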
+[docs] +class MulticlassSensitivity(MulticlassRecall, registry_key="multiclass_sensitivity"): + """Compute the sensitivity score for multiclass classification tasks. + + Parameters + ---------- + num_classes : int + Number of classes in the dataset. + top_k : int, optional + If given, and predictions are probabilities/logits, the sensitivity will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + average : Literal["micro", "macro", "weighted", None], default=None + If ``None``, return the sensitivity score for each class. Otherwise, + use one of the following options to compute the average score: + + - ``micro``: Calculate metric globally from the total count of true + positives and false negatives. + - ``macro``: Calculate metric for each class, and find their + unweighted mean. This does not take label imbalance into account. + - ``weighted``: Calculate metric for each class, and find their + average weighted by the support (the number of true instances + for each class). This alters "macro" to account for class + imbalance. + zero_division : Literal["warn", 0, 1], default="warn" + Value to return when there is a zero division. If set to "warn", this + acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassSensitivity + >>> target = [0, 1, 2, 0] + >>> preds = [2, 0, 2, 1] + >>> metric = MulticlassSensitivity(num_classes=3) + >>> metric(target, preds) + array([0., 0., 1.]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]] + >>> preds = [ + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.66666667, 0. , 0. ]) + + """ + + name: str = "Sensitivity Score" + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + num_classes=num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + )
+ + + +
+[docs]
+class MultilabelSensitivity(MultilabelRecall, registry_key="multilabel_sensitivity"):
+    """Compute the sensitivity score for multilabel classification tasks.
+
+    Parameters
+    ----------
+    num_labels : int
+        Number of labels in the dataset.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the sensitivity will
+        be computed only for the top k labels. Otherwise, ``top_k`` will be
+        set to 1.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metric globally from the total count of true
+          positives and false negatives.
+        - ``macro``: Calculate metric for each label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metric for each label, and find their
+          average weighted by the support (the number of true instances
+          for each label). This alters "macro" to account for label
+          imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelSensitivity
+    >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]]
+    >>> metric = MultilabelSensitivity(num_labels=4)
+    >>> metric(target, preds)
+    array([0., 1., 1., 0.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 0, 1], [0, 0, 1, 1]], [[0, 1, 0, 1], [0, 0, 1, 1]]]
+    >>> preds = [
+    ...     [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]],
+    ...     [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0., 1., 1., 0.])
+
+    """
+
+    name: str = "Sensitivity Score"
+
+    def __init__(
+        self,
+        num_labels: int,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(
+            num_labels=num_labels,
+            threshold=threshold,
+            top_k=top_k,
+            average=average,
+            zero_division=zero_division,
+        )
+ + + +
+[docs]
+class Sensitivity(Metric, registry_key="sensitivity", force_register=True):
+    """Compute the sensitivity score for different types of classification tasks.
+
+    This metric can be used for binary, multiclass, and multilabel classification
+    tasks. It creates the appropriate class based on the ``task`` parameter.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int, default=None
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the sensitivity will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int, default=None
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the sensitivity score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally by counting the total true
+          positives and false negatives.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label imbalance into account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import Sensitivity
+    >>> target = [0, 1, 0, 1]
+    >>> preds = [0, 1, 1, 1]
+    >>> metric = Sensitivity(task="binary")
+    >>> metric.update_state(target, preds)
+    >>> metric.compute()
+    1.0
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 0, 1], [0, 0, 1, 1]]
+    >>> preds = [[0.1, 0.9, 0.8, 0.2], [0.2, 0.3, 0.6, 0.1]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.5
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import Sensitivity
+    >>> target = [0, 1, 2, 0]
+    >>> preds = [0, 2, 1, 0]
+    >>> metric = Sensitivity(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([1., 0., 0.])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 2, 0], [2, 1, 2, 0]]
+    >>> preds = [
+    ...     [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]],
+    ...     [[0.1, 0.6, 0.3], [0.05, 0.1, 0.85], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.66666667, 0.        , 0.        ])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import Sensitivity
+    >>> target = [[0, 1], [1, 1]]
+    >>> preds = [[0.1, 0.9], [0.2, 0.8]]
+    >>> metric = Sensitivity(task="multilabel", num_labels=2)
+    >>> metric(target, preds)
+    array([0., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1], [1, 1]], [[1, 1], [1, 0]]]
+    >>> preds = [[[0.1, 0.7], [0.2, 0.8]], [[0.5, 0.9], [0.3, 0.4]]]
+    >>> for t, p in zip(target, preds):
+    ...
metric.update_state(t, p) + >>> metric.compute() + array([0.33333333, 1. ]) + + """ + + name: str = "Sensitivity Score" + + def __new__( # type: ignore # mypy expects a subclass of Sensitivity + cls, + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + num_classes: Optional[int] = None, + threshold: float = 0.5, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> Metric: + """Create a task-specific metric for computing the sensitivity score.""" + if task == "binary": + return BinarySensitivity( + threshold=threshold, + pos_label=pos_label, + zero_division=zero_division, + ) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be specified for multiclass classification." + return MulticlassSensitivity( + num_classes=num_classes, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be specified for multilabel classification." + return MultilabelSensitivity( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + average=average, + zero_division=zero_division, + ) + raise ValueError( + f"Task '{task}' not supported, expected 'binary', 'multiclass' or " + f"'multilabel'.", + )
+ +
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/specificity.html b/api/_modules/cyclops/evaluate/metrics/specificity.html
new file mode 100644
index 000000000..8a0978203
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/specificity.html
@@ -0,0 +1,794 @@
+cyclops.evaluate.metrics.specificity - cyclops documentation

Source code for cyclops.evaluate.metrics.specificity

+"""Classes for computing specificity metrics."""
+
+from typing import Literal, Optional, Type, Union, cast
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.specificity import _specificity_reduce
+from cyclops.evaluate.metrics.metric import Metric
+from cyclops.evaluate.metrics.stat_scores import (
+    BinaryStatScores,
+    MulticlassStatScores,
+    MultilabelStatScores,
+)
+from cyclops.evaluate.metrics.utils import _check_average_arg
+
+
+
+[docs]
+class BinarySpecificity(BinaryStatScores, registry_key="binary_specificity"):
+    """Compute specificity for binary classification tasks.
+
+    Parameters
+    ----------
+    pos_label : int, default=1
+        The label to use for the positive class.
+    threshold : float, default=0.5
+        The threshold to use for converting the predictions to binary
+        values. Logits will be converted to probabilities using the sigmoid
+        function.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import BinarySpecificity
+    >>> target = [0, 1, 1, 0]
+    >>> preds = [0, 1, 0, 0]
+    >>> metric = BinarySpecificity()
+    >>> metric(target, preds)
+    1.0
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 1, 0], [1, 1, 0, 0]]
+    >>> preds = [[0, 1, 0, 0], [1, 0, 0, 0]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    1.0
+
+    """
+
+    name: str = "Specificity Score"
+
+    def __init__(
+        self,
+        pos_label: int = 1,
+        threshold: float = 0.5,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> None:
+        """Initialize the metric."""
+        super().__init__(threshold=threshold, pos_label=pos_label)
+        self.zero_division = zero_division
+
+    def compute(self) -> float:  # type: ignore[override]
+        """Compute the specificity score from the state."""
+        tp, fp, tn, fn = self._final_state()
+        score = _specificity_reduce(
+            tp=tp,
+            fp=fp,
+            tn=tn,
+            fn=fn,
+            average=None,
+            zero_division=self.zero_division,
+        )
+        return cast(float, score)
+ + + +
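``_specificity_reduce`` works from the accumulated counts; the underlying formula is tn / (tn + fp), the recall of the negative class (as stated in the ``Specificity`` docstring below). A hypothetical scalar version for the binary case:

def binary_specificity(tn: int, fp: int, zero_division: float = 0.0) -> float:
    # Specificity = tn / (tn + fp); fall back when the denominator is zero.
    denominator = tn + fp
    if denominator == 0:
        return zero_division
    return tn / denominator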
+[docs] +class MulticlassSpecificity( + MulticlassStatScores, + registry_key="multiclass_specificity", +): + """Compute specificity for multiclass classification tasks. + + Parameters + ---------- + num_classes : int + The number of classes in the dataset. + top_k : int, optional + Number of highest probability or logit score predictions considered + to find the correct label. Only works when ``preds`` contain + probabilities/logits. + average : Literal["micro", "macro", "weighted", None], default=None + If None, return the specificity for each class, otherwise return the + average specificity. Average options are: + + - ``micro``: Calculate metrics globally. + - ``macro``: Calculate metrics for each class, and find their unweighted + mean. This does not take class imbalance into account. + - ``weighted``: Calculate metrics for each class, and find their + average, weighted by support (the number of true instances for each + label). + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassSpecificity + >>> target = [0, 1, 2, 0, 1, 2] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.05, 0.9], + ... ] + >>> metric = MulticlassSpecificity(num_classes=3) + >>> metric(target, preds) + array([1. , 0.75, 1. ]) + >>> metric.reset_state() + >>> target = [[0, 1, 2, 0, 1, 2], [1, 1, 2, 0, 0, 1]] + >>> preds = [[0, 2, 1, 2, 0, 1], [1, 0, 1, 2, 2, 0]] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.625 , 0.57142857, 0.55555556]) + + """ + + name: str = "Specificity Score" + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__(num_classes=num_classes, top_k=top_k, classwise=True) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the specificity score from the state.""" + tp, fp, tn, fn = self._final_state() + return _specificity_reduce( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs] +class MultilabelSpecificity( + MultilabelStatScores, + registry_key="multilabel_specificity", +): + """Compute specificity for multilabel classification tasks. + + Parameters + ---------- + num_labels : int + The number of labels in the dataset. + threshold : float, default=0.5 + The threshold value for converting probability or logit scores to + binary. A sigmoid function is first applied to logits to convert them + to probabilities. + top_k : int, optional + Number of highest probability or logit score predictions considered + to find the correct label. Only works when ``preds`` contains + probabilities/logits. + average : Literal["micro", "macro", "weighted", None], default=None + If None, return the specificity for each class, otherwise return the + average specificity. Average options are: + + - ``micro``: Calculate metrics globally. + - ``macro``: Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + - ``weighted``: Calculate metrics for each label, and find their average, + weighted by support (the number of true instances for each label). + zero_division : Literal["warn", 0, 1], default="warn" + Sets the value to return when there is a zero division. If set to ``warn``, + this acts as 0, but warnings are also raised. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MultilabelSpecificity + >>> target = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]] + >>> preds = [ + ... [0.9, 0.05, 0.05], + ... [0.05, 0.9, 0.05], + ... [0.05, 0.2, 0.75], + ... [0.35, 0.5, 0.15], + ... [0.05, 0.9, 0.05], + ... ] + >>> metric = MultilabelSpecificity(num_labels=3) + >>> metric(target, preds) + array([0.5, 0. , 0.5]) + >>> metric.reset_state() + >>> target = [ + ... [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]], + ... [[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 0]], + ... ] + >>> preds = [ + ... [[1, 0, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 0, 0]], + ... [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1], [1, 0, 0]], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(t, p) + >>> metric.compute() + array([0.5 , 0.66666667, 0.6 ]) + + """ + + name: str = "Specificity Score" + + def __init__( + self, + num_labels: int, + threshold: float = 0.5, + top_k: Optional[int] = None, + average: Literal["micro", "macro", "weighted", None] = None, + zero_division: Literal["warn", 0, 1] = "warn", + ) -> None: + """Initialize the metric.""" + super().__init__( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + labelwise=True, + ) + _check_average_arg(average) + + self.average = average + self.zero_division = zero_division + + def compute(self) -> Union[float, npt.NDArray[np.float_]]: # type: ignore[override] + """Compute the specificity score from the state.""" + tp, fp, tn, fn = self._final_state() + return _specificity_reduce( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + average=self.average, + zero_division=self.zero_division, + )
+ + + +
+[docs]
+class Specificity(Metric, registry_key="specificity", force_register=True):
+    """Compute specificity score for different classification tasks.
+
+    The specificity is the ratio of true negatives to the sum of true negatives and
+    false positives. It is also the recall of the negative class.
+
+    Parameters
+    ----------
+    task : Literal["binary", "multiclass", "multilabel"]
+        Type of classification task.
+    pos_label : int, default=1
+        Label to consider as positive for binary classification tasks.
+    num_classes : int
+        Number of classes for the task. Required if ``task`` is ``"multiclass"``.
+    threshold : float, default=0.5
+        Threshold for deciding the positive class. Only used if ``task`` is
+        ``"binary"`` or ``"multilabel"``.
+    top_k : int, optional
+        If given, and predictions are probabilities/logits, the specificity will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1. Only used if ``task`` is ``"multiclass"`` or ``"multilabel"``.
+    num_labels : int
+        Number of labels for the task. Required if ``task`` is ``"multilabel"``.
+    average : Literal["micro", "macro", "weighted", None], default=None
+        If ``None``, return the score for each label/class. Otherwise,
+        use one of the following options to compute the average score:
+
+        - ``micro``: Calculate metrics globally.
+        - ``macro``: Calculate metrics for each class/label, and find their
+          unweighted mean. This does not take label/class imbalance into
+          account.
+        - ``weighted``: Calculate metrics for each label/class, and find
+          their average weighted by support (the number of true instances
+          for each label/class). This alters ``macro`` to account for
+          label/class imbalance.
+    zero_division : Literal["warn", 0, 1], default="warn"
+        Value to return when there is a zero division. If set to "warn", this
+        acts as 0, but warnings are also raised.
+
+    Examples
+    --------
+    >>> # (binary)
+    >>> from cyclops.evaluate.metrics import Specificity
+    >>> target = [0, 1, 1, 0, 1]
+    >>> preds = [0.9, 0.05, 0.05, 0.35, 0.05]
+    >>> metric = Specificity(task="binary")
+    >>> metric(target, preds)
+    0.5
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 1], [1, 0, 1]]
+    >>> preds = [[0.9, 0.05, 0.05], [0.05, 0.9, 0.05]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    0.0
+
+    >>> # (multiclass)
+    >>> from cyclops.evaluate.metrics import Specificity
+    >>> target = [0, 1, 2, 0, 1, 2]
+    >>> preds = [
+    ...     [0.9, 0.05, 0.05],
+    ...     [0.05, 0.9, 0.05],
+    ...     [0.05, 0.2, 0.75],
+    ...     [0.35, 0.5, 0.15],
+    ...     [0.05, 0.9, 0.05],
+    ...     [0.05, 0.05, 0.9],
+    ... ]
+    >>> metric = Specificity(task="multiclass", num_classes=3)
+    >>> metric(target, preds)
+    array([1.  , 0.75, 1.  ])
+    >>> metric.reset_state()
+    >>> target = [[0, 1, 1], [1, 2, 1]]
+    >>> preds = [
+    ...     [[0.9, 0.05, 0.05], [0.05, 0.9, 0.05], [0.05, 0.2, 0.75]],
+    ...     [[0.35, 0.5, 0.15], [0.25, 0.5, 0.25], [0.5, 0.05, 0.45]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.8, 0.5, 0.8])
+
+    >>> # (multilabel)
+    >>> from cyclops.evaluate.metrics import Specificity
+    >>> target = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
+    >>> preds = [[0.9, 0.05, 0.05], [0.05, 0.2, 0.75], [0.35, 0.5, 0.15]]
+    >>> metric = Specificity(task="multilabel", num_labels=3)
+    >>> metric(target, preds)
+    array([0., 1., 1.])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 0], [1, 0, 1]], [[0, 1, 1], [1, 0, 0]]]
+    >>> preds = [
+    ...     [[0.1, 0.7, 0.2], [0.2, 0.8, 0.3]],
+    ...
[[0.5, 0.9, 0.0], [0.3, 0.4, 0.2]],
+    ... ]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(t, p)
+    >>> metric.compute()
+    array([0.5, 0.5, 1. ])
+
+    """
+
+    name: str = "Specificity Score"
+
+    def __new__(  # type: ignore # mypy expects a subclass of Specificity
+        cls: Type[Metric],
+        task: Literal["binary", "multiclass", "multilabel"],
+        pos_label: int = 1,
+        num_classes: Optional[int] = None,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Literal["micro", "macro", "weighted", None] = None,
+        zero_division: Literal["warn", 0, 1] = "warn",
+    ) -> Metric:
+        """Create task-specific instance of the metric."""
+        if task == "binary":
+            return BinarySpecificity(
+                threshold=threshold,
+                pos_label=pos_label,
+                zero_division=zero_division,
+            )
+        if task == "multiclass":
+            assert (
+                isinstance(num_classes, int) and num_classes > 0
+            ), "Number of classes must be specified for multiclass classification."
+            return MulticlassSpecificity(
+                num_classes=num_classes,
+                top_k=top_k,
+                average=average,
+                zero_division=zero_division,
+            )
+        if task == "multilabel":
+            assert (
+                isinstance(num_labels, int) and num_labels > 0
+            ), "Number of labels must be specified for multilabel classification."
+            return MultilabelSpecificity(
+                num_labels=num_labels,
+                threshold=threshold,
+                top_k=top_k,
+                average=average,
+                zero_division=zero_division,
+            )
+        raise ValueError(
+            f"Task {task} is not supported, expected one of 'binary', 'multiclass'"
+            " or 'multilabel'",
+        )
+ +
\ No newline at end of file
diff --git a/api/_modules/cyclops/evaluate/metrics/stat_scores.html b/api/_modules/cyclops/evaluate/metrics/stat_scores.html
new file mode 100644
index 000000000..aa5dddb5d
--- /dev/null
+++ b/api/_modules/cyclops/evaluate/metrics/stat_scores.html
@@ -0,0 +1,905 @@
+cyclops.evaluate.metrics.stat_scores - cyclops documentation

Source code for cyclops.evaluate.metrics.stat_scores

+"""Classes for computing stat scores."""
+
+from typing import Literal, Optional, Tuple, Type, Union
+
+import numpy as np
+import numpy.typing as npt
+
+from cyclops.evaluate.metrics.functional.stat_scores import (
+    _binary_stat_scores_args_check,
+    _binary_stat_scores_format,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_update,
+    _stat_scores_compute,
+)
+from cyclops.evaluate.metrics.metric import Metric
+
+
+class _AbstractScores(Metric):
+    """Abstract base class for classes that compute stat scores."""
+
+    name: str = "Statistical Scores"
+
+    def _create_state(self, size: int = 1) -> None:
+        """Create the state variables.
+
+        For the stat scores, the state variables are the true positives (tp),
+        false positives (fp), true negatives (tn), and false negatives (fn).
+
+        Parameters
+        ----------
+        size : int
+            The size of the default numpy.ndarray to create for the state
+            variables.
+
+        Returns
+        -------
+        None
+
+        Raises
+        ------
+        AssertionError
+            If ``size`` is not greater than 0.
+
+        """
+        assert size > 0, "``size`` must be greater than 0."
+
+        def default() -> npt.NDArray[np.int_]:
+            return np.zeros(shape=size, dtype=np.int_)
+
+        self.add_state("tp", default())
+        self.add_state("fp", default())
+        self.add_state("tn", default())
+        self.add_state("fn", default())
+
+    def _update_state(
+        self,
+        tp: Union[npt.NDArray[np.int_], np.int_],
+        fp: Union[npt.NDArray[np.int_], np.int_],
+        tn: Union[npt.NDArray[np.int_], np.int_],
+        fn: Union[npt.NDArray[np.int_], np.int_],
+    ) -> None:
+        """Update the state variables.
+
+        Parameters
+        ----------
+        tp : numpy.ndarray
+            The true positives.
+        fp : numpy.ndarray
+            The false positives.
+        tn : numpy.ndarray
+            The true negatives.
+        fn : numpy.ndarray
+            The false negatives.
+
+        Returns
+        -------
+        None
+
+        """
+        self.tp += tp  # type: ignore[attr-defined]
+        self.fp += fp  # type: ignore[attr-defined]
+        self.tn += tn  # type: ignore[attr-defined]
+        self.fn += fn  # type: ignore[attr-defined]
+
+    def _final_state(
+        self,
+    ) -> Tuple[
+        Union[npt.NDArray[np.int_], np.int_],
+        Union[npt.NDArray[np.int_], np.int_],
+        Union[npt.NDArray[np.int_], np.int_],
+        Union[npt.NDArray[np.int_], np.int_],
+    ]:
+        """Return the final state variables.
+
+        Returns
+        -------
+        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]
+            The final state variables. The order is true positives (tp),
+            false positives (fp), true negatives (tn), and false negatives (fn).
+
+        """
+        tp = self.tp  # type: ignore[attr-defined]
+        fp = self.fp  # type: ignore[attr-defined]
+        tn = self.tn  # type: ignore[attr-defined]
+        fn = self.fn  # type: ignore[attr-defined]
+        return tp, fp, tn, fn
+
+
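Concretely, the counts fed to ``_update_state`` come from per-batch comparisons of targets and predictions. A hypothetical helper mirroring what the binary update computes for already-binarized inputs (the names and details are illustrative, not the library's code):

import numpy as np

def binary_counts(target, preds):
    target = np.asarray(target, dtype=bool)
    preds = np.asarray(preds, dtype=bool)
    tp = int(np.sum(target & preds))    # predicted 1, actually 1
    fp = int(np.sum(~target & preds))   # predicted 1, actually 0
    tn = int(np.sum(~target & ~preds))  # predicted 0, actually 0
    fn = int(np.sum(target & ~preds))   # predicted 0, actually 1
    return tp, fp, tn, fn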
+
+[docs] +class BinaryStatScores(_AbstractScores, registry_key="binary_stat_scores"): + """Compute binary stat scores. + + Parameters + ---------- + pos_label : int, default=1 + The label to use for the positive class. + threshold : float, default=0.5 + The threshold to use for converting the predictions to binary + values. Logits will be converted to probabilities using the sigmoid + function. + + Examples + -------- + >>> from cyclops.evaluate.metrics import BinaryStatScores + >>> target = [0, 1, 1, 0] + >>> preds = [0, 1, 0, 0] + >>> metric = BinaryStatScores(threshold=0.5, pos_label=1) + >>> metric(target=target, preds=preds) + array([1, 0, 2, 1, 2]) + >>> metric.reset_state() + >>> target = [[1, 1, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0]] + >>> preds = [[0.9, 0.8, 0.3, 0.4, 0.5, 0.2], [0.2, 0.3, 0.6, 0.9, 0.4, 0.8]] + >>> for t, p in zip(target, preds): + ... metric.update_state(target=t, preds=p) + >>> metric.compute() + array([4, 2, 5, 1, 5]) + + """ + + def __init__(self, pos_label: int = 1, threshold: float = 0.5) -> None: + """Initialize the class.""" + super(_AbstractScores, self).__init__() + + _binary_stat_scores_args_check(threshold=threshold, pos_label=pos_label) + + self.threshold = threshold + self.pos_label = pos_label + + self._create_state(size=1) + + def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None: + """Update the state variables.""" + target, preds = _binary_stat_scores_format( + target, + preds, + threshold=self.threshold, + pos_label=self.pos_label, + ) + + tp, fp, tn, fn = _binary_stat_scores_update( + target, + preds, + pos_label=self.pos_label, + ) + self._update_state(tp, fp, tn, fn) + + def compute(self) -> npt.NDArray[np.int_]: + """Compute the binary stat scores from the state variables. + + Returns + ------- + numpy.ndarray + The binary stat scores. The order is true positives (tp), + false positives (fp), true negatives (tn), false negatives + (fn) and support (tp + fn). + + """ + tp, fp, tn, fn = self._final_state() + return _stat_scores_compute(tp=tp, fp=fp, tn=tn, fn=fn, classwise=True)
+ + + +
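The five returned counts are enough to reconstruct most classification metrics; a short usage sketch built on the docstring example above (tp=1, fp=0, tn=2, fn=1, support=2):

from cyclops.evaluate.metrics import BinaryStatScores

metric = BinaryStatScores(threshold=0.5, pos_label=1)
tp, fp, tn, fn, support = metric(target=[0, 1, 1, 0], preds=[0, 1, 0, 0])

recall = tp / (tp + fn)       # 1 / 2 = 0.5
specificity = tn / (tn + fp)  # 2 / 2 = 1.0
precision = tp / (tp + fp)    # 1 / 1 = 1.0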
+[docs] +class MulticlassStatScores(_AbstractScores, registry_key="multiclass_stat_scores"): + """Compute multiclass stat scores. + + Parameters + ---------- + num_classes : int + The total number of classes for the problem. + top_k : Optional[int], default=None + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. + classwise : bool, default=True + Whether to return the stat scores for each class or sum over all + classes. + + Examples + -------- + >>> from cyclops.evaluate.metrics import MulticlassStatScores + >>> target = [2, 1, 0, 0] + >>> preds = [2, 1, 0, 1] + >>> metric = MulticlassStatScores(num_classes=3) + >>> metric(target=target, preds=preds) + array([[1, 0, 2, 1, 2], + [1, 1, 2, 0, 1], + [1, 0, 3, 0, 1]]) + >>> preds = [ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(target=t, preds=p) + >>> metric.compute() + array([[ 8, 1, 5, 2, 10], + [ 4, 2, 9, 1, 5], + [ 1, 0, 15, 0, 1]]) + + """ + + def __init__( + self, + num_classes: int, + top_k: Optional[int] = None, + classwise: bool = True, + ) -> None: + """Initialize the class.""" + super(_AbstractScores, self).__init__() + + assert num_classes > 1, "``num_classes`` must be greater than 1" + + self.num_classes = num_classes + self.top_k = top_k + self.classwise = classwise + + self._create_state(size=num_classes) + + def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None: + """Update the state variables.""" + target, preds = _multiclass_stat_scores_format( + target, + preds, + num_classes=self.num_classes, + top_k=self.top_k, + ) + tp, fp, tn, fn = _multiclass_stat_scores_update( + target, + preds, + num_classes=self.num_classes, + ) + self._update_state(tp, fp, tn, fn) + + def compute(self) -> npt.NDArray[np.int_]: + """Compute the multiclass stat scores from the state variables. + + Returns + ------- + numpy.ndarray + The multiclass stat scores. The order is true positives (tp), + false positives (fp), true negatives (tn), false negatives + (fn) and support (tp + fn). If ``classwise`` is ``True``, the + shape is ``(num_classes, 5)``. Otherwise, the shape is ``(5,)``. + + """ + tp, fp, tn, fn = self._final_state() + return _stat_scores_compute( + tp=tp, + fp=fp, + tn=tn, + fn=fn, + classwise=self.classwise, + )
+ + + +
+[docs]
+class MultilabelStatScores(_AbstractScores, registry_key="multilabel_stat_scores"):
+    """Compute stat scores for multilabel problems.
+
+    Parameters
+    ----------
+    num_labels : int
+        The number of labels in the dataset.
+    threshold : float, default=0.5
+        Threshold value for binarizing predictions that are probabilities or
+        logits. A sigmoid function is applied if the predictions are logits.
+    top_k : int, default=None
+        If given, and predictions are probabilities/logits, the score will
+        be computed only for the top k classes. Otherwise, ``top_k`` will be
+        set to 1.
+    labelwise : bool, default=True
+        Whether to return the stat scores for each label or sum over all labels.
+
+    Examples
+    --------
+    >>> from cyclops.evaluate.metrics import MultilabelStatScores
+    >>> target = [[0, 1, 1], [1, 0, 1]]
+    >>> preds = [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]]
+    >>> metric = MultilabelStatScores(num_labels=3, labelwise=True)
+    >>> metric(target=target, preds=preds)
+    array([[1, 0, 1, 0, 1],
+           [1, 0, 1, 0, 1],
+           [2, 0, 0, 0, 2]])
+    >>> metric.reset_state()
+    >>> target = [[[0, 1, 1], [1, 0, 1]], [[0, 0, 1], [1, 1, 1]]]
+    >>> preds = [[[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]], [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]]]
+    >>> for t, p in zip(target, preds):
+    ...     metric.update_state(target=t, preds=p)
+    >>> metric.compute()
+    array([[2, 0, 2, 0, 2],
+           [1, 1, 1, 1, 2],
+           [4, 0, 0, 0, 4]])
+
+    """
+
+    def __init__(
+        self,
+        num_labels: int,
+        threshold: float = 0.5,
+        top_k: Optional[int] = None,
+        labelwise: bool = True,
+    ) -> None:
+        """Initialize the class."""
+        super().__init__()
+
+        _binary_stat_scores_args_check(threshold=threshold, pos_label=1)
+
+        self.num_labels = num_labels
+        self.threshold = threshold
+        self.top_k = top_k
+        self.labelwise = labelwise
+
+        self._create_state(size=num_labels)
+
+    def update_state(self, target: npt.ArrayLike, preds: npt.ArrayLike) -> None:
+        """Update the state variables."""
+        target, preds = _multilabel_stat_scores_format(
+            target,
+            preds,
+            num_labels=self.num_labels,
+            threshold=self.threshold,
+            top_k=self.top_k,
+        )
+        tp, fp, tn, fn = _multilabel_stat_scores_update(
+            target,
+            preds,
+            num_labels=self.num_labels,
+        )
+        self._update_state(tp, fp, tn, fn)
+
+    def compute(self) -> npt.NDArray[np.int_]:
+        """Compute the multilabel stat scores from the state variables.
+
+        Returns
+        -------
+        numpy.ndarray
+            The multilabel stat scores. The order is true positives (tp),
+            false positives (fp), true negatives (tn), false negatives
+            (fn) and support (tp + fn). If ``labelwise`` is ``True``, the
+            shape is ``(num_labels, 5)``. Otherwise, the shape is ``(5,)``.
+
+        """
+        tp, fp, tn, fn = self._final_state()
+        return _stat_scores_compute(
+            tp=tp,
+            fp=fp,
+            tn=tn,
+            fn=fn,
+            classwise=self.labelwise,
+        )
+ + + +
+[docs] +class StatScores(Metric, registry_key="stat_scores", force_register=True): + """Compute stat scores for binary, multiclass and multilabel problems. + + Parameters + ---------- + task : Literal["binary", "multiclass", "multilabel"] + The task type. Can be either ``binary``, ``multiclass`` or ``multilabel``. + pos_label : int, default=1 + The positive label to report. Only used for binary tasks. + threshold : float, default=0.5 + The threshold to use for binarizing the predictions if logits or + probabilities are provided. If logits are provided, a sigmoid function + is applied prior to binarization. Used for binary and multilabel tasks. + num_classes : int + The number of classes for the problem. Required for multiclass tasks. + classwise : bool, default=True + Whether to return the stat scores for each class or sum over all + classes. Only used for multiclass tasks. + top_k : int, default=None + If given, and predictions are probabilities/logits, the score will + be computed only for the top k classes. Otherwise, ``top_k`` will be + set to 1. Used for multiclass and multilabel tasks. + num_labels : int + The number of labels. Only used for multilabel tasks. + labelwise : bool, default=False + Whether to compute the stat scores labelwise. Only used for multilabel + tasks. + + + Examples + -------- + >>> # (binary) + >>> from cyclops.evaluate.metrics import StatScores + >>> target = [0, 1, 1, 0] + >>> preds = [0, 1, 0, 0] + >>> metric = StatScores(task="binary", threshold=0.5, pos_label=1) + >>> metric.update_state(target=target, preds=preds) + >>> metric.compute() + array([1, 0, 2, 1, 2]) + >>> metric.reset_state() + >>> target = [[1, 1, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0]] + >>> preds = [[0.9, 0.8, 0.3, 0.4, 0.5, 0.2], [0.2, 0.3, 0.6, 0.9, 0.4, 0.8]] + >>> for t, p in zip(target, preds): + ... metric.update_state(target=t, preds=p) + >>> metric.compute() + array([4, 2, 5, 1, 5]) + + >>> # (multiclass) + >>> from cyclops.evaluate.metrics import StatScores + >>> target = [0, 1, 2, 2, 2] + >>> preds = [0, 2, 1, 2, 0] + >>> metric = StatScores(task="multiclass", num_classes=3, classwise=True) + >>> metric.update_state(target=target, preds=preds) + >>> metric.compute() + array([[1, 1, 3, 0, 1], + [0, 1, 3, 1, 1], + [1, 1, 1, 2, 3]]) + >>> metric.reset_state() + >>> target = [[2, 0, 2, 2, 1], [1, 1, 0, 2, 2]] + >>> preds = [ + ... [ + ... [0.1, 0.2, 0.7], + ... [0.7, 0.1, 0.2], + ... [0.2, 0.7, 0.1], + ... [0.2, 0.7, 0.1], + ... [0.7, 0.2, 0.1], + ... ], + ... [ + ... [0.05, 0.15, 0.8], + ... [0.15, 0.05, 0.8], + ... [0.8, 0.15, 0.05], + ... [0.25, 0.7, 0.05], + ... [0.15, 0.7, 0.15], + ... ], + ... ] + >>> for t, p in zip(target, preds): + ... metric.update_state(target=t, preds=p) + >>> metric.compute() + array([[2, 1, 7, 0, 2], + [0, 4, 3, 3, 3], + [1, 2, 3, 4, 5]]) + + >>> # (multilabel) + >>> from cyclops.evaluate.metrics import StatScores + >>> target = [[0, 1, 1], [1, 0, 1]] + >>> preds = [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]] + >>> metric = StatScores(task="multilabel", num_labels=3, labelwise=True) + >>> metric(target=target, preds=preds) + array([[1, 0, 1, 0, 1], + [1, 0, 1, 0, 1], + [2, 0, 0, 0, 2]]) + >>> metric.reset_state() + >>> target = [[[0, 1, 1], [1, 0, 1]], [[0, 0, 1], [1, 1, 1]]] + >>> preds = [[[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]], [[0.1, 0.9, 0.8], [0.8, 0.2, 0.7]]] + >>> for t, p in zip(target, preds): + ... 
metric.update_state(target=t, preds=p) + >>> metric.compute() + array([[2, 0, 2, 0, 2], + [1, 1, 1, 1, 2], + [4, 0, 0, 0, 4]]) + + """ + + def __new__( # type: ignore # mypy expects a subclass of StatScores + cls: Type[Metric], + task: Literal["binary", "multiclass", "multilabel"], + pos_label: int = 1, + threshold: float = 0.5, + num_classes: Optional[int] = None, + classwise: bool = True, + top_k: Optional[int] = None, + num_labels: Optional[int] = None, + labelwise: bool = False, + ) -> Metric: + """Create a task-specific instance of the StatScores metric.""" + if task == "binary": + return BinaryStatScores(threshold=threshold, pos_label=pos_label) + if task == "multiclass": + assert ( + isinstance(num_classes, int) and num_classes > 0 + ), "Number of classes must be a positive integer." + return MulticlassStatScores( + num_classes=num_classes, + top_k=top_k, + classwise=classwise, + ) + if task == "multilabel": + assert ( + isinstance(num_labels, int) and num_labels > 0 + ), "Number of labels must be a positive integer." + return MultilabelStatScores( + num_labels=num_labels, + threshold=threshold, + top_k=top_k, + labelwise=labelwise, + ) + raise ValueError( + f"Unsupported task: {task}, expected one of 'binary', 'multiclass' or " + f"'multilabel'.", + )
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/monitor/clinical_applicator.html b/api/_modules/cyclops/monitor/clinical_applicator.html
new file mode 100644
index 000000000..c8039518b
--- /dev/null
+++ b/api/_modules/cyclops/monitor/clinical_applicator.html
@@ -0,0 +1,961 @@
+cyclops.monitor.clinical_applicator - cyclops documentation
Source code for cyclops.monitor.clinical_applicator

+"""Clinical Shift Applicator module."""
+
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+from datasets.arrow_dataset import Dataset
+
+from cyclops.data.slicer import SliceSpec
+from cyclops.data.utils import set_decode
+
+
+
+[docs]
+class ClinicalShiftApplicator:
+    """The ClinicalShiftApplicator class is used to induce synthetic clinical shifts.
+
+    Takes a dataset and generates source and target datasets with a specified
+    clinical shift. The shift is induced by splitting the original dataset
+    along a categorical feature.
+
+    # Examples
+    # --------
+    # >>> from cyclops.monitor.clinical_applicator import ClinicalShiftApplicator
+    # >>> from cyclops.data.loader import load_nihcxr
+    # >>> ds = load_nihcxr(path="/mnt/data/nihcxr")
+    # >>> applicator = ClinicalShiftApplicator(
+    # ...     "hospital_type",
+    # ...     source=["hospital_type_1", "hospital_type_2"],
+    # ...     target=["hospital_type_3", "hospital_type_4", "hospital_type_5"],
+    # ... )
+    # >>> ds_source, ds_target = applicator.apply_shift(ds)
+
+    Parameters
+    ----------
+    shift_type: str
+        Method used to induce shift in data. Options include:
+        "age", "sex", "hospital_type", "time", "month", "custom"
+    source: list or SliceSpec
+        List of values (or a SliceSpec) defining the source data.
+    target: list or SliceSpec
+        List of values (or a SliceSpec) defining the target data.
+    shift_id: str
+        Column name for shift id. Default is None.
+
+    """
+
+    def __init__(
+        self,
+        shift_type: str,
+        source: Union[str, SliceSpec],
+        target: Union[str, SliceSpec],
+        shift_id: Optional[str] = None,
+    ) -> None:
+        self.shift_type = shift_type
+        self.shift_id = shift_id
+        self.source = source
+        self.target = target
+
+        self.shift_types: Dict[str, Callable[..., Dataset]] = {
+            "age": self.age,
+            "sex": self.sex,
+            "hospital_type": self.hospital_type,
+            "time": self.time,
+            "month": self.month,
+            "custom": self.custom,
+        }
+
+        if self.shift_type not in self.shift_types:
+            raise ValueError(f"Shift type {self.shift_type} not supported.")
+
+[docs] + def apply_shift( + self, + dataset: Dataset, + batched: bool = True, + batch_size: int = 1000, + num_proc: int = 1, + ) -> Tuple[Dataset, Dataset]: + """Apply shift to dataset using specified shift type. + + Returns + ------- + ds_source: huggingface Dataset + Dataset with source data. + ds_target: huggingface Dataset + Dataset with target data. + + """ + ds_source, ds_target = self.shift_types[self.shift_type]( + dataset, + self.source, + self.target, + self.shift_id, + batched, + batch_size, + num_proc, + ) + return ds_source, ds_target
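A rough usage sketch (the toy dataset and its "sex" column are illustrative assumptions, not fixtures shipped with CyclOps):

from datasets.arrow_dataset import Dataset

from cyclops.monitor.clinical_applicator import ClinicalShiftApplicator

# Toy Hugging Face dataset with a hypothetical "sex" column to split on.
ds = Dataset.from_dict(
    {"sex": ["M", "F", "F", "M", "F"], "feature": [0.1, 0.4, 0.3, 0.9, 0.5]},
)

# Source data: rows with sex == "M"; target data: rows with sex == "F".
applicator = ClinicalShiftApplicator("sex", source=["M"], target=["F"], shift_id="sex")
ds_source, ds_target = applicator.apply_shift(ds, num_proc=1)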
+ + + def _get_source_target( + self, + dataset: Dataset, + source_slice: SliceSpec, + target_slice: SliceSpec, + batched: bool = True, + batch_size: int = 1000, + num_proc: int = 1, + ) -> Tuple[Dataset, Dataset]: + """Get source and target datasets. + + Parameters + ---------- + dataset: huggingface Dataset + Dataset to apply shift to. + source: SliceSpec + SliceSpec for source data. + target: SliceSpec + SliceSpec for target data. + + Returns + ------- + ds_source: huggingface Dataset + Dataset with source data. + ds_target: huggingface Dataset + Dataset with target data. + + """ + set_decode(dataset, False) + if source_slice: + for _, shift_func in source_slice.get_slices().items(): + ds_source = dataset.filter( + shift_func, + batched=batched, + batch_size=batch_size, + num_proc=num_proc, + ) + else: + ds_source = dataset + if target_slice: + for _, shift_func in target_slice.get_slices().items(): + ds_target = dataset.filter( + shift_func, + batched=batched, + batch_size=batch_size, + num_proc=num_proc, + ) + else: + ds_target = dataset + set_decode(dataset, True) + set_decode(ds_source, True) + set_decode(ds_target, True) + return ds_source, ds_target + +
+[docs]
+    def age(
+        self,
+        dataset: Dataset,
+        source: List[str],
+        target: List[str],
+        shift_id: str,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Apply age shift to dataset.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: list
+            Inclusive [min, max] age range for source data.
+        target: list
+            Inclusive [min, max] age range for target data.
+        shift_id: str
+            Column name for shift id.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if source:
+            source_slice = SliceSpec(
+                spec_list=[
+                    {
+                        shift_id: {
+                            "min_value": source[0],
+                            "max_value": source[1],
+                            "min_inclusive": True,
+                            "max_inclusive": True,
+                        },
+                    },
+                ],
+                include_overall=False,
+            )
+        else:
+            source_slice = None
+        if target:
+            target_slice = SliceSpec(
+                spec_list=[
+                    {
+                        shift_id: {
+                            "min_value": target[0],
+                            "max_value": target[1],
+                            "min_inclusive": True,
+                            "max_inclusive": True,
+                        },
+                    },
+                ],
+                include_overall=False,
+            )
+        else:
+            target_slice = None
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source_slice,
+            target_slice,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
+ + +
+[docs]
+    def sex(
+        self,
+        dataset: Dataset,
+        source: List[str],
+        target: List[str],
+        shift_id: str,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Apply shift for sex to dataset.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: list
+            List of sex values for source data.
+        target: list
+            List of sex values for target data.
+        shift_id: str
+            Column name for shift id.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if source:
+            source_slice = SliceSpec(
+                spec_list=[{shift_id: {"value": source}}],
+                include_overall=False,
+            )
+        else:
+            source_slice = None
+        if target:
+            target_slice = SliceSpec(
+                spec_list=[{shift_id: {"value": target}}],
+                include_overall=False,
+            )
+        else:
+            target_slice = None
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source_slice,
+            target_slice,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
+ + +
+[docs]
+    def time(
+        self,
+        dataset: Dataset,
+        source: List[str],
+        target: List[str],
+        shift_id: str,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Apply time shift to dataset.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: list
+            Inclusive [start, end] values defining the source time window.
+        target: list
+            Inclusive [start, end] values defining the target time window.
+        shift_id: str
+            Column name for shift id.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if source:
+            source_slice = SliceSpec(
+                spec_list=[
+                    {
+                        shift_id: {
+                            "min_value": source[0],
+                            "max_value": source[1],
+                            "min_inclusive": True,
+                            "max_inclusive": True,
+                        },
+                    },
+                ],
+                include_overall=False,
+            )
+        else:
+            source_slice = None
+        if target:
+            target_slice = SliceSpec(
+                spec_list=[
+                    {
+                        shift_id: {
+                            "min_value": target[0],
+                            "max_value": target[1],
+                            "min_inclusive": True,
+                            "max_inclusive": True,
+                        },
+                    },
+                ],
+                include_overall=False,
+            )
+        else:
+            target_slice = None
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source_slice,
+            target_slice,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
+ + +
+[docs]
+    def month(
+        self,
+        dataset: Dataset,
+        source: List[str],
+        target: List[str],
+        shift_id: str,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Apply shift for selection of months.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: list
+            List of months for source data.
+        target: list
+            List of months for target data.
+        shift_id: str
+            Column name for shift id.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if source:
+            source_slice = SliceSpec(
+                spec_list=[{shift_id: {"month": source}}],
+                include_overall=False,
+            )
+        else:
+            source_slice = None
+        if target:
+            target_slice = SliceSpec(
+                spec_list=[{shift_id: {"month": target}}],
+                include_overall=False,
+            )
+        else:
+            target_slice = None
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source_slice,
+            target_slice,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
+ + +
+[docs]
+    def hospital_type(
+        self,
+        dataset: Dataset,
+        source: List[str],
+        target: List[str],
+        shift_id: str,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Apply shift for selection of hospital types.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: list
+            List of hospital types for source data.
+        target: list
+            List of hospital types for target data.
+        shift_id: str
+            Column name for shift id.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if source:
+            source_slice = SliceSpec(
+                spec_list=[{shift_id: {"value": source}}],
+                include_overall=False,
+            )
+        else:
+            source_slice = None
+        if target:
+            target_slice = SliceSpec(
+                spec_list=[{shift_id: {"value": target}}],
+                include_overall=False,
+            )
+        else:
+            target_slice = None
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source_slice,
+            target_slice,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
+ + +
+[docs]
+    def custom(
+        self,
+        dataset: Dataset,
+        source: SliceSpec,
+        target: SliceSpec,
+        shift_id: Optional[str] = None,
+        batched: bool = True,
+        batch_size: int = 1000,
+        num_proc: int = 1,
+    ) -> Tuple[Dataset, Dataset]:
+        """Build custom shift.
+
+        Build a custom shift by passing in a SliceSpec for source and target data.
+
+        Parameters
+        ----------
+        dataset: huggingface Dataset
+            Dataset to apply shift to.
+        source: SliceSpec
+            SliceSpec for source data.
+        target: SliceSpec
+            SliceSpec for target data.
+        shift_id: str, optional
+            Not used for custom shifts; must be None.
+        batched: bool
+            Whether to use batching or not. Default is True.
+        batch_size: int
+            Batch size. Default is 1000.
+        num_proc: int
+            Number of processes to use. Default is 1.
+
+        Returns
+        -------
+        ds_source: huggingface Dataset
+            Dataset with source data.
+        ds_target: huggingface Dataset
+            Dataset with target data.
+
+        """
+        if shift_id:
+            raise ValueError(
+                "Shift id not required for custom shift. "
+                "Please remove shift_id from method call.",
+            )
+        ds_source, ds_target = self._get_source_target(
+            dataset,
+            source,
+            target,
+            batched,
+            batch_size,
+            num_proc,
+        )
+        return ds_source, ds_target
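Custom shifts are driven entirely by SliceSpec objects. A minimal sketch (the "hospital_id" column and its values are hypothetical):

from cyclops.data.slicer import SliceSpec
from cyclops.monitor.clinical_applicator import ClinicalShiftApplicator

# Any column that SliceSpec can filter on works here.
source_spec = SliceSpec(
    spec_list=[{"hospital_id": {"value": ["site_a"]}}],
    include_overall=False,
)
target_spec = SliceSpec(
    spec_list=[{"hospital_id": {"value": ["site_b", "site_c"]}}],
    include_overall=False,
)

applicator = ClinicalShiftApplicator("custom", source=source_spec, target=target_spec)
ds_source, ds_target = applicator.apply_shift(ds)  # ds: a Hugging Face Dataset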
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/monitor/synthetic_applicator.html b/api/_modules/cyclops/monitor/synthetic_applicator.html
new file mode 100644
index 000000000..b68a6f8b2
--- /dev/null
+++ b/api/_modules/cyclops/monitor/synthetic_applicator.html
@@ -0,0 +1,715 @@
+cyclops.monitor.synthetic_applicator - cyclops documentation
Source code for cyclops.monitor.synthetic_applicator

+"""SyntheticShiftApplicator class."""
+
+import math
+from typing import Any, Callable, Dict
+
+import numpy as np
+import pandas as pd
+from datasets.arrow_dataset import Dataset
+from sklearn.feature_selection import SelectKBest
+
+from cyclops.monitor.utils import get_args
+
+
+
+[docs]
+class SyntheticShiftApplicator:
+    """The SyntheticShiftApplicator class is used to induce synthetic dataset shift.
+
+    # Examples
+    # --------
+    # >>> from sklearn.datasets import load_diabetes
+    # >>> X, y = load_diabetes(return_X_y=True)
+    # >>> dataset = Dataset.from_dict({"X": X, "y": y})
+    # >>> dataset = dataset.train_test_split(test_size=0.5, seed=42)
+    # >>> applicator = SyntheticShiftApplicator(shift_type="gn_shift")
+    # >>> X_shift = applicator.apply_shift(dataset["test"])
+
+    Parameters
+    ----------
+    shift_type: str
+        Method used to induce shift in data. Options include:
+        "gn_shift", "bn_shift", "ko_shift", "fs_shift", "fa_shift"
+
+    """
+
+    def __init__(self, shift_type: str, **kwargs: Dict[str, Any]) -> None:
+        self.shift_type = shift_type
+
+        self.shift_types: Dict[str, Callable[..., Dataset]] = {
+            "gn_shift": gaussian_noise_shift,
+            "bn_shift": binary_noise_shift,
+            "ko_shift": knockout_shift,
+            "fs_shift": feature_swap_shift,
+            "fa_shift": feature_association_shift,
+        }
+        # Validate the shift type before looking it up, so an unsupported type
+        # raises a helpful ValueError rather than a KeyError.
+        if self.shift_type not in self.shift_types:
+            raise ValueError(f"shift_type must be one of {self.shift_types.keys()}")
+
+        self.shift_args = get_args(self.shift_types[self.shift_type], kwargs)
+
+[docs] + def apply_shift(self, dataset: Dataset) -> Dataset: + """apply_shift. + + Returns + ------- + ds_shift: Dataset + Data to have noise added + + """ + return self.shift_types[self.shift_type](dataset, **self.shift_args)
+
+ + + +
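A loose usage sketch: the shift functions below operate on NumPy arrays, so an array is passed here despite the Dataset type hints, and get_args presumably filters the keyword arguments down to those the selected shift function accepts.

import numpy as np

from cyclops.monitor.synthetic_applicator import SyntheticShiftApplicator

# Keyword arguments are forwarded to the selected shift function
# (here gaussian_noise_shift), so only parameters it accepts should be passed.
applicator = SyntheticShiftApplicator("gn_shift", noise_amt=0.1, delta=0.25)

X = np.random.rand(100, 5)  # toy covariate matrix
X_shifted = applicator.apply_shift(X)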
+[docs]
+def gaussian_noise_shift(
+    X: np.ndarray[float, np.dtype[np.float64]],
+    noise_amt: float = 0.5,
+    normalization: float = 1,
+    delta: float = 0.5,
+    clip: bool = False,
+) -> np.ndarray[float, np.dtype[np.float64]]:
+    """Create gaussian noise of specified parameters in input data.
+
+    Parameters
+    ----------
+    X: numpy.matrix
+        covariate data
+    noise_amt: float
+        standard deviation of gaussian noise
+    normalization: float
+        normalization parameter to divide noise by (e.g. 255 for images)
+    delta: float
+        fraction of rows affected
+    clip: bool
+        whether to clip the noised values to the range [0, 1]
+
+    Returns
+    -------
+    X: numpy.matrix
+        covariate data with gaussian noise added to the continuous
+        columns of the affected rows
+
+    """
+    # TODO: if temporal, flatten before adding noise and unflatten at the end.
+    X_df = pd.DataFrame(X)
+
+    # Columns containing only 0/1 values are treated as binary and left untouched.
+    bin_cols = X_df.loc[:, (X_df.isin([0, 1])).all()].columns.values
+    c_cols = [x for x in X_df.columns if x not in bin_cols]
+    indices = np.random.choice(X.shape[0], math.ceil(X.shape[0] * delta), replace=False)
+    X_mod = X[np.ix_(indices, c_cols)]
+
+    if len(c_cols) == 1:
+        noise = np.random.normal(0, noise_amt / normalization, X_mod.shape[0]).reshape(
+            X_mod.shape[0],
+            1,
+        )
+    else:
+        noise = np.random.normal(
+            0,
+            noise_amt / normalization,
+            (X_mod.shape[0], len(c_cols)),
+        )
+    X_mod = np.clip(X_mod + noise, 0.0, 1.0) if clip else X_mod + noise
+
+    X[np.ix_(indices, c_cols)] = X_mod
+
+    return X
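A small self-contained sketch of the effect on mixed binary/continuous data:

import numpy as np

from cyclops.monitor.synthetic_applicator import gaussian_noise_shift

# One binary column (left untouched) and two continuous columns.
X = np.column_stack(
    [np.random.randint(0, 2, 10).astype(float), np.random.rand(10, 2)],
)
X_noisy = gaussian_noise_shift(X.copy(), noise_amt=0.3, delta=0.5, clip=True)
# Half of the rows now carry N(0, 0.3) noise on the continuous columns,
# clipped back into [0, 1]; the binary column is unchanged.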
+ + + +# Remove instances of a single class. +
+[docs]
+def knockout_shift(
+    X: np.ndarray[float, np.dtype[np.float64]],
+    y: np.ndarray[float, np.dtype[np.float64]],
+    delta: float = 0.5,
+    shift_class: int = 1,
+) -> np.ndarray[float, np.dtype[np.float64]]:
+    """Create class imbalance by removing a fraction of samples from a class.
+
+    Parameters
+    ----------
+    X: numpy.matrix
+        covariate data
+    y: numpy.array
+        label data
+    delta: float
+        fraction of samples removed
+    shift_class: int
+        class to remove samples from
+
+    Returns
+    -------
+    X: numpy.matrix
+        covariate data with class imbalance; the corresponding labels are
+        dropped internally but not returned
+
+    """
+    del_indices = np.where(y == shift_class)[0]
+    until_index = math.ceil(delta * len(del_indices))
+    # Round the number of removed samples up to an even count.
+    if until_index % 2 != 0:
+        until_index = until_index + 1
+    del_indices = del_indices[:until_index]
+    X = np.delete(X, del_indices, axis=0)
+    y = np.delete(y, del_indices, axis=0)
+    return X
+ + + +
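For example, with a balanced label vector the result is deterministic in shape:

import numpy as np

from cyclops.monitor.synthetic_applicator import knockout_shift

X = np.random.rand(100, 4)
y = np.array([0, 1] * 50)

# Remove 60% of the class-1 samples to induce class imbalance.
X_shifted = knockout_shift(X, y, delta=0.6, shift_class=1)
print(X_shifted.shape)  # (70, 4): all 50 class-0 rows plus the remaining 20 class-1 rows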
+[docs]
+def feature_swap_shift(
+    X: np.ndarray[float, np.dtype[np.float64]],
+    y: np.ndarray[float, np.dtype[np.float64]],
+    shift_class: int = 1,
+    n_shuffle: float = 0.25,
+    rank: bool = False,
+) -> np.ndarray[float, np.dtype[np.float64]]:
+    """Feature swap shift swaps features on a changepoint axis.
+
+    Parameters
+    ----------
+    X: numpy.matrix
+        covariate data
+    y: numpy.array
+        label data
+    shift_class: int
+        class whose samples have their features swapped (e.g. 0, 1, 2, ...)
+    n_shuffle: float
+        fraction of features to shuffle
+    rank: bool
+        whether to swap the highest-ranked (most important) features
+
+    Returns
+    -------
+    X: numpy.matrix
+        covariate data with swapped features
+
+    """
+    n_feats = X.shape[1]
+    n_shuffle_feats = int(n_shuffle * n_feats)
+
+    # Get importance values - should sub for model-specific
+    selector = SelectKBest(k=n_feats)
+    selection = selector.fit(X, y)
+    ranked_x = sorted(
+        zip(selection.scores_, selection.get_support(indices=True)),
+        reverse=True,
+    )
+    shuffle_list = list(range(0, n_feats))
+
+    # Get shuffled features
+    if rank:
+        prev = shuffle_list[ranked_x[0][1]]
+        for i in range(n_shuffle_feats):
+            shuffle_list[ranked_x[i][1]] = shuffle_list[
+                ranked_x[(i + 1) % n_shuffle_feats][1]
+            ]
+            shuffle_list[ranked_x[i][1]] = prev
+    else:
+        prev = shuffle_list[ranked_x[n_feats - n_shuffle_feats][1]]
+        for i in range(n_feats - n_shuffle_feats, n_feats):
+            shuffle_list[ranked_x[i][1]] = shuffle_list[ranked_x[(i + 1) % n_feats][1]]
+            shuffle_list[ranked_x[i][1]] = prev
+
+    # Shuffle features
+    for i in range(len(X)):
+        if y[i] == shift_class:
+            X[i, :] = X[i, shuffle_list]
+    return X
+ + + +
+[docs]
+def feature_association_shift(
+    X: np.ndarray[float, np.dtype[np.float64]],
+    n_shuffle: float = 0.25,
+    keep_rows_constant: bool = True,
+    repermute_each_column: bool = True,
+) -> np.ndarray[float, np.dtype[np.float64]]:
+    """Multiway feature association shift swaps individuals within features.
+
+    Parameters
+    ----------
+    X: numpy.matrix
+        covariate data
+    n_shuffle: float
+        fraction of individuals to shuffle
+    keep_rows_constant: bool
+        whether the permutation is the same across features
+    repermute_each_column: bool
+        whether to re-select the individuals to permute for each feature
+
+    Returns
+    -------
+    X: numpy.matrix
+        covariate data with feature association shift
+
+    """
+    n_inds = X.shape[0]
+    n_shuffle_inds = int(n_shuffle * n_inds)
+    shuffle_start = np.random.randint(n_inds - n_shuffle_inds)
+    shuffle_end = shuffle_start + n_shuffle_inds
+    shuffle_list = np.random.permutation(range(shuffle_start, shuffle_end))
+    for i in range(X.shape[1]):
+        rng = np.random.default_rng(i)
+        rng.random(1)
+        if repermute_each_column:
+            shuffle_start = np.random.randint(n_inds - n_shuffle_inds)
+            shuffle_end = shuffle_start + n_shuffle_inds
+        if not keep_rows_constant:
+            shuffle_list = np.random.permutation(range(shuffle_start, shuffle_end))
+        indices = (
+            list(range(0, shuffle_start))
+            + list(shuffle_list)
+            + list(range(shuffle_end, n_inds))
+        )
+        # TODO: implement so that this changes only for a specific class.
+        X[:, i] = X[indices, i]
+
+    return X
+ + + +
+[docs]
+def binary_noise_shift(
+    X: np.ndarray[float, np.dtype[np.float64]],
+    prob: float = 0.5,
+    delta: float = 0.5,
+) -> np.ndarray[float, np.dtype[np.float64]]:
+    """Create binary noise of specified parameters in input data.
+
+    Parameters
+    ----------
+    X: numpy.matrix
+        covariate data
+    prob: float
+        probability that a noised binary entry is set to 1
+    delta: float
+        fraction of rows affected
+
+    Returns
+    -------
+    X: numpy.matrix
+        covariate data with binary noise in the affected rows
+
+    """
+    # TODO: if temporal, flatten before adding noise and unflatten at the end.
+    X_df = pd.DataFrame(X)
+    # Noise is only applied to columns containing 0/1 values.
+    bin_cols = X_df.loc[:, (X_df.isin([0, 1])).all()].columns.values
+    indices = np.random.choice(X.shape[0], math.ceil(X.shape[0] * delta), replace=False)
+    X_mod = X[indices, :][:, bin_cols]
+
+    if X_mod.shape[1] == 1:
+        noise = np.random.binomial(1, prob, X_mod.shape[0])
+    else:
+        noise = np.random.binomial(1, prob, (X_mod.shape[0], X_mod.shape[1]))
+
+    X[np.ix_(indices, bin_cols)] = noise
+
+    return X
+
\ No newline at end of file
diff --git a/api/_modules/cyclops/report/report.html b/api/_modules/cyclops/report/report.html
new file mode 100644
index 000000000..94e37c02a
--- /dev/null
+++ b/api/_modules/cyclops/report/report.html
@@ -0,0 +1,1629 @@
+cyclops.report.report - cyclops documentation
Source code for cyclops.report.report

+"""Cyclops model report module."""
+
+import base64
+import glob
+import os
+from datetime import date as dt_date
+from datetime import datetime as dt_datetime
+from io import BytesIO
+from typing import Any, Callable, Dict, List, Literal, Optional, Type, Union
+
+import jinja2
+from PIL import Image
+from plotly.graph_objects import Figure
+from plotly.io import write_image
+from plotly.offline import get_plotlyjs
+from pydantic import BaseModel, StrictStr, create_model
+from scour import scour
+
+from cyclops.report.model_card import ModelCard  # type: ignore[attr-defined]
+from cyclops.report.model_card.base import BaseModelCardField
+from cyclops.report.model_card.fields import (
+    Citation,
+    Dataset,
+    ExplainabilityReport,
+    FairnessAssessment,
+    FairnessReport,
+    Graphic,
+    GraphicsCollection,
+    License,
+    MetricCard,
+    MetricCardCollection,
+    Owner,
+    PerformanceMetric,
+    Reference,
+    RegulatoryRequirement,
+    Risk,
+    SensitiveData,
+    Test,
+    UseCase,
+    User,
+    Version,
+)
+from cyclops.report.utils import (
+    _object_is_in_model_card_module,
+    _raise_if_not_dict_with_str_keys,
+    create_metric_cards,
+    empty,
+    get_histories,
+    get_names,
+    get_passed,
+    get_slices,
+    get_thresholds,
+    get_timestamps,
+    regex_replace,
+    regex_search,
+    str_to_snake_case,
+    sweep_graphics,
+    sweep_metric_cards,
+    sweep_metrics,
+    sweep_tests,
+)
+
+
+_TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates", "model_report")
+_DEFAULT_TEMPLATE_FILENAME = "model_report.jinja"
+
+
+
+[docs] +class ModelCardReport: + """Model card report. + + This class serves as an interface to populate a `ModelCard` object and generate + an HTML report from it. + + Parameters + ---------- + output_dir : str, optional + Path to the directory where the model card report will be saved. If not + provided, the report will be saved in the current working directory. + + """ + + def __init__(self, output_dir: Optional[str] = None) -> None: + self.output_dir = output_dir or os.getcwd() + self._model_card = ModelCard() # type: ignore[call-arg] + +
+[docs] + @classmethod + def from_json_file( + cls, + path: str, + output_dir: Optional[str] = None, + ) -> "ModelCardReport": + """Load a model card from a file. + + Parameters + ---------- + path : str + The path to a JSON file containing model card data. + output_dir : str, optional + The directory to save the report to. If not provided, the report will + be saved in a directory called `cyclops_report` in the current working + directory. + + Returns + ------- + ModelCardReport + The model card report. + + """ + model_card = ModelCard.parse_file(path) + report = ModelCardReport(output_dir=output_dir) + report._model_card = model_card + return report
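A round-trip sketch (file paths are illustrative):

from cyclops.report import ModelCardReport

# Resume a previously exported report from its saved JSON representation.
report = ModelCardReport.from_json_file(
    "cyclops_report/model_card.json",  # hypothetical path to an earlier export
    output_dir="./reports",
)
report.log_owner(name="Model Team", contact="team@example.com")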
+ + + def _log_field( + self, + data: Dict[str, Any], + section_name: str, + field_name: str, + field_type: Type[BaseModel], + ) -> None: + """Populate a field in the model card. + + Parameters + ---------- + data : Dict[str, Any] + Data to populate the field with. + section_name : str + Name of the section to populate. + field_name : str + Name of the field to populate. If the field does not exist, it will be + created and added to the section. + field_type : BaseModel + Type of the field to populate. + + """ + section_name = str_to_snake_case(section_name) + section = self._model_card.get_section(section_name) + field_value = field_type.parse_obj(data) + + if field_name in section.__fields__: + section.update_field(field_name, field_value) + else: + field_name = str_to_snake_case(field_name) + section.add_field(field_name, field_value) + +
+[docs] + def log_from_dict(self, data: Dict[str, Any], section_name: str) -> None: + """Populate fields in the model card from a dictionary. + + The keys of the dictionary serve as the field names in the specified section. + + Parameters + ---------- + data : Dict[str, Any] + Dictionary of data to populate the fields with. + section_name : str + Name of the section to populate. + + """ + _raise_if_not_dict_with_str_keys(data) + section_name = str_to_snake_case(section_name) + section = self._model_card.get_section(section_name) + + # get data already in section and update with new data + section_data = section.dict() + section_data.update(data) + + populated_section = section.__class__.parse_obj(section_data) + setattr(self._model_card, section_name, populated_section)
+ + +
+[docs] + def log_descriptor( + self, + name: str, + description: str, + section_name: str, + **extra: Any, + ) -> None: + """Add a descriptor to a section of the report. + + This method will create a new pydantic `BaseModel` subclass with the given + name, which has a field named `description` of type `str`. As long as the + descriptor name does not conflict with a defined class in the `model_card` + module, the descriptor can be added to any section of the report. + + Parameters + ---------- + name : str + The name of the descriptor. + description : str + A description of the descriptor. + section_name : str + The section of the report to add the descriptor to. + **extra + Any extra fields to add to the descriptor. + + Raises + ------ + KeyError + If the given section name is not valid. + ValueError + If the given name conflicts with a defined class in the `model_card` module. + + Examples + -------- + >>> from cyclops.report import ModelCardReport + >>> report = ModelCardReport() + >>> report.log_descriptor( + ... name="tradeoff", + ... description="We trade off performance for interpretability.", + ... section_name="considerations", + ... ) + + """ + # use `name` to create BaseModel subclass + field_obj = create_model( + "".join(char for char in name.title() if not char.isspace()), # PascalCase + __base__=BaseModelCardField, + __cls_kwargs__={"list_factory": True}, # all descriptors are lists + description=( + StrictStr, + None, + ), # <field_name>=(<field_type>, <default_value>) + ) + + # make sure the field_obj doesn't conflict with any of the existing objects + if _object_is_in_model_card_module(field_obj): + raise ValueError( + "Encountered name conflict when trying to create a descriptor for " + f"{name}. Please use a different name.", + ) + + self._log_field( + data={"description": description, **extra}, + section_name=section_name, + field_name=str_to_snake_case(name), + field_type=field_obj, + )
+ + + def _log_graphic_collection( + self, + graphic: Graphic, + description: str, + section_name: str, + ) -> None: + # get the section + section_name = str_to_snake_case(section_name) + section = self._model_card.get_section(section_name) + + # append graphic to existing GraphicsCollection or create new one + if ( + "graphics" in section.__fields__ + and section.__fields__["graphics"].type_ is GraphicsCollection + and section.graphics is not None # type: ignore + ): + section.graphics.collection.append(graphic) # type: ignore + else: + self._log_field( + data={"description": description, "collection": [graphic]}, + section_name=section_name, + field_name="graphics", + field_type=GraphicsCollection, + ) + + def _log_metric_card_collection( + self, + metrics: List[str], + tooltips: List[Optional[str]], + slices: List[str], + values: List[List[str]], + metric_cards: List[MetricCard], + section_name: str = "overview", + ) -> None: + # get the section + section_name = str_to_snake_case(section_name) + section = self._model_card.get_section(section_name) + + # append graphic to existing GraphicsCollection or create new one + if ( + "metric_cards" in section.__fields__ + and section.__fields__["metric_cards"].type_ is MetricCardCollection + and section.metric_cards is not None # type: ignore + ): + section.metric_cards.collection.append(metric_cards) # type: ignore + else: + self._log_field( + data={ + "metrics": metrics, + "tooltips": tooltips, + "slices": slices, + "values": values, + "collection": metric_cards, + }, + section_name=section_name, + field_name="metric_cards", + field_type=MetricCardCollection, + ) + +
+[docs] + def log_image(self, img_path: str, caption: str, section_name: str) -> None: + """Add an image to a section of the report. + + Parameters + ---------- + img_path : str + The path to the image file. + caption : str + The caption for the image. + section_name : str + The section of the report to add the image to. + + Raises + ------ + KeyError + If the given section name is not valid. + ValueError + If the given image path does not exist. + + """ + if not os.path.exists(img_path): + raise ValueError(f"Image path {img_path} does not exist.") + + with Image.open(img_path) as img: + buffered = BytesIO() + img.save(buffered, format=img.format) + img_base64 = base64.b64encode(buffered.getvalue()).decode() + + graphic = Graphic.parse_obj( + {"name": caption, "image": f"data:image/{img.format};base64,{img_base64}"}, + ) + + self._log_graphic_collection(graphic, "Images", section_name)
+ + +
+[docs] + def log_plotly_figure( + self, + fig: Figure, + caption: str, + section_name: str, + interactive: bool = True, + ) -> None: + """Add a plotly figure to a section of the report. + + Parameters + ---------- + fig : Figure + The plotly figure to add. + caption : str + The caption for the figure. + section_name : str + The section of the report to add the figure to. + interactive : bool, optional, default=True + Whether or not the figure should be an interactive plot. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + if interactive: + data = { + "name": caption, + "image": fig.to_html(full_html=False, include_plotlyjs=False), + } + else: + bytes_buffer = BytesIO() + write_image(fig, bytes_buffer, format="svg", validate=True) + + scour_options = scour.sanitizeOptions() + scour_options.remove_descriptive_elements = True + svg: str = scour.scourString(bytes_buffer.getvalue(), options=scour_options) + + # convert svg to base64 + svg = base64.b64encode(svg.encode("utf-8")).decode("utf-8") + + data = {"name": caption, "image": f"data:image/svg+xml;base64,{svg}"} + + graphic = Graphic.parse_obj(data) # create Graphic object from data + + self._log_graphic_collection(graphic, "Plots", section_name)
+ + +
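A minimal sketch (figure contents and section choice are illustrative):

import plotly.express as px

from cyclops.report import ModelCardReport

report = ModelCardReport(output_dir="./reports")
fig = px.bar(x=["site_a", "site_b"], y=[0.91, 0.87], title="AUROC by site")

# interactive=True embeds the figure as HTML; interactive=False stores a base64 SVG.
report.log_plotly_figure(fig, caption="AUROC by site", section_name="datasets")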
+[docs] + def log_owner( + self, + name: str, + contact: Optional[str] = None, + role: Optional[str] = None, + section_name: str = "model_details", + **extra: Any, + ) -> None: + """Add an owner to a section of the report. + + Parameters + ---------- + name : str + The name of the owner. + contact : str, optional + The contact information for the owner. + role : str, optional + The role of the owner. + section_name : str, optional + The name of the section of the report to log the owner to. If not provided, + the owner will be added to the `model_details` section, representing + the model owner. + **extra + Any extra fields to add to the Owner. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={"name": name, "contact": contact, "role": role, **extra}, + section_name=section_name, + field_name="owners", + field_type=Owner, + )
+ + +
+[docs] + def log_version( + self, + version_str: str, + date: Optional[Union[dt_date, dt_datetime, str, int, float]] = None, + description: Optional[str] = None, + section_name: str = "model_details", + **extra: Any, + ) -> None: + """Add a version to a section of the report. + + Parameters + ---------- + version_str : str + The version number or identifier as a string. This can be a semantic + version number, e.g. "1.0.0", or a custom identifier, e.g. "v1". + date : Union[dt_date, dt_datetime, str, int, float], optional + The date of the version. This can be a datetime/date object, an integer + or float representing a UNIX timestamp, or a string in the format + `YYYY-MM-DD[T]HH:MM[:SS[.ffffff]][Z or [±]HH[:]MM]]` or `YYYY-MM-DD`. + description : str, optional + A description of the version. This can be used to summarize the changes + made in the version or to provide additional context. + section_name : str, optional + The section of the report to add the version to. If not provided, + the version will be added to the `model_details` section, representing + the version of the model as a whole. + **extra + Any extra fields to add to the Version. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={ + "version": version_str, + "date": date, + "description": description, + **extra, + }, + section_name=section_name, + field_name="version", + field_type=Version, + )
+ + +
+[docs] + def log_license( + self, + identifier: str, + text: Optional[str] = None, + section_name: str = "model_details", + **extra: Any, + ) -> None: + """Add a license to a section of the report. + + Parameters + ---------- + identifier : str + The SPDX identifier of the license, e.g. "Apache-2.0". + See https://spdx.org/licenses/ for a list of valid identifiers. + For custom licenses, set the `identifier` to "unknown", "unlicensed", + or "proprietary" and provide the full license text in the `text` field, + if available. + text : str, optional + The full text of the license. This is useful for custom licenses + that are not in the SPDX list. + section_name : str, optional + The section of the report to add the license to. If not provided, + the license will be added to the `model_details` section, representing + the license for the model as a whole. + **extra + Any extra fields to add to the License. + + Raises + ------ + KeyError + If the given section name is not valid. + + Notes + ----- + If the license is not found in the SPDX list, the license text will be + left blank. If the license text is provided, it will be used instead. + + """ + self._log_field( + data={"identifier": identifier, "text": text, **extra}, + section_name=section_name, + field_name="licenses", + field_type=License, + )
+ + +
+[docs]
+    def log_citation(
+        self,
+        citation: str,
+        section_name: str = "model_details",
+        **extra: Any,
+    ) -> None:
+        """Add a citation to a section of the report.
+
+        Parameters
+        ----------
+        citation : str
+            The citation content.
+        section_name : str, optional
+            The section of the report to add the citation to. If not provided,
+            the citation will be added to the `model_details` section, representing
+            the citation for the model.
+        **extra
+            Any extra fields to add to the Citation.
+
+        Raises
+        ------
+        KeyError
+            If the given section name is not valid.
+
+        Notes
+        -----
+        If the citation content is a valid BibTeX entry, the citation will be
+        formatted as plain text and added to the report.
+
+        """
+        self._log_field(
+            data={"content": citation, **extra},
+            section_name=section_name,
+            field_name="citations",
+            field_type=Citation,
+        )
+ + +
+[docs] + def log_reference( + self, + link: str, + section_name: str = "model_details", + **extra: Any, + ) -> None: + """Add a reference to a section of the report. + + Parameters + ---------- + link : str + A link to a resource that provides relevant context. + section_name : str, optional + The section of the report to add the reference to. If not provided, + the reference will be added to the `model_details` section, representing + the reference for the model. + **extra + Any extra fields to add to the Reference. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={"link": link, **extra}, + section_name=section_name, + field_name="references", + field_type=Reference, + )
+ + +
+[docs] + def log_regulation( + self, + regulation: str, + section_name: str = "model_details", + **extra: Any, + ) -> None: + """Add a regulatory requirement to a section of the report. + + Parameters + ---------- + regulation : str + The regulatory requirement that must be complied with. + section_name : str, optional + The section of the report to add the regulatory requirement to. + If not provided, the regulatory requirement will be added to the + `model_details` section. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={"regulation": regulation, **extra}, + section_name=section_name, + field_name="regulatory_requirements", + field_type=RegulatoryRequirement, + )
+ + +
+[docs] + def log_model_parameters(self, params: Dict[str, Any]) -> None: + """Log model parameters. + + Parameters + ---------- + params : Dict[str, Any] + A dictionary of model parameters. + + """ + self.log_from_dict(params, section_name="model_parameters")
+ + +
+[docs] + def log_dataset( + self, + description: Optional[str] = None, + citation: Optional[str] = None, + link: Optional[str] = None, + license_id: Optional[str] = None, + version: Optional[str] = None, + features: Optional[List[str]] = None, + split: Optional[str] = None, + sensitive_features: Optional[List[str]] = None, + sensitive_feature_justification: Optional[str] = None, + **extra: Any, + ) -> None: + """Log information about the dataset used to train/evaluate the model. + + Parameters + ---------- + description : str, optional + A description of the dataset. + citation : str, optional + A citation for the dataset. This can be a BibTeX entry or a plain-text + citation. + link : str, optional + A link to a resource that provides relevant context e.g. the homepage + of the dataset. + license_id : str, optional + The SPDX identifier of the license, e.g. "Apache-2.0". + See https://spdx.org/licenses/ for a list of valid identifiers. + For custom licenses, set the `identifier` to "unknown", "unlicensed", + or "proprietary". + version : str, optional + The version of the dataset. + features : list of str, optional + The names of the features used to train/evaluate the model. + split : str, optional + The name of the split used to train/evaluate the model. + sensitive_features : list of str, optional + The names of the sensitive features used to train/evaluate the model. + sensitive_feature_justification : str, optional + A justification for the sensitive features used to train/evaluate the + model. + **extra + Any extra fields to add to the Dataset. + + Raises + ------ + AssertionError + If the sensitive features are not in the features list. + + """ + # sensitive features must be in features + if features is not None and sensitive_features is not None: + assert all( + feature in features for feature in sensitive_features + ), "All sensitive features must be in the features list." + + # TODO: plot dataset distribution + data = { + "description": description, + "citation": Citation(content=citation), + "reference": Reference(link=link), # type: ignore + "license": License(identifier=license_id), # type: ignore + "version": Version(version_str=version), # type: ignore + "features": features, + "split": split, + "sensitive_data": SensitiveData( + sensitive_data_used=sensitive_features, + justification=sensitive_feature_justification, + ), + **extra, + } + self._log_field( + data=data, + section_name="datasets", + field_name="data", + field_type=Dataset, + )
+ + +
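A sketch of a typical call (all values illustrative):

report.log_dataset(
    description="De-identified ICU admissions used for evaluation.",
    link="https://example.com/dataset",
    license_id="Apache-2.0",
    version="1.0.0",
    features=["age", "sex", "lab_result"],
    sensitive_features=["sex"],  # must be a subset of `features`
    sensitive_feature_justification="Needed to audit subgroup performance.",
)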
+[docs] + def log_user( + self, + description: str, + section_name: str = "considerations", + **extra: Any, + ) -> None: + """Add a user description to a section of the report. + + Parameters + ---------- + description : str + A description of the user. + section_name : str, optional + The section of the report to add the user to. If not provided, the user + will be added to the `considerations` section. + **extra + Any extra fields to add to the User. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={"description": description, **extra}, + section_name=section_name, + field_name="users", + field_type=User, + )
+ + +
+[docs] + def log_use_case( + self, + description: str, + kind: Literal["primary", "out-of-scope"], + section_name: str = "considerations", + **extra: Any, + ) -> None: + """Add a use case to a section of the report. + + Parameters + ---------- + description : str + A description of the use case. + kind : Literal["primary", "out-of-scope"] + The kind of use case - either "primary" or "out-of-scope". + section_name : str, optional + The section of the report to add the use case to. If not provided, + the use case will be added to the `considerations` section. + **extra + Any extra fields to add to the UseCase. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={"description": description, "kind": kind, **extra}, + section_name=section_name, + field_name="use_cases", + field_type=UseCase, + )
+ + +
+[docs] + def log_risk( + self, + risk: str, + mitigation_strategy: str, + section_name: str = "considerations", + **extra: Any, + ) -> None: + """Add a risk to a section of the report. + + Parameters + ---------- + risk : str + A description of the risk. + mitigation_strategy : str + A description of the mitigation strategy. + section_name : str, optional + The section of the report to add the risk to. If not provided, the + risk will be added to the `considerations` section. + **extra + Any extra information to add in relation to the risk. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={ + "risk": risk, + "mitigation_strategy": mitigation_strategy, + **extra, + }, + section_name=section_name, + field_name="ethical_considerations", + field_type=Risk, + )
+ + +
+[docs] + def log_fairness_assessment( + self, + affected_group: str, + benefit: str, + harm: str, + mitigation_strategy: str, + section_name: str = "considerations", + **extra: Any, + ) -> None: + """Add a fairness assessment to a section of the report. + + Parameters + ---------- + affected_group : str + A description of the affected group. + benefit : str + A description of the benefit(s) to the affected group. + harm : str + A description of the harm(s) to the affected group. + mitigation_strategy : str + A description of the mitigation strategy. + section_name : str, optional + The section of the report to add the fairness assessment to. If not + provided, the fairness assessment will be added to the `considerations` + section. + **extra + Any extra information to add in relation to the fairness assessment. + + Raises + ------ + KeyError + If the given section name is not valid. + + """ + self._log_field( + data={ + "affected_group": affected_group, + "benefits": benefit, + "harms": harm, + "mitigation_strategy": mitigation_strategy, + **extra, + }, + section_name=section_name, + field_name="fairness_assessment", + field_type=FairnessAssessment, + )
+ + +
+[docs] + def log_quantitative_analysis( + self, + analysis_type: Literal["performance", "fairness", "explainability"], + name: str, + value: Any, + metric_slice: Optional[str] = None, + decision_threshold: Optional[float] = None, + description: Optional[str] = None, + pass_fail_thresholds: Optional[Union[float, List[float]]] = None, + pass_fail_threshold_fns: Optional[ + Union[Callable[[Any, float], bool], List[Callable[[Any, float], bool]]] + ] = None, + **extra: Any, + ) -> None: + """Add a quantitative analysis to the report. + + Parameters + ---------- + analysis_type : Literal["performance", "fairness", "explainability"] + The type of analysis to log. + name : str + The name of the metric. + value : Any + The value of the metric. + metric_slice : str, optional + The name of the slice. If not provided, the slice name will be "overall". + decision_threshold : float, optional + The decision threshold for the metric. + description : str, optional + A description of the metric. + pass_fail_thresholds : Union[float, List[float]], optional + The pass/fail threshold(s) for the metric. If a single threshold is + provided, a single test will be created. If multiple thresholds are + provided, multiple tests will be created. + pass_fail_threshold_fns : Union[Callable[[Any, float], bool], + List[Callable[[Any, float], bool]]], optional + The pass/fail threshold function(s) for the metric. If a single function + is provided, a single test will be created. If multiple functions are + provided, multiple tests will be created. + **extra + Any extra fields to add to the metric. + + Raises + ------ + ValueError + If the given metric type is not valid. + + """ + if analysis_type not in ["performance", "fairness", "explainability"]: + raise ValueError( + f"Invalid metric type {analysis_type}. Must be one of 'performance', " + "'fairness', or 'explainability'.", + ) + + section_name: str + field_name: str + field_type: Any + + section_name, field_name, field_type = { + "performance": ( + "quantitative_analysis", + "performance_metrics", + PerformanceMetric, + ), + "fairness": ("fairness_analysis", "fairness_reports", FairnessReport), + "explainability": ( + "explainability_analysis", + "explainability_reports", + ExplainabilityReport, + ), + }[analysis_type] + + data = { + "type": name, + "value": value, + "slice": metric_slice, + "decision_threshold": decision_threshold, + "description": description, + **extra, + } + + # TODO: create graphics + + if pass_fail_thresholds is not None and pass_fail_threshold_fns is not None: + if isinstance(pass_fail_thresholds, float): + pass_fail_thresholds = [pass_fail_thresholds] + if callable(pass_fail_threshold_fns): + pass_fail_threshold_fns = [pass_fail_threshold_fns] + + # create Test objects + tests = [] + for threshold, threshold_fn in zip( + pass_fail_thresholds, + pass_fail_threshold_fns, + ): + tests.append( + Test( + name=f"{name}/{metric_slice}" if metric_slice else name, + description=None, + threshold=threshold, + result=value, + passed=threshold_fn(value, threshold), + graphics=None, + ), + ) + + data["tests"] = tests + + self._log_field( + data=data, + section_name=section_name, + field_name=field_name, + field_type=field_type, + )
+ + +
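For example, logging a performance metric together with a pass/fail test (values and slice name are illustrative):

import operator

report.log_quantitative_analysis(
    analysis_type="performance",
    name="accuracy",
    value=0.87,
    metric_slice="age:[50, 70)",  # hypothetical slice name
    pass_fail_thresholds=0.8,
    pass_fail_threshold_fns=operator.ge,  # the test passes when value >= threshold
)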
+[docs] + def log_performance_metrics(self, metrics: Dict[str, Any]) -> None: + """Add a performance metric to the `Quantitative Analysis` section. + + Parameters + ---------- + metrics : Dict[str, Any] + A dictionary of performance metrics. The keys should be the name of the + metric, and the values should be the value of the metric. If the metric + is a slice metric, the key should be the slice name followed by a slash + and then the metric name (e.g. "slice_name/metric_name"). If no slice + name is provided, the slice name will be "overall". + + Raises + ------ + TypeError + If the given metrics are not a dictionary with string keys. + + """ + _raise_if_not_dict_with_str_keys(metrics) + for metric_name, metric_value in metrics.items(): + name_split = metric_name.split("/") + if len(name_split) == 1: + slice_name = "overall" + metric_name = name_split[0] # noqa: PLW2901 + else: # everything before the last slash is the slice name + slice_name = "/".join(name_split[:-1]) + metric_name = name_split[-1] # noqa: PLW2901 + + # TODO: create plot + + self._log_field( + data={"type": metric_name, "value": metric_value, "slice": slice_name}, + section_name="quantitative_analysis", + field_name="performance_metrics", + field_type=PerformanceMetric, + )
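The slice-prefixed key convention looks like this in practice (numbers illustrative):

report.log_performance_metrics(
    {
        "accuracy": 0.87,  # no slash, so logged under the "overall" slice
        "sex:F/accuracy": 0.84,  # everything before the last "/" is the slice name
        "sex:M/accuracy": 0.90,
    },
)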
+ + + # TODO: MERGE/COMPARE MODEL CARDS + + def _validate(self) -> None: + """Validate the model card.""" + ModelCard.validate(self._model_card.dict()) + + def _write_file(self, path: str, content: str) -> None: + """Write a file to the given path. + + If the path does not exist, create it. + + """ + os.makedirs(os.path.dirname(path), exist_ok=True) + with open(path, "w+", encoding="utf-8") as f_handle: + f_handle.write(content) + + def _jinja_loader(self, template_dir: str) -> jinja2.FileSystemLoader: + """Create a jinja2 file system loader.""" + return jinja2.FileSystemLoader(template_dir) + + def _get_jinja_template( + self, + template_path: Optional[str] = None, + ) -> jinja2.Template: + """Get a jinja2 template.""" + _template_path = template_path or os.path.join( + _TEMPLATE_DIR, + _DEFAULT_TEMPLATE_FILENAME, + ) + template_dir = os.path.dirname(_template_path) + template_file = os.path.basename(_template_path) + + jinja_env = jinja2.Environment( + loader=self._jinja_loader(template_dir), + autoescape=True, + auto_reload=True, + cache_size=0, + ) + + jinja_env.filters["regex_replace"] = regex_replace + jinja_env.filters["regex_search"] = regex_search + jinja_env.filters["zip"] = zip + jinja_env.tests["list"] = lambda x: isinstance(x, list) + jinja_env.tests["empty"] = empty + jinja_env.tests["hasattr"] = hasattr + jinja_env.tests["None"] = lambda x: x is None + jinja_env.tests["int"] = lambda x: isinstance(x, int) + jinja_env.tests["float"] = lambda x: isinstance(x, float) + jinja_env.tests["bool"] = lambda x: isinstance(x, bool) + + return jinja_env.get_template(template_file) + +
+[docs] + def export( + self, + output_filename: Optional[str] = None, + template_path: Optional[str] = None, + interactive: bool = True, + save_json: bool = True, + last_n_evals: Optional[int] = None, + mean_std_min_evals: int = 3, + synthetic_timestamp: Optional[str] = None, + ) -> str: + """Export the model card report to an HTML file. + + Parameters + ---------- + output_filename : str, optional + The name of the output file. If not provided, the file will be named + with the current date and time. + template_path : str, optional + The path to the jinja2 template to use. The default is None, which uses + the default template provided by CyclOps. + interactive : bool, optional + Whether to create an interactive HTML report. The default is True. + save_json : bool, optional + Whether to save the model card as a JSON file. The default is True. + last_n_evals : int, optional + The number of most recent evaluations to include in the report and + calculate trends for. If not provided, all evaluations will be included. + mean_std_min_evals : int + The minimum number of evaluations required to calculate the mean and + standard deviation for the performance over time plot in the overview + section. The default is 3. + synthetic_timestamp : str, optional + A synthetic timestamp to use for the report. This is useful for + generating back-dated reports. The default is None, which uses the + current date and time. + + Returns + ------- + str + Path of the saved HTML report file. + + """ + assert ( + output_filename is None + or isinstance(output_filename, str) + and output_filename.endswith(".html") + ), "`output_filename` must be a string ending with '.html'" + + # write to file + if synthetic_timestamp is not None: + today_now = synthetic_timestamp + else: + today_now = dt_datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + current_report_metrics: Union[ + List[List[PerformanceMetric]], List[PerformanceMetric] + ] = [] + sweep_metrics(self._model_card, current_report_metrics) + current_report_metrics_set = ( + current_report_metrics[0] + if isinstance(current_report_metrics[0], list) + else [current_report_metrics[0]] + ) + + report_paths = glob.glob( + os.path.join( + self.output_dir, + "cyclops_report", + "*.json", + ), + ) + + if len(report_paths) != 0: + latest_report_path = sorted(report_paths)[-1] + latest_report = ModelCard.parse_file( + latest_report_path, + ) + latest_report_metric_cards: List[List[MetricCard]] = [] + sweep_metric_cards(latest_report, latest_report_metric_cards) + latest_report_metric_cards_set = latest_report_metric_cards[0] + else: + latest_report_metric_cards_set = None + # check if overview section exists + if self._model_card.overview is None: + # compare tests + metrics, tooltips, slices, values, metric_cards = create_metric_cards( + current_report_metrics_set, + today_now, + latest_report_metric_cards_set, + ) + self._log_metric_card_collection( + metrics, + tooltips, + slices, + values, + metric_cards, + ) + + if self._model_card.overview is not None: + last_n_evals = 0 if last_n_evals is None else last_n_evals + self._model_card.overview.last_n_evals = last_n_evals + self._model_card.overview.mean_std_min_evals = mean_std_min_evals + + self._validate() + template = self._get_jinja_template(template_path=template_path) + + func_dict = { + "sweep_tests": sweep_tests, + "sweep_graphics": sweep_graphics, + "get_slices": get_slices, + "get_thresholds": get_thresholds, + "get_passed": get_passed, + "get_names": get_names, + "get_histories": get_histories, + 
"get_timestamps": get_timestamps, + } + template.globals.update(func_dict) + + plotlyjs = get_plotlyjs() if interactive else None + content = template.render(model_card=self._model_card, plotlyjs=plotlyjs) + + report_path = os.path.join( + self.output_dir, + "cyclops_report", + output_filename or "model_card.html", + ) + self._write_file(report_path, content) + if save_json: + json_path = report_path.replace(".html", ".json") + self._write_file( + json_path, + self._model_card.json(indent=2, exclude_unset=True), + ) + + return report_path
+ \ No newline at end of file diff --git a/api/_modules/cyclops/tasks/classification.html b/api/_modules/cyclops/tasks/classification.html new file mode 100644 index 000000000..25b3ec041 --- /dev/null +++ b/api/_modules/cyclops/tasks/classification.html @@ -0,0 +1,987 @@ + + + + + + + + + + + + + + + + cyclops.tasks.classification - cyclops documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Source code for cyclops.tasks.classification

+"""Classification tasks."""
+
+import logging
+from functools import partial
+from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union
+
+import numpy as np
+import pandas as pd
+from datasets import Dataset, DatasetDict, config
+from sklearn.compose import ColumnTransformer
+from sklearn.exceptions import NotFittedError
+from sklearn.pipeline import Pipeline
+
+from cyclops.data.slicer import SliceSpec
+from cyclops.evaluate.evaluator import evaluate
+from cyclops.evaluate.fairness.config import FairnessConfig
+from cyclops.evaluate.metrics.experimental.metric_dict import MetricDict
+from cyclops.evaluate.metrics.factory import create_metric
+from cyclops.models.catalog import (
+    _img_model_keys,
+    _model_names_mapping,
+    _static_model_keys,
+)
+from cyclops.models.utils import get_split
+from cyclops.models.wrappers import WrappedModel
+from cyclops.models.wrappers.sk_model import SKModel
+from cyclops.models.wrappers.utils import to_numpy
+from cyclops.tasks.base import BaseTask
+from cyclops.tasks.utils import apply_image_transforms
+from cyclops.utils.log import setup_logging
+from cyclops.utils.optional import import_optional_module
+
+
+if TYPE_CHECKING:
+    from torchvision.transforms import Compose
+else:
+    Compose = import_optional_module(
+        "torchvision.transforms",
+        attribute="Compose",
+        error="warn",
+    )
+
+
+LOGGER = logging.getLogger(__name__)
+setup_logging(print_level="INFO", logger=LOGGER)
+
+
+
+[docs] +class BinaryTabularClassificationTask(BaseTask): + """Binary tabular classification task.""" + + @property + def task_type(self) -> str: + """The classification task type. + + Returns + ------- + str + Classification task type. + + """ + return "binary" + + @property + def data_type(self) -> str: + """The data type. + + Returns + ------- + str + The data type. + + """ + return "tabular" + + def _validate_models(self) -> None: + """Validate the models for the task data type.""" + assert all( + _model_names_mapping.get(model.model.__name__) in _static_model_keys # type: ignore + for model in self.models.values() + ), "All models must be static type models." + +
+[docs] + def train( + self, + X: Union[np.typing.NDArray[Any], pd.DataFrame, Dataset, DatasetDict], + y: Optional[Union[np.typing.NDArray[Any], pd.Series]] = None, + model_name: Optional[str] = None, + transforms: Optional[Union[ColumnTransformer, Pipeline]] = None, + best_model_params: Optional[Dict[str, Any]] = None, + splits_mapping: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> WrappedModel: + """Fit a model on tabular data. + + Parameters + ---------- + X : Union[np.ndarray, pd.DataFrame, Dataset, DatasetDict] + Data features. + y : Optional[Union[np.ndarray, pd.Series]] + Data labels, required when the input data is not a Hugging Face dataset, \ + by default None + model_name : Optional[str], optional + Model name, required if more than one model exists, \ + by default None + transforms : Optional[Union[ColumnTransformer, Pipeline]], optional + Transformations to be applied to the data before \ + fitting the model, by default None + best_model_params : Optional[Dict[str, Any]], optional + Parameters for finding the best model, by default None + splits_mapping: Optional[dict], optional + Mapping from 'train', 'validation' and 'test' to dataset splits names \ + used when input is a dataset dictionary, \ + by default {"train": "train", "validation": "validation"} + **kwargs: Any, optional + Additional parameters for the model. + + Returns + ------- + WrappedModel + The trained model. + + """ + if splits_mapping is None: + splits_mapping = {"train": "train", "validation": "validation"} + model_name, model = self.get_model(model_name) + if isinstance(X, (Dataset, DatasetDict)): + if best_model_params: + metric = best_model_params.pop("metric", None) + method = best_model_params.pop("method", "grid") + model.find_best( + best_model_params, + X, + feature_columns=self.task_features, + target_columns=self.task_target, + transforms=transforms, + metric=metric, + method=method, + splits_mapping=splits_mapping, + **kwargs, + ) + else: + model.fit( + X, + feature_columns=self.task_features, + target_columns=self.task_target, + transforms=transforms, + splits_mapping=splits_mapping, + **kwargs, + ) + else: + if y is None: + raise ValueError( + "Missing data labels 'y'. Please provide the labels for \ + the training data when not using a Hugging Face dataset \ + as the input.", + ) + X = to_numpy(X) + if transforms is not None: + try: + X = transforms.transform(X) + except NotFittedError: + X = transforms.fit_transform(X) + y = to_numpy(y) + assert len(X) == len(y) + if best_model_params: + metric = best_model_params.pop("metric", None) + method = best_model_params.pop("method", "grid") + model.find_best( + best_model_params, + X, + y=y, # type: ignore + metric=metric, + method=method, + **kwargs, + ) + else: + model.fit(X, y, **kwargs) # type: ignore + self.trained_models.append(model_name) + + return model
+ + +
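A minimal sketch of calling ``train`` with in-memory data rather than a Hugging Face dataset. The model key, feature columns, and target name are hypothetical, and the constructor signature is assumed from how ``task_features`` and ``task_target`` are used above:

.. code:: python

   import pandas as pd

   from cyclops.tasks.classification import BinaryTabularClassificationTask

   # Hypothetical task setup; "sgd_classifier" is an illustrative model key.
   task = BinaryTabularClassificationTask(
       "sgd_classifier",
       task_features=["age", "bmi"],
       task_target="outcome",
   )

   X = pd.DataFrame({"age": [34, 71, 52, 60], "bmi": [22.1, 30.4, 27.8, 25.0]})
   y = pd.Series([0, 1, 0, 1])

   # Labels are mandatory here because X is not a Hugging Face dataset;
   # the data is converted to numpy and the wrapped model is fit directly.
   model = task.train(X, y=y, model_name="sgd_classifier")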
+[docs] + def predict( + self, + dataset: Union[np.typing.NDArray[Any], pd.DataFrame, Dataset, DatasetDict], + model_name: Optional[str] = None, + transforms: Optional[ColumnTransformer] = None, + proba: bool = True, + splits_mapping: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> Union[np.typing.NDArray[Any], Dataset]: + """Predict labels or probabilities on the given dataset. + + Parameters + ---------- + dataset : Union[np.ndarray, pd.DataFrame, Dataset, DatasetDict] + Data features. + model_name : Optional[str], optional + Model name, required if more than one model exists, by default None + transforms : Optional[ColumnTransformer], optional + Transformations to be applied to the data before \ + prediction. This is used when the input is a \ + Hugging Face Dataset, by default None + proba : bool, optional + Whether to return predicted probabilities, by default True + splits_mapping: Optional[dict], optional + Mapping from 'train', 'validation' and 'test' to dataset splits names, \ + used when input is a dataset dictionary, by default {"test": "test"} + **kwargs: Any, optional + Additional parameters for the prediction. + + Returns + ------- + Union[np.ndarray, Dataset] + Predicted labels or the Hugging Face dataset with predicted labels. + + Raises + ------ + NotFittedError + If the model is not fitted or not loaded with a pretrained estimator. + + """ + if splits_mapping is None: + splits_mapping = {"test": "test"} + model_name, model = self.get_model(model_name) + if model_name not in self.pretrained_models + self.trained_models: + raise NotFittedError( + f"It seems you have neither trained the {model_name} model nor \ + loaded a pretrained model.", + ) + if isinstance(dataset, (Dataset, DatasetDict)): + if proba and isinstance(model, SKModel): + return model.predict_proba( + dataset, + feature_columns=self.task_features, + transforms=transforms, + model_name=model_name, + splits_mapping=splits_mapping, + **kwargs, + ) + return model.predict( + dataset, + feature_columns=self.task_features, + transforms=transforms, + model_name=model_name, + splits_mapping=splits_mapping, + **kwargs, + ) + dataset = to_numpy(dataset) + if transforms is not None: + try: + dataset = transforms.transform(dataset) + except NotFittedError: + LOGGER.warning("Fitting preprocessor on evaluation dataset.") + dataset = transforms.fit_transform(dataset) + if proba and isinstance(model, SKModel): + predictions = model.predict_proba(dataset, **kwargs) + else: + predictions = model.predict(dataset, **kwargs) + + return predictions
+ + +
+[docs] + def evaluate( + self, + dataset: Union[Dataset, DatasetDict], + metrics: Union[List[str], MetricDict], + model_names: Optional[Union[str, List[str]]] = None, + transforms: Optional[ColumnTransformer] = None, + prediction_column_prefix: str = "predictions", + splits_mapping: Optional[Dict[str, str]] = None, + slice_spec: Optional[SliceSpec] = None, + batch_size: int = config.DEFAULT_MAX_BATCH_SIZE, + remove_columns: Optional[Union[str, List[str]]] = None, + fairness_config: Optional[FairnessConfig] = None, + override_fairness_metrics: bool = False, + array_lib: Literal["numpy", "torch", "cupy"] = "numpy", + ) -> Tuple[Dict[str, Any], Dataset]: + """Evaluate model(s) on a HuggingFace dataset. + + Parameters + ---------- + dataset : Union[Dataset, DatasetDict] + HuggingFace dataset. + metrics : Union[List[str], MetricDict] + Metrics to be evaluated. + model_names : Union[str, List[str]], optional + Model names to be evaluated; if not specified, all fitted models \ + will be used for evaluation, by default None + transforms : Optional[ColumnTransformer], optional + Transformations to be applied to the data before prediction, \ + by default None + prediction_column_prefix : str, optional + Name of the prediction column to be added to \ + the dataset, by default "predictions" + splits_mapping: Optional[dict], optional + Mapping from 'train', 'validation' and 'test' to dataset splits names \ + used when input is a dataset dictionary, by default {"test": "test"} + slice_spec : Optional[SliceSpec], optional + Specifications for creating slices of a dataset, by default None + batch_size : int, optional + Batch size for batched prediction and evaluation, \ + by default config.DEFAULT_MAX_BATCH_SIZE + remove_columns : Optional[Union[str, List[str]]], optional + Unnecessary columns to be removed from the dataset, by default None + fairness_config : Optional[FairnessConfig], optional + The configuration for computing fairness metrics. If None, no fairness \ + metrics will be computed, by default None + override_fairness_metrics : bool, optional + If True, the `metrics` argument in fairness_config will be overridden by \ + the `metrics` argument, by default False + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Tuple[Dict[str, Any], Dataset] + Dictionary with evaluation results and the dataset with prediction columns added. 
+ + """ + if splits_mapping is None: + splits_mapping = {"test": "test"} + if isinstance(metrics, list) and len(metrics): + metrics_collection = MetricDict( + [ + create_metric( # type: ignore[misc] + m, + task=self.task_type, + num_labels=len(self.task_features), + ) + for m in metrics + ], + ) + elif isinstance(metrics, MetricDict): + metrics_collection = metrics + if isinstance(model_names, str): + model_names = [model_names] + elif not model_names: + model_names = self.pretrained_models + self.trained_models + for model_name in model_names: + if model_name not in self.pretrained_models + self.trained_models: + LOGGER.warning( + "It seems you have neither trained the model nor \ + loaded a pretrained model.", + ) + dataset = self.predict( + dataset, + model_name=model_name, + transforms=transforms, + prediction_column_prefix=prediction_column_prefix, + only_predictions=False, + splits_mapping=splits_mapping, + ) + + # select the probability scores of the positive class since metrics + # expect a single column of probabilities + dataset = dataset.map( # type: ignore[union-attr] + lambda examples: { + f"{prediction_column_prefix}.{model_name}": np.array( # noqa: B023 + examples, + )[ + :, + 1, + ].tolist(), + }, + batched=True, + batch_size=batch_size, + input_columns=f"{prediction_column_prefix}.{model_name}", + ) + results = evaluate( + dataset=dataset, + metrics=metrics_collection, + target_columns=self.task_target, + slice_spec=slice_spec, + prediction_columns=[ + f"{prediction_column_prefix}.{model_name}" for model_name in model_names + ], + ignore_columns=remove_columns, + split=splits_mapping["test"], + batch_size=batch_size, + fairness_config=fairness_config, + override_fairness_metrics=override_fairness_metrics, + array_lib=array_lib, + ) + return results, dataset
+
+ + + +
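Continuing the hypothetical task from the training sketch above, a sketch of ``evaluate`` on a Hugging Face dataset with a slice specification. The metric names and the slice definition are assumptions for illustration, not taken from this diff:

.. code:: python

   from datasets import Dataset

   from cyclops.data.slicer import SliceSpec

   # `task` is the BinaryTabularClassificationTask from the previous sketch.
   data = Dataset.from_dict(
       {
           "age": [34, 71, 52, 60],
           "bmi": [22.1, 30.4, 27.8, 25.0],
           "outcome": [0, 1, 0, 1],
       }
   )
   # Evaluate overall and on a hypothetical "age >= 50" slice.
   slice_spec = SliceSpec(spec_list=[{"age": {"min_value": 50}}])

   # evaluate() first runs predict() for each trained/pretrained model,
   # keeps only the positive-class probability column, then computes the
   # metrics per slice; it returns both the results and the dataset with
   # the prediction columns attached.
   results, pred_ds = task.evaluate(
       data,
       metrics=["accuracy", "f1_score"],
       slice_spec=slice_spec,
       batch_size=32,
   )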
+[docs] +class MultilabelImageClassificationTask(BaseTask): + """Multilabel image classification task.""" + + @property + def task_type(self) -> str: + """The classification task type. + + Returns + ------- + str + Classification task type. + + """ + return "multilabel" + + @property + def data_type(self) -> str: + """The data type. + + Returns + ------- + str + The data type. + + """ + return "image" + + def _validate_models(self) -> None: + """Validate the models for the task data type.""" + assert all( + _model_names_mapping.get(model.model.__name__) in _img_model_keys # type: ignore + for model in self.models.values() + ), "All models must be image type models." + for model in self.models.values(): + model.initialize() + +
+[docs] + def predict( + self, + dataset: Union[np.typing.NDArray[Any], Dataset, DatasetDict], + model_name: Optional[str] = None, + transforms: Optional[Compose] = None, + splits_mapping: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> Union[np.typing.NDArray[Any], Dataset]: + """Predict the pathologies on the given dataset. + + Parameters + ---------- + dataset : Union[np.ndarray, Dataset, DatasetDict] + Image representation as a numpy array or a Hugging Face dataset. + model_name : Optional[str], optional + Model name, required if more than one model exists, by default None + transforms : Optional[Compose], optional + Transforms to be applied to the data, by default None + splits_mapping: Optional[dict], optional + Mapping from 'train', 'validation' and 'test' to dataset splits names, \ + used when input is a dataset dictionary, by default {"test": "test"} + **kwargs: Any, optional + Additional parameters for the prediction. + + Returns + ------- + Union[np.typing.NDArray[Any], Dataset] + Predicted labels or the Hugging Face dataset with predicted labels. + + """ + if splits_mapping is None: + splits_mapping = {"test": "test"} + model_name, model = self.get_model(model_name) + if transforms: + transforms = partial(apply_image_transforms, transforms=transforms) + if isinstance(dataset, (Dataset, DatasetDict)): + return model.predict( + dataset, + feature_columns=self.task_features, + transforms=transforms, + model_name=model_name, + splits_mapping=splits_mapping, + **kwargs, + ) + + return model.predict(dataset, **kwargs)
+ + +
+[docs] + def evaluate( + self, + dataset: Union[Dataset, DatasetDict], + metrics: Union[List[str], MetricDict], + model_names: Optional[Union[str, List[str]]] = None, + transforms: Optional[Compose] = None, + prediction_column_prefix: str = "predictions", + splits_mapping: Optional[Dict[str, str]] = None, + slice_spec: Optional[SliceSpec] = None, + batch_size: int = 64, + remove_columns: Optional[Union[str, List[str]]] = None, + fairness_config: Optional[FairnessConfig] = None, + override_fairness_metrics: bool = False, + array_lib: Literal["numpy", "torch", "cupy"] = "numpy", + ) -> Tuple[Dict[str, Any], Dataset]: + """Evaluate model(s) on a HuggingFace dataset. + + Parameters + ---------- + dataset : Union[Dataset, DatasetDict] + HuggingFace dataset. + metrics : Union[List[str], MetricDict] + Metrics to be evaluated. + model_names : Union[str, List[str]], optional + Model names to be evaluated; if not specified, all models will be used, \ + by default None + transforms : Optional[Compose], optional + Transforms to be applied to the data, by default None + prediction_column_prefix : str, optional + Name of the prediction column to be added to the dataset, \ + by default "predictions" + splits_mapping: Optional[dict], optional + Mapping from 'train', 'validation' and 'test' to dataset splits names \ + used when input is a dataset dictionary, by default {"test": "test"} + slice_spec : Optional[SliceSpec], optional + Specifications for creating slices of a dataset, by default None + batch_size : int, optional + Batch size for batched evaluation, by default 64 + remove_columns : Optional[Union[str, List[str]]], optional + Unnecessary columns to be removed from the dataset, by default None + fairness_config : Optional[FairnessConfig], optional + The configuration for computing fairness metrics. If None, no fairness \ + metrics will be computed, by default None + override_fairness_metrics : bool, optional + If True, the `metrics` argument in fairness_config will be overridden by \ + the `metrics` argument, by default False + array_lib : {"numpy", "torch", "cupy"}, default="numpy" + The array library to use for the metric computation. The metric results + will be returned in the format of `array_lib`. + + Returns + ------- + Tuple[Dict[str, Any], Dataset] + Dictionary with evaluation results and the dataset with prediction columns added. 
+ + """ + if splits_mapping is None: + splits_mapping = {"test": "test"} + if isinstance(dataset, DatasetDict): + split = get_split(dataset, "test", splits_mapping=splits_mapping) + dataset = dataset[split] + + missing_labels = [ + label for label in self.task_target if label not in dataset.column_names + ] + if len(missing_labels): + + def add_missing_labels(examples: Dict[str, Any]) -> Dict[str, Any]: + for label in missing_labels: + examples[label] = 0.0 + return examples + + dataset = dataset.map(add_missing_labels) + if isinstance(metrics, list) and len(metrics): + metrics_collection = MetricDict( + [ + create_metric( # type: ignore[misc] + m, + task=self.task_type, + num_labels=len(self.task_target), + ) + for m in metrics + ], + ) + elif isinstance(metrics, MetricDict): + metrics_collection = metrics + if isinstance(model_names, str): + model_names = [model_names] + elif model_names is None: + model_names = self.list_models() + for model_name in model_names: + dataset = self.predict( + dataset, + model_name=model_name, + transforms=transforms, + prediction_column_prefix=prediction_column_prefix, + only_predictions=False, + splits_mapping=splits_mapping, + ) + results = evaluate( + dataset=dataset, + metrics=metrics_collection, + slice_spec=slice_spec, + target_columns=self.task_target, + prediction_columns=[ + f"{prediction_column_prefix}.{model_name}" for model_name in model_names + ], + ignore_columns=remove_columns, + split=splits_mapping["test"], + batch_size=batch_size, + fairness_config=fairness_config, + override_fairness_metrics=override_fairness_metrics, + array_lib=array_lib, + ) + + return results, dataset
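The missing-label handling in the method above can be seen in isolation; this standalone sketch mirrors the ``add_missing_labels`` mapping with made-up column names:

.. code:: python

   from datasets import Dataset

   ds = Dataset.from_dict({"image_id": [1, 2], "Edema": [1.0, 0.0]})
   task_target = ["Edema", "Pneumonia"]  # labels the task expects

   missing = [label for label in task_target if label not in ds.column_names]

   def add_missing_labels(example):
       # Absent target columns are assumed negative (0.0), matching the
       # behavior in evaluate() above.
       for label in missing:
           example[label] = 0.0
       return example

   ds = ds.map(add_missing_labels)
   print(ds.column_names)  # now also includes 'Pneumonia'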
+
+ \ No newline at end of file diff --git a/api/_modules/index.html b/api/_modules/index.html new file mode 100644 index 000000000..23be9d4b8 --- /dev/null +++ b/api/_modules/index.html @@ -0,0 +1,411 @@ + + + + + + + + + + + + + + + + Overview: module code - cyclops documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

All modules for which code is available

+ \ No newline at end of file diff --git a/api/_sources/api.rst.txt b/api/_sources/api.rst.txt new file mode 100644 index 000000000..b953ba773 --- /dev/null +++ b/api/_sources/api.rst.txt @@ -0,0 +1,14 @@ +API Reference +============= + +.. toctree:: + :maxdepth: 1 + :glob: + + reference/api/cyclops.data.rst + reference/api/cyclops.tasks.rst + reference/api/evaluator.rst + reference/api/fairness_evaluator.rst + reference/api/metrics.rst + reference/api/cyclops.monitor.rst + reference/api/cyclops.report.rst diff --git a/api/_sources/contributing.rst.txt b/api/_sources/contributing.rst.txt new file mode 100644 index 000000000..95ba77181 --- /dev/null +++ b/api/_sources/contributing.rst.txt @@ -0,0 +1,33 @@ +Contributing to cyclops +======================= + +Thanks for your interest in contributing to cyclops! + +To submit PRs, please fill out the PR template along with the PR. If the +PR fixes an issue, don’t forget to link the PR to the issue! + +Pre-commit hooks +---------------- + +Once the python virtual environment is setup, you can run pre-commit +hooks using: + +.. code:: bash + + pre-commit run --all-files + +Coding guidelines +----------------- + +For code style, we recommend the `PEP 8 style +guide `__. + +For docstrings we use `numpy +format `__. + +We use `ruff `__ for code formatting and static code +analysis. Ruff checks various rules including `flake8 `__. The pre-commit hooks show errors which you need to fix before +submitting a PR. + +Last but not the least, we use type hints in our code which is then +checked using `mypy `__. diff --git a/api/_sources/developing.rst.txt b/api/_sources/developing.rst.txt new file mode 100644 index 000000000..0ad8258a2 --- /dev/null +++ b/api/_sources/developing.rst.txt @@ -0,0 +1,34 @@ +🧑🏿‍💻 Developing +======================= + +Using poetry +------------ + +The development environment can be set up using `poetry `__. Hence, make sure it is installed and then run: + +.. code:: bash + + python3 -m poetry install + source $(poetry env info --path)/bin/activate + +In order to install dependencies for testing (codestyle, unit tests, integration tests), run: + +.. code:: bash + + python3 -m poetry install --with test + +API documentation is built using `Sphinx `__ and can be locally built by: + +.. code:: bash + + python3 -m poetry install --with docs + cd docs + make html SPHINXOPTS="-D nbsphinx_allow_errors=True" + + +If you need to build the documentations locally, make sure to install ``Pandoc`` in addition to ``docs`` poetry group. + +Contributing +------------ + +Contributing to cyclops is welcomed. See `Contributing `__ for guidelines. diff --git a/api/_sources/evaluation.rst.txt b/api/_sources/evaluation.rst.txt new file mode 100644 index 000000000..0b1f7d2dc --- /dev/null +++ b/api/_sources/evaluation.rst.txt @@ -0,0 +1,25 @@ +Evaluation +========== + +The Evaluation API equips you with a rich toolbox to assess your models across key +dimensions. Dive into detailed performance metrics, unveil potential fairness +concerns, and gain granular insights through data slicing. + +Key capabilities +**************** + + * **Performance**: Employ a robust selection of common metrics to evaluate your + model's effectiveness and identify areas for improvement. + * **Data slicing**: Isolate the model's behavior on specific subsets of your + data, revealing performance nuances across demographics, features, or other + important characteristics. 
+ * **Fairness**: Uncover and analyze potential biases within your model to ensure + responsible and equitable outcomes. + + .. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/416170db-1265-42a3-a3c1-d34558b72b65 + +Follow the example below for instructions: + +.. toctree:: + + examples/metrics.ipynb diff --git a/api/_sources/index.rst.txt b/api/_sources/index.rst.txt new file mode 100644 index 000000000..ff5605517 --- /dev/null +++ b/api/_sources/index.rst.txt @@ -0,0 +1,30 @@ +What is CyclOps? +================ + +CyclOps is an open-source toolkit for scientists, engineers, and clinicians working +at the intersection of artificial intelligence and health. + +The toolkit is designed to help researchers and practitioners adopt +`Vector Institute's AI trust and safety principles `__. Specifically, the toolkit focuses on evaluation and monitoring of AI systems +developed for clinical applications where robustness, safety, and fairness are critical. + +The primary goal of CyclOps is to improve transparency, accountability, and trust in +AI systems that are developed for clinical applications. + +Quick Start +^^^^^^^^^^^ + +.. card:: Heart Failure Prediction + :link: https://vectorinstitute.github.io/cyclops/api/tutorials/kaggle/heart_failure_prediction.html + + Example use case showing how to use CyclOps to evaluate a heart failure + prediction model and monitor its performance over time + +.. toctree:: + :maxdepth: 2 + + user_guide + tutorials + developing + contributing + api diff --git a/api/_sources/installation.rst.txt b/api/_sources/installation.rst.txt new file mode 100644 index 000000000..16118bcea --- /dev/null +++ b/api/_sources/installation.rst.txt @@ -0,0 +1,29 @@ +Installation +============ + +Using pip +--------- + +.. code:: bash + + python3 -m pip install pycyclops + +``cyclops`` has many optional dependencies that are used for specific functionality. For example, the `monai `__ library is used for loading DICOM images to create datasets. Hence, ``monai`` can be installed using ``python3 -m pip install pycyclops[monai]``. Specific sets of dependencies are listed below. 
+ ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| Dependency | pip extra | Notes | ++=============================+==========================+===============================================================================================================+ +| xgboost | xgboost | Allows use of `XGBoost `__ model | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| torch | torch | Allows use of `PyTorch `__ models | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| torchvision | torchvision | Allows use of `Torchvision `__ library | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| torchxrayvision | torchxrayvision | Uses `TorchXRayVision `__ library | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| monai | monai | Uses `MONAI `__ to load and transform images | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| alibi | alibi | Uses `Alibi `__ for additional explainability functionality | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ +| alibi-detect | alibi-detect | Uses `Alibi Detect `__ for dataset shift detection | ++-----------------------------+--------------------------+---------------------------------------------------------------------------------------------------------------+ diff --git a/api/_sources/model_report.rst.txt b/api/_sources/model_report.rst.txt new file mode 100644 index 000000000..1b5dbc6a4 --- /dev/null +++ b/api/_sources/model_report.rst.txt @@ -0,0 +1,104 @@ +Model Report +============ + +The model report helps technicians, data scientists and clinicians to understand the +model's performance better by offering: + + * Clear Visualizations: Effortlessly incorporate the generated figures from model + evaluation into your report, providing a clear picture of model performance + for everyone. + + * Detailed Model Specs: Document and view all relevant model details + for easy reference. + + * Interactive Exploration: Gain insights into model performance across different + subgroups over time. Interact with the plots to select specific subgroups and + adjust displayed metrics. + + .. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/bc62f4c4-63f3-4c82-adf1-9e50c9f0abf0 + + +Let's dive into the key sections of a model report and explore what each one tells us. +Depending on what developers have added to the model report, it may or may not have +all the sections. + +Overview +-------- +This section provides a comprehensive overview of the various metrics used to evaluate +the model's performance. Color-coded plots allow for quick visual identification of +any significant changes in model performance. + +Report developers can tailor the number of metrics displayed in each plot to suit their +needs. 
Additionally, users can access brief descriptions of each metric +(e.g., Accuracy, F1 Score) by hovering over the corresponding title. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/23d2c7ac-1551-4e9b-9d30-9286ea5cdf3c + +The CyclOps model report also allows you to conveniently view model performance +on specific subgroups and add multiple metrics in a single plot: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/f71cf618-caac-46f7-9221-48d6a71dc1a6 + +The timestamp of each evaluation is on the X-axis, and each metric-slice is shown with +a distinct color. + +In the :doc:`Monitoring User Guide ` you'll find instructions on how to interact +with these plots. + +Dataset +------- +In the dataset section, you will be able to view all the plots that are generated to +explore the distribution of the dataset features. By hovering over any part of the plot, +you can see details about that feature. The plots also allow interactions such as +zooming or panning: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/5112312/85186099-d932-4fe5-8ac6-ee06f4736a3a + +Quantitative Analysis +--------------------- +Quantitative analysis is the section where users can further investigate the latest evaluation results with additional metrics and plots for each slice of the dataset. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/5112312/90500d21-94ba-4ede-b488-97669df21a6e + +Metric comparison charts are also a handy tool to compare how the model performs +across different subgroups as well as overall. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/5112312/5a5f8300-18de-4737-918e-9d77c33a1ceb + +Fairness Analysis +----------------- +Fairness analysis checks whether the model's predictions are independent of a sensitive +attribute, like age, race or gender. Ideally, the model should have the same outcome +for all groups. This ensures that the model isn't biased towards or against a particular +group. + +Here's an example of a plot you may see in the Fairness Analysis section: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/7e10a84a-0482-4348-8d75-913c7cd1bcb2 + +Model Details +------------- +Here you can view details and metadata about the model, such as its description, +developers/owners, or external links to the model repository or paper. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/344a9cee-6542-4a4f-bc16-b1eb269732d3 + +Model Parameters +---------------- +Scientists or model developers may add model parameters to the model report in this +section. Here is an example: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/97c1bb21-0afa-4474-9341-cce1ddd79f85 + +Considerations +-------------- +Considerations entail information about use cases of the model, ethical considerations, +groups at risk, etc. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/402f2e3c-a68e-484d-bd1f-ee458d15d45c + +Follow the example below for instructions on how to generate a model report: + +.. toctree:: + + examples/report.ipynb diff --git a/api/_sources/monitoring.rst.txt b/api/_sources/monitoring.rst.txt new file mode 100644 index 000000000..591b5bd00 --- /dev/null +++ b/api/_sources/monitoring.rst.txt @@ -0,0 +1,40 @@ +Monitoring +========== + +After initial evaluation and model report generation, how can we monitor model +performance over time? + +We accomplish this by evaluating the model on new test data and generating new model +reports. 
We then see the performance trends over time. +The model report allows users to select how many of the latest evaluations to view. + +Every time an evaluation is performed and added to the report, a new entry is added +to the model report's `JSON` file, and by doing so repeatedly, +users will be able to view the trend of performance over days, weeks, or months. + +.. image:: https://github.com/VectorInstitute/cyclops/assets/8986523/f71cf618-caac-46f7-9221-48d6a71dc1a6 + +Overview Performance +-------------------- + +At the top level, and at a quick glance, there are overall performance metrics: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/5112312/92cacabf-ff8e-42a5-bf3d-f338d0f3ce3d + +The number on the top left of each figure indicates the metric value for the latest +evaluation, each with its corresponding timestamp on the x-axis. The figures are +color-coded based on a minimum threshold that was set by the user. +Once the metric for the latest evaluation drops below the acceptable range, it's shown +in red, and when everything is good, it appears in green. + +Subgroup Performance +-------------------- + +To get a better perspective on different subgroups such as age intervals or sex, +you have the option of visualizing multiple plots in a single figure: + +.. image:: https://github.com/VectorInstitute/cyclops/assets/5112312/9e34e789-8d29-44dc-8631-3d9630fbb8f7 + + +Again, this plot lets you view a number of past evaluations using the +slider at the top right. diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.Aggregator.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.Aggregator.rst.txt new file mode 100644 index 000000000..5e389f103 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.Aggregator.rst.txt @@ -0,0 +1,29 @@ +cyclops.data.aggregate.Aggregator +================================= + +.. currentmodule:: cyclops.data.aggregate + +.. autoclass:: Aggregator + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~Aggregator.fit + ~Aggregator.fit_transform + ~Aggregator.set_output + ~Aggregator.transform + ~Aggregator.vectorize + + + + + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.rst.txt new file mode 100644 index 000000000..47eb20b66 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.rst.txt @@ -0,0 +1,41 @@ +cyclops.data.aggregate +====================== + +.. automodule:: cyclops.data.aggregate + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + :nosignatures: + + tabular_as_aggregated + timestamp_ffill_agg + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + Aggregator + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.tabular_as_aggregated.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.tabular_as_aggregated.rst.txt new file mode 100644 index 000000000..a944b5667 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.tabular_as_aggregated.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.aggregate.tabular\_as\_aggregated +============================================== + +.. currentmodule:: cyclops.data.aggregate + +.. 
autofunction:: tabular_as_aggregated \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.timestamp_ffill_agg.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.timestamp_ffill_agg.rst.txt new file mode 100644 index 000000000..fb4809ca0 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.aggregate.timestamp_ffill_agg.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.aggregate.timestamp\_ffill\_agg +============================================ + +.. currentmodule:: cyclops.data.aggregate + +.. autofunction:: timestamp_ffill_agg \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.MedicalImage.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.MedicalImage.rst.txt new file mode 100644 index 000000000..237c148e2 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.MedicalImage.rst.txt @@ -0,0 +1,41 @@ +cyclops.data.features.medical\_image.MedicalImage +================================================= + +.. currentmodule:: cyclops.data.features.medical_image + +.. autoclass:: MedicalImage + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~MedicalImage.cast_storage + ~MedicalImage.decode_example + ~MedicalImage.embed_storage + ~MedicalImage.encode_example + ~MedicalImage.flatten + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MedicalImage.decode + ~MedicalImage.dtype + ~MedicalImage.id + ~MedicalImage.mode + ~MedicalImage.pa_type + ~MedicalImage.reader + ~MedicalImage.suffix + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.rst.txt new file mode 100644 index 000000000..c313e872f --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.features.medical_image.rst.txt @@ -0,0 +1,32 @@ +cyclops.data.features.medical\_image +==================================== + +.. automodule:: cyclops.data.features.medical_image + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + MedicalImage + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.SliceSpec.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.SliceSpec.rst.txt new file mode 100644 index 000000000..61ae391f4 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.SliceSpec.rst.txt @@ -0,0 +1,37 @@ +cyclops.data.slicer.SliceSpec +============================= + +.. currentmodule:: cyclops.data.slicer + +.. autoclass:: SliceSpec + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~SliceSpec.add_slice_spec + ~SliceSpec.get_slices + ~SliceSpec.slices + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SliceSpec.column_names + ~SliceSpec.include_overall + ~SliceSpec.intersections + ~SliceSpec.validate + ~SliceSpec.spec_list + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.compound_filter.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.compound_filter.rst.txt new file mode 100644 index 000000000..79d56b70f --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.compound_filter.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.compound\_filter +==================================== + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: compound_filter \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_datetime.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_datetime.rst.txt new file mode 100644 index 000000000..073084ef4 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_datetime.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.filter\_datetime +==================================== + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: filter_datetime \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_non_null.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_non_null.rst.txt new file mode 100644 index 000000000..5c8a6a87d --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_non_null.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.filter\_non\_null +===================================== + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: filter_non_null \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_range.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_range.rst.txt new file mode 100644 index 000000000..aa05a5498 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_range.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.filter\_range +================================= + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: filter_range \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_string_contains.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_string_contains.rst.txt new file mode 100644 index 000000000..d24aa507a --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_string_contains.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.filter\_string\_contains +============================================ + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: filter_string_contains \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_value.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_value.rst.txt new file mode 100644 index 000000000..4fc31ec2c --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.filter_value.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.filter\_value +================================= + +.. currentmodule:: cyclops.data.slicer + +.. 
autofunction:: filter_value \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.is_datetime.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.is_datetime.rst.txt new file mode 100644 index 000000000..ed425db0d --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.is_datetime.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.is\_datetime +================================ + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: is_datetime \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.overall.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.overall.rst.txt new file mode 100644 index 000000000..9849994ae --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.overall.rst.txt @@ -0,0 +1,6 @@ +cyclops.data.slicer.overall +=========================== + +.. currentmodule:: cyclops.data.slicer + +.. autofunction:: overall \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.data.slicer.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.rst.txt new file mode 100644 index 000000000..17f303a1d --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.data.slicer.rst.txt @@ -0,0 +1,47 @@ +cyclops.data.slicer +=================== + +.. automodule:: cyclops.data.slicer + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + :nosignatures: + + compound_filter + filter_datetime + filter_non_null + filter_range + filter_string_contains + filter_value + is_datetime + overall + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + SliceSpec + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.ClinicalShiftApplicator.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.ClinicalShiftApplicator.rst.txt new file mode 100644 index 000000000..4df12c68f --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.ClinicalShiftApplicator.rst.txt @@ -0,0 +1,31 @@ +cyclops.monitor.clinical\_applicator.ClinicalShiftApplicator +============================================================ + +.. currentmodule:: cyclops.monitor.clinical_applicator + +.. autoclass:: ClinicalShiftApplicator + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~ClinicalShiftApplicator.age + ~ClinicalShiftApplicator.apply_shift + ~ClinicalShiftApplicator.custom + ~ClinicalShiftApplicator.hospital_type + ~ClinicalShiftApplicator.month + ~ClinicalShiftApplicator.sex + ~ClinicalShiftApplicator.time + + + + + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.rst.txt new file mode 100644 index 000000000..1ae46d83a --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.clinical_applicator.rst.txt @@ -0,0 +1,32 @@ +cyclops.monitor.clinical\_applicator +==================================== + +.. automodule:: cyclops.monitor.clinical_applicator + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + ClinicalShiftApplicator + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.SyntheticShiftApplicator.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.SyntheticShiftApplicator.rst.txt new file mode 100644 index 000000000..70abb45f7 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.SyntheticShiftApplicator.rst.txt @@ -0,0 +1,25 @@ +cyclops.monitor.synthetic\_applicator.SyntheticShiftApplicator +============================================================== + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. autoclass:: SyntheticShiftApplicator + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~SyntheticShiftApplicator.apply_shift + + + + + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.binary_noise_shift.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.binary_noise_shift.rst.txt new file mode 100644 index 000000000..5275bdb97 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.binary_noise_shift.rst.txt @@ -0,0 +1,6 @@ +cyclops.monitor.synthetic\_applicator.binary\_noise\_shift +========================================================== + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. autofunction:: binary_noise_shift \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_association_shift.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_association_shift.rst.txt new file mode 100644 index 000000000..87845017d --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_association_shift.rst.txt @@ -0,0 +1,6 @@ +cyclops.monitor.synthetic\_applicator.feature\_association\_shift +================================================================= + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. autofunction:: feature_association_shift \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_swap_shift.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_swap_shift.rst.txt new file mode 100644 index 000000000..743334a26 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.feature_swap_shift.rst.txt @@ -0,0 +1,6 @@ +cyclops.monitor.synthetic\_applicator.feature\_swap\_shift +========================================================== + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. 
autofunction:: feature_swap_shift \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.gaussian_noise_shift.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.gaussian_noise_shift.rst.txt new file mode 100644 index 000000000..589258073 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.gaussian_noise_shift.rst.txt @@ -0,0 +1,6 @@ +cyclops.monitor.synthetic\_applicator.gaussian\_noise\_shift +============================================================ + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. autofunction:: gaussian_noise_shift \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.knockout_shift.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.knockout_shift.rst.txt new file mode 100644 index 000000000..d3156c23c --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.knockout_shift.rst.txt @@ -0,0 +1,6 @@ +cyclops.monitor.synthetic\_applicator.knockout\_shift +===================================================== + +.. currentmodule:: cyclops.monitor.synthetic_applicator + +.. autofunction:: knockout_shift \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.rst.txt new file mode 100644 index 000000000..6cc5ba3d8 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.monitor.synthetic_applicator.rst.txt @@ -0,0 +1,44 @@ +cyclops.monitor.synthetic\_applicator +===================================== + +.. automodule:: cyclops.monitor.synthetic_applicator + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + :nosignatures: + + binary_noise_shift + feature_association_shift + feature_swap_shift + gaussian_noise_shift + knockout_shift + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + SyntheticShiftApplicator + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.report.report.ModelCardReport.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.report.report.ModelCardReport.rst.txt new file mode 100644 index 000000000..4f91f4a81 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.report.report.ModelCardReport.rst.txt @@ -0,0 +1,44 @@ +cyclops.report.report.ModelCardReport +===================================== + +.. currentmodule:: cyclops.report.report + +.. autoclass:: ModelCardReport + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + :nosignatures: + + ~ModelCardReport.export + ~ModelCardReport.from_json_file + ~ModelCardReport.log_citation + ~ModelCardReport.log_dataset + ~ModelCardReport.log_descriptor + ~ModelCardReport.log_fairness_assessment + ~ModelCardReport.log_from_dict + ~ModelCardReport.log_image + ~ModelCardReport.log_license + ~ModelCardReport.log_model_parameters + ~ModelCardReport.log_owner + ~ModelCardReport.log_performance_metrics + ~ModelCardReport.log_plotly_figure + ~ModelCardReport.log_quantitative_analysis + ~ModelCardReport.log_reference + ~ModelCardReport.log_regulation + ~ModelCardReport.log_risk + ~ModelCardReport.log_use_case + ~ModelCardReport.log_user + ~ModelCardReport.log_version + + + + + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.report.report.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.report.report.rst.txt new file mode 100644 index 000000000..d8a4df398 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.report.report.rst.txt @@ -0,0 +1,32 @@ +cyclops.report.report +===================== + +.. automodule:: cyclops.report.report + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + ModelCardReport + + + + + + + + + diff --git a/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.BinaryTabularClassificationTask.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.BinaryTabularClassificationTask.rst.txt new file mode 100644 index 000000000..2a5a70377 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.BinaryTabularClassificationTask.rst.txt @@ -0,0 +1,41 @@ +cyclops.tasks.classification.BinaryTabularClassificationTask +============================================================ + +.. currentmodule:: cyclops.tasks.classification + +.. autoclass:: BinaryTabularClassificationTask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~BinaryTabularClassificationTask.add_model + ~BinaryTabularClassificationTask.evaluate + ~BinaryTabularClassificationTask.get_model + ~BinaryTabularClassificationTask.list_models + ~BinaryTabularClassificationTask.list_models_params + ~BinaryTabularClassificationTask.load_model + ~BinaryTabularClassificationTask.predict + ~BinaryTabularClassificationTask.save_model + ~BinaryTabularClassificationTask.train + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BinaryTabularClassificationTask.data_type + ~BinaryTabularClassificationTask.models_count + ~BinaryTabularClassificationTask.task_type + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.MultilabelImageClassificationTask.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.MultilabelImageClassificationTask.rst.txt new file mode 100644 index 000000000..f4d56cd98 --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.MultilabelImageClassificationTask.rst.txt @@ -0,0 +1,40 @@ +cyclops.tasks.classification.MultilabelImageClassificationTask +============================================================== + +.. currentmodule:: cyclops.tasks.classification + +.. autoclass:: MultilabelImageClassificationTask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. 
rubric:: Methods + + .. autosummary:: + :nosignatures: + + ~MultilabelImageClassificationTask.add_model + ~MultilabelImageClassificationTask.evaluate + ~MultilabelImageClassificationTask.get_model + ~MultilabelImageClassificationTask.list_models + ~MultilabelImageClassificationTask.list_models_params + ~MultilabelImageClassificationTask.load_model + ~MultilabelImageClassificationTask.predict + ~MultilabelImageClassificationTask.save_model + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MultilabelImageClassificationTask.data_type + ~MultilabelImageClassificationTask.models_count + ~MultilabelImageClassificationTask.task_type + + \ No newline at end of file diff --git a/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.rst.txt b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.rst.txt new file mode 100644 index 000000000..3106bbcdc --- /dev/null +++ b/api/_sources/reference/api/_autosummary/cyclops.tasks.classification.rst.txt @@ -0,0 +1,33 @@ +cyclops.tasks.classification +============================ + +.. automodule:: cyclops.tasks.classification + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + + BinaryTabularClassificationTask + MultilabelImageClassificationTask + + + + + + + + + diff --git a/api/_sources/reference/api/cyclops.data.rst.txt b/api/_sources/reference/api/cyclops.data.rst.txt new file mode 100644 index 000000000..9d22be996 --- /dev/null +++ b/api/_sources/reference/api/cyclops.data.rst.txt @@ -0,0 +1,29 @@ +.. role:: hidden + :class: hidden-section + + +cyclops.data +============ + +.. automodule:: cyclops.data + +.. autosummary:: + :toctree: _autosummary + :nosignatures: + :template: custom-module-template.rst + + slicer + aggregate + + +cyclops.data.features +--------------------- + +.. automodule:: cyclops.data.features + +.. autosummary:: + :toctree: _autosummary + :nosignatures: + :template: custom-module-template.rst + + medical_image diff --git a/api/_sources/reference/api/cyclops.monitor.rst.txt b/api/_sources/reference/api/cyclops.monitor.rst.txt new file mode 100644 index 000000000..d513a61ca --- /dev/null +++ b/api/_sources/reference/api/cyclops.monitor.rst.txt @@ -0,0 +1,16 @@ +.. role:: hidden + :class: hidden-section + + +cyclops.monitor +=============== + +.. automodule:: cyclops.monitor + +.. autosummary:: + :toctree: _autosummary + :nosignatures: + :template: custom-module-template.rst + + clinical_applicator + synthetic_applicator diff --git a/api/_sources/reference/api/cyclops.report.rst.txt b/api/_sources/reference/api/cyclops.report.rst.txt new file mode 100644 index 000000000..03a932344 --- /dev/null +++ b/api/_sources/reference/api/cyclops.report.rst.txt @@ -0,0 +1,15 @@ +.. role:: hidden + :class: hidden-section + + +cyclops.report +============== + +.. automodule:: cyclops.report + +.. autosummary:: + :toctree: _autosummary + :nosignatures: + :template: custom-module-template.rst + + report diff --git a/api/_sources/reference/api/cyclops.tasks.rst.txt b/api/_sources/reference/api/cyclops.tasks.rst.txt new file mode 100644 index 000000000..2d8c5e6c7 --- /dev/null +++ b/api/_sources/reference/api/cyclops.tasks.rst.txt @@ -0,0 +1,15 @@ +.. role:: hidden + :class: hidden-section + + +cyclops.tasks +============= + +.. automodule:: cyclops.tasks + +.. 
autosummary:: + :toctree: _autosummary + :nosignatures: + :template: custom-module-template.rst + + classification diff --git a/api/_sources/reference/api/evaluator.rst.txt b/api/_sources/reference/api/evaluator.rst.txt new file mode 100644 index 000000000..f21431725 --- /dev/null +++ b/api/_sources/reference/api/evaluator.rst.txt @@ -0,0 +1,12 @@ +.. role:: hidden + :class: hidden-section + +######### +Evaluator +######### + +The evaluator module consists of the ``evaluate`` API method, which is used to evaluate +the performance of the model. It returns a dictionary containing various evaluation +metrics. + +.. autofunction:: cyclops.evaluate.evaluator.evaluate diff --git a/api/_sources/reference/api/fairness_evaluator.rst.txt b/api/_sources/reference/api/fairness_evaluator.rst.txt new file mode 100644 index 000000000..c9834e738 --- /dev/null +++ b/api/_sources/reference/api/fairness_evaluator.rst.txt @@ -0,0 +1,12 @@ +.. role:: hidden + :class: hidden-section + +################## +Fairness Evaluator +################## + +The fairness evaluator module consists of the ``evaluate_fairness`` API method, +which is used to evaluate the fairness of a model. The method returns parity metrics +for the model. + +.. autofunction:: cyclops.evaluate.fairness.evaluator.evaluate_fairness diff --git a/api/_sources/reference/api/metrics.rst.txt b/api/_sources/reference/api/metrics.rst.txt new file mode 100644 index 000000000..24af790e8 --- /dev/null +++ b/api/_sources/reference/api/metrics.rst.txt @@ -0,0 +1,8 @@ +Evaluation Metrics +================== + +.. toctree:: + :maxdepth: 1 + :glob: + + metrics/* diff --git a/api/_sources/reference/api/metrics/accuracy.rst.txt b/api/_sources/reference/api/metrics/accuracy.rst.txt new file mode 100644 index 000000000..0f08f7acf --- /dev/null +++ b/api/_sources/reference/api/metrics/accuracy.rst.txt @@ -0,0 +1,39 @@ +######## +Accuracy +######## + +Module Interface +________________ + +Accuracy +^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.Accuracy + :class-doc-from: class + +BinaryAccuracy +^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryAccuracy + :class-doc-from: class + +MulticlassAccuracy +^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassAccuracy + +MultilabelAccuracy +^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MultilabelAccuracy + +Functional Interface +____________________ + +binary_accuracy +^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.accuracy.binary_accuracy + +multiclass_accuracy +^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.accuracy.multiclass_accuracy + +multilabel_accuracy +^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.accuracy.multilabel_accuracy diff --git a/api/_sources/reference/api/metrics/auroc.rst.txt b/api/_sources/reference/api/metrics/auroc.rst.txt new file mode 100644 index 000000000..c3554cc00 --- /dev/null +++ b/api/_sources/reference/api/metrics/auroc.rst.txt @@ -0,0 +1,45 @@ +##### +AUROC +##### + +Module Interface +________________ + +AUROC +^^^^^ +.. autoclass:: cyclops.evaluate.metrics.AUROC + :class-doc-from: class + +BinaryAUROC +^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryAUROC + :class-doc-from: class + +MulticlassAUROC +^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassAUROC + :class-doc-from: class + +MultilabelAUROC +^^^^^^^^^^^^^^^ +.. 
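+As context for these metric classes: they are typically consumed through the
+``evaluate`` and ``evaluate_fairness`` entry points described earlier. A
+hedged sketch (the import paths and dictionary return behaviour come from the
+descriptions above; all argument names are assumptions):
+
+.. code-block:: python
+
+   from cyclops.evaluate.evaluator import evaluate
+   from cyclops.evaluate.fairness.evaluator import evaluate_fairness
+
+   # `dataset` and `metrics` are placeholders for whatever the real
+   # signatures expect; both calls return dictionaries of results.
+   results = evaluate(dataset=dataset, metrics=metrics)
+   parity_metrics = evaluate_fairness(dataset=dataset, metrics=metrics)
+
+.. 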
autoclass:: cyclops.evaluate.metrics.MultilabelAUROC + :class-doc-from: class + +Functional Interface +____________________ + +auroc +^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.auroc.auroc + +binary_auroc +^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.auroc.binary_auroc + +multiclass_auroc +^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.auroc.multiclass_auroc + +multilabel_auroc +^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.auroc.multilabel_auroc diff --git a/api/_sources/reference/api/metrics/average_precision.rst.txt b/api/_sources/reference/api/metrics/average_precision.rst.txt new file mode 100644 index 000000000..4b9e16174 --- /dev/null +++ b/api/_sources/reference/api/metrics/average_precision.rst.txt @@ -0,0 +1,29 @@ +################# +Average Precision +################# + +Module Interface +________________ + +.. TODO AveragePrecision is not exposed to API +.. AveragePrecision +.. ^^^^^^^^^^^^^^^^ +.. .. autoclass:: cyclops.evaluate.metrics.AveragePrecision + .. :class-doc-from: class + +BinaryAveragePrecision +^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryAveragePrecision + :class-doc-from: class + + +Functional Interface +____________________ + +average_precision +^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.average_precision.average_precision + +binary_average_precision +^^^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.average_precision.binary_average_precision diff --git a/api/_sources/reference/api/metrics/f1score.rst.txt b/api/_sources/reference/api/metrics/f1score.rst.txt new file mode 100644 index 000000000..726ad0174 --- /dev/null +++ b/api/_sources/reference/api/metrics/f1score.rst.txt @@ -0,0 +1,49 @@ +######### +F1-Score +######### + +Module Interface +________________ + +F1Score +^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.F1Score + :class-doc-from: class + +BinaryF1Score +^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryF1Score + :class-doc-from: class + +MulticlassF1Score +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassF1Score + :class-doc-from: class + + +MultilabelF1Score +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MultilabelF1Score + :class-doc-from: class + + + +Functional Interface +____________________ + +f1_score +^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.f1_score + +binary_f1_score +^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.binary_f1_score + +multiclass_f1_score +^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.multiclass_f1_score + + +multilabel_f1_score +^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.multilabel_f1_score diff --git a/api/_sources/reference/api/metrics/f_beta.rst.txt b/api/_sources/reference/api/metrics/f_beta.rst.txt new file mode 100644 index 000000000..3f2a0f229 --- /dev/null +++ b/api/_sources/reference/api/metrics/f_beta.rst.txt @@ -0,0 +1,48 @@ +############# +F-beta Score +############# + +Module Interface +________________ + +FbetaScore +^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.FbetaScore + :class-doc-from: class + +BinaryFbetaScore +^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryFbetaScore + :class-doc-from: class + +MulticlassFbetaScore +^^^^^^^^^^^^^^^^^^^^ +.. 
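+Each metric page in this reference pairs a class-based module interface with
+a functional interface. A hedged sketch of the difference, using the F-beta
+score (the import paths come from these listings; the call signatures and the
+``beta`` keyword are assumptions):
+
+.. code-block:: python
+
+   from cyclops.evaluate.metrics import BinaryFbetaScore
+   from cyclops.evaluate.metrics.functional.f_beta import binary_fbeta_score
+
+   target = [0, 1, 1, 0, 1]
+   preds = [0, 1, 0, 0, 1]
+
+   # Functional interface: assumed one-shot computation.
+   score = binary_fbeta_score(target, preds, beta=0.5)
+
+   # Module interface: assumed stateful, callable object.
+   metric = BinaryFbetaScore(beta=0.5)
+   score = metric(target, preds)
+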
+.. autoclass:: cyclops.evaluate.metrics.MulticlassFbetaScore
+   :class-doc-from: class
+
+
+MultilabelFbetaScore
+^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MultilabelFbetaScore
+   :class-doc-from: class
+
+
+
+Functional Interface
+____________________
+
+fbeta_score
+^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.fbeta_score
+
+binary_fbeta_score
+^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.binary_fbeta_score
+
+multiclass_fbeta_score
+^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.multiclass_fbeta_score
+
+multilabel_fbeta_score
+^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.f_beta.multilabel_fbeta_score
diff --git a/api/_sources/reference/api/metrics/precision.rst.txt b/api/_sources/reference/api/metrics/precision.rst.txt
new file mode 100644
index 000000000..8cf9fc165
--- /dev/null
+++ b/api/_sources/reference/api/metrics/precision.rst.txt
@@ -0,0 +1,46 @@
+#########
+Precision
+#########
+
+Module Interface
+________________
+
+Precision
+^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.Precision
+   :class-doc-from: class
+
+BinaryPrecision
+^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.BinaryPrecision
+   :class-doc-from: class
+
+MulticlassPrecision
+^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MulticlassPrecision
+   :class-doc-from: class
+
+MultilabelPrecision
+^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MultilabelPrecision
+   :class-doc-from: class
+
+
+Functional Interface
+____________________
+
+precision
+^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.precision
+
+binary_precision
+^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.binary_precision
+
+multiclass_precision
+^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.multiclass_precision
+
+multilabel_precision
+^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.multilabel_precision
diff --git a/api/_sources/reference/api/metrics/precision_recall_curve.rst.txt b/api/_sources/reference/api/metrics/precision_recall_curve.rst.txt
new file mode 100644
index 000000000..2c74a67b7
--- /dev/null
+++ b/api/_sources/reference/api/metrics/precision_recall_curve.rst.txt
@@ -0,0 +1,45 @@
+####################
+PrecisionRecallCurve
+####################
+
+Module Interface
+________________
+
+PrecisionRecallCurve
+^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.PrecisionRecallCurve
+   :class-doc-from: class
+
+BinaryPrecisionRecallCurve
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.BinaryPrecisionRecallCurve
+   :class-doc-from: class
+
+MulticlassPrecisionRecallCurve
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MulticlassPrecisionRecallCurve
+   :class-doc-from: class
+
+MultilabelPrecisionRecallCurve
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MultilabelPrecisionRecallCurve
+   :class-doc-from: class
+
+
+Functional Interface
+____________________
+
+precision_recall_curve
+^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall_curve.precision_recall_curve
+
+binary_precision_recall_curve
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
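+A hedged sketch of calling the binary variant (the
+``(precision, recall, thresholds)`` return convention is an assumption based
+on common metric libraries, not a documented guarantee; only the import path
+comes from the listing below):
+
+.. code-block:: python
+
+   from cyclops.evaluate.metrics.functional.precision_recall_curve import (
+       binary_precision_recall_curve,
+   )
+
+   target = [0, 1, 1, 0, 1]
+   preds = [0.1, 0.8, 0.6, 0.3, 0.9]  # predicted probabilities
+   # Assumed to return the curve traced over successive thresholds.
+   precision, recall, thresholds = binary_precision_recall_curve(target, preds)
+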
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall_curve.binary_precision_recall_curve
+
+multiclass_precision_recall_curve
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall_curve.multiclass_precision_recall_curve
+
+multilabel_precision_recall_curve
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall_curve.multilabel_precision_recall_curve
diff --git a/api/_sources/reference/api/metrics/recall.rst.txt b/api/_sources/reference/api/metrics/recall.rst.txt
new file mode 100644
index 000000000..bc749deee
--- /dev/null
+++ b/api/_sources/reference/api/metrics/recall.rst.txt
@@ -0,0 +1,45 @@
+######
+Recall
+######
+
+Module Interface
+________________
+
+Recall
+^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.Recall
+   :class-doc-from: class
+
+BinaryRecall
+^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.BinaryRecall
+   :class-doc-from: class
+
+MulticlassRecall
+^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MulticlassRecall
+   :class-doc-from: class
+
+MultilabelRecall
+^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MultilabelRecall
+   :class-doc-from: class
+
+
+Functional Interface
+____________________
+
+recall
+^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.recall
+
+binary_recall
+^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.binary_recall
+
+multiclass_recall
+^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.multiclass_recall
+
+multilabel_recall
+^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.precision_recall.multilabel_recall
diff --git a/api/_sources/reference/api/metrics/roc.rst.txt b/api/_sources/reference/api/metrics/roc.rst.txt
new file mode 100644
index 000000000..fac737805
--- /dev/null
+++ b/api/_sources/reference/api/metrics/roc.rst.txt
@@ -0,0 +1,45 @@
+#########
+ROC Curve
+#########
+
+Module Interface
+________________
+
+ROCCurve
+^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.ROCCurve
+   :class-doc-from: class
+
+BinaryROCCurve
+^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.BinaryROCCurve
+   :class-doc-from: class
+
+MulticlassROCCurve
+^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MulticlassROCCurve
+   :class-doc-from: class
+
+MultilabelROCCurve
+^^^^^^^^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.MultilabelROCCurve
+   :class-doc-from: class
+
+Functional Interface
+____________________
+
+roc_curve
+^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.roc.roc_curve
+
+binary_roc_curve
+^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.roc.binary_roc_curve
+
+multiclass_roc_curve
+^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.roc.multiclass_roc_curve
+
+multilabel_roc_curve
+^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.roc.multilabel_roc_curve
diff --git a/api/_sources/reference/api/metrics/sensitivity.rst.txt b/api/_sources/reference/api/metrics/sensitivity.rst.txt
new file mode 100644
index 000000000..fa3e7ea3c
--- /dev/null
+++ b/api/_sources/reference/api/metrics/sensitivity.rst.txt
@@ -0,0 +1,46 @@
+###########
+Sensitivity
+###########
+
+Module Interface
+________________
+
+Sensitivity
+^^^^^^^^^^^
+.. autoclass:: cyclops.evaluate.metrics.Sensitivity
+   :class-doc-from: class
+
+BinarySensitivity
+^^^^^^^^^^^^^^^^^
+.. 
autoclass:: cyclops.evaluate.metrics.BinarySensitivity + :class-doc-from: class + +MulticlassSensitivity +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassSensitivity + :class-doc-from: class + +MultilabelSensitivity +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MultilabelSensitivity + :class-doc-from: class + + +Functional Interface +____________________ + +sensitivity +^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.sensitivity.sensitivity + +binary_sensitivity +^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.sensitivity.binary_sensitivity + +multiclass_sensitivity +^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.sensitivity.multiclass_sensitivity + +multilabel_sensitivity +^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.sensitivity.multilabel_sensitivity diff --git a/api/_sources/reference/api/metrics/specificity.rst.txt b/api/_sources/reference/api/metrics/specificity.rst.txt new file mode 100644 index 000000000..8655bcf56 --- /dev/null +++ b/api/_sources/reference/api/metrics/specificity.rst.txt @@ -0,0 +1,46 @@ +########### +Specificity +########### + +Module Interface +________________ + +Specificity +^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.Specificity + :class-doc-from: class + +BinarySpecificity +^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinarySpecificity + :class-doc-from: class + +MulticlassSpecificity +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassSpecificity + :class-doc-from: class + +MultilabelSpecificity +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MultilabelSpecificity + :class-doc-from: class + + +Functional Interface +____________________ + +specificity +^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.specificity.specificity + +binary_specificity +^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.specificity.binary_specificity + +multiclass_specificity +^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.specificity.multiclass_specificity + +multilabel_specificity +^^^^^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.specificity.multilabel_specificity diff --git a/api/_sources/reference/api/metrics/stat_scores.rst.txt b/api/_sources/reference/api/metrics/stat_scores.rst.txt new file mode 100644 index 000000000..a95ec59b8 --- /dev/null +++ b/api/_sources/reference/api/metrics/stat_scores.rst.txt @@ -0,0 +1,46 @@ +########## +StatScores +########## + +Module Interface +________________ + +StatScores +^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.StatScores + :class-doc-from: class + +BinaryStatScores +^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.BinaryStatScores + :class-doc-from: class + +MulticlassStatScores +^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MulticlassStatScores + :class-doc-from: class + +MultilabelStatScores +^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: cyclops.evaluate.metrics.MultilabelStatScores + :class-doc-from: class + + +Functional Interface +____________________ + +stat_scores +^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.stat_scores.stat_scores + +binary_stat_scores +^^^^^^^^^^^^^^^^^^ +.. autofunction:: cyclops.evaluate.metrics.functional.stat_scores.binary_stat_scores + +multiclass_stat_scores +^^^^^^^^^^^^^^^^^^^^^^ +.. 
autofunction:: cyclops.evaluate.metrics.functional.stat_scores.multiclass_stat_scores
+
+multilabel_stat_scores
+^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: cyclops.evaluate.metrics.functional.stat_scores.multilabel_stat_scores
diff --git a/api/_sources/tutorials.rst.txt b/api/_sources/tutorials.rst.txt
new file mode 100644
index 000000000..0105f629e
--- /dev/null
+++ b/api/_sources/tutorials.rst.txt
@@ -0,0 +1,7 @@
+Tutorials
+=========
+
+.. toctree::
+   :maxdepth: 3
+
+   tutorials_use_cases
diff --git a/api/_sources/tutorials_monitor.rst.txt b/api/_sources/tutorials_monitor.rst.txt
new file mode 100644
index 000000000..d07b73b7f
--- /dev/null
+++ b/api/_sources/tutorials_monitor.rst.txt
@@ -0,0 +1,12 @@
+monitor API
+===========
+
+The monitor API allows you to detect data drift relevant to clinical use cases.
+It can generate source/target datasets and perform drift experiments on them.
+It supports a number of dimensionality reduction techniques and statistical tests.
+Experiments can include sensitivity tests or rolling window tests across time.
+
+
+.. toctree::
+
+   tutorials/nihcxr/monitor_api.ipynb
diff --git a/api/_sources/tutorials_use_cases.rst.txt b/api/_sources/tutorials_use_cases.rst.txt
new file mode 100644
index 000000000..6de09ecd6
--- /dev/null
+++ b/api/_sources/tutorials_use_cases.rst.txt
@@ -0,0 +1,68 @@
+Example use cases
+=================
+
+Tabular data
+------------
+
+Kaggle Heart Failure Prediction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is a binary classification problem where the goal is to predict the
+risk of heart disease. The `heart failure dataset `_
+is available on Kaggle. The dataset contains 11 features and 1 target
+variable.
+
+.. toctree::
+
+   tutorials/kaggle/heart_failure_prediction.ipynb
+
+MIMICIV Mortality Prediction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is a binary classification problem where the goal is to predict the
+risk of in-hospital mortality. The `MIMICIV dataset `_ is an EHR dataset
+collected from a single hospital site, which includes ICU data.
+
+.. toctree::
+
+   tutorials/mimiciv/mortality_prediction.ipynb
+
+Synthea Prolonged Length of Stay Prediction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is a binary classification problem where the goal is to predict
+whether a patient will have a prolonged length of stay in the hospital
+(more than 7 days). The `synthea dataset `_
+is generated using Synthea, a synthetic patient generator. The dataset
+contains observations, medications, and procedures as features.
+
+.. toctree::
+
+   tutorials/synthea/los_prediction.ipynb
+
+Diabetes 130-US Hospitals for Years 1999-2008 Readmission Prediction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This is a binary classification problem where the goal is to predict the
+risk of readmission. The `diabetes dataset `_
+is available from the UCI Machine Learning Repository. The dataset contains
+47 features and 1 target variable.
+
+.. toctree::
+
+   tutorials/diabetes_130/readmission_prediction.ipynb
+
+Image data
+----------
+
+NIH Chest X-ray classification
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This tutorial showcases the use of the ``tasks`` API to implement a chest X-ray
+classification task. The dataset used is the `NIH Chest X-ray dataset `__,
+which contains 112,120 frontal-view X-ray images of 30,805 unique patients
+with 14 disease labels.
+
+The tutorial also demonstrates the use of the ``evaluate`` API to evaluate the
+performance of a model on the task; a compressed sketch follows, before the
+notebook link.
+
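+This hypothetical sketch condenses the workflow (the class and method names
+come from the API reference above; the constructor argument and dataset
+variables are placeholders, not the real interface):
+
+.. code-block:: python
+
+   from cyclops.tasks.classification import MultilabelImageClassificationTask
+
+   # Placeholder wiring; the notebook configures the model and data in full.
+   task = MultilabelImageClassificationTask(models="densenet")
+   predictions = task.predict(nihcxr_images)  # placeholder dataset variable
+   results = task.evaluate(nihcxr_dataset)    # placeholder dataset variable
+
+.. 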
toctree:: + + tutorials/nihcxr/cxr_classification.ipynb diff --git a/api/_sources/user_guide.rst.txt b/api/_sources/user_guide.rst.txt new file mode 100644 index 000000000..8c62297e8 --- /dev/null +++ b/api/_sources/user_guide.rst.txt @@ -0,0 +1,9 @@ +User Guide +========== + +.. toctree:: + + installation + evaluation + model_report + monitoring diff --git a/api/_sphinx_design_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css b/api/_sphinx_design_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css new file mode 100644 index 000000000..eb19f698a --- /dev/null +++ b/api/_sphinx_design_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) 
!important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) 
!important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto 
!important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block 
!important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid 
!important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid 
!important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x 
mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 
auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 
auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative}details.sd-dropdown .sd-summary-title{font-weight:700;padding-right:3em !important;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;user-select:none}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary{list-style:none;padding:1em}details.sd-dropdown summary .sd-octicon.no-title{vertical-align:middle}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown summary::-webkit-details-marker{display:none}details.sd-dropdown summary:focus{outline:none}details.sd-dropdown .sd-summary-icon{margin-right:.5em}details.sd-dropdown .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary:hover .sd-summary-up svg,details.sd-dropdown summary:hover .sd-summary-down svg{opacity:1;transform:scale(1.1)}details.sd-dropdown .sd-summary-up svg,details.sd-dropdown .sd-summary-down svg{display:block;opacity:.6}details.sd-dropdown .sd-summary-up,details.sd-dropdown 
.sd-summary-down{pointer-events:none;position:absolute;right:1em;top:1em}details.sd-dropdown[open]>.sd-summary-title .sd-summary-down{visibility:hidden}details.sd-dropdown:not([open])>.sd-summary-title .sd-summary-up{visibility:hidden}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: 
#fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/api/_sphinx_design_static/design-tabs.js b/api/_sphinx_design_static/design-tabs.js new file mode 100644 index 000000000..36b38cf0d --- /dev/null +++ b/api/_sphinx_design_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = []; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/api/_static/basic.css b/api/_static/basic.css new file mode 100644 index 000000000..30fee9d0f --- /dev/null +++ b/api/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
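+/* Sphinx info field lists (":param:", ":returns:", ...) render as a two-column grid: field name | field body */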
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
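+/* sphinx.ext.viewcode: the block a [source] link jumps to; negative margins let its highlight span the gutter */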
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/api/_static/check-solid.svg b/api/_static/check-solid.svg new file mode 100644 index 000000000..92fad4b5c --- /dev/null +++ b/api/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/api/_static/clipboard.min.js b/api/_static/clipboard.min.js new file mode 100644 index 000000000..54b3c4638 --- /dev/null +++ b/api/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/api/_static/copybutton.css b/api/_static/copybutton.css new file mode 100644 index 000000000..f1916ec7d --- /dev/null +++ b/api/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *

<p class="o-tooltip--left" data-tooltip="Hey">Short</p>
+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/api/_static/copybutton.js b/api/_static/copybutton.js new file mode 100644 index 000000000..19f1fbddd --- /dev/null +++ b/api/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 'copy_failure': '复制失败', + }, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if (window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the 
+// tooltip and icon change, so that we can hide the icon before changing back. +var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? 
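+ // (If so, a line ending in the continuation character keeps copying onto the next
+ // line, and everything between here-doc delimiters is copied verbatim. With the
+ // prompt regexp ">>> |\.\.\. " used below, ">>> x = 1" is copied as "x = 1" and
+ // unprompted output lines are dropped.)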
+ var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = filterText(target, exclude); + return formatCopyText(text, '>>> |\\.\\.\\. ', true, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/api/_static/copybutton_funcs.js b/api/_static/copybutton_funcs.js new file mode 100644 index 000000000..dbe1aaad7 --- /dev/null +++ b/api/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/api/_static/css/cyclops.css b/api/_static/css/cyclops.css new file mode 100644 index 000000000..56b1c6989 --- /dev/null +++ b/api/_static/css/cyclops.css @@ -0,0 +1,49 @@ +footer span.commit code, +.rst-content pre.literal-block, +.rst-content div[class^='highlight'] pre, +.rst-content .linenodiv pre, +.rst-content tt, +.rst-content code, +.rst-content pre, +.rst-content kbd, +.rst-content samp { + font-family: 'IBM Plex Mono', monospace; + font-size: 0.8rem; +} +footer { + color: var(--text-color); +} +footer .footer-small-text { + font-weight: 300; + font-size: 0.9rem; +} +footer .copyright { + font-weight: 300; + font-size: 0.8rem; +} +footer div.logo { + display: flex; + flex-wrap: wrap; + justify-content: space-around; + margin: 0px; + padding: 10px 0px 0px 0px; +} +footer a.logo { + /* Using flex here (to vertically align the child img) causes aspect-ratio issues */ + flex-basis: 120px; + margin: 10px auto 10px auto; + text-align: center; +} +footer a.logo:hover { + text-decoration: none; +} +footer span.logo { + display: inline-block; + height: 100%; + vertical-align: middle; +} +footer img.logo { + display: inline-block; + vertical-align: middle; + height: auto; +} diff --git a/api/_static/custom.js b/api/_static/custom.js new file mode 100644 index 000000000..738d14549 --- /dev/null +++ b/api/_static/custom.js @@ -0,0 +1,6 @@ +requirejs.config({ + paths: { + base: '/static/base', + plotly: 'https://cdn.plot.ly/plotly-2.30.0.min.js?noext', + }, +}); diff --git a/api/_static/cyclops_logo-dark.png b/api/_static/cyclops_logo-dark.png new file mode 100644 index 000000000..6f7f3c152 Binary files /dev/null and b/api/_static/cyclops_logo-dark.png differ diff --git a/api/_static/cyclops_logo.png 
b/api/_static/cyclops_logo.png new file mode 100644 index 000000000..0b98f8585 Binary files /dev/null and b/api/_static/cyclops_logo.png differ diff --git a/api/_static/debug.css b/api/_static/debug.css new file mode 100644 index 000000000..74d4aec33 --- /dev/null +++ b/api/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. +*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/api/_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css b/api/_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css new file mode 100644 index 000000000..eb19f698a --- /dev/null +++ b/api/_static/design-style.1e8bd061cd6da7fc9cf755528e8ffc24.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) 
!important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) 
!important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem 
!important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none 
!important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid 
!important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color 
.15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) 
!important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px 
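/* Button pattern: each .sd-btn-<name> pairs a base state with a
   *-highlight shade on hover/focus, and .sd-btn-outline-<name> inverts it
   (colored text and border on a transparent base) until hover/focus fills
   it in; every color resolves through the --sd-color-* custom properties
   declared near the end of this sheet, so a theme can restyle all buttons
   by overriding those variables alone. */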
!important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 
1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 
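/* Negative-margin grid: .sd-row pulls left/right by half of --sd-gutter-x
   and up by the full --sd-gutter-y, while every direct child re-adds the
   half-gutter as horizontal padding and the vertical gutter as margin-top,
   so columns are evenly spaced and the row's outer edges stay flush with
   the container; the .sd-g-*, .sd-gx-* and .sd-gy-* classes only reassign
   those two custom properties. Illustrative markup, not part of this
   changeset: <div class="sd-row sd-row-cols-3 sd-g-2"><div>...</div></div> */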
auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 
auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 
auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative}details.sd-dropdown .sd-summary-title{font-weight:700;padding-right:3em !important;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;user-select:none}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary{list-style:none;padding:1em}details.sd-dropdown summary .sd-octicon.no-title{vertical-align:middle}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown summary::-webkit-details-marker{display:none}details.sd-dropdown summary:focus{outline:none}details.sd-dropdown .sd-summary-icon{margin-right:.5em}details.sd-dropdown .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary:hover .sd-summary-up svg,details.sd-dropdown summary:hover .sd-summary-down svg{opacity:1;transform:scale(1.1)}details.sd-dropdown .sd-summary-up 
svg,details.sd-dropdown .sd-summary-down svg{display:block;opacity:.6}details.sd-dropdown .sd-summary-up,details.sd-dropdown .sd-summary-down{pointer-events:none;position:absolute;right:1em;top:1em}details.sd-dropdown[open]>.sd-summary-title .sd-summary-down{visibility:hidden}details.sd-dropdown:not([open])>.sd-summary-title .sd-summary-up{visibility:hidden}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: 
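/* Theme contract: every component in this sheet resolves its colors
   through these --sd-color-* custom properties; one base tone per semantic
   name, a *-highlight shade for hover/focus states, and a *-text
   foreground. Downstream themes can re-skin sphinx-design by overriding
   this :root block (or the card/tab variables that follow) in a later
   stylesheet rather than editing component rules. */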
#212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/api/_static/design-tabs.js b/api/_static/design-tabs.js new file mode 100644 index 000000000..36b38cf0d --- /dev/null +++ b/api/_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = []; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/api/_static/doctools.js b/api/_static/doctools.js new file mode 100644 index 000000000..d06a71d75 --- /dev/null +++ b/api/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
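// Catalog miss: fall back to English pluralization, where n === 1 picks
// the singular. On a hit, translated[PLURAL_EXPR(n)] above selects the
// form using the plural rule compiled from the catalog by
// addTranslations() below via new Function. Hypothetical usage with an
// assumed German catalog object, shown only for illustration:
//   Documentation.addTranslations({ messages: { ... }, plural_expr: "n != 1", locale: "de" });
//   Documentation.ngettext("%s result", "%s results", 2);  // second (plural) form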
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/api/_static/documentation_options.js b/api/_static/documentation_options.js new file mode 100644 index 000000000..7e4c114f2 --- /dev/null +++ b/api/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/api/_static/favicon.ico b/api/_static/favicon.ico new file mode 100644 index 000000000..8e0b9daca Binary files /dev/null and b/api/_static/favicon.ico differ diff --git a/api/_static/file.png b/api/_static/file.png new file mode 100644 index 000000000..a858a410e Binary files /dev/null and b/api/_static/file.png differ diff --git a/api/_static/language_data.js 
b/api/_static/language_data.js new file mode 100644 index 000000000..250f5665f --- /dev/null +++ b/api/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = 
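// Steps 2-4 each strip one layer of derivational suffix, gated on the
// stem's "measure" m (the count of vowel-consonant sequences matched by
// the mgr0/mgr1 patterns above) so that short stems are left alone.
// Expected results under the classic Porter rules:
//   new Stemmer().stemWord("relational")  // -> "relate" (step 2: ational -> ate)
//   new Stemmer().stemWord("adjustable")  // -> "adjust" (step 4 drops "able", m > 1)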
/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/api/_static/logos/gemini_logo.png b/api/_static/logos/gemini_logo.png new file mode 100644 index 000000000..3ccca7315 Binary files /dev/null and b/api/_static/logos/gemini_logo.png differ diff --git a/api/_static/logos/vector_logo.png b/api/_static/logos/vector_logo.png new file mode 100644 index 000000000..3d9b106b0 Binary files /dev/null and b/api/_static/logos/vector_logo.png differ diff --git a/api/_static/minus.png b/api/_static/minus.png new file mode 100644 index 000000000..d96755fda Binary files /dev/null and b/api/_static/minus.png differ diff --git a/api/_static/plus.png b/api/_static/plus.png new file mode 100644 index 000000000..7107cec93 Binary files /dev/null and b/api/_static/plus.png differ diff --git a/api/_static/pygments.css b/api/_static/pygments.css new file mode 100644 index 000000000..c2e07c71e --- /dev/null +++ b/api/_static/pygments.css @@ -0,0 +1,258 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #8f5902; font-style: italic } /* Comment */ +.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */ +.highlight .g { color: #000000 } /* Generic */ +.highlight .k { color: #204a87; font-weight: bold } /* Keyword */ +.highlight .l { color: #000000 } /* Literal */ +.highlight .n { color: #000000 } /* Name */ +.highlight .o { color: #ce5c00; font-weight: bold } /* Operator */ +.highlight .x { color: #000000 } /* Other */ +.highlight .p { color: #000000; font-weight: bold } /* Punctuation */ +.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #8f5902; font-style: italic } /* Comment.Preproc */ +.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #a40000 } /* Generic.Deleted */ +.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ +.highlight .ges { color: 
#000000; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #ef2929 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #000000; font-style: italic } /* Generic.Output */ +.highlight .gp { color: #8f5902 } /* Generic.Prompt */ +.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ +.highlight .kc { color: #204a87; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #204a87; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #204a87; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #204a87; font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { color: #204a87; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #204a87; font-weight: bold } /* Keyword.Type */ +.highlight .ld { color: #000000 } /* Literal.Date */ +.highlight .m { color: #0000cf; font-weight: bold } /* Literal.Number */ +.highlight .s { color: #4e9a06 } /* Literal.String */ +.highlight .na { color: #c4a000 } /* Name.Attribute */ +.highlight .nb { color: #204a87 } /* Name.Builtin */ +.highlight .nc { color: #000000 } /* Name.Class */ +.highlight .no { color: #000000 } /* Name.Constant */ +.highlight .nd { color: #5c35cc; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #ce5c00 } /* Name.Entity */ +.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #000000 } /* Name.Function */ +.highlight .nl { color: #f57900 } /* Name.Label */ +.highlight .nn { color: #000000 } /* Name.Namespace */ +.highlight .nx { color: #000000 } /* Name.Other */ +.highlight .py { color: #000000 } /* Name.Property */ +.highlight .nt { color: #204a87; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #000000 } /* Name.Variable */ +.highlight .ow { color: #204a87; font-weight: bold } /* Operator.Word */ +.highlight .pm { color: #000000; font-weight: bold } /* Punctuation.Marker */ +.highlight .w { color: #f8f8f8 } /* Text.Whitespace */ +.highlight .mb { color: #0000cf; font-weight: bold } /* Literal.Number.Bin */ +.highlight .mf { color: #0000cf; font-weight: bold } /* Literal.Number.Float */ +.highlight .mh { color: #0000cf; font-weight: bold } /* Literal.Number.Hex */ +.highlight .mi { color: #0000cf; font-weight: bold } /* Literal.Number.Integer */ +.highlight .mo { color: #0000cf; font-weight: bold } /* Literal.Number.Oct */ +.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ +.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ +.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ +.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ +.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ +.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ +.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ +.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ +.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ +.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ +.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ +.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ +.highlight .bp { 
color: #3465a4 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #000000 } /* Name.Function.Magic */ +.highlight .vc { color: #000000 } /* Name.Variable.Class */ +.highlight .vg { color: #000000 } /* Name.Variable.Global */ +.highlight .vi { color: #000000 } /* Name.Variable.Instance */ +.highlight .vm { color: #000000 } /* Name.Variable.Magic */ +.highlight .il { color: #0000cf; font-weight: bold } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } +body[data-theme="dark"] .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #404040 } +body[data-theme="dark"] .highlight { background: #202020; color: #d0d0d0 } +body[data-theme="dark"] .highlight .c { color: #ababab; font-style: italic } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #d0d0d0 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #d0d0d0 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #6ebf26; font-weight: bold } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #d0d0d0 } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #d0d0d0 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #d0d0d0 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #d0d0d0 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #d0d0d0 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #ababab; font-style: italic } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #ababab; font-style: italic } /* Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #ff3a3a; font-weight: bold } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #ababab; font-style: italic } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #ababab; font-style: italic } /* Comment.Single */ +body[data-theme="dark"] .highlight .cs { color: #e50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #d22323 } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #d0d0d0; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .ges { color: #d0d0d0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body[data-theme="dark"] .highlight .gr { color: #d22323 } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #ffffff; font-weight: bold } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #589819 } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #cccccc } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #aaaaaa } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #d0d0d0; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #ffffff; 
text-decoration: underline } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #d22323 } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #6ebf26; font-weight: bold } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #6ebf26; font-weight: bold } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #6ebf26; font-weight: bold } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #6ebf26 } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #6ebf26; font-weight: bold } /* Keyword.Reserved */ +body[data-theme="dark"] .highlight .kt { color: #6ebf26; font-weight: bold } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #d0d0d0 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #51b2fd } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #ed9d13 } /* Literal.String */ +body[data-theme="dark"] .highlight .na { color: #bbbbbb } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #2fbccd } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #71adff; text-decoration: underline } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #40ffff } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #ffa500 } /* Name.Decorator */ +body[data-theme="dark"] .highlight .ni { color: #d0d0d0 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #bbbbbb } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #71adff } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #d0d0d0 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #71adff; text-decoration: underline } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #d0d0d0 } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #d0d0d0 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #6ebf26; font-weight: bold } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #40ffff } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #6ebf26; font-weight: bold } /* Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #d0d0d0 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #666666 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #51b2fd } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #51b2fd } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #51b2fd } /* Literal.Number.Hex */ +body[data-theme="dark"] .highlight .mi { color: #51b2fd } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #51b2fd } /* Literal.Number.Oct */ +body[data-theme="dark"] .highlight .sa { color: #ed9d13 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #ed9d13 } /* Literal.String.Backtick */ +body[data-theme="dark"] .highlight .sc { color: #ed9d13 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #ed9d13 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #ed9d13 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #ed9d13 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ed9d13 } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #ed9d13 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { 
color: #ed9d13 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #ffa500 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #ed9d13 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #ed9d13 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #ed9d13 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #2fbccd } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #71adff } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #40ffff } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #40ffff } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #40ffff } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #40ffff } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #51b2fd } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #404040 } +body:not([data-theme="light"]) .highlight { background: #202020; color: #d0d0d0 } +body:not([data-theme="light"]) .highlight .c { color: #ababab; font-style: italic } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #d0d0d0 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: #d0d0d0 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #6ebf26; font-weight: bold } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #d0d0d0 } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #d0d0d0 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #d0d0d0 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #d0d0d0 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #d0d0d0 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #ababab; font-style: italic } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight .cm { color: #ababab; font-style: italic } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #ff3a3a; font-weight: bold } /* Comment.Preproc */ +body:not([data-theme="light"]) .highlight .cpf { color: #ababab; font-style: italic } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #ababab; font-style: italic } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #e50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body:not([data-theme="light"]) .highlight .gd { color: #d22323 } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: 
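/* Second copy of the dark palette: the body[data-theme="dark"] rules
   above honor an explicit dark selection, while this prefers-color-scheme
   block applies the same colors when the OS is dark and the page has not
   been forced light, via body:not([data-theme="light"]); this is the
   theme's automatic light/dark mode. */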
#d0d0d0; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .ges { color: #d0d0d0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body:not([data-theme="light"]) .highlight .gr { color: #d22323 } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #ffffff; font-weight: bold } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #589819 } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #cccccc } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #aaaaaa } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #d0d0d0; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #ffffff; text-decoration: underline } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #d22323 } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #6ebf26; font-weight: bold } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #6ebf26; font-weight: bold } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #6ebf26; font-weight: bold } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #6ebf26 } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #6ebf26; font-weight: bold } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #6ebf26; font-weight: bold } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #d0d0d0 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #51b2fd } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #ed9d13 } /* Literal.String */ +body:not([data-theme="light"]) .highlight .na { color: #bbbbbb } /* Name.Attribute */ +body:not([data-theme="light"]) .highlight .nb { color: #2fbccd } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #71adff; text-decoration: underline } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #40ffff } /* Name.Constant */ +body:not([data-theme="light"]) .highlight .nd { color: #ffa500 } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #d0d0d0 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #bbbbbb } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #71adff } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #d0d0d0 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #71adff; text-decoration: underline } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #d0d0d0 } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #d0d0d0 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #6ebf26; font-weight: bold } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #40ffff } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #6ebf26; font-weight: bold } /* Operator.Word */ +body:not([data-theme="light"]) .highlight .pm { color: #d0d0d0 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #666666 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #51b2fd } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #51b2fd } /* 
Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #51b2fd } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #51b2fd } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #51b2fd } /* Literal.Number.Oct */ +body:not([data-theme="light"]) .highlight .sa { color: #ed9d13 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #ed9d13 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #ed9d13 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #ed9d13 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #ed9d13 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #ed9d13 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ed9d13 } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #ed9d13 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #ed9d13 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #ffa500 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #ed9d13 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #ed9d13 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #ed9d13 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #2fbccd } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #71adff } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #40ffff } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #40ffff } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #40ffff } /* Name.Variable.Instance */ +body:not([data-theme="light"]) .highlight .vm { color: #40ffff } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #51b2fd } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file diff --git a/api/_static/require.min.js b/api/_static/require.min.js new file mode 100644 index 000000000..73dbde8aa --- /dev/null +++ b/api/_static/require.min.js @@ -0,0 +1 @@ +var requirejs,require,define;!function(global,setTimeout){var req,s,head,baseElement,dataMain,src,interactiveScript,currentlyAddingScript,mainScript,subPath,version="2.3.6",commentRegExp=/\/\*[\s\S]*?\*\/|([^:"'=]|^)\/\/.*$/gm,cjsRequireRegExp=/[^.]\s*require\s*\(\s*["']([^'"\s]+)["']\s*\)/g,jsSuffixRegExp=/\.js$/,currDirRegExp=/^\.\//,op=Object.prototype,ostring=op.toString,hasOwn=op.hasOwnProperty,isBrowser=!("undefined"==typeof window||"undefined"==typeof navigator||!window.document),isWebWorker=!isBrowser&&"undefined"!=typeof importScripts,readyRegExp=isBrowser&&"PLAYSTATION 3"===navigator.platform?/^complete$/:/^(complete|loaded)$/,defContextName="_",isOpera="undefined"!=typeof opera&&"[object Opera]"===opera.toString(),contexts={},cfg={},globalDefQueue=[],useInteractive=!1;function commentReplace(e,t){return t||""}function isFunction(e){return"[object Function]"===ostring.call(e)}function isArray(e){return"[object Array]"===ostring.call(e)}function each(e,t){var i;if(e)for(i=0;i{var t={212:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var 
e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort((function(t,e){return o(t.content)=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,(function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})})),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame((function(){r(a),v.detect()}))};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,(function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}})),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(212),e=n.n(t),o=null,r=null,c=window.pageYOffset||document.documentElement.scrollTop;const s=64;function l(){const t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. 
Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function a(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach((t=>{t.addEventListener("click",l)}))}(),function(){let t=0,e=!1;window.addEventListener("scroll",(function(n){t=window.scrollY,e||(window.requestAnimationFrame((function(){var n;n=t,0==Math.floor(r.getBoundingClientRect().top)?r.classList.add("scrolled"):r.classList.remove("scrolled"),function(t){tc&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1})),e=!0)})),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);return r.getBoundingClientRect().height+.5*t+1}})}document.addEventListener("DOMContentLoaded",(function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),a()}))})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/api/_static/scripts/furo.js.LICENSE.txt b/api/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 000000000..1632189c7 --- /dev/null +++ b/api/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. + * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/api/_static/scripts/furo.js.map b/api/_static/scripts/furo.js.map new file mode 100644 index 000000000..470530223 --- /dev/null +++ b/api/_static/scripts/furo.js.map @@ -0,0 +1 @@ 
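// Editorial sketch (not part of the generated furo.js bundle above, which the
// source map below describes): the bundle persists the selected colour scheme
// under the localStorage key "theme" and mirrors it on
// document.body.dataset.theme ("light" | "dark" | "auto"), which the
// pygments.css rules earlier in this patch key off via body[data-theme="dark"].
// Assuming that same convention, a page script could pin the docs to dark mode:
(function forceDarkMode() {
  localStorage.setItem("theme", "dark"); // persisted across page loads
  document.body.dataset.theme = "dark"; // applied by the [data-theme] CSS rules
})();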
+{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACLA,OACAC,KAbO,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,EAVgB,CAWrC,EAOIK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,CACpC,EAMIG,EAAe,SAAUC,GACvBA,GACFA,EAASC,MAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,CACT,GAEJ,EAwCIC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,OAC7B,CA2Be4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,CACrC,EAMImC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,aAkC7B,EAmBIU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,GAEvD,CAUMwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,EAEjE,EAOIC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,GAV0B,CAWjD,EAOIiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,EAOIoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,GAVS,CAW9B,EA6LA,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,GAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,GAEb,IAGAL,EAAaC,EACf,EAKAgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,CAqEIuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,KAchB,GAMIe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,OACpD,EAMIC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,uBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,QACb,GACF,EAkDA,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,KACV1D,EAAW,IACb,EAOEA,EA3XS,WACX,IAAI+E,EAAS,CAAC,EAOd,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,WAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAAeD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,EACpB,CACF,IACOH,CACT,CAkXeK,CAAOhG,EAAUmE,GAAW,CAAC,GAGxCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,CACT,CAOF,CArcW4B,CAAQvG,EAChB,UAFM,SAEN,uBCXDwG,EAA2B,CAAC,EAGhC,SAASC,E
AAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAHAE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,OACf,CCrBAJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,CAAM,ECLdR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,IAE1E,ECNDO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,EAChB,CAAE,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,MACxC,CACA,CAPuB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,4CCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgB/H,OAAO6C,aAAeP,SAASC,gBAAgByF,UACnE,MAAMC,EAAmB,GA2EzB,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaItI,OAAOuI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGThG,SAASS,KAAK4F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,UA0B5B,CAkDA,SAASnC,KART,WAEE,MAAM4C,EAAUzG,SAAS0G,uBAAuB,gBAChDrE,MAAMsE,KAAKF,GAASlE,SAASqE,IAC3BA,EAAI9C,iBAAiB,QAAS8B,EAAe,GAEjD,CAGEiB,GA9CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdrJ,OAAOoG,iBAAiB,UAAU,SAAUuB,GAC1CyB,EAA6BpJ,OAAOsJ,QAE/BD,IACHrJ,OAAOwF,uBAAsB,WAzDnC,IAAuB+D,IA0DDH,EA9GkC,GAAlDzG,KAAK6G,MAAM1B,EAAO7F,wBAAwBQ,KAC5CqF,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,YAI5B,SAAmCyF,GAC7BA,EAAYtB,EACd3F,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCyF,EAAYxB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BoF,EAAYxB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBwB,CAClB,CAoCEE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAd1B,IAKa,GAAb0B,EACF1B,EAAU6B,SAAS,EAAG,GAGtB/G,KAAKC,KAAK2G,IACV5G,KAAK6G,MAAMlH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU6B,SAAS,EAAG7B,EAAU7E,cAGhBV,SAASqH,cAAc,mBAc3C,CAKEC,CAAoBL,GAwDdF,GAAU,CACZ,IAEAA,GAAU,EAEd,IACArJ,OAAO6J,QACT,CA6BEC,GA1BkB,OAAdjC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRuJ,WAAW,EACX5J,SAAU,iBACVI,OAAQ,KACN,IAAIyJ,EAAM9H,WAAW+H,iBAAiB3H,SAASC,iBAAiB2H,UAChE,OAAOpC,EAAO7F,wBAAwBkI,OAAS,GAAMH,EAAM,CAAC,GAiBlE,CAcA1H,SAAS8D,iBAAiB,oBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASqH,cAAc,UAChC9B,EAAYvF,SAASqH,cAAc,eAEnCxD,GACF","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom 
of the viewport\n */\n var isAtBottom = function () {\n if (\n Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested 
navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public 
APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = window.pageYOffset || document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader() {\n if (Math.floor(header.getBoundingClientRect().top) == 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if 
(rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader();\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n return header.getBoundingClientRect().height + 0.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", 
main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__esModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","GO_TO_TOP_OFFSET","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","floor","scrollHandlerForBackToTop","scrollTo","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","height"],"sourceRoot":""} \ No newline at end of file diff --git a/api/_static/searchtools.js b/api/_static/searchtools.js new file mode 100644 index 000000000..7918c3fab --- /dev/null +++ b/api/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
+ * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== null) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick!
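// Editorial sketch: illustrating the default splitQuery defined above, which
// tokenises on any run of characters that are not Unicode letters, numbers,
// underscores, or emoji (the query string here is an arbitrary example):
console.log(splitQuery("Aggregator.fit_transform, time-series"));
// -> ["Aggregator", "fit_transform", "time", "series"]
// Like Scorer, splitQuery is only defined when undefined, so a project can
// predefine it, e.g. to keep dotted names together.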
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
!terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/api/_static/skeleton.css b/api/_static/skeleton.css new file mode 100644 index 000000000..467c878c6 --- /dev/null +++ b/api/_static/skeleton.css @@ -0,0 +1,296 @@ +/* Some sane resets. */ +html { + height: 100%; +} + +body { + margin: 0; + min-height: 100%; +} + +/* All the flexbox magic! 
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + 
.sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + width: 88rem; + } +} diff --git a/api/_static/sphinx_highlight.js b/api/_static/sphinx_highlight.js new file mode 100644 index 000000000..8a96c69a1 --- /dev/null +++ b/api/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body");
+    window.setTimeout(() => {
+      terms.forEach((term) => _highlightText(body, term, "highlighted"));
+    }, 10);
+
+    const searchBox = document.getElementById("searchbox");
+    if (searchBox === null) return;
+    searchBox.appendChild(
+      document
+        .createRange()
+        .createContextualFragment(
+          '<p class="highlight-link">' +
+            '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
+            _("Hide Search Matches") +
+            "</a></p>"
+        )
+    );
+  },
+
+  /**
+   * helper function to hide the search marks again
+   */
+  hideSearchWords: () => {
+    document
+      .querySelectorAll("#searchbox .highlight-link")
+      .forEach((el) => el.remove());
+    document
+      .querySelectorAll("span.highlighted")
+      .forEach((el) => el.classList.remove("highlighted"));
+    localStorage.removeItem("sphinx_highlight_terms")
+  },
+
+  initEscapeListener: () => {
+    // only install a listener if it is really needed
+    if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+    document.addEventListener("keydown", (event) => {
+      // bail for input elements
+      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+      // bail with special keys
+      if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+      if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+        SphinxHighlight.hideSearchWords();
+        event.preventDefault();
+      }
+    });
+  },
+};
+
+_ready(() => {
+  /* Do not call highlightSearchWords() when we are on the search page.
+   * It will highlight words from the *previous* search query.
+   */
+  if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+  SphinxHighlight.initEscapeListener();
+});
diff --git a/api/_static/styles/furo-extensions.css b/api/_static/styles/furo-extensions.css
new file mode 100644
index 000000000..bc447f228
--- /dev/null
+++ b/api/_static/styles/furo-extensions.css
@@ -0,0 +1,2 @@
+#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version .fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within .fa-caret-down,#furo-readthedocs-versions:hover 
.fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;opacity:1;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0ms}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] 
.container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/api/_static/styles/furo-extensions.css.map b/api/_static/styles/furo-extensions.css.map new file mode 100644 index 000000000..9ba5637f9 --- /dev/null +++ b/api/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAKE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cALA,UASA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,oBACA,CACA,wCACE,cAEJ,8BACE,UC5CN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n background: #1a1c1e\n\n .fa-book\n 
float: left\n color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Make it visible\n opacity: 1\n\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n 
--tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/api/_static/styles/furo.css b/api/_static/styles/furo.css new file mode 100644 index 000000000..3d29a218f --- /dev/null +++ b/api/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{-webkit-text-size-adjust:100%;line-height:1.15}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid 
var(--color-foreground-border)}a,blockquote,dl,ol,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{clip:rect(0,0,0,0)!important;border:0!important;height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:1px!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 
0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foreground-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#646776;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2962ff;--color-brand-content:#2a5adf;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-se
condary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover );--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link--hover:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link-underline--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] 
.only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto,body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}a:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link);text-decoration-color:var(--color-link-underline--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] *{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}.sidebar-scroll::-webkit-scrollbar,.toc-scroll::-webkit-scrollbar,article[role=main] ::-webkit-scrollbar{height:.25rem;width:.25rem}.sidebar-scroll::-webkit-scrollbar-thumb,.toc-scroll::-webkit-scrollbar-thumb,article[role=main] ::-webkit-scrollbar-thumb{background-color:var(--color-foreground-border);border-radius:.125rem}body,html{background:var(--color-background-primary);color:var(--color-foreground-primary);height:100%}article{background:var(--color-content-background);color:var(--color-content-foreground);overflow-wrap:break-word}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center 
a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);text-decoration-color:var(--color-link--hover)}.no-js .theme-toggle-container{display:none}.theme-toggle-container{vertical-align:middle}.theme-toggle{background:transparent;border:none;cursor:pointer;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1rem;vertical-align:middle;width:1rem}.theme-toggle-header{float:left;padding:1rem .5rem}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1rem;width:1rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg{color:inherit;height:1rem;width:1rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0ms,height 0ms,opacity .25s ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem .5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{fill:currentColor;display:inline-block;height:1rem;width:1rem}.back-to-top span{margin-left:.25rem}.show-back-to-top .back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid 
var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.theme-toggle-header{display:block}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.25rem;width:1.25rem}:target{scroll-margin-top:var(--header-height)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}.content{margin-left:auto;margin-right:auto}}@media(max-width:52em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){.content{padding:0 1em}article aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}.admonition p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem .4rem 2rem;position:relative}.admonition p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid 
var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--attention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-admonition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{background-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-titl
e--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:3.5rem}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}.versionmodified{font-style:italic}div.deprecated p,div.versionadded p,div.versionchanged p{margin-bottom:.125rem;margin-top:.125rem}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound .caption,.toctree-wrapper.compound :not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);padding:.1em .2em}pre.literal-block .sig-inline,pre.literal-block code.literal{font-size:inherit;padding:0}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] 
.table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class] pre{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>p,div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article .align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul 
li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}.table-wrapper{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils th{background:var(--color-table-header-background)}table.docutils td,table.docutils th{border-bottom:1px solid 
var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li :target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid 
var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox 
.highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg width='12' height='12' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' stroke-width='1.5' stroke='%23607D8B' fill='none' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M0 0h24v24H0z' stroke='none'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - 
var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree .reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling.Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/api/_static/styles/furo.css.map b/api/_static/styles/furo.css.map new file mode 100644 index 000000000..1924b3334 --- /dev/null +++ b/api/_static/styles/furo.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KAEE,6BAA8B,CAD9B,gBAEF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,gCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAOE,6BAEA,mBANA,qBAEA,sBACA,0BAFA,oBAHA,4BAOA,6BANA,mBAOA,CAEF,gBACE,aCPF,KCGE,mHAEA,wGAGA,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CChCxC,+FAGA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCPjC,ukBCYA,srCAZF,kaCVA,mLAOA,oTAWA,2UAaA,0CACA,gEACA,0CAGA,gEAUA,yCACA,+DAGA,4CACA,CACA,iEAGA,sGACA,uCACA,4DAGA,sCACA,2DAEA,4CACA,kEACA,oGACA,CAEA,0GACA,+CAGA,+MAOA,+EACA,wCAIA,4DACA,sEACA,kEACA,sEACA,gDAGA,+DACA,0CACA,gEACA,gGACA,CAGA,2DACA,qDAGA,0CACA,8CACA,oDACA,oDL7GF,iCAEA,iEAME,oCKyGA,yDAIA,sCACA,kCACA,sDAGA,0CACA,kEACA,oDAEA,sDAGA,oCACA,oEAIA,CAGA,yDAGA,qDACA,oDAGA,6DAIA,iEAGA,2DAEA,2DL9IE,4DAEA,gEAIF,gEKgGA,gFAIA,oNAOA,qDAEA,gFAIA,4DAIA,oEAMA,yEAIA,6DACA,0DAGA,uDAGA,qDAEA,wDLpII,6DAEA,yDACE,2DAMN,uCAIA,yCACE,8CAGF,sDMjDA,6DAKA,oCAIA,4CACA,kBAGF,sBAMA,2BAME,qCAGA,qCAEA,iCAEA,+BAEA,mCAEA,qCAIA,CACA,gCACA,gDAKA,kCAIA,6BAEA,0CAQA,kCAIF,8BAGE,8BACA,uCAGF,sCAKE,kCAEA,sDAGA,iCACE,CACA,2FAGA
,gCACE,CACA,+DCzEJ,wCAEA,sBAEF,yDAEE,mCACA,wDAGA,2GAGA,wIACE,gDAMJ,kCAGE,6BACA,0CAGA,gEACA,8BACA,uCAKA,sCAIA,kCACA,sDACA,iCACA,sCAOA,sDAKE,gGAIE,+CAGN,sBAEE,yCAMA,0BAOA,yLAKA,aACA,MAEF,6BACE,mBAEA,wCAEF,wCAIE,kCAGA,SACA,kCAKA,mBAGA,CAJA,eACA,CAHF,gBAEE,CAWA,mBACA,mBACA,mDAIA,YACA,mBACA,CAEE,kBAMF,OAPE,kBAOF,oCACA,yCAEA,wBAEA,cADA,WACA,GACA,oBACA,CAFA,gBAEA,aAGF,+CAEE,UAJE,wBAEJ,CAFI,SAIF,CACA,2BACA,GAGA,uBACE,CAJF,yBAGA,CACE,iDACA,uCAEA,yDACE,cACA,wDAKN,yDAIE,uBAEF,kBACE,uBAEA,kDAKA,0DAEA,CAHA,oBAIA,0GAWA,aAEA,CAHA,YAGA,4HAKF,+CAGE,sBAEF,WAKE,0CAGA,CANA,qCAGA,CAJA,WAOA,SAIA,0CACE,CALF,qCAIA,CACE,wBAEA,mBAEJ,gBACE,gBAIA,+CAKF,CAIE,kDAEA,CANF,8BAIE,CAEA,YAGA,CAfF,2BACE,CAHA,UAEF,CAYE,UAGA,2CACF,iEAOE,iCACA,8BAGA,wCAIA,wBAMI,0CAKF,CATA,6DAGA,CALF,qBAEE,CASA,YACA,yBAGA,CAEE,cAKN,CAPI,sBAOJ,gCAGE,qBAEA,WACA,aACA,sCAEA,mBACA,6BAGA,uEADA,qBACA,6BAIA,yBACA,qCAEE,UAEA,YACA,sBAEF,8BAGA,CAPE,aACA,WAMF,4BACE,sBACA,WAMJ,uBACE,cAYE,mBAXA,qDAKA,qCAGA,CAEA,YACA,CAHA,2BAEA,CACA,oCAEA,4CACA,uBAIA,sBAEJ,eAFI,cAIF,iBACE,CAHJ,kBAGI,yBAEA,oCAIA,qDAMF,mEAGE,+CAKA,gCAEA,qCAGA,oCAGE,sBACA,CAJF,WAEE,CAFF,eAEE,SAEA,mBACA,qCACE,aACA,CAFF,YADA,qBACA,WAEE,sBACA,kEAEN,cAEE,CAFF,YAEE,iDAKA,uCAIA,2DAKA,kBAEA,CAHA,sBAGA,mBACA,0BAEJ,yBAII,aADA,WACA,CAMF,UAFE,kBAEF,CAJF,gBAEI,CAFJ,iBAIE,6CC9ZF,yBACE,WACA,iBAEA,aAFA,iBAEA,6BAEA,kCACA,mBAKA,gCAGA,CARA,QAEA,CAGA,UALA,qBAEA,qDAGA,CALA,OAQA,4BACE,cAGF,2BACE,gCAEJ,CAHE,UAGF,8CAGE,CAHF,UAGE,wCAGA,qBACA,CAFA,UAEA,6CAGA,yCAIA,sBAHA,UAGA,kCACE,OACA,CADA,KACA,cAQF,0CACE,CAFF,kBACA,CACE,wEACA,CARA,YACA,CAKF,mBAFF,MACE,CAIE,gBAJF,iCAJE,cAGJ,CANI,oBAEA,CAKF,SAIE,2BADA,UACA,kBAGF,sCACA,CAFF,WACE,WACA,mBACE,kDACA,0EACA,uDAKJ,aACE,mDAII,CAJJ,6CAII,4BACA,sCACE,kEACA,+CACE,aACA,WADA,+BACA,uEANN,YACE,mDAEE,mBADF,0CACE,CADF,qBACE,0DACA,YACE,4DACA,sEANN,YACE,8CACA,kBADA,UACA,2CACE,2EACA,cACE,kEACA,mEANN,yBACE,4DACA,sBACE,+EAEE,iEACA,qEANN,sCACE,CAGE,iBAHF,gBAGE,qBACE,CAJJ,uBACA,gDACE,wDACA,6DAHF,2CACA,CADA,gBACA,eACE,CAGE,sBANN,8BACE,CAII,iBAFF,4DACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCrEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAEE,uCAEF,kEAGA,8CAEA,uDAKA,oCAEA,yDAEE,gEAKF,+CC5FA,0EAGE,CACA,qDCLJ,+DAIE,sCAIA,kEACE,yBACA,2FAMA,gBACA,yGCbF,mBAOA,2MAIA,4HAYA,0DACE,8GAYF,8HAQE,mBAEA,6HAOF,YAGA,mIAME,eACA,CAFF,YAEE,4FAMJ,8BAEE,uBAYA,sCAEE,CAJF,oBAEA,CARA,wCAEA,CAHA,8BACA,CAFA,eACA,CAGA,wCAEA,CAEA,mDAIE,kCACE,6BACA,4CAKJ,kDAIA,eACE,aAGF,8BACE,uDACA,sCACA,cAEA,+BACA,CAFA,eAEA,wCAEF,YACE,iBACA,mCACA,0DAGF,qBAEE,CAFF,kBAEE,+BAIA,yCAEE,qBADA,gBACA,yBAKF,eACA,CAFF,YACE,CACA,iBACA,qDAEA,mDCvIJ,2FAOE,iCACA,CAEA,eACA,CAHA,kBAEA,CAFA,wBAGA,8BACA,eACE,CAFF,YAEE,0BACA,8CAGA,oBACE,oCAGA,kBACE,8DAEA,iBAEN,UACE,8BAIJ,+CAEE,qDAEF,kDAIE,YAEF,CAFE,YAEF,CCjCE,mFAJA,QACA,UAIE,CADF,iBACE,mCAGA,iDACE,+BAGF,wBAEA,mBAKA,6CAEF,CAHE,mBACA,CAEF,kCAIE,CARA,kBACA,CAFF,eASE,YACA,mBAGF,CAJE,UAIF,wCCjCA,oBDmCE,wBCpCJ,uCACE,8BACA,4CACA,oBAGA,2CCAA,6CAGE,CAPF,uBAIA,CDGA,gDACE,6BCVJ,CAWM,2CAEF,CAJA,kCAEE,CDJF,aCLF,gBDKE,uBCMA,gCAGA,gDAGE,wBAGJ,0BAEA,iBACE,aACF,CADE,UACF,uBACE,aACF,oBACE,YACF,4BACE,6CAMA,CAYF,6DAZE,mCAGE,iCASJ,4BAGE,4DADA,+BACA,CAFA,qBAEA,yBACE,aAEF,wBAHA,SAGA,iHACE,2DAKF,CANA,yCACE,CADF,oCAMA,uSAIA,sGACE,oDChEJ,WAEF,yBACE,QACA,eAEA,gBAEE,uCAGA,CALF,iCAKE,uCAGA,0BACA,CACA,oBACA,iCClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE
,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJClBF,YACA,gNAUE,6BAEF,oTAcI,kBACF,gHAIA,qBACE,eACF,qDACE,kBACF,6DACE,4BCxCJ,oBAEF,qCAEI,+CAGF,uBACE,uDAGJ,oBAkBE,mDAhBA,+CAaA,CAbA,oBAaA,0FAEE,CAFF,gGAbA,+BAaA,0BAGA,mQAIA,oNAEE,iBAGJ,CAHI,gBADA,gBAIJ,8CAYI,CAZJ,wCAYI,sVACE,iCAGA,uEAHA,QAGA,qXAKJ,iDAGF,CARM,+CACE,iDAIN,CALI,gBAQN,mHACE,gBAGF,2DACE,0EAOA,0EAKA,6EC/EA,iDACA,gCACA,oDAGA,qBACA,oDCFA,cACA,eAEA,yBAGF,sBAEE,iBACA,sNAWA,iBACE,kBACA,wRAgBA,kBAEA,iOAgBA,uCACE,uEAEA,kBAEF,qUAuBE,iDAIJ,CACA,geCxFF,4BAEE,CAQA,6JACA,iDAIA,sEAGA,mDAOF,iDAGE,4DAIA,8CACA,qDAEE,eAFF,cAEE,oBAEF,uBAFE,kCAGA,eACA,iBACA,mBAIA,mDACA,CAHA,uCAEA,CAJA,0CACA,CAIA,gBAJA,gBACA,oBADA,gBAIA,wBAEJ,gBAGE,6BACA,YAHA,iBAGA,gCACA,iEAEA,6CACA,sDACA,0BADA,wBACA,0BACA,oIAIA,mBAFA,YAEA,qBACA,0CAIE,uBAEF,CAHA,yBACE,CAEF,iDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIA,iBAJA,wBAIA,6CAJA,6CAOA,4BAGJ,CAHI,cAGJ,yCAGA,kBACE,CAIA,iDAEA,CATA,YAEF,CACE,4CAGA,kBAIA,wEAEA,wDAIF,kCAOE,iDACA,CARF,WAIE,sCAGA,CANA,2CACA,CAMA,oEARF,iBACE,CACA,qCAMA,iBAuBE,uBAlBF,YAKA,2DALA,uDAKA,CALA,sBAiBA,4CACE,CALA,gRAIF,YACE,UAEN,uBACE,YACA,mCAOE,+CAGA,8BAGF,+CAGA,4BCjNA,SDiNA,qFCjNA,gDAGA,sCACA,qCACA,sDAIF,CAIE,kDAGA,CAPF,0CAOE,kBAEA,kDAEA,CAHA,eACA,CAFA,YACA,CADA,SAIA,mHAIE,CAGA,6CAFA,oCAeE,CAbF,yBACE,qBAEJ,CAGE,oBACA,CAEA,YAFA,2CACF,CACE,uBAEA,mFAEE,CALJ,oBACE,CAEA,UAEE,gCAGF,sDAEA,yCC7CJ,oCAGA,CD6CE,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass","webpack:///./src/furo/assets/styles/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","webpack:///./src/furo/assets/styles/component
s/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. 
Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. 
Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto\n display: 
block\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack: -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial,\n sans-serif, Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace: \"SFMono-Regular\", Menlo, Consolas, Monaco,\n Liberation Mono, Lucida Console, monospace;\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 * #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n 
url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #646776; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2962ff;\n --color-brand-content: #2a5adf;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: 
var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n var(--color-background-hover--transparent) 0%,\n var(--color-background-hover) var(--sidebar-item-spacing-horizontal),\n var(--color-background-hover) 100%\n );\n\n --color-sidebar-item-expander-background: transparent;\n --color-sidebar-item-expander-background--hover: var(\n --color-background-hover\n );\n\n --color-sidebar-search-text: var(--color-foreground-primary);\n --color-sidebar-search-background: var(--color-background-secondary);\n --color-sidebar-search-background--focus: var(--color-background-primary);\n --color-sidebar-search-border: var(--color-background-border);\n --color-sidebar-search-icon: var(--color-foreground-muted);\n\n // Table of Contents (right)\n --color-toc-background: var(--color-background-primary);\n --color-toc-title-text: var(--color-foreground-muted);\n --color-toc-item-text: var(--color-foreground-secondary);\n --color-toc-item-text--hover: var(--color-foreground-primary);\n --color-toc-item-text--active: var(--color-brand-primary);\n\n // Actual page contents\n --color-content-foreground: var(--color-foreground-primary);\n --color-content-background: transparent;\n\n // Links\n --color-link: var(--color-brand-content);\n --color-link--hover: var(--color-brand-content);\n --color-link-underline: var(--color-background-border);\n --color-link-underline--hover: var(--color-foreground-border);\n}\n\n@mixin colors-dark {\n --color-problematic: #ee5151;\n\n // Base Colors\n --color-foreground-primary: #ffffffcc; // for main text and headings\n --color-foreground-secondary: #9ca0a5; // for secondary text\n --color-foreground-muted: #81868d; // for muted text\n --color-foreground-border: #666666; // for content borders\n\n --color-background-primary: #131416; // for content\n --color-background-secondary: #1a1c1e; // for navigation + ToC\n --color-background-hover: #1e2124ff; // for navigation-item hover\n --color-background-hover--transparent: #1e212400;\n --color-background-border: #303335; // for UI borders\n 
--color-background-item: #444; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2b8cee;\n --color-brand-content: #368ce2;\n\n // Highlighted text (search)\n --color-highlighted-background: #083563;\n\n // GUI Labels\n --color-guilabel-background: #08356380;\n --color-guilabel-border: #13395f80;\n\n // API documentation\n --color-api-keyword: var(--color-foreground-secondary);\n --color-highlight-on-target: #333300;\n\n // Admonitions\n --color-admonition-background: #18181a;\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: #18181a;\n --color-card-marginals-background: var(--color-background-hover);\n}\n","// This file contains the styling for making the content throughout the page,\n// including fonts, paragraphs, headings and spacing among these elements.\n\nbody\n font-family: var(--font-stack)\npre,\ncode,\nkbd,\nsamp\n font-family: var(--font-stack--monospace)\n\n// Make fonts look slightly nicer.\nbody\n -webkit-font-smoothing: antialiased\n -moz-osx-font-smoothing: grayscale\n\n// Line height from Bootstrap 4.1\narticle\n line-height: 1.5\n\n//\n// Headings\n//\nh1,\nh2,\nh3,\nh4,\nh5,\nh6\n line-height: 1.25\n font-weight: bold\n\n border-radius: 0.5rem\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n margin-left: -0.5rem\n margin-right: -0.5rem\n padding-left: 0.5rem\n padding-right: 0.5rem\n\n + p\n margin-top: 0\n\nh1\n font-size: 2.5em\n margin-top: 1.75rem\n margin-bottom: 1rem\nh2\n font-size: 2em\n margin-top: 1.75rem\nh3\n font-size: 1.5em\nh4\n font-size: 1.25em\nh5\n font-size: 1.125em\nh6\n font-size: 1em\n\nsmall\n opacity: 75%\n font-size: 80%\n\n// Paragraph\np\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n\n// Horizontal rules\nhr.docutils\n height: 1px\n padding: 0\n margin: 2rem 0\n background-color: var(--color-background-border)\n border: 0\n\n.centered\n text-align: center\n\n// Links\na\n text-decoration: underline\n\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &.muted-link\n color: inherit\n &:hover\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline--hover)\n","// This file contains the styles for the overall layouting of the documentation\n// skeleton, including the responsive changes as well as sidebar toggles.\n//\n// This is implemented as a mobile-last design, which isn't ideal, but it is\n// reasonably good-enough and I got pretty tired by the time I'd finished this\n// to move the rules around to fix this. Shouldn't take more than 3-4 hours,\n// if you know what you're doing tho.\n\n// HACK: Not all browsers account for the scrollbar width in media queries.\n// This results in horizontal scrollbars in the breakpoint where we go\n// from displaying everything to hiding the ToC. 
We accomodate for this by\n// adding a bit of padding to the TOC drawer, disabling the horizontal\n// scrollbar and allowing the scrollbars to cover the padding.\n// https://www.456bereastreet.com/archive/201301/media_query_width_and_vertical_scrollbars/\n\n// HACK: Always having the scrollbar visible, prevents certain browsers from\n// causing the content to stutter horizontally between taller-than-viewport and\n// not-taller-than-viewport pages.\n\nhtml\n overflow-x: hidden\n overflow-y: scroll\n scroll-behavior: smooth\n\n.sidebar-scroll, .toc-scroll, article[role=main] *\n // Override Firefox scrollbar style\n scrollbar-width: thin\n scrollbar-color: var(--color-foreground-border) transparent\n\n // Override Chrome scrollbar styles\n &::-webkit-scrollbar\n width: 0.25rem\n height: 0.25rem\n &::-webkit-scrollbar-thumb\n background-color: var(--color-foreground-border)\n border-radius: 0.125rem\n\n//\n// Overalls\n//\nhtml,\nbody\n height: 100%\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\narticle\n color: var(--color-content-foreground)\n background: var(--color-content-background)\n overflow-wrap: break-word\n\n.page\n display: flex\n // fill the viewport for pages with little content.\n min-height: 100%\n\n.mobile-header\n width: 100%\n height: var(--header-height)\n background-color: var(--color-header-background)\n color: var(--color-header-text)\n border-bottom: 1px solid var(--color-header-border)\n\n // Looks like sub-script/super-script have this, and we need this to\n // be \"on top\" of those.\n z-index: 10\n\n // We don't show the header on large screens.\n display: none\n\n // Add shadow when scrolled\n &.scrolled\n border-bottom: none\n box-shadow: 0 0 0.2rem rgba(0, 0, 0, 0.1), 0 0.2rem 0.4rem rgba(0, 0, 0, 0.2)\n\n .header-center\n a\n color: var(--color-header-text)\n text-decoration: none\n\n.main\n display: flex\n flex: 1\n\n// Sidebar (left) also covers the entire left portion of screen.\n.sidebar-drawer\n box-sizing: border-box\n\n border-right: 1px solid var(--color-sidebar-background-border)\n background: var(--color-sidebar-background)\n\n display: flex\n justify-content: flex-end\n // These next two lines took me two days to figure out.\n width: calc((100% - #{$full-width}) / 2 + #{$sidebar-width})\n min-width: $sidebar-width\n\n// Scroll-along sidebars\n.sidebar-container,\n.toc-drawer\n box-sizing: border-box\n width: $sidebar-width\n\n.toc-drawer\n background: var(--color-toc-background)\n // See HACK described on top of this document\n padding-right: 1rem\n\n.sidebar-sticky,\n.toc-sticky\n position: sticky\n top: 0\n height: min(100%, 100vh)\n height: 100vh\n\n display: flex\n flex-direction: column\n\n.sidebar-scroll,\n.toc-scroll\n flex-grow: 1\n flex-shrink: 1\n\n overflow: auto\n scroll-behavior: smooth\n\n// Central items.\n.content\n padding: 0 $content-padding\n width: $content-width\n\n display: flex\n flex-direction: column\n justify-content: space-between\n\n.icon\n display: inline-block\n height: 1rem\n width: 1rem\n svg\n width: 100%\n height: 100%\n\n//\n// Accommodate announcement banner\n//\n.announcement\n background-color: var(--color-announcement-background)\n color: var(--color-announcement-text)\n\n height: var(--header-height)\n display: flex\n align-items: center\n overflow-x: auto\n & + .page\n min-height: calc(100% - var(--header-height))\n\n.announcement-content\n box-sizing: border-box\n padding: 0.5rem\n min-width: 100%\n white-space: nowrap\n text-align: center\n\n a\n color: 
var(--color-announcement-text)\n text-decoration-color: var(--color-announcement-text)\n\n &:hover\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-link--hover)\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for theme\n////////////////////////////////////////////////////////////////////////////////\n.no-js .theme-toggle-container // don't show theme toggle if there's no JS\n display: none\n\n.theme-toggle-container\n vertical-align: middle\n\n.theme-toggle\n cursor: pointer\n border: none\n padding: 0\n background: transparent\n\n.theme-toggle svg\n vertical-align: middle\n height: 1rem\n width: 1rem\n color: var(--color-foreground-primary)\n display: none\n\n.theme-toggle-header\n float: left\n padding: 1rem 0.5rem\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for elements\n////////////////////////////////////////////////////////////////////////////////\n.toc-overlay-icon, .nav-overlay-icon\n display: none\n cursor: pointer\n\n .icon\n color: var(--color-foreground-secondary)\n height: 1rem\n width: 1rem\n\n.toc-header-icon, .nav-overlay-icon\n // for when we set display: flex\n justify-content: center\n align-items: center\n\n.toc-content-icon\n height: 1.5rem\n width: 1.5rem\n\n.content-icon-container\n float: right\n display: flex\n margin-top: 1.5rem\n margin-left: 1rem\n margin-bottom: 1rem\n gap: 0.5rem\n\n .edit-this-page svg\n color: inherit\n height: 1rem\n width: 1rem\n\n.sidebar-toggle\n position: absolute\n display: none\n// \n.sidebar-toggle[name=\"__toc\"]\n left: 20px\n.sidebar-toggle:checked\n left: 40px\n// \n\n.overlay\n position: fixed\n top: 0\n width: 0\n height: 0\n\n transition: width 0ms, height 0ms, opacity 250ms ease-out\n\n opacity: 0\n background-color: rgba(0, 0, 0, 0.54)\n.sidebar-overlay\n z-index: 20\n.toc-overlay\n z-index: 40\n\n// Keep things on top and smooth.\n.sidebar-drawer\n z-index: 30\n transition: left 250ms ease-in-out\n.toc-drawer\n z-index: 50\n transition: right 250ms ease-in-out\n\n// Show the Sidebar\n#__navigation:checked\n & ~ .sidebar-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .sidebar-drawer\n top: 0\n left: 0\n // Show the toc sidebar\n#__toc:checked\n & ~ .toc-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .toc-drawer\n top: 0\n right: 0\n\n////////////////////////////////////////////////////////////////////////////////\n// Back to top\n////////////////////////////////////////////////////////////////////////////////\n.back-to-top\n text-decoration: none\n\n display: none\n position: fixed\n left: 0\n top: 1rem\n padding: 0.5rem\n padding-right: 0.75rem\n border-radius: 1rem\n font-size: 0.8125rem\n\n background: var(--color-background-primary)\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), #6b728080 0px 0px 1px 0px\n\n z-index: 10\n\n margin-left: 50%\n transform: translateX(-50%)\n svg\n height: 1rem\n width: 1rem\n fill: currentColor\n display: inline-block\n\n span\n margin-left: 0.25rem\n\n .show-back-to-top &\n display: flex\n align-items: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Responsive layouting\n////////////////////////////////////////////////////////////////////////////////\n// Make things a bit bigger on bigger screens.\n@media (min-width: $full-width + $sidebar-width)\n html\n font-size: 110%\n\n@media (max-width: $full-width)\n // Collapse \"toc\" into the icon.\n .toc-content-icon\n display: flex\n 
.toc-drawer\n position: fixed\n height: 100vh\n top: 0\n right: -$sidebar-width\n border-left: 1px solid var(--color-background-muted)\n .toc-tree\n border-left: none\n font-size: var(--toc-font-size--mobile)\n\n // Accomodate for a changed content width.\n .sidebar-drawer\n width: calc((100% - #{$full-width - $sidebar-width}) / 2 + #{$sidebar-width})\n\n@media (max-width: $full-width - $sidebar-width)\n // Collapse \"navigation\".\n .nav-overlay-icon\n display: flex\n .sidebar-drawer\n position: fixed\n height: 100vh\n width: $sidebar-width\n\n top: 0\n left: -$sidebar-width\n\n // Swap which icon is visible.\n .toc-header-icon\n display: flex\n .toc-content-icon, .theme-toggle-content\n display: none\n .theme-toggle-header\n display: block\n\n // Show the header.\n .mobile-header\n position: sticky\n top: 0\n display: flex\n justify-content: space-between\n align-items: center\n\n .header-left,\n .header-right\n display: flex\n height: var(--header-height)\n padding: 0 var(--header-padding)\n label\n height: 100%\n width: 100%\n user-select: none\n\n .nav-overlay-icon .icon,\n .theme-toggle svg\n height: 1.25rem\n width: 1.25rem\n\n // Add a scroll margin for the content\n :target\n scroll-margin-top: var(--header-height)\n\n // Show back-to-top below the header\n .back-to-top\n top: calc(var(--header-height) + 0.5rem)\n\n // Center the page, and accommodate for the header.\n .page\n flex-direction: column\n justify-content: center\n .content\n margin-left: auto\n margin-right: auto\n\n@media (max-width: $content-width + 2* $content-padding)\n // Content should respect window limits.\n .content\n width: 100%\n overflow-x: auto\n\n@media (max-width: $content-width)\n .content\n padding: 0 $content-padding--small\n // Don't float sidebars to the right.\n article aside.sidebar\n float: none\n width: 100%\n margin: 1rem 0\n","//\n// The design here is strongly inspired by mkdocs-material.\n.admonition, .topic\n margin: 1rem auto\n padding: 0 0.5rem 0.5rem 0.5rem\n\n background: var(--color-admonition-background)\n\n border-radius: 0.2rem\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n font-size: var(--admonition-font-size)\n\n overflow: hidden\n page-break-inside: avoid\n\n // First element should have no margin, since the title has it.\n > :nth-child(2)\n margin-top: 0\n\n // Last item should have no margin, since we'll control that w/ padding\n > :last-child\n margin-bottom: 0\n\n.admonition p.admonition-title,\np.topic-title\n position: relative\n margin: 0 -0.5rem 0.5rem\n padding-left: 2rem\n padding-right: .5rem\n padding-top: .4rem\n padding-bottom: .4rem\n\n font-weight: 500\n font-size: var(--admonition-title-font-size)\n line-height: 1.3\n\n // Our fancy icon\n &::before\n content: \"\"\n position: absolute\n left: 0.5rem\n width: 1rem\n height: 1rem\n\n// Default styles\np.admonition-title\n background-color: var(--color-admonition-title-background)\n &::before\n background-color: var(--color-admonition-title)\n mask-image: var(--icon-admonition-default)\n mask-repeat: no-repeat\n\np.topic-title\n background-color: var(--color-topic-title-background)\n &::before\n background-color: var(--color-topic-title)\n mask-image: var(--icon-topic-default)\n mask-repeat: no-repeat\n\n//\n// Variants\n//\n.admonition\n border-left: 0.2rem solid var(--color-admonition-title)\n\n @each $type, $value in $admonitions\n &.#{$type}\n border-left-color: var(--color-admonition-title--#{$type})\n > .admonition-title\n background-color: 