Remove few dev deps #493

Merged: 4 commits, Oct 20, 2023
Changes from all commits
.pre-commit-config.yaml (2 changes: 1 addition & 1 deletion)

@@ -21,7 +21,7 @@ repos:
       - id: black
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: 'v0.0.288'
+    rev: 'v0.1.0'
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
cyclops/evaluate/evaluator.py (4 changes: 2 additions & 2 deletions)

@@ -293,8 +293,8 @@ def _prepare_models(
 ) -> Dict[str, WrappedModel]:
     """Prepare models for evaluation."""
     if isinstance(model, get_args(WrappedModel)):
-        model_name: str = model.model_.__class__.__name__  # type: ignore
-        return {model_name: model}  # type: ignore[dict-item]
+        model_name: str = model.model_.__class__.__name__
+        return {model_name: model}
     if isinstance(model, (list, tuple)):
         assert all(isinstance(m, get_args(WrappedModel)) for m in model)
         return {m.getattr("model_").__class__.__name__: m for m in model}
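For readers unfamiliar with the pattern in this hunk: isinstance(model, get_args(WrappedModel)) works because typing.get_args unpacks a Union alias into a tuple of concrete classes, and isinstance accepts such a tuple. A minimal, self-contained sketch; the wrapper classes and estimator below are made-up stand-ins for illustration, not the cyclops API:

from typing import Dict, Union, get_args

# Hypothetical stand-ins for cyclops' wrapped-model classes.
class SKModelWrapper:
    def __init__(self, estimator):
        self.model_ = estimator  # underlying estimator, mirroring the `model_` attribute above

class PTModelWrapper:
    def __init__(self, module):
        self.model_ = module

WrappedModel = Union[SKModelWrapper, PTModelWrapper]

def prepare_models(model: WrappedModel) -> Dict[str, WrappedModel]:
    # get_args(WrappedModel) == (SKModelWrapper, PTModelWrapper), a tuple of
    # classes that isinstance() accepts directly.
    if isinstance(model, get_args(WrappedModel)):
        return {model.model_.__class__.__name__: model}
    raise TypeError("expected a wrapped model")

class LogisticRegression:  # placeholder estimator class
    pass

print(prepare_models(SKModelWrapper(LogisticRegression())))
# {'LogisticRegression': <__main__.SKModelWrapper object at 0x...>}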
cyclops/evaluate/fairness/evaluator.py (16 changes: 8 additions & 8 deletions)

@@ -234,7 +234,7 @@ def evaluate_fairness(
     # since we have base values, remove overall slice
     slice_spec._registry.pop("overall", None)
 
-    results: Dict[str, Dict[str, Any]] = {}
+    results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for slice_name, slice_fn in slice_spec.slices():
         sliced_dataset = dataset.remove_columns(remove_columns or []).filter(
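The widened annotation on results reflects how evaluate_fairness nests its output, as the loops in _compute_parity_metrics below also show: prediction column, then slice name, then metric name. A small illustrative literal; the column, slice, and metric values here are invented for the sketch ("Group Size" is the only name taken from the diff):

from typing import Any, Dict

# results[prediction_column][slice_name][metric_name] -> metric value
results: Dict[str, Dict[str, Dict[str, Any]]] = {
    "preds_prob": {
        "age:[30 - 50)": {"Accuracy": 0.91, "Group Size": 420},
        "age:[50 - 70)": {"Accuracy": 0.87, "Group Size": 358},
    },
}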
@@ -933,32 +933,33 @@ def _construct_base_slice_name(base_values: Dict[str, Any]) -> str:
 
 
 def _compute_parity_metrics(
-    results: Dict[str, Dict[str, Dict[str, Dict[str, float]]]],
+    results: Dict[str, Dict[str, Dict[str, Any]]],
     base_slice_name: str,
-) -> Dict[str, Dict[str, Dict[str, Dict[str, float]]]]:
+) -> Dict[str, Dict[str, Dict[str, Any]]]:
     """Compute the parity metrics for each group and threshold if specified.
 
     Parameters
     ----------
-    results : Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    results : Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
     base_slice_name : str
         The name of the base slice.
 
     Returns
     -------
-    Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
 
     """
-    parity_results: Dict[str, Dict[str, Any]] = {}
+    parity_results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for key, prediction_result in results.items():
         parity_results[key] = {}
         for slice_name, slice_result in prediction_result.items():
             for metric_name, metric_value in slice_result.items():
                 if metric_name == "Group Size":
                     continue
 
                 # add 'Parity' to the metric name before @threshold, if specified
                 metric_name_parts = metric_name.split("@")
                 parity_metric_name = f"{metric_name_parts[0]} Parity"
@@ -967,14 +968,13 @@ def _compute_parity_metrics(
 
                 numerator = metric_value
                 denominator = prediction_result[base_slice_name][metric_name]
-                parity_metric_value = np.divide(  # type: ignore[call-overload]
+                parity_metric_value = np.divide(
                     numerator,
                     denominator,
                     out=np.zeros_like(numerator, dtype=np.float_),
                     where=denominator != 0,
                 )
 
                 # add the parity metric to the results
                 parity_results[key].setdefault(slice_name, {}).update(
                     {
                         parity_metric_name: _get_value_if_singleton_array(
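The np.divide call that loses its type: ignore here is the standard NumPy idiom for division that falls back to 0 wherever the denominator is 0: out pre-fills the result array and where restricts the division to safe positions. A standalone sketch of that parity computation; the metric values are invented, and np.float64 is used in place of the older np.float_ alias from the diff:

import numpy as np

# Invented metric values for one slice and for the base slice.
slice_metric = np.array([0.80, 0.65, 0.00])
base_metric = np.array([0.90, 0.00, 0.70])

# Divide element-wise, but only where the base value is nonzero;
# positions with a zero denominator keep the pre-filled 0.0 from `out`.
parity = np.divide(
    slice_metric,
    base_metric,
    out=np.zeros_like(slice_metric, dtype=np.float64),
    where=base_metric != 0,
)
print(parity)  # [0.88888889 0.         0.        ]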