
Commit

deploy: 3d8283f
a-kore committed Nov 6, 2023
1 parent 16a0813 commit 9ebf1fa
Showing 24 changed files with 4,962 additions and 21,810 deletions.
Binary file modified api/_images/tutorials_nihcxr_monitor_api_10_1.png
Binary file modified api/_images/tutorials_nihcxr_monitor_api_12_0.png
Binary file modified api/_images/tutorials_nihcxr_monitor_api_6_0.png
Binary file modified api/_images/tutorials_nihcxr_monitor_api_8_0.png
226 changes: 117 additions & 109 deletions api/_modules/cyclops/report/report.html

Large diffs are not rendered by default.

129 changes: 43 additions & 86 deletions api/_sources/tutorials/kaggle/heart_failure_prediction.ipynb.txt
@@ -28,16 +28,13 @@
"\n",
"import copy\n",
"import inspect\n",
"import os\n",
"import shutil\n",
"from datetime import date\n",
"from pathlib import Path\n",
"\n",
"import numpy as np\n",
"import plotly.express as px\n",
"from datasets import Dataset\n",
"from datasets.features import ClassLabel\n",
"from dateutil.relativedelta import relativedelta\n",
"from kaggle.api.kaggle_api_extended import KaggleApi\n",
"from sklearn.compose import ColumnTransformer\n",
"from sklearn.impute import SimpleImputer\n",
@@ -51,7 +48,7 @@
"from cyclops.process.feature.feature import TabularFeatures\n",
"from cyclops.report import ModelCardReport\n",
"from cyclops.report.plot.classification import ClassificationPlotter\n",
"from cyclops.report.utils import flatten_results_dict, get_metrics_trends\n",
"from cyclops.report.utils import flatten_results_dict\n",
"from cyclops.tasks.mortality_prediction import MortalityPredictionTask\n",
"from cyclops.utils.file import join, load_dataframe"
]
@@ -872,25 +869,24 @@
" remove_metrics=[\"BinaryROCCurve\", \"BinaryPrecisionRecallCurve\"],\n",
" model_name=model_name,\n",
")\n",
"print(results_flat)"
]
},
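For reference, flatten_results_dict is expected to return a flat mapping whose keys combine the evaluation slice and the metric name separated by a slash; the name.split("/") in the following cell relies on that layout. A minimal sketch of the assumed shape of results_flat (the slice names and values are illustrative, not actual outputs):

# Illustrative only: assumed key/value layout of results_flat.
# Real slice names, metric names, and values come from the evaluation above.
results_flat = {
    "overall/BinaryAccuracy": 0.84,
    "overall/BinaryAUROC": 0.88,
    "overall/BinaryF1Score": 0.79,
}
for key, value in results_flat.items():
    metric_slice, metric_name = key.split("/")
    print(metric_slice, metric_name, value)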
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"for name, metric in results_flat.items():\n",
" split, name = name.split(\"/\") # noqa: PLW2901\n",
" pass_fail_thresholds = 0.8 if name == \"BinaryAUROC\" else 0.6\n",
" descriptions = {\n",
" \"BinaryPrecision\": \"The proportion of predicted positive instances that are correctly predicted.\",\n",
" \"BinaryRecall\": \"The proportion of actual positive instances that are correctly predicted. Also known as recall or true positive rate.\",\n",
" \"BinaryAccuracy\": \"The proportion of all instances that are correctly predicted.\",\n",
" \"BinaryAUROC\": \"The area under the receiver operating characteristic curve (AUROC) is a measure of the performance of a binary classification model.\",\n",
" \"BinaryF1Score\": \"The harmonic mean of precision and recall.\",\n",
" }\n",
" report.log_quantitative_analysis(\n",
" \"performance\",\n",
" name=name,\n",
" value=metric,\n",
" description=descriptions[name],\n",
" metric_slice=split,\n",
" pass_fail_thresholds=pass_fail_thresholds,\n",
" pass_fail_threshold_fns=lambda x, threshold: x >= threshold,\n",
" pass_fail_thresholds=0.7,\n",
" pass_fail_threshold_fns=lambda x, threshold: bool(x >= threshold),\n",
" )"
]
},
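A brief aside on the bool(...) cast in the threshold function above: comparing a NumPy scalar against a float returns numpy.bool_ rather than a plain Python bool, and the cast keeps the logged pass/fail result as a native boolean (the exact serialization rationale is an assumption). A quick way to see the difference:

import numpy as np

value = np.float64(0.82)
print(type(value >= 0.7))        # <class 'numpy.bool_'>
print(type(bool(value >= 0.7)))  # <class 'bool'>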
@@ -1056,74 +1052,6 @@
"Please note, for the purpose of this tutorial, we will create three dummy reports to demonstrate the process of plotting these metric trends."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Generating dummy reports\n",
"dummy_report_num = 3\n",
"dummy_report_dir = join(os.getcwd(), \"dummy_reports\")\n",
"\n",
"for i in range(dummy_report_num):\n",
" # Create a dummy model card report\n",
" dummy_report = ModelCardReport(output_dir=dummy_report_dir)\n",
" # Add noise to the recent results to simulate the model performance change\n",
" noise = np.random.uniform(-0.1, 0.1)\n",
" dummy_result = {key: max(0, value - noise) for key, value in results_flat.items()}\n",
" for name, metric in dummy_result.items():\n",
" split, name = name.split(\"/\") # noqa: PLW2901\n",
" pass_fail_thresholds = 0.8 if name == \"BinaryAUROC\" else 0.6\n",
" dummy_report.log_quantitative_analysis(\n",
" \"performance\",\n",
" name=name,\n",
" value=metric,\n",
" metric_slice=split,\n",
" pass_fail_thresholds=pass_fail_thresholds,\n",
" pass_fail_threshold_fns=lambda x, threshold: x >= threshold,\n",
" )\n",
" # Rename the report folder to the dummy date to simulate the time change\n",
" dummy_report_path = dummy_report.export()\n",
" date_dir = Path(dummy_report_path).parents[1]\n",
" dummy_date = date.today() + relativedelta(months=-(6 * (i + 1)))\n",
" new_dir = f\"{dummy_report_dir}/{dummy_date}\"\n",
" if os.path.exists(new_dir):\n",
" shutil.rmtree(new_dir)\n",
" os.rename(date_dir, new_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Collecting performance metrics from previous reports and current report\n",
"trends = get_metrics_trends(\n",
" report_directory=dummy_report_dir,\n",
" flat_results=results_flat,\n",
" slice_names=[\"overall\"],\n",
")\n",
"shutil.rmtree(dummy_report_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plotting the performance over time\n",
"trends_plot = plotter.metrics_trends(trends)\n",
"report.log_plotly_figure(\n",
" fig=trends_plot,\n",
" caption=\"Performance over time\",\n",
" section_name=\"quantitative analysis\",\n",
")\n",
"trends_plot.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -1238,8 +1166,37 @@
},
"outputs": [],
"source": [
"report_path = report.export()\n",
"shutil.copy(f\"{report_path}\", \".\")"
"report_path = report.export(output_filename=\"heart_failure_report_periodic.html\")\n",
"shutil.copy(f\"{report_path}\", \".\")\n",
"for _ in range(5):\n",
" report._model_card.overview = None\n",
" report._model_card.quantitative_analysis = None\n",
" results_flat = flatten_results_dict(\n",
" results=results,\n",
" remove_metrics=[\"BinaryROCCurve\", \"BinaryPrecisionRecallCurve\"],\n",
" model_name=model_name,\n",
" )\n",
"\n",
" for name, metric in results_flat.items():\n",
" split, name = name.split(\"/\") # noqa: PLW2901\n",
" descriptions = {\n",
" \"BinaryPrecision\": \"The proportion of predicted positive instances that are correctly predicted.\",\n",
" \"BinaryRecall\": \"The proportion of actual positive instances that are correctly predicted. Also known as recall or true positive rate.\",\n",
" \"BinaryAccuracy\": \"The proportion of all instances that are correctly predicted.\",\n",
" \"BinaryAUROC\": \"The area under the receiver operating characteristic curve (AUROC) is a measure of the performance of a binary classification model.\",\n",
" \"BinaryF1Score\": \"The harmonic mean of precision and recall.\",\n",
" }\n",
" report.log_quantitative_analysis(\n",
" \"performance\",\n",
" name=name,\n",
" value=np.clip(metric + np.random.normal(0, 0.1), 0, 1),\n",
" description=descriptions[name],\n",
" metric_slice=split,\n",
" pass_fail_thresholds=0.7,\n",
" pass_fail_threshold_fns=lambda x, threshold: bool(x >= threshold),\n",
" )\n",
" report_path = report.export(output_filename=\"heart_failure_report_periodic.html\")\n",
" shutil.copy(f\"{report_path}\", \".\")"
]
},
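The loop above re-logs perturbed metrics and re-exports the report several times to simulate how performance might drift between reporting periods. A minimal sketch of just the perturbation step, assuming each metric value is a float in [0, 1]:

import numpy as np

# Illustrative only: add Gaussian noise to a metric and clip it back into [0, 1],
# mirroring the np.clip(metric + np.random.normal(0, 0.1), 0, 1) call above.
def perturb_metric(value, scale=0.1):
    return float(np.clip(value + np.random.normal(0, scale), 0, 1))

print(perturb_metric(0.85))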
{
