From 2e20ceab39e14a92455087aa754a3277504d6443 Mon Sep 17 00:00:00 2001
From: Lukas Fehring
Date: Sat, 17 Jun 2023 08:51:13 +0200
Subject: [PATCH 1/2] Fix logging and add codecarbon

---
 .github/workflows/tests.yml | 2 +-
 .gitignore | 6 +-
 .vscode/settings.json | 2 +-
 .../examples/example_conditional_grid.ipynb | 1981 ++---------------
 .../examples/example_general_usage.ipynb | 1898 ++++++++++------
 docs/source/examples/example_logtables.ipynb | 393 ++--
 docs/source/index.rst | 2 +-
 docs/source/usage/execution.rst | 15 +
 .../usage/experiment_configuration_file.rst | 31 +
 docs/source/usage/index.rst | 3 +-
 poetry.lock | 86 +-
 py_experimenter/database_connector.py | 32 +-
 py_experimenter/database_connector_mysql.py | 13 +-
 py_experimenter/experimenter.py | 97 +-
 py_experimenter/result_processor.py | 29 +-
 py_experimenter/utils.py | 55 +-
 pyproject.toml | 1 +
 .../test_codecarbon_core_functions_mysql.py | 71 +
 .../test_codecarbon_core_functions_sqlite.py | 71 +
 .../test_codecarbon/test_integration_mysql.py | 63 +
 .../test_integration_sqlite.py | 66 +
 test/test_database_connector.py | 12 +-
 test/test_logtables/test_mysql.py | 4 +-
 test/test_logtables/test_sqlite.py | 6 +-
 test/test_result_processor.py | 8 +-
 .../test_run_mysql_experiment.py | 8 +-
 .../test_run_sqlite_experiment.py | 8 +-
 test/test_utils.py | 132 +-
 28 files changed, 2400 insertions(+), 2695 deletions(-)
 create mode 100644 test/test_codecarbon/test_codecarbon_core_functions_mysql.py
 create mode 100644 test/test_codecarbon/test_codecarbon_core_functions_sqlite.py
 create mode 100644 test/test_codecarbon/test_integration_mysql.py
 create mode 100644 test/test_codecarbon/test_integration_sqlite.py

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 7fc779ae..b2d37c13 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -41,4 +41,4 @@ jobs:
       run: |
         source $VENV
         # Ignore the MySQL test, as it requires a MySQL server:
-        pytest --ignore=test/test_run_experiments/test_run_mysql_experiment.py --ignore=test/test_logtables/test_mysql.py
+        pytest --ignore=test/test_run_experiments/test_run_mysql_experiment.py --ignore=test/test_logtables/test_mysql.py --ignore=test/test_codecarbon/test_codecarbon_core_functions_mysql.py --ignore=test/test_codecarbon/test_integration_mysql.py
diff --git a/.gitignore b/.gitignore
index 9920d3ab..0e5cb73e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -139,4 +139,8 @@ dmypy.json
 todo.md
 config/database_credentials.cfg
 config/example*.cfg
-output/
\ No newline at end of file
+output/
+
+# codecarbon
+.codecarbon.config
+emissions.csv
diff --git a/.vscode/settings.json b/.vscode/settings.json
index ce1114fd..74b9678a 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -58,5 +58,5 @@
     "**/.git/subtree-cache/**": true,
     "**/node_modules/*/**": true
   },
-  "esbonio.sphinx.confDir": "${workspaceFolder}/docs/source"
+  "esbonio.sphinx.confDir": "${workspaceFolder}\\docs"
 }
\ No newline at end of file
diff --git a/docs/source/examples/example_conditional_grid.ipynb b/docs/source/examples/example_conditional_grid.ipynb
index a60bf22d..92f66306 100644
--- a/docs/source/examples/example_conditional_grid.ipynb
+++ b/docs/source/examples/example_conditional_grid.ipynb
@@ -208,14 +208,6 @@
    "outputId": "447580e6-6a16-42ca-c44b-48a12829af91"
   },
   "outputs": [
-   {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
"/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -266,10 +258,10 @@ " 5\n", " 1\n", " rbf\n", - " 0.0\n", + " 0.1\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -288,10 +280,10 @@ " 5\n", " 1\n", " rbf\n", - " 0.0\n", + " 0.3\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -310,10 +302,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -332,10 +324,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -354,10 +346,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -376,10 +368,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -398,10 +390,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -420,10 +412,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -442,10 +434,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -464,10 +456,10 @@ " 5\n", " 1\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -489,7 +481,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -508,10 +500,10 @@ " 5\n", " 2\n", " rbf\n", - " 0.0\n", + " 0.1\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -530,10 +522,10 @@ " 5\n", " 2\n", " rbf\n", - " 0.0\n", + " 0.3\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -552,10 +544,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -574,10 +566,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -596,10 +588,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -618,10 +610,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " 
None\n", @@ -640,10 +632,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -662,10 +654,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -684,10 +676,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -706,10 +698,10 @@ " 5\n", " 2\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -731,7 +723,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -750,10 +742,10 @@ " 5\n", " 3\n", " rbf\n", - " 0.0\n", + " 0.1\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -772,10 +764,10 @@ " 5\n", " 3\n", " rbf\n", - " 0.0\n", + " 0.3\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -794,10 +786,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -816,10 +808,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -838,10 +830,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -860,10 +852,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -882,10 +874,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -904,10 +896,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -926,10 +918,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -948,10 +940,10 @@ " 5\n", " 3\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -973,7 +965,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -992,10 +984,10 @@ " 5\n", " 4\n", " rbf\n", - " 0.0\n", + " 0.1\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1014,10 +1006,10 @@ " 5\n", " 4\n", " rbf\n", - " 0.0\n", + " 0.3\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1036,10 +1028,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1058,10 +1050,10 @@ " 5\n", 
" 4\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1080,10 +1072,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1102,10 +1094,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1124,10 +1116,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1146,10 +1138,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1168,10 +1160,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1190,10 +1182,10 @@ " 5\n", " 4\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1215,7 +1207,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1234,10 +1226,10 @@ " 5\n", " 5\n", " rbf\n", - " 0.0\n", + " 0.1\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1256,10 +1248,10 @@ " 5\n", " 5\n", " rbf\n", - " 0.0\n", + " 0.3\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1278,10 +1270,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1300,10 +1292,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.1\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1322,10 +1314,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1344,10 +1336,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.1\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1366,10 +1358,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1388,10 +1380,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.3\n", " 3.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1410,10 +1402,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", " 0.0\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1432,10 +1424,10 @@ " 5\n", " 5\n", " poly\n", - " 0.0\n", + " 0.3\n", " 4.0\n", - " 0.0\n", - " 2023-04-28 13:41:57\n", + " 0.1\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ -1457,7 +1449,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:41:57\n", + " 2023-06-17 09:05:36\n", " created\n", " None\n", " None\n", @@ 
-1475,118 +1467,118 @@ ], "text/plain": [ " ID dataset cross_validation_splits seed kernel gamma degree coef0 \\\n", - "0 1 iris 5 1 rbf 0.0 NaN NaN \n", - "1 2 iris 5 1 rbf 0.0 NaN NaN \n", - "2 3 iris 5 1 poly 0.0 3.0 0.0 \n", - "3 4 iris 5 1 poly 0.0 3.0 0.0 \n", - "4 5 iris 5 1 poly 0.0 4.0 0.0 \n", - "5 6 iris 5 1 poly 0.0 4.0 0.0 \n", - "6 7 iris 5 1 poly 0.0 3.0 0.0 \n", - "7 8 iris 5 1 poly 0.0 3.0 0.0 \n", - "8 9 iris 5 1 poly 0.0 4.0 0.0 \n", - "9 10 iris 5 1 poly 0.0 4.0 0.0 \n", + "0 1 iris 5 1 rbf 0.1 NaN NaN \n", + "1 2 iris 5 1 rbf 0.3 NaN NaN \n", + "2 3 iris 5 1 poly 0.1 3.0 0.0 \n", + "3 4 iris 5 1 poly 0.1 3.0 0.1 \n", + "4 5 iris 5 1 poly 0.1 4.0 0.0 \n", + "5 6 iris 5 1 poly 0.1 4.0 0.1 \n", + "6 7 iris 5 1 poly 0.3 3.0 0.0 \n", + "7 8 iris 5 1 poly 0.3 3.0 0.1 \n", + "8 9 iris 5 1 poly 0.3 4.0 0.0 \n", + "9 10 iris 5 1 poly 0.3 4.0 0.1 \n", "10 11 iris 5 1 linear NaN NaN NaN \n", - "11 12 iris 5 2 rbf 0.0 NaN NaN \n", - "12 13 iris 5 2 rbf 0.0 NaN NaN \n", - "13 14 iris 5 2 poly 0.0 3.0 0.0 \n", - "14 15 iris 5 2 poly 0.0 3.0 0.0 \n", - "15 16 iris 5 2 poly 0.0 4.0 0.0 \n", - "16 17 iris 5 2 poly 0.0 4.0 0.0 \n", - "17 18 iris 5 2 poly 0.0 3.0 0.0 \n", - "18 19 iris 5 2 poly 0.0 3.0 0.0 \n", - "19 20 iris 5 2 poly 0.0 4.0 0.0 \n", - "20 21 iris 5 2 poly 0.0 4.0 0.0 \n", + "11 12 iris 5 2 rbf 0.1 NaN NaN \n", + "12 13 iris 5 2 rbf 0.3 NaN NaN \n", + "13 14 iris 5 2 poly 0.1 3.0 0.0 \n", + "14 15 iris 5 2 poly 0.1 3.0 0.1 \n", + "15 16 iris 5 2 poly 0.1 4.0 0.0 \n", + "16 17 iris 5 2 poly 0.1 4.0 0.1 \n", + "17 18 iris 5 2 poly 0.3 3.0 0.0 \n", + "18 19 iris 5 2 poly 0.3 3.0 0.1 \n", + "19 20 iris 5 2 poly 0.3 4.0 0.0 \n", + "20 21 iris 5 2 poly 0.3 4.0 0.1 \n", "21 22 iris 5 2 linear NaN NaN NaN \n", - "22 23 iris 5 3 rbf 0.0 NaN NaN \n", - "23 24 iris 5 3 rbf 0.0 NaN NaN \n", - "24 25 iris 5 3 poly 0.0 3.0 0.0 \n", - "25 26 iris 5 3 poly 0.0 3.0 0.0 \n", - "26 27 iris 5 3 poly 0.0 4.0 0.0 \n", - "27 28 iris 5 3 poly 0.0 4.0 0.0 \n", - "28 29 iris 5 3 poly 0.0 3.0 0.0 \n", - "29 30 iris 5 3 poly 0.0 3.0 0.0 \n", - "30 31 iris 5 3 poly 0.0 4.0 0.0 \n", - "31 32 iris 5 3 poly 0.0 4.0 0.0 \n", + "22 23 iris 5 3 rbf 0.1 NaN NaN \n", + "23 24 iris 5 3 rbf 0.3 NaN NaN \n", + "24 25 iris 5 3 poly 0.1 3.0 0.0 \n", + "25 26 iris 5 3 poly 0.1 3.0 0.1 \n", + "26 27 iris 5 3 poly 0.1 4.0 0.0 \n", + "27 28 iris 5 3 poly 0.1 4.0 0.1 \n", + "28 29 iris 5 3 poly 0.3 3.0 0.0 \n", + "29 30 iris 5 3 poly 0.3 3.0 0.1 \n", + "30 31 iris 5 3 poly 0.3 4.0 0.0 \n", + "31 32 iris 5 3 poly 0.3 4.0 0.1 \n", "32 33 iris 5 3 linear NaN NaN NaN \n", - "33 34 iris 5 4 rbf 0.0 NaN NaN \n", - "34 35 iris 5 4 rbf 0.0 NaN NaN \n", - "35 36 iris 5 4 poly 0.0 3.0 0.0 \n", - "36 37 iris 5 4 poly 0.0 3.0 0.0 \n", - "37 38 iris 5 4 poly 0.0 4.0 0.0 \n", - "38 39 iris 5 4 poly 0.0 4.0 0.0 \n", - "39 40 iris 5 4 poly 0.0 3.0 0.0 \n", - "40 41 iris 5 4 poly 0.0 3.0 0.0 \n", - "41 42 iris 5 4 poly 0.0 4.0 0.0 \n", - "42 43 iris 5 4 poly 0.0 4.0 0.0 \n", + "33 34 iris 5 4 rbf 0.1 NaN NaN \n", + "34 35 iris 5 4 rbf 0.3 NaN NaN \n", + "35 36 iris 5 4 poly 0.1 3.0 0.0 \n", + "36 37 iris 5 4 poly 0.1 3.0 0.1 \n", + "37 38 iris 5 4 poly 0.1 4.0 0.0 \n", + "38 39 iris 5 4 poly 0.1 4.0 0.1 \n", + "39 40 iris 5 4 poly 0.3 3.0 0.0 \n", + "40 41 iris 5 4 poly 0.3 3.0 0.1 \n", + "41 42 iris 5 4 poly 0.3 4.0 0.0 \n", + "42 43 iris 5 4 poly 0.3 4.0 0.1 \n", "43 44 iris 5 4 linear NaN NaN NaN \n", - "44 45 iris 5 5 rbf 0.0 NaN NaN \n", - "45 46 iris 5 5 rbf 0.0 NaN NaN \n", - "46 47 iris 5 5 poly 0.0 3.0 0.0 \n", - "47 48 iris 
5 5 poly 0.0 3.0 0.0 \n", - "48 49 iris 5 5 poly 0.0 4.0 0.0 \n", - "49 50 iris 5 5 poly 0.0 4.0 0.0 \n", - "50 51 iris 5 5 poly 0.0 3.0 0.0 \n", - "51 52 iris 5 5 poly 0.0 3.0 0.0 \n", - "52 53 iris 5 5 poly 0.0 4.0 0.0 \n", - "53 54 iris 5 5 poly 0.0 4.0 0.0 \n", + "44 45 iris 5 5 rbf 0.1 NaN NaN \n", + "45 46 iris 5 5 rbf 0.3 NaN NaN \n", + "46 47 iris 5 5 poly 0.1 3.0 0.0 \n", + "47 48 iris 5 5 poly 0.1 3.0 0.1 \n", + "48 49 iris 5 5 poly 0.1 4.0 0.0 \n", + "49 50 iris 5 5 poly 0.1 4.0 0.1 \n", + "50 51 iris 5 5 poly 0.3 3.0 0.0 \n", + "51 52 iris 5 5 poly 0.3 3.0 0.1 \n", + "52 53 iris 5 5 poly 0.3 4.0 0.0 \n", + "53 54 iris 5 5 poly 0.3 4.0 0.1 \n", "54 55 iris 5 5 linear NaN NaN NaN \n", "\n", - " creation_date status start_date name machine train_f1 \\\n", - "0 2023-04-28 13:41:57 created None None None None \n", - "1 2023-04-28 13:41:57 created None None None None \n", - "2 2023-04-28 13:41:57 created None None None None \n", - "3 2023-04-28 13:41:57 created None None None None \n", - "4 2023-04-28 13:41:57 created None None None None \n", - "5 2023-04-28 13:41:57 created None None None None \n", - "6 2023-04-28 13:41:57 created None None None None \n", - "7 2023-04-28 13:41:57 created None None None None \n", - "8 2023-04-28 13:41:57 created None None None None \n", - "9 2023-04-28 13:41:57 created None None None None \n", - "10 2023-04-28 13:41:57 created None None None None \n", - "11 2023-04-28 13:41:57 created None None None None \n", - "12 2023-04-28 13:41:57 created None None None None \n", - "13 2023-04-28 13:41:57 created None None None None \n", - "14 2023-04-28 13:41:57 created None None None None \n", - "15 2023-04-28 13:41:57 created None None None None \n", - "16 2023-04-28 13:41:57 created None None None None \n", - "17 2023-04-28 13:41:57 created None None None None \n", - "18 2023-04-28 13:41:57 created None None None None \n", - "19 2023-04-28 13:41:57 created None None None None \n", - "20 2023-04-28 13:41:57 created None None None None \n", - "21 2023-04-28 13:41:57 created None None None None \n", - "22 2023-04-28 13:41:57 created None None None None \n", - "23 2023-04-28 13:41:57 created None None None None \n", - "24 2023-04-28 13:41:57 created None None None None \n", - "25 2023-04-28 13:41:57 created None None None None \n", - "26 2023-04-28 13:41:57 created None None None None \n", - "27 2023-04-28 13:41:57 created None None None None \n", - "28 2023-04-28 13:41:57 created None None None None \n", - "29 2023-04-28 13:41:57 created None None None None \n", - "30 2023-04-28 13:41:57 created None None None None \n", - "31 2023-04-28 13:41:57 created None None None None \n", - "32 2023-04-28 13:41:57 created None None None None \n", - "33 2023-04-28 13:41:57 created None None None None \n", - "34 2023-04-28 13:41:57 created None None None None \n", - "35 2023-04-28 13:41:57 created None None None None \n", - "36 2023-04-28 13:41:57 created None None None None \n", - "37 2023-04-28 13:41:57 created None None None None \n", - "38 2023-04-28 13:41:57 created None None None None \n", - "39 2023-04-28 13:41:57 created None None None None \n", - "40 2023-04-28 13:41:57 created None None None None \n", - "41 2023-04-28 13:41:57 created None None None None \n", - "42 2023-04-28 13:41:57 created None None None None \n", - "43 2023-04-28 13:41:57 created None None None None \n", - "44 2023-04-28 13:41:57 created None None None None \n", - "45 2023-04-28 13:41:57 created None None None None \n", - "46 2023-04-28 13:41:57 created None None None None \n", - "47 2023-04-28 
13:41:57 created None None None None \n", - "48 2023-04-28 13:41:57 created None None None None \n", - "49 2023-04-28 13:41:57 created None None None None \n", - "50 2023-04-28 13:41:57 created None None None None \n", - "51 2023-04-28 13:41:57 created None None None None \n", - "52 2023-04-28 13:41:57 created None None None None \n", - "53 2023-04-28 13:41:57 created None None None None \n", - "54 2023-04-28 13:41:57 created None None None None \n", + " creation_date status start_date name machine train_f1 \\\n", + "0 2023-06-17 09:05:36 created None None None None \n", + "1 2023-06-17 09:05:36 created None None None None \n", + "2 2023-06-17 09:05:36 created None None None None \n", + "3 2023-06-17 09:05:36 created None None None None \n", + "4 2023-06-17 09:05:36 created None None None None \n", + "5 2023-06-17 09:05:36 created None None None None \n", + "6 2023-06-17 09:05:36 created None None None None \n", + "7 2023-06-17 09:05:36 created None None None None \n", + "8 2023-06-17 09:05:36 created None None None None \n", + "9 2023-06-17 09:05:36 created None None None None \n", + "10 2023-06-17 09:05:36 created None None None None \n", + "11 2023-06-17 09:05:36 created None None None None \n", + "12 2023-06-17 09:05:36 created None None None None \n", + "13 2023-06-17 09:05:36 created None None None None \n", + "14 2023-06-17 09:05:36 created None None None None \n", + "15 2023-06-17 09:05:36 created None None None None \n", + "16 2023-06-17 09:05:36 created None None None None \n", + "17 2023-06-17 09:05:36 created None None None None \n", + "18 2023-06-17 09:05:36 created None None None None \n", + "19 2023-06-17 09:05:36 created None None None None \n", + "20 2023-06-17 09:05:36 created None None None None \n", + "21 2023-06-17 09:05:36 created None None None None \n", + "22 2023-06-17 09:05:36 created None None None None \n", + "23 2023-06-17 09:05:36 created None None None None \n", + "24 2023-06-17 09:05:36 created None None None None \n", + "25 2023-06-17 09:05:36 created None None None None \n", + "26 2023-06-17 09:05:36 created None None None None \n", + "27 2023-06-17 09:05:36 created None None None None \n", + "28 2023-06-17 09:05:36 created None None None None \n", + "29 2023-06-17 09:05:36 created None None None None \n", + "30 2023-06-17 09:05:36 created None None None None \n", + "31 2023-06-17 09:05:36 created None None None None \n", + "32 2023-06-17 09:05:36 created None None None None \n", + "33 2023-06-17 09:05:36 created None None None None \n", + "34 2023-06-17 09:05:36 created None None None None \n", + "35 2023-06-17 09:05:36 created None None None None \n", + "36 2023-06-17 09:05:36 created None None None None \n", + "37 2023-06-17 09:05:36 created None None None None \n", + "38 2023-06-17 09:05:36 created None None None None \n", + "39 2023-06-17 09:05:36 created None None None None \n", + "40 2023-06-17 09:05:36 created None None None None \n", + "41 2023-06-17 09:05:36 created None None None None \n", + "42 2023-06-17 09:05:36 created None None None None \n", + "43 2023-06-17 09:05:36 created None None None None \n", + "44 2023-06-17 09:05:36 created None None None None \n", + "45 2023-06-17 09:05:36 created None None None None \n", + "46 2023-06-17 09:05:36 created None None None None \n", + "47 2023-06-17 09:05:36 created None None None None \n", + "48 2023-06-17 09:05:36 created None None None None \n", + "49 2023-06-17 09:05:36 created None None None None \n", + "50 2023-06-17 09:05:36 created None None None None \n", + "51 2023-06-17 09:05:36 created 
None None None None \n", + "52 2023-06-17 09:05:36 created None None None None \n", + "53 2023-06-17 09:05:36 created None None None None \n", + "54 2023-06-17 09:05:36 created None None None None \n", "\n", " train_accuracy test_f1 test_accuracy end_date error \n", "0 None None None None None \n", @@ -1690,1513 +1682,22 @@ "id": "cDsuIw4M_AyY", "outputId": "d242da6f-1c9e-421b-f916-694c1a98ba95" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
IDdatasetcross_validation_splitsseedkernelgammadegreecoef0creation_datestatusstart_datenamemachinetrain_f1train_accuracytest_f1test_accuracyend_dateerror
01iris51rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:41:58SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:03None
12iris51rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:42:04SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:06None
23iris51poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:06SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:08None
34iris51poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:09SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:11None
45iris51poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:11SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:14None
56iris51poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:14SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:17None
67iris51poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:17SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:20None
78iris51poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:20SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:22None
89iris51poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:23SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:25None
910iris51poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:26SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:28None
1011iris51linearNaNNaNNaN2023-04-28 13:41:57done2023-04-28 13:42:28SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:31None
1112iris52rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:42:31SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:34None
1213iris52rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:42:34SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:36None
1314iris52poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:36SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:37None
1415iris52poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:38SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:39None
1516iris52poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:39SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:41None
1617iris52poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:41SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:43None
1718iris52poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:43SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:44None
1819iris52poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:45SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:46None
1920iris52poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:47SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:48None
2021iris52poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:42:48SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:50None
2122iris52linearNaNNaNNaN2023-04-28 13:41:57done2023-04-28 13:42:50SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:52None
2223iris53rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:42:53SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:54None
2324iris53rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:42:54SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:42:57None
2425iris53poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:42:57SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:00None
2526iris53poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:00SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:03None
2627iris53poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:03SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:05None
2728iris53poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:06SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:08None
2829iris53poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:09SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:12None
2930iris53poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:12SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:14None
3031iris53poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:15SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:18None
3132iris53poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:18SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:21None
3233iris53linearNaNNaNNaN2023-04-28 13:41:57done2023-04-28 13:43:21SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:25None
3334iris54rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:43:26SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:28None
3435iris54rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:43:30SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:32None
3536iris54poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:32SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:35None
3637iris54poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:35SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:37None
3738iris54poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:38SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:40None
3839iris54poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:41SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:44None
3940iris54poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:44SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:47None
4041iris54poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:43:47SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:50None
4142iris54poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:50SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:53None
4243iris54poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:43:54SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:56None
4344iris54linearNaNNaNNaN2023-04-28 13:41:57done2023-04-28 13:43:57SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:43:59None
4445iris55rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:43:59SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:02None
4546iris55rbf0.0NaNNaN2023-04-28 13:41:57done2023-04-28 13:44:03SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:08None
4647iris55poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:44:08SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:14None
4748iris55poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:44:14SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:19None
4849iris55poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:44:20SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:25None
4950iris55poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:44:26SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:31None
5051iris55poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:44:32SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:37None
5152iris55poly0.03.00.02023-04-28 13:41:57done2023-04-28 13:44:38SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:43None
5253iris55poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:44:45SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:50None
5354iris55poly0.04.00.02023-04-28 13:41:57done2023-04-28 13:44:50SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:44:56None
5455iris55linearNaNNaNNaN2023-04-28 13:41:57done2023-04-28 13:44:56SVM_experimenter_01Worklaptop1.01.01.01.02023-04-28 13:45:02None
\n", - "
" - ], - "text/plain": [ - " ID dataset cross_validation_splits seed kernel gamma degree coef0 \\\n", - "0 1 iris 5 1 rbf 0.0 NaN NaN \n", - "1 2 iris 5 1 rbf 0.0 NaN NaN \n", - "2 3 iris 5 1 poly 0.0 3.0 0.0 \n", - "3 4 iris 5 1 poly 0.0 3.0 0.0 \n", - "4 5 iris 5 1 poly 0.0 4.0 0.0 \n", - "5 6 iris 5 1 poly 0.0 4.0 0.0 \n", - "6 7 iris 5 1 poly 0.0 3.0 0.0 \n", - "7 8 iris 5 1 poly 0.0 3.0 0.0 \n", - "8 9 iris 5 1 poly 0.0 4.0 0.0 \n", - "9 10 iris 5 1 poly 0.0 4.0 0.0 \n", - "10 11 iris 5 1 linear NaN NaN NaN \n", - "11 12 iris 5 2 rbf 0.0 NaN NaN \n", - "12 13 iris 5 2 rbf 0.0 NaN NaN \n", - "13 14 iris 5 2 poly 0.0 3.0 0.0 \n", - "14 15 iris 5 2 poly 0.0 3.0 0.0 \n", - "15 16 iris 5 2 poly 0.0 4.0 0.0 \n", - "16 17 iris 5 2 poly 0.0 4.0 0.0 \n", - "17 18 iris 5 2 poly 0.0 3.0 0.0 \n", - "18 19 iris 5 2 poly 0.0 3.0 0.0 \n", - "19 20 iris 5 2 poly 0.0 4.0 0.0 \n", - "20 21 iris 5 2 poly 0.0 4.0 0.0 \n", - "21 22 iris 5 2 linear NaN NaN NaN \n", - "22 23 iris 5 3 rbf 0.0 NaN NaN \n", - "23 24 iris 5 3 rbf 0.0 NaN NaN \n", - "24 25 iris 5 3 poly 0.0 3.0 0.0 \n", - "25 26 iris 5 3 poly 0.0 3.0 0.0 \n", - "26 27 iris 5 3 poly 0.0 4.0 0.0 \n", - "27 28 iris 5 3 poly 0.0 4.0 0.0 \n", - "28 29 iris 5 3 poly 0.0 3.0 0.0 \n", - "29 30 iris 5 3 poly 0.0 3.0 0.0 \n", - "30 31 iris 5 3 poly 0.0 4.0 0.0 \n", - "31 32 iris 5 3 poly 0.0 4.0 0.0 \n", - "32 33 iris 5 3 linear NaN NaN NaN \n", - "33 34 iris 5 4 rbf 0.0 NaN NaN \n", - "34 35 iris 5 4 rbf 0.0 NaN NaN \n", - "35 36 iris 5 4 poly 0.0 3.0 0.0 \n", - "36 37 iris 5 4 poly 0.0 3.0 0.0 \n", - "37 38 iris 5 4 poly 0.0 4.0 0.0 \n", - "38 39 iris 5 4 poly 0.0 4.0 0.0 \n", - "39 40 iris 5 4 poly 0.0 3.0 0.0 \n", - "40 41 iris 5 4 poly 0.0 3.0 0.0 \n", - "41 42 iris 5 4 poly 0.0 4.0 0.0 \n", - "42 43 iris 5 4 poly 0.0 4.0 0.0 \n", - "43 44 iris 5 4 linear NaN NaN NaN \n", - "44 45 iris 5 5 rbf 0.0 NaN NaN \n", - "45 46 iris 5 5 rbf 0.0 NaN NaN \n", - "46 47 iris 5 5 poly 0.0 3.0 0.0 \n", - "47 48 iris 5 5 poly 0.0 3.0 0.0 \n", - "48 49 iris 5 5 poly 0.0 4.0 0.0 \n", - "49 50 iris 5 5 poly 0.0 4.0 0.0 \n", - "50 51 iris 5 5 poly 0.0 3.0 0.0 \n", - "51 52 iris 5 5 poly 0.0 3.0 0.0 \n", - "52 53 iris 5 5 poly 0.0 4.0 0.0 \n", - "53 54 iris 5 5 poly 0.0 4.0 0.0 \n", - "54 55 iris 5 5 linear NaN NaN NaN \n", - "\n", - " creation_date status start_date name \\\n", - "0 2023-04-28 13:41:57 done 2023-04-28 13:41:58 SVM_experimenter_01 \n", - "1 2023-04-28 13:41:57 done 2023-04-28 13:42:04 SVM_experimenter_01 \n", - "2 2023-04-28 13:41:57 done 2023-04-28 13:42:06 SVM_experimenter_01 \n", - "3 2023-04-28 13:41:57 done 2023-04-28 13:42:09 SVM_experimenter_01 \n", - "4 2023-04-28 13:41:57 done 2023-04-28 13:42:11 SVM_experimenter_01 \n", - "5 2023-04-28 13:41:57 done 2023-04-28 13:42:14 SVM_experimenter_01 \n", - "6 2023-04-28 13:41:57 done 2023-04-28 13:42:17 SVM_experimenter_01 \n", - "7 2023-04-28 13:41:57 done 2023-04-28 13:42:20 SVM_experimenter_01 \n", - "8 2023-04-28 13:41:57 done 2023-04-28 13:42:23 SVM_experimenter_01 \n", - "9 2023-04-28 13:41:57 done 2023-04-28 13:42:26 SVM_experimenter_01 \n", - "10 2023-04-28 13:41:57 done 2023-04-28 13:42:28 SVM_experimenter_01 \n", - "11 2023-04-28 13:41:57 done 2023-04-28 13:42:31 SVM_experimenter_01 \n", - "12 2023-04-28 13:41:57 done 2023-04-28 13:42:34 SVM_experimenter_01 \n", - "13 2023-04-28 13:41:57 done 2023-04-28 13:42:36 SVM_experimenter_01 \n", - "14 2023-04-28 13:41:57 done 2023-04-28 13:42:38 SVM_experimenter_01 \n", - "15 2023-04-28 13:41:57 done 2023-04-28 13:42:39 SVM_experimenter_01 \n", - "16 
2023-04-28 13:41:57 done 2023-04-28 13:42:41 SVM_experimenter_01 \n", - "17 2023-04-28 13:41:57 done 2023-04-28 13:42:43 SVM_experimenter_01 \n", - "18 2023-04-28 13:41:57 done 2023-04-28 13:42:45 SVM_experimenter_01 \n", - "19 2023-04-28 13:41:57 done 2023-04-28 13:42:47 SVM_experimenter_01 \n", - "20 2023-04-28 13:41:57 done 2023-04-28 13:42:48 SVM_experimenter_01 \n", - "21 2023-04-28 13:41:57 done 2023-04-28 13:42:50 SVM_experimenter_01 \n", - "22 2023-04-28 13:41:57 done 2023-04-28 13:42:53 SVM_experimenter_01 \n", - "23 2023-04-28 13:41:57 done 2023-04-28 13:42:54 SVM_experimenter_01 \n", - "24 2023-04-28 13:41:57 done 2023-04-28 13:42:57 SVM_experimenter_01 \n", - "25 2023-04-28 13:41:57 done 2023-04-28 13:43:00 SVM_experimenter_01 \n", - "26 2023-04-28 13:41:57 done 2023-04-28 13:43:03 SVM_experimenter_01 \n", - "27 2023-04-28 13:41:57 done 2023-04-28 13:43:06 SVM_experimenter_01 \n", - "28 2023-04-28 13:41:57 done 2023-04-28 13:43:09 SVM_experimenter_01 \n", - "29 2023-04-28 13:41:57 done 2023-04-28 13:43:12 SVM_experimenter_01 \n", - "30 2023-04-28 13:41:57 done 2023-04-28 13:43:15 SVM_experimenter_01 \n", - "31 2023-04-28 13:41:57 done 2023-04-28 13:43:18 SVM_experimenter_01 \n", - "32 2023-04-28 13:41:57 done 2023-04-28 13:43:21 SVM_experimenter_01 \n", - "33 2023-04-28 13:41:57 done 2023-04-28 13:43:26 SVM_experimenter_01 \n", - "34 2023-04-28 13:41:57 done 2023-04-28 13:43:30 SVM_experimenter_01 \n", - "35 2023-04-28 13:41:57 done 2023-04-28 13:43:32 SVM_experimenter_01 \n", - "36 2023-04-28 13:41:57 done 2023-04-28 13:43:35 SVM_experimenter_01 \n", - "37 2023-04-28 13:41:57 done 2023-04-28 13:43:38 SVM_experimenter_01 \n", - "38 2023-04-28 13:41:57 done 2023-04-28 13:43:41 SVM_experimenter_01 \n", - "39 2023-04-28 13:41:57 done 2023-04-28 13:43:44 SVM_experimenter_01 \n", - "40 2023-04-28 13:41:57 done 2023-04-28 13:43:47 SVM_experimenter_01 \n", - "41 2023-04-28 13:41:57 done 2023-04-28 13:43:50 SVM_experimenter_01 \n", - "42 2023-04-28 13:41:57 done 2023-04-28 13:43:54 SVM_experimenter_01 \n", - "43 2023-04-28 13:41:57 done 2023-04-28 13:43:57 SVM_experimenter_01 \n", - "44 2023-04-28 13:41:57 done 2023-04-28 13:43:59 SVM_experimenter_01 \n", - "45 2023-04-28 13:41:57 done 2023-04-28 13:44:03 SVM_experimenter_01 \n", - "46 2023-04-28 13:41:57 done 2023-04-28 13:44:08 SVM_experimenter_01 \n", - "47 2023-04-28 13:41:57 done 2023-04-28 13:44:14 SVM_experimenter_01 \n", - "48 2023-04-28 13:41:57 done 2023-04-28 13:44:20 SVM_experimenter_01 \n", - "49 2023-04-28 13:41:57 done 2023-04-28 13:44:26 SVM_experimenter_01 \n", - "50 2023-04-28 13:41:57 done 2023-04-28 13:44:32 SVM_experimenter_01 \n", - "51 2023-04-28 13:41:57 done 2023-04-28 13:44:38 SVM_experimenter_01 \n", - "52 2023-04-28 13:41:57 done 2023-04-28 13:44:45 SVM_experimenter_01 \n", - "53 2023-04-28 13:41:57 done 2023-04-28 13:44:50 SVM_experimenter_01 \n", - "54 2023-04-28 13:41:57 done 2023-04-28 13:44:56 SVM_experimenter_01 \n", - "\n", - " machine train_f1 train_accuracy test_f1 test_accuracy \\\n", - "0 Worklaptop 1.0 1.0 1.0 1.0 \n", - "1 Worklaptop 1.0 1.0 1.0 1.0 \n", - "2 Worklaptop 1.0 1.0 1.0 1.0 \n", - "3 Worklaptop 1.0 1.0 1.0 1.0 \n", - "4 Worklaptop 1.0 1.0 1.0 1.0 \n", - "5 Worklaptop 1.0 1.0 1.0 1.0 \n", - "6 Worklaptop 1.0 1.0 1.0 1.0 \n", - "7 Worklaptop 1.0 1.0 1.0 1.0 \n", - "8 Worklaptop 1.0 1.0 1.0 1.0 \n", - "9 Worklaptop 1.0 1.0 1.0 1.0 \n", - "10 Worklaptop 1.0 1.0 1.0 1.0 \n", - "11 Worklaptop 1.0 1.0 1.0 1.0 \n", - "12 Worklaptop 1.0 1.0 1.0 1.0 \n", - "13 Worklaptop 1.0 1.0 1.0 1.0 \n", - 
"14 Worklaptop 1.0 1.0 1.0 1.0 \n", - "15 Worklaptop 1.0 1.0 1.0 1.0 \n", - "16 Worklaptop 1.0 1.0 1.0 1.0 \n", - "17 Worklaptop 1.0 1.0 1.0 1.0 \n", - "18 Worklaptop 1.0 1.0 1.0 1.0 \n", - "19 Worklaptop 1.0 1.0 1.0 1.0 \n", - "20 Worklaptop 1.0 1.0 1.0 1.0 \n", - "21 Worklaptop 1.0 1.0 1.0 1.0 \n", - "22 Worklaptop 1.0 1.0 1.0 1.0 \n", - "23 Worklaptop 1.0 1.0 1.0 1.0 \n", - "24 Worklaptop 1.0 1.0 1.0 1.0 \n", - "25 Worklaptop 1.0 1.0 1.0 1.0 \n", - "26 Worklaptop 1.0 1.0 1.0 1.0 \n", - "27 Worklaptop 1.0 1.0 1.0 1.0 \n", - "28 Worklaptop 1.0 1.0 1.0 1.0 \n", - "29 Worklaptop 1.0 1.0 1.0 1.0 \n", - "30 Worklaptop 1.0 1.0 1.0 1.0 \n", - "31 Worklaptop 1.0 1.0 1.0 1.0 \n", - "32 Worklaptop 1.0 1.0 1.0 1.0 \n", - "33 Worklaptop 1.0 1.0 1.0 1.0 \n", - "34 Worklaptop 1.0 1.0 1.0 1.0 \n", - "35 Worklaptop 1.0 1.0 1.0 1.0 \n", - "36 Worklaptop 1.0 1.0 1.0 1.0 \n", - "37 Worklaptop 1.0 1.0 1.0 1.0 \n", - "38 Worklaptop 1.0 1.0 1.0 1.0 \n", - "39 Worklaptop 1.0 1.0 1.0 1.0 \n", - "40 Worklaptop 1.0 1.0 1.0 1.0 \n", - "41 Worklaptop 1.0 1.0 1.0 1.0 \n", - "42 Worklaptop 1.0 1.0 1.0 1.0 \n", - "43 Worklaptop 1.0 1.0 1.0 1.0 \n", - "44 Worklaptop 1.0 1.0 1.0 1.0 \n", - "45 Worklaptop 1.0 1.0 1.0 1.0 \n", - "46 Worklaptop 1.0 1.0 1.0 1.0 \n", - "47 Worklaptop 1.0 1.0 1.0 1.0 \n", - "48 Worklaptop 1.0 1.0 1.0 1.0 \n", - "49 Worklaptop 1.0 1.0 1.0 1.0 \n", - "50 Worklaptop 1.0 1.0 1.0 1.0 \n", - "51 Worklaptop 1.0 1.0 1.0 1.0 \n", - "52 Worklaptop 1.0 1.0 1.0 1.0 \n", - "53 Worklaptop 1.0 1.0 1.0 1.0 \n", - "54 Worklaptop 1.0 1.0 1.0 1.0 \n", - "\n", - " end_date error \n", - "0 2023-04-28 13:42:03 None \n", - "1 2023-04-28 13:42:06 None \n", - "2 2023-04-28 13:42:08 None \n", - "3 2023-04-28 13:42:11 None \n", - "4 2023-04-28 13:42:14 None \n", - "5 2023-04-28 13:42:17 None \n", - "6 2023-04-28 13:42:20 None \n", - "7 2023-04-28 13:42:22 None \n", - "8 2023-04-28 13:42:25 None \n", - "9 2023-04-28 13:42:28 None \n", - "10 2023-04-28 13:42:31 None \n", - "11 2023-04-28 13:42:34 None \n", - "12 2023-04-28 13:42:36 None \n", - "13 2023-04-28 13:42:37 None \n", - "14 2023-04-28 13:42:39 None \n", - "15 2023-04-28 13:42:41 None \n", - "16 2023-04-28 13:42:43 None \n", - "17 2023-04-28 13:42:44 None \n", - "18 2023-04-28 13:42:46 None \n", - "19 2023-04-28 13:42:48 None \n", - "20 2023-04-28 13:42:50 None \n", - "21 2023-04-28 13:42:52 None \n", - "22 2023-04-28 13:42:54 None \n", - "23 2023-04-28 13:42:57 None \n", - "24 2023-04-28 13:43:00 None \n", - "25 2023-04-28 13:43:03 None \n", - "26 2023-04-28 13:43:05 None \n", - "27 2023-04-28 13:43:08 None \n", - "28 2023-04-28 13:43:12 None \n", - "29 2023-04-28 13:43:14 None \n", - "30 2023-04-28 13:43:18 None \n", - "31 2023-04-28 13:43:21 None \n", - "32 2023-04-28 13:43:25 None \n", - "33 2023-04-28 13:43:28 None \n", - "34 2023-04-28 13:43:32 None \n", - "35 2023-04-28 13:43:35 None \n", - "36 2023-04-28 13:43:37 None \n", - "37 2023-04-28 13:43:40 None \n", - "38 2023-04-28 13:43:44 None \n", - "39 2023-04-28 13:43:47 None \n", - "40 2023-04-28 13:43:50 None \n", - "41 2023-04-28 13:43:53 None \n", - "42 2023-04-28 13:43:56 None \n", - "43 2023-04-28 13:43:59 None \n", - "44 2023-04-28 13:44:02 None \n", - "45 2023-04-28 13:44:08 None \n", - "46 2023-04-28 13:44:14 None \n", - "47 2023-04-28 13:44:19 None \n", - "48 2023-04-28 13:44:25 None \n", - "49 2023-04-28 13:44:31 None \n", - "50 2023-04-28 13:44:37 None \n", - "51 2023-04-28 13:44:43 None \n", - "52 2023-04-28 13:44:50 None \n", - "53 2023-04-28 13:44:56 None \n", - "54 2023-04-28 13:45:02 None " 
-      ]
-     },
-     "execution_count": 5,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "experimenter.execute(run_svm, max_experiments=-1)\n",
     "\n",
     "# showing database table\n",
     "experimenter.get_table() "
    ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### CodeCarbon\n",
+    "Note that `CodeCarbon` is activated by default, collecting information about the carbon emissions of each experiment. Have a look at our [general usage example](https://tornede.github.io/py_experimenter/examples/example_general_usage.html) and the corresponding [documentation of CodeCarbon fields](https://tornede.github.io/py_experimenter/usage.html#codecarbon-fields) for more information."
+   ]
   }
  ],
 "metadata": {
diff --git a/docs/source/examples/example_general_usage.ipynb b/docs/source/examples/example_general_usage.ipynb
index c5ea616b..9a6e3975 100644
--- a/docs/source/examples/example_general_usage.ipynb
+++ b/docs/source/examples/example_general_usage.ipynb
@@ -19,6 +19,7 @@ ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "id": "qkmxwSl8DW-V"
    },
@@ -55,6 +56,14 @@
     "\n",
     "[CUSTOM] \n",
     "path = sample_data\n",
+    "\n",
+    "[codecarbon]\n",
+    "offline_mode = False\n",
+    "measure_power_secs = 25\n",
+    "tracking_mode = process\n",
+    "log_level = error\n",
+    "save_to_file = True\n",
+    "output_dir = output/CodeCarbon\n",
     "\"\"\"\n",
     "# Create config directory if it does not exist\n",
     "if not os.path.exists('config'):\n",
@@ -67,6 +76,7 @@ ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "id": "b5pjc0TMBjnr"
    },
@@ -137,6 +147,7 @@ ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "id": "Sa6mN98NBua-"
    },
@@ -164,6 +175,7 @@ ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "id": "tdLXOI7eFhBh"
    },
@@ -188,14 +200,6 @@
    "outputId": "447580e6-6a16-42ca-c44b-48a12829af91"
   },
   "outputs": [
-   {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
-     "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested.
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -244,17 +248,17 @@ " 5\n", " 2\n", " linear\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:35\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:36\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -264,17 +268,17 @@ " 5\n", " 4\n", " linear\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:36\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:37\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -284,17 +288,17 @@ " 5\n", " 6\n", " linear\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:38\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:39\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -304,17 +308,17 @@ " 5\n", " 2\n", " poly\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:39\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:40\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -324,17 +328,17 @@ " 5\n", " 4\n", " poly\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:40\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:42\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -344,17 +348,17 @@ " 5\n", " 6\n", " poly\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:42\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:43\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -364,17 +368,17 @@ " 5\n", " 2\n", " rbf\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:43\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:44\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -384,17 +388,17 @@ " 5\n", " 4\n", " rbf\n", - " 
2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:45\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:46\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -404,17 +408,17 @@ " 5\n", " 6\n", " rbf\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:46\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:47\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -424,17 +428,17 @@ " 5\n", " 2\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:48\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:49\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -444,17 +448,17 @@ " 5\n", " 4\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:49\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:50\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", @@ -464,46 +468,46 @@ " 5\n", " 6\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", - " done\n", - " 2023-04-28 12:25:50\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:51\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " None\n", " \n", " \n", " 12\n", - " 15\n", + " 13\n", " error_dataset\n", " 3\n", " 42\n", " linear\n", - " 2023-04-28 13:45:34\n", - " error\n", - " 2023-04-28 13:45:34\n", - " example_notebook\n", - " Worklaptop\n", - " Pipeline(steps=[('standardscaler', StandardSca...\n", - " NaN\n", - " NaN\n", - " NaN\n", - " NaN\n", - " 2023-04-28 13:45:35\n", - " Traceback (most recent call last):\\n File \"/h...\n", + " 2023-06-17 08:59:12\n", + " created\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", + " None\n", " \n", " \n", "\n", "" ], "text/plain": [ - " ID dataset cross_validation_splits seed kernel \n", - "0 1 iris 5 2 linear \\\n", + " ID dataset cross_validation_splits seed kernel \\\n", + "0 1 iris 5 2 linear \n", "1 2 iris 5 4 linear \n", "2 3 iris 5 6 linear \n", "3 4 iris 5 2 poly \n", @@ -515,67 +519,37 @@ "9 10 iris 5 2 sigmoid \n", "10 11 iris 5 4 sigmoid \n", "11 12 iris 5 6 sigmoid \n", - "12 15 error_dataset 3 42 linear \n", - "\n", - " creation_date status start_date name \n", - "0 2023-04-28 12:25:34 done 2023-04-28 12:25:35 example_notebook \\\n", - "1 2023-04-28 12:25:34 done 2023-04-28 12:25:36 
example_notebook \n", - "2 2023-04-28 12:25:34 done 2023-04-28 12:25:38 example_notebook \n", - "3 2023-04-28 12:25:34 done 2023-04-28 12:25:39 example_notebook \n", - "4 2023-04-28 12:25:34 done 2023-04-28 12:25:40 example_notebook \n", - "5 2023-04-28 12:25:34 done 2023-04-28 12:25:42 example_notebook \n", - "6 2023-04-28 12:25:34 done 2023-04-28 12:25:43 example_notebook \n", - "7 2023-04-28 12:25:34 done 2023-04-28 12:25:45 example_notebook \n", - "8 2023-04-28 12:25:34 done 2023-04-28 12:25:46 example_notebook \n", - "9 2023-04-28 12:25:34 done 2023-04-28 12:25:48 example_notebook \n", - "10 2023-04-28 12:25:34 done 2023-04-28 12:25:49 example_notebook \n", - "11 2023-04-28 12:25:34 done 2023-04-28 12:25:50 example_notebook \n", - "12 2023-04-28 13:45:34 error 2023-04-28 13:45:34 example_notebook \n", - "\n", - " machine pipeline train_f1 \n", - "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \\\n", - "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "12 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 
NaN \n", + "12 13 error_dataset 3 42 linear \n", "\n", - " train_accuracy test_f1 test_accuracy end_date \n", - "0 1.0 1.0 1.0 2023-04-28 12:25:36 \\\n", - "1 1.0 1.0 1.0 2023-04-28 12:25:37 \n", - "2 1.0 1.0 1.0 2023-04-28 12:25:39 \n", - "3 1.0 1.0 1.0 2023-04-28 12:25:40 \n", - "4 1.0 1.0 1.0 2023-04-28 12:25:42 \n", - "5 1.0 1.0 1.0 2023-04-28 12:25:43 \n", - "6 1.0 1.0 1.0 2023-04-28 12:25:44 \n", - "7 1.0 1.0 1.0 2023-04-28 12:25:46 \n", - "8 1.0 1.0 1.0 2023-04-28 12:25:47 \n", - "9 1.0 1.0 1.0 2023-04-28 12:25:49 \n", - "10 1.0 1.0 1.0 2023-04-28 12:25:50 \n", - "11 1.0 1.0 1.0 2023-04-28 12:25:51 \n", - "12 NaN NaN NaN 2023-04-28 13:45:35 \n", + " creation_date status start_date name machine pipeline train_f1 \\\n", + "0 2023-06-17 08:59:12 created None None None None None \n", + "1 2023-06-17 08:59:12 created None None None None None \n", + "2 2023-06-17 08:59:12 created None None None None None \n", + "3 2023-06-17 08:59:12 created None None None None None \n", + "4 2023-06-17 08:59:12 created None None None None None \n", + "5 2023-06-17 08:59:12 created None None None None None \n", + "6 2023-06-17 08:59:12 created None None None None None \n", + "7 2023-06-17 08:59:12 created None None None None None \n", + "8 2023-06-17 08:59:12 created None None None None None \n", + "9 2023-06-17 08:59:12 created None None None None None \n", + "10 2023-06-17 08:59:12 created None None None None None \n", + "11 2023-06-17 08:59:12 created None None None None None \n", + "12 2023-06-17 08:59:12 created None None None None None \n", "\n", - " error \n", - "0 None \n", - "1 None \n", - "2 None \n", - "3 None \n", - "4 None \n", - "5 None \n", - "6 None \n", - "7 None \n", - "8 None \n", - "9 None \n", - "10 None \n", - "11 None \n", - "12 Traceback (most recent call last):\\n File \"/h... " + " train_accuracy test_f1 test_accuracy end_date error \n", + "0 None None None None None \n", + "1 None None None None None \n", + "2 None None None None None \n", + "3 None None None None None \n", + "4 None None None None None \n", + "5 None None None None None \n", + "6 None None None None None \n", + "7 None None None None None \n", + "8 None None None None None \n", + "9 None None None None None \n", + "10 None None None None None \n", + "11 None None None None None \n", + "12 None None None None None " ] }, "execution_count": 4, @@ -622,8 +596,13 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" + "ERROR:root:Traceback (most recent call last):\n", + " File \"/home/lukas/development/code_projects/py_experimenter/py_experimenter/experimenter.py\", line 382, in _execution_wrapper\n", + " experiment_function(keyfield_values, result_processor, custom_fields)\n", + " File \"/tmp/ipykernel_28275/1244630566.py\", line 31, in run_ml\n", + " raise ValueError(\"Example error\")\n", + "ValueError: Example error\n", + "\n" ] }, { @@ -674,17 +653,17 @@ " 5\n", " 2\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:35\n", + " 2023-06-17 08:59:13\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:36\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:18\n", " None\n", " \n", " \n", @@ -694,17 +673,17 @@ " 5\n", " 4\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:36\n", + " 2023-06-17 08:59:18\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:37\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:24\n", " None\n", " \n", " \n", @@ -714,17 +693,17 @@ " 5\n", " 6\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:38\n", + " 2023-06-17 08:59:24\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:39\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:29\n", " None\n", " \n", " \n", @@ -734,17 +713,17 @@ " 5\n", " 2\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:39\n", + " 2023-06-17 08:59:30\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:40\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:35\n", " None\n", " \n", " \n", @@ -754,17 +733,17 @@ " 5\n", " 4\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:40\n", + " 2023-06-17 08:59:35\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:42\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:41\n", " None\n", " \n", " \n", @@ -774,17 +753,17 @@ " 5\n", " 6\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:42\n", + " 2023-06-17 08:59:41\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:43\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:46\n", " None\n", " \n", " \n", @@ -794,17 +773,17 @@ " 5\n", " 2\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:43\n", + " 2023-06-17 08:59:46\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', 
StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:44\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:52\n", " None\n", " \n", " \n", @@ -814,17 +793,17 @@ " 5\n", " 4\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:45\n", + " 2023-06-17 08:59:52\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:46\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:57\n", " None\n", " \n", " \n", @@ -834,17 +813,17 @@ " 5\n", " 6\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:46\n", + " 2023-06-17 08:59:57\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:47\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 09:00:03\n", " None\n", " \n", " \n", @@ -854,17 +833,17 @@ " 5\n", " 2\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:48\n", + " 2023-06-17 09:00:03\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:49\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:09\n", " None\n", " \n", " \n", @@ -874,17 +853,17 @@ " 5\n", " 4\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:49\n", + " 2023-06-17 09:00:09\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:50\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:14\n", " None\n", " \n", " \n", @@ -894,29 +873,29 @@ " 5\n", " 6\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:50\n", + " 2023-06-17 09:00:14\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:51\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:20\n", " None\n", " \n", " \n", " 12\n", - " 15\n", + " 13\n", " error_dataset\n", " 3\n", " 42\n", " linear\n", - " 2023-04-28 13:45:34\n", + " 2023-06-17 08:59:12\n", " error\n", - " 2023-04-28 13:45:34\n", + " 2023-06-17 09:00:20\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", @@ -924,7 +903,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:45:35\n", + " 2023-06-17 09:00:25\n", " Traceback (most recent call last):\\n File \"/h...\n", " \n", " \n", @@ -932,8 +911,8 @@ "" ], "text/plain": [ - " ID dataset cross_validation_splits seed kernel \n", - "0 1 iris 5 2 linear \\\n", + " ID dataset cross_validation_splits seed kernel \\\n", + "0 1 iris 5 2 linear \n", "1 2 iris 5 4 linear \n", "2 3 iris 5 6 linear \n", "3 4 iris 5 2 poly \n", @@ -945,52 +924,52 @@ "9 10 iris 5 2 sigmoid \n", "10 11 iris 5 4 sigmoid \n", "11 12 iris 5 6 sigmoid \n", - "12 15 error_dataset 3 42 linear \n", + "12 13 error_dataset 3 42 linear \n", "\n", - " creation_date status start_date name 
\n", - "0 2023-04-28 12:25:34 done 2023-04-28 12:25:35 example_notebook \\\n", - "1 2023-04-28 12:25:34 done 2023-04-28 12:25:36 example_notebook \n", - "2 2023-04-28 12:25:34 done 2023-04-28 12:25:38 example_notebook \n", - "3 2023-04-28 12:25:34 done 2023-04-28 12:25:39 example_notebook \n", - "4 2023-04-28 12:25:34 done 2023-04-28 12:25:40 example_notebook \n", - "5 2023-04-28 12:25:34 done 2023-04-28 12:25:42 example_notebook \n", - "6 2023-04-28 12:25:34 done 2023-04-28 12:25:43 example_notebook \n", - "7 2023-04-28 12:25:34 done 2023-04-28 12:25:45 example_notebook \n", - "8 2023-04-28 12:25:34 done 2023-04-28 12:25:46 example_notebook \n", - "9 2023-04-28 12:25:34 done 2023-04-28 12:25:48 example_notebook \n", - "10 2023-04-28 12:25:34 done 2023-04-28 12:25:49 example_notebook \n", - "11 2023-04-28 12:25:34 done 2023-04-28 12:25:50 example_notebook \n", - "12 2023-04-28 13:45:34 error 2023-04-28 13:45:34 example_notebook \n", + " creation_date status start_date name \\\n", + "0 2023-06-17 08:59:12 done 2023-06-17 08:59:13 example_notebook \n", + "1 2023-06-17 08:59:12 done 2023-06-17 08:59:18 example_notebook \n", + "2 2023-06-17 08:59:12 done 2023-06-17 08:59:24 example_notebook \n", + "3 2023-06-17 08:59:12 done 2023-06-17 08:59:30 example_notebook \n", + "4 2023-06-17 08:59:12 done 2023-06-17 08:59:35 example_notebook \n", + "5 2023-06-17 08:59:12 done 2023-06-17 08:59:41 example_notebook \n", + "6 2023-06-17 08:59:12 done 2023-06-17 08:59:46 example_notebook \n", + "7 2023-06-17 08:59:12 done 2023-06-17 08:59:52 example_notebook \n", + "8 2023-06-17 08:59:12 done 2023-06-17 08:59:57 example_notebook \n", + "9 2023-06-17 08:59:12 done 2023-06-17 09:00:03 example_notebook \n", + "10 2023-06-17 08:59:12 done 2023-06-17 09:00:09 example_notebook \n", + "11 2023-06-17 08:59:12 done 2023-06-17 09:00:14 example_notebook \n", + "12 2023-06-17 08:59:12 error 2023-06-17 09:00:20 example_notebook \n", "\n", - " machine pipeline train_f1 \n", - "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \\\n", - "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", + " machine pipeline train_f1 \\\n", + "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 
0.975000 \n", + "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", "12 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... NaN \n", "\n", - " train_accuracy test_f1 test_accuracy end_date \n", - "0 1.0 1.0 1.0 2023-04-28 12:25:36 \\\n", - "1 1.0 1.0 1.0 2023-04-28 12:25:37 \n", - "2 1.0 1.0 1.0 2023-04-28 12:25:39 \n", - "3 1.0 1.0 1.0 2023-04-28 12:25:40 \n", - "4 1.0 1.0 1.0 2023-04-28 12:25:42 \n", - "5 1.0 1.0 1.0 2023-04-28 12:25:43 \n", - "6 1.0 1.0 1.0 2023-04-28 12:25:44 \n", - "7 1.0 1.0 1.0 2023-04-28 12:25:46 \n", - "8 1.0 1.0 1.0 2023-04-28 12:25:47 \n", - "9 1.0 1.0 1.0 2023-04-28 12:25:49 \n", - "10 1.0 1.0 1.0 2023-04-28 12:25:50 \n", - "11 1.0 1.0 1.0 2023-04-28 12:25:51 \n", - "12 NaN NaN NaN 2023-04-28 13:45:35 \n", + " train_accuracy test_f1 test_accuracy end_date \\\n", + "0 0.971667 0.966667 0.966667 2023-06-17 08:59:18 \n", + "1 0.971667 0.966667 0.966667 2023-06-17 08:59:24 \n", + "2 0.971667 0.966667 0.966667 2023-06-17 08:59:29 \n", + "3 0.936667 0.933333 0.933333 2023-06-17 08:59:35 \n", + "4 0.936667 0.933333 0.933333 2023-06-17 08:59:41 \n", + "5 0.936667 0.933333 0.933333 2023-06-17 08:59:46 \n", + "6 0.975000 0.966667 0.966667 2023-06-17 08:59:52 \n", + "7 0.975000 0.966667 0.966667 2023-06-17 08:59:57 \n", + "8 0.975000 0.966667 0.966667 2023-06-17 09:00:03 \n", + "9 0.896667 0.893333 0.893333 2023-06-17 09:00:09 \n", + "10 0.896667 0.893333 0.893333 2023-06-17 09:00:14 \n", + "11 0.896667 0.893333 0.893333 2023-06-17 09:00:20 \n", + "12 NaN NaN NaN 2023-06-17 09:00:25 \n", "\n", " error \n", "0 None \n", @@ -1021,6 +1000,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ivljpd70Fnal" @@ -1043,14 +1023,6 @@ "outputId": "2eba52e5-2794-4ff7-ccc2-d2bfcf32b4d9" }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -1099,17 +1071,17 @@ " 5\n", " 2\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:35\n", + " 2023-06-17 08:59:13\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:36\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:18\n", " None\n", " \n", " \n", @@ -1119,17 +1091,17 @@ " 5\n", " 4\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:36\n", + " 2023-06-17 08:59:18\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:37\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:24\n", " None\n", " \n", " \n", @@ -1139,17 +1111,17 @@ " 5\n", " 6\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:38\n", + " 2023-06-17 08:59:24\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:39\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:29\n", " None\n", " \n", " \n", @@ -1159,17 +1131,17 @@ " 5\n", " 2\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:39\n", + " 2023-06-17 08:59:30\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:40\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:35\n", " None\n", " \n", " \n", @@ -1179,17 +1151,17 @@ " 5\n", " 4\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:40\n", + " 2023-06-17 08:59:35\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:42\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:41\n", " None\n", " \n", " \n", @@ -1199,17 +1171,17 @@ " 5\n", " 6\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:42\n", + " 2023-06-17 08:59:41\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:43\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:46\n", " None\n", " \n", " \n", @@ -1219,17 +1191,17 @@ " 5\n", " 2\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:43\n", + " 2023-06-17 08:59:46\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:44\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:52\n", " None\n", " \n", " \n", @@ -1239,17 +1211,17 @@ " 5\n", " 4\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:45\n", + " 2023-06-17 
08:59:52\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:46\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:57\n", " None\n", " \n", " \n", @@ -1259,17 +1231,17 @@ " 5\n", " 6\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:46\n", + " 2023-06-17 08:59:57\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:47\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 09:00:03\n", " None\n", " \n", " \n", @@ -1279,17 +1251,17 @@ " 5\n", " 2\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:48\n", + " 2023-06-17 09:00:03\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:49\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:09\n", " None\n", " \n", " \n", @@ -1299,17 +1271,17 @@ " 5\n", " 4\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:49\n", + " 2023-06-17 09:00:09\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:50\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:14\n", " None\n", " \n", " \n", @@ -1319,29 +1291,29 @@ " 5\n", " 6\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:50\n", + " 2023-06-17 09:00:14\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:51\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:20\n", " None\n", " \n", " \n", " 12\n", - " 16\n", + " 14\n", " error_dataset\n", " 3\n", " 42\n", " linear\n", - " 2023-04-28 13:46:04\n", + " 2023-06-17 09:00:25\n", " created\n", - " NaT\n", + " None\n", " None\n", " None\n", " None\n", @@ -1349,7 +1321,7 @@ " NaN\n", " NaN\n", " NaN\n", - " NaT\n", + " None\n", " None\n", " \n", " \n", @@ -1357,8 +1329,8 @@ "" ], "text/plain": [ - " ID dataset cross_validation_splits seed kernel \n", - "0 1 iris 5 2 linear \\\n", + " ID dataset cross_validation_splits seed kernel \\\n", + "0 1 iris 5 2 linear \n", "1 2 iris 5 4 linear \n", "2 3 iris 5 6 linear \n", "3 4 iris 5 2 poly \n", @@ -1370,52 +1342,52 @@ "9 10 iris 5 2 sigmoid \n", "10 11 iris 5 4 sigmoid \n", "11 12 iris 5 6 sigmoid \n", - "12 16 error_dataset 3 42 linear \n", + "12 14 error_dataset 3 42 linear \n", "\n", - " creation_date status start_date name \n", - "0 2023-04-28 12:25:34 done 2023-04-28 12:25:35 example_notebook \\\n", - "1 2023-04-28 12:25:34 done 2023-04-28 12:25:36 example_notebook \n", - "2 2023-04-28 12:25:34 done 2023-04-28 12:25:38 example_notebook \n", - "3 2023-04-28 12:25:34 done 2023-04-28 12:25:39 example_notebook \n", - "4 2023-04-28 12:25:34 done 2023-04-28 12:25:40 example_notebook \n", - "5 2023-04-28 12:25:34 done 2023-04-28 12:25:42 example_notebook \n", - "6 2023-04-28 12:25:34 done 2023-04-28 12:25:43 example_notebook \n", - "7 2023-04-28 
12:25:34 done 2023-04-28 12:25:45 example_notebook \n", - "8 2023-04-28 12:25:34 done 2023-04-28 12:25:46 example_notebook \n", - "9 2023-04-28 12:25:34 done 2023-04-28 12:25:48 example_notebook \n", - "10 2023-04-28 12:25:34 done 2023-04-28 12:25:49 example_notebook \n", - "11 2023-04-28 12:25:34 done 2023-04-28 12:25:50 example_notebook \n", - "12 2023-04-28 13:46:04 created NaT None \n", + " creation_date status start_date name \\\n", + "0 2023-06-17 08:59:12 done 2023-06-17 08:59:13 example_notebook \n", + "1 2023-06-17 08:59:12 done 2023-06-17 08:59:18 example_notebook \n", + "2 2023-06-17 08:59:12 done 2023-06-17 08:59:24 example_notebook \n", + "3 2023-06-17 08:59:12 done 2023-06-17 08:59:30 example_notebook \n", + "4 2023-06-17 08:59:12 done 2023-06-17 08:59:35 example_notebook \n", + "5 2023-06-17 08:59:12 done 2023-06-17 08:59:41 example_notebook \n", + "6 2023-06-17 08:59:12 done 2023-06-17 08:59:46 example_notebook \n", + "7 2023-06-17 08:59:12 done 2023-06-17 08:59:52 example_notebook \n", + "8 2023-06-17 08:59:12 done 2023-06-17 08:59:57 example_notebook \n", + "9 2023-06-17 08:59:12 done 2023-06-17 09:00:03 example_notebook \n", + "10 2023-06-17 08:59:12 done 2023-06-17 09:00:09 example_notebook \n", + "11 2023-06-17 08:59:12 done 2023-06-17 09:00:14 example_notebook \n", + "12 2023-06-17 09:00:25 created None None \n", "\n", - " machine pipeline train_f1 \n", - "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \\\n", - "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", + " machine pipeline train_f1 \\\n", + "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 
0.896667 \n", "12 None None NaN \n", "\n", - " train_accuracy test_f1 test_accuracy end_date error \n", - "0 1.0 1.0 1.0 2023-04-28 12:25:36 None \n", - "1 1.0 1.0 1.0 2023-04-28 12:25:37 None \n", - "2 1.0 1.0 1.0 2023-04-28 12:25:39 None \n", - "3 1.0 1.0 1.0 2023-04-28 12:25:40 None \n", - "4 1.0 1.0 1.0 2023-04-28 12:25:42 None \n", - "5 1.0 1.0 1.0 2023-04-28 12:25:43 None \n", - "6 1.0 1.0 1.0 2023-04-28 12:25:44 None \n", - "7 1.0 1.0 1.0 2023-04-28 12:25:46 None \n", - "8 1.0 1.0 1.0 2023-04-28 12:25:47 None \n", - "9 1.0 1.0 1.0 2023-04-28 12:25:49 None \n", - "10 1.0 1.0 1.0 2023-04-28 12:25:50 None \n", - "11 1.0 1.0 1.0 2023-04-28 12:25:51 None \n", - "12 NaN NaN NaN NaT None " + " train_accuracy test_f1 test_accuracy end_date error \n", + "0 0.971667 0.966667 0.966667 2023-06-17 08:59:18 None \n", + "1 0.971667 0.966667 0.966667 2023-06-17 08:59:24 None \n", + "2 0.971667 0.966667 0.966667 2023-06-17 08:59:29 None \n", + "3 0.936667 0.933333 0.933333 2023-06-17 08:59:35 None \n", + "4 0.936667 0.933333 0.933333 2023-06-17 08:59:41 None \n", + "5 0.936667 0.933333 0.933333 2023-06-17 08:59:46 None \n", + "6 0.975000 0.966667 0.966667 2023-06-17 08:59:52 None \n", + "7 0.975000 0.966667 0.966667 2023-06-17 08:59:57 None \n", + "8 0.975000 0.966667 0.966667 2023-06-17 09:00:03 None \n", + "9 0.896667 0.893333 0.893333 2023-06-17 09:00:09 None \n", + "10 0.896667 0.893333 0.893333 2023-06-17 09:00:14 None \n", + "11 0.896667 0.893333 0.893333 2023-06-17 09:00:20 None \n", + "12 NaN NaN NaN None None " ] }, "execution_count": 6, @@ -1431,6 +1403,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "oLgmbd75Jtwm" @@ -1456,14 +1429,12 @@ "output_type": "stream", "text": [ "ERROR:root:Traceback (most recent call last):\n", - " File \"/home/lukas/development/code_projects/py_experimenter/py_experimenter/experimenter.py\", line 369, in _execution_wrapper\n", + " File \"/home/lukas/development/code_projects/py_experimenter/py_experimenter/experimenter.py\", line 382, in _execution_wrapper\n", " experiment_function(keyfield_values, result_processor, custom_fields)\n", - " File \"/tmp/ipykernel_24809/1244630566.py\", line 31, in run_ml\n", + " File \"/tmp/ipykernel_28275/1244630566.py\", line 31, in run_ml\n", " raise ValueError(\"Example error\")\n", "ValueError: Example error\n", - "\n", - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" + "\n" ] }, { @@ -1514,17 +1485,17 @@ " 5\n", " 2\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:35\n", + " 2023-06-17 08:59:13\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:36\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:18\n", " None\n", " \n", " \n", @@ -1534,17 +1505,17 @@ " 5\n", " 4\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:36\n", + " 2023-06-17 08:59:18\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:37\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:24\n", " None\n", " \n", " \n", @@ -1554,17 +1525,17 @@ " 5\n", " 6\n", " linear\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:38\n", + " 2023-06-17 08:59:24\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:39\n", + " 0.971667\n", + " 0.971667\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:29\n", " None\n", " \n", " \n", @@ -1574,17 +1545,17 @@ " 5\n", " 2\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:39\n", + " 2023-06-17 08:59:30\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:40\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:35\n", " None\n", " \n", " \n", @@ -1594,17 +1565,17 @@ " 5\n", " 4\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:40\n", + " 2023-06-17 08:59:35\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:42\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:41\n", " None\n", " \n", " \n", @@ -1614,17 +1585,17 @@ " 5\n", " 6\n", " poly\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:42\n", + " 2023-06-17 08:59:41\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:43\n", + " 0.936667\n", + " 0.936667\n", + " 0.933333\n", + " 0.933333\n", + " 2023-06-17 08:59:46\n", " None\n", " \n", " \n", @@ -1634,17 +1605,17 @@ " 5\n", " 2\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:43\n", + " 2023-06-17 08:59:46\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:44\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:52\n", " None\n", " \n", " \n", @@ -1654,17 +1625,17 @@ " 5\n", " 4\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:45\n", + " 2023-06-17 08:59:52\n", " 
example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:46\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 08:59:57\n", " None\n", " \n", " \n", @@ -1674,17 +1645,17 @@ " 5\n", " 6\n", " rbf\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:46\n", + " 2023-06-17 08:59:57\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:47\n", + " 0.975000\n", + " 0.975000\n", + " 0.966667\n", + " 0.966667\n", + " 2023-06-17 09:00:03\n", " None\n", " \n", " \n", @@ -1694,17 +1665,17 @@ " 5\n", " 2\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:48\n", + " 2023-06-17 09:00:03\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:49\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:09\n", " None\n", " \n", " \n", @@ -1714,17 +1685,17 @@ " 5\n", " 4\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:49\n", + " 2023-06-17 09:00:09\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:50\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:14\n", " None\n", " \n", " \n", @@ -1734,29 +1705,29 @@ " 5\n", " 6\n", " sigmoid\n", - " 2023-04-28 12:25:34\n", + " 2023-06-17 08:59:12\n", " done\n", - " 2023-04-28 12:25:50\n", + " 2023-06-17 09:00:14\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 2023-04-28 12:25:51\n", + " 0.896667\n", + " 0.896667\n", + " 0.893333\n", + " 0.893333\n", + " 2023-06-17 09:00:20\n", " None\n", " \n", " \n", " 12\n", - " 16\n", + " 14\n", " error_dataset\n", " 3\n", " 42\n", " linear\n", - " 2023-04-28 13:46:04\n", + " 2023-06-17 09:00:25\n", " error\n", - " 2023-04-28 13:46:05\n", + " 2023-06-17 09:00:26\n", " example_notebook\n", " Worklaptop\n", " Pipeline(steps=[('standardscaler', StandardSca...\n", @@ -1764,7 +1735,7 @@ " NaN\n", " NaN\n", " NaN\n", - " 2023-04-28 13:46:07\n", + " 2023-06-17 09:00:31\n", " Traceback (most recent call last):\\n File \"/h...\n", " \n", " \n", @@ -1772,8 +1743,8 @@ "" ], "text/plain": [ - " ID dataset cross_validation_splits seed kernel \n", - "0 1 iris 5 2 linear \\\n", + " ID dataset cross_validation_splits seed kernel \\\n", + "0 1 iris 5 2 linear \n", "1 2 iris 5 4 linear \n", "2 3 iris 5 6 linear \n", "3 4 iris 5 2 poly \n", @@ -1785,52 +1756,52 @@ "9 10 iris 5 2 sigmoid \n", "10 11 iris 5 4 sigmoid \n", "11 12 iris 5 6 sigmoid \n", - "12 16 error_dataset 3 42 linear \n", + "12 14 error_dataset 3 42 linear \n", "\n", - " creation_date status start_date name \n", - "0 2023-04-28 12:25:34 done 2023-04-28 12:25:35 example_notebook \\\n", - "1 2023-04-28 12:25:34 done 2023-04-28 12:25:36 example_notebook \n", - "2 2023-04-28 12:25:34 done 2023-04-28 12:25:38 example_notebook \n", - "3 2023-04-28 12:25:34 done 2023-04-28 12:25:39 example_notebook \n", - "4 2023-04-28 12:25:34 done 2023-04-28 12:25:40 example_notebook \n", - "5 
2023-04-28 12:25:34 done 2023-04-28 12:25:42 example_notebook \n", - "6 2023-04-28 12:25:34 done 2023-04-28 12:25:43 example_notebook \n", - "7 2023-04-28 12:25:34 done 2023-04-28 12:25:45 example_notebook \n", - "8 2023-04-28 12:25:34 done 2023-04-28 12:25:46 example_notebook \n", - "9 2023-04-28 12:25:34 done 2023-04-28 12:25:48 example_notebook \n", - "10 2023-04-28 12:25:34 done 2023-04-28 12:25:49 example_notebook \n", - "11 2023-04-28 12:25:34 done 2023-04-28 12:25:50 example_notebook \n", - "12 2023-04-28 13:46:04 error 2023-04-28 13:46:05 example_notebook \n", + " creation_date status start_date name \\\n", + "0 2023-06-17 08:59:12 done 2023-06-17 08:59:13 example_notebook \n", + "1 2023-06-17 08:59:12 done 2023-06-17 08:59:18 example_notebook \n", + "2 2023-06-17 08:59:12 done 2023-06-17 08:59:24 example_notebook \n", + "3 2023-06-17 08:59:12 done 2023-06-17 08:59:30 example_notebook \n", + "4 2023-06-17 08:59:12 done 2023-06-17 08:59:35 example_notebook \n", + "5 2023-06-17 08:59:12 done 2023-06-17 08:59:41 example_notebook \n", + "6 2023-06-17 08:59:12 done 2023-06-17 08:59:46 example_notebook \n", + "7 2023-06-17 08:59:12 done 2023-06-17 08:59:52 example_notebook \n", + "8 2023-06-17 08:59:12 done 2023-06-17 08:59:57 example_notebook \n", + "9 2023-06-17 08:59:12 done 2023-06-17 09:00:03 example_notebook \n", + "10 2023-06-17 08:59:12 done 2023-06-17 09:00:09 example_notebook \n", + "11 2023-06-17 08:59:12 done 2023-06-17 09:00:14 example_notebook \n", + "12 2023-06-17 09:00:25 error 2023-06-17 09:00:26 example_notebook \n", "\n", - " machine pipeline train_f1 \n", - "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \\\n", - "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", - "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 1.0 \n", + " machine pipeline train_f1 \\\n", + "0 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "1 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "2 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.971667 \n", + "3 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "4 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "5 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.936667 \n", + "6 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "7 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "8 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.975000 \n", + "9 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "10 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 0.896667 \n", + "11 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... 
0.896667 \n", "12 Worklaptop Pipeline(steps=[('standardscaler', StandardSca... NaN \n", "\n", - " train_accuracy test_f1 test_accuracy end_date \n", - "0 1.0 1.0 1.0 2023-04-28 12:25:36 \\\n", - "1 1.0 1.0 1.0 2023-04-28 12:25:37 \n", - "2 1.0 1.0 1.0 2023-04-28 12:25:39 \n", - "3 1.0 1.0 1.0 2023-04-28 12:25:40 \n", - "4 1.0 1.0 1.0 2023-04-28 12:25:42 \n", - "5 1.0 1.0 1.0 2023-04-28 12:25:43 \n", - "6 1.0 1.0 1.0 2023-04-28 12:25:44 \n", - "7 1.0 1.0 1.0 2023-04-28 12:25:46 \n", - "8 1.0 1.0 1.0 2023-04-28 12:25:47 \n", - "9 1.0 1.0 1.0 2023-04-28 12:25:49 \n", - "10 1.0 1.0 1.0 2023-04-28 12:25:50 \n", - "11 1.0 1.0 1.0 2023-04-28 12:25:51 \n", - "12 NaN NaN NaN 2023-04-28 13:46:07 \n", + " train_accuracy test_f1 test_accuracy end_date \\\n", + "0 0.971667 0.966667 0.966667 2023-06-17 08:59:18 \n", + "1 0.971667 0.966667 0.966667 2023-06-17 08:59:24 \n", + "2 0.971667 0.966667 0.966667 2023-06-17 08:59:29 \n", + "3 0.936667 0.933333 0.933333 2023-06-17 08:59:35 \n", + "4 0.936667 0.933333 0.933333 2023-06-17 08:59:41 \n", + "5 0.936667 0.933333 0.933333 2023-06-17 08:59:46 \n", + "6 0.975000 0.966667 0.966667 2023-06-17 08:59:52 \n", + "7 0.975000 0.966667 0.966667 2023-06-17 08:59:57 \n", + "8 0.975000 0.966667 0.966667 2023-06-17 09:00:03 \n", + "9 0.896667 0.893333 0.893333 2023-06-17 09:00:09 \n", + "10 0.896667 0.893333 0.893333 2023-06-17 09:00:14 \n", + "11 0.896667 0.893333 0.893333 2023-06-17 09:00:20 \n", + "12 NaN NaN NaN 2023-06-17 09:00:31 \n", "\n", " error \n", "0 None \n", @@ -1861,6 +1832,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ekECNVPGJxyH" @@ -1884,14 +1856,6 @@ "outputId": "c8fb7af2-25ad-4882-d4ae-9fb7ada92d36" }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -1935,7 +1899,7 @@ " \n", " \n", " error_dataset\n", - " 16.0\n", + " 14.0\n", " 3.0\n", " 42.0\n", " NaN\n", @@ -1948,25 +1912,25 @@ " 6.5\n", " 5.0\n", " 4.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", - " 1.0\n", + " 0.945\n", + " 0.945\n", + " 0.94\n", + " 0.94\n", " \n", " \n", "\n", "" ], "text/plain": [ - " ID cross_validation_splits seed train_f1 train_accuracy \n", + " ID cross_validation_splits seed train_f1 train_accuracy \\\n", "dataset \n", - "error_dataset 16.0 3.0 42.0 NaN NaN \\\n", - "iris 6.5 5.0 4.0 1.0 1.0 \n", + "error_dataset 14.0 3.0 42.0 NaN NaN \n", + "iris 6.5 5.0 4.0 0.945 0.945 \n", "\n", " test_f1 test_accuracy \n", "dataset \n", "error_dataset NaN NaN \n", - "iris 1.0 1.0 " + "iris 0.94 0.94 " ] }, "execution_count": 8, @@ -1980,6 +1944,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "id": "bjp-_uRDJ7oB" @@ -2009,7 +1974,7 @@ " & test_f1 \\\\\n", "dataset & \\\\\n", "error_dataset & nan \\\\\n", - "iris & 1.000000 \\\\\n", + "iris & 0.940000 \\\\\n", "\\end{tabular}\n", "\n" ] @@ -2018,6 +1983,653 @@ "source": [ "print(result_table_agg[['test_f1']].style.to_latex())" ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CodeCarbon\n", + "[CodeCarbon](https://tornede.github.io/py_experimenter/usage/experiment_configuration_file.html#codecarbon) is integrated into `PyExperimenter` to provide information about the carbon emissions of experiments. `CodeCarbon` will create a table with suffix `_codecarbon` in the database, each row containing information about the carbon emissions of a single experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDexperiment_idcodecarbon_timestampproject_namerun_idduration_secondsemissions_kgemissions_rate_kg_seccpu_power_wattgpu_power_watt...cpu_countcpu_modelgpu_countgpu_modellongitudelatituderam_total_sizetracking_modeon_cloudoffline_mode
0112023-06-17T08:59:18codecarbon451bc2e9-8c7f-416b-80f3-4ed0ef44cdff0.1214723.084560e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
1222023-06-17T08:59:24codecarbon36f4b99e-b138-4c3f-b833-f6824feafa8f0.1478913.754468e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
2332023-06-17T08:59:29codecarbon7ed6d96f-68b1-4343-a1c2-7b26ec4bad4b0.1473793.877488e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
3442023-06-17T08:59:35codecarbon53826b6e-933d-4537-9477-0ff1b9afd5d10.1259643.283903e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
4552023-06-17T08:59:41codecarbon640bbbbf-c5a4-4706-94fa-e26bc8eb4c250.1347533.522335e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
5662023-06-17T08:59:46codecarbonb67e2e24-72ac-4ca6-976b-440454c794160.1402183.614312e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
6772023-06-17T08:59:52codecarbonb411da2a-9809-4f4f-bfbd-88d62bcde67b0.1318393.342969e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
7882023-06-17T08:59:57codecarbon244f3cff-c8bf-42c2-a186-994aa6931e270.1345103.455675e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
8992023-06-17T09:00:03codecarbon7a483787-7c64-4eaa-919d-e978600ea3110.1502133.967545e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
910102023-06-17T09:00:09codecarbond738e59c-83c8-4ece-b0fc-fcc8dd0907dd0.1357363.511814e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
1011112023-06-17T09:00:14codecarbonc8d8c9d7-ecc0-4729-aefd-6cfb49d92b730.1296633.374622e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
1112122023-06-17T09:00:20codecarbon74a3f19e-cdec-4ef3-be29-cd1c3fb69d310.1211403.037764e-070.00000342.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
1213132023-06-17T09:00:25codecarbon53effe7a-14f9-462d-911c-5cb736146e4b0.0937652.213938e-070.00000242.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
1314142023-06-17T09:00:31codecarbon1c67cb8f-cc53-4073-a610-01e94071bf280.1071412.437078e-070.00000242.50.0...16.012th Gen Intel(R) Core(TM) i7-1260PNoneNone8.851651.809915.474876processN0
\n", + "

14 rows × 33 columns

\n", + "
" + ], + "text/plain": [ + " ID experiment_id codecarbon_timestamp project_name \\\n", + "0 1 1 2023-06-17T08:59:18 codecarbon \n", + "1 2 2 2023-06-17T08:59:24 codecarbon \n", + "2 3 3 2023-06-17T08:59:29 codecarbon \n", + "3 4 4 2023-06-17T08:59:35 codecarbon \n", + "4 5 5 2023-06-17T08:59:41 codecarbon \n", + "5 6 6 2023-06-17T08:59:46 codecarbon \n", + "6 7 7 2023-06-17T08:59:52 codecarbon \n", + "7 8 8 2023-06-17T08:59:57 codecarbon \n", + "8 9 9 2023-06-17T09:00:03 codecarbon \n", + "9 10 10 2023-06-17T09:00:09 codecarbon \n", + "10 11 11 2023-06-17T09:00:14 codecarbon \n", + "11 12 12 2023-06-17T09:00:20 codecarbon \n", + "12 13 13 2023-06-17T09:00:25 codecarbon \n", + "13 14 14 2023-06-17T09:00:31 codecarbon \n", + "\n", + " run_id duration_seconds emissions_kg \\\n", + "0 451bc2e9-8c7f-416b-80f3-4ed0ef44cdff 0.121472 3.084560e-07 \n", + "1 36f4b99e-b138-4c3f-b833-f6824feafa8f 0.147891 3.754468e-07 \n", + "2 7ed6d96f-68b1-4343-a1c2-7b26ec4bad4b 0.147379 3.877488e-07 \n", + "3 53826b6e-933d-4537-9477-0ff1b9afd5d1 0.125964 3.283903e-07 \n", + "4 640bbbbf-c5a4-4706-94fa-e26bc8eb4c25 0.134753 3.522335e-07 \n", + "5 b67e2e24-72ac-4ca6-976b-440454c79416 0.140218 3.614312e-07 \n", + "6 b411da2a-9809-4f4f-bfbd-88d62bcde67b 0.131839 3.342969e-07 \n", + "7 244f3cff-c8bf-42c2-a186-994aa6931e27 0.134510 3.455675e-07 \n", + "8 7a483787-7c64-4eaa-919d-e978600ea311 0.150213 3.967545e-07 \n", + "9 d738e59c-83c8-4ece-b0fc-fcc8dd0907dd 0.135736 3.511814e-07 \n", + "10 c8d8c9d7-ecc0-4729-aefd-6cfb49d92b73 0.129663 3.374622e-07 \n", + "11 74a3f19e-cdec-4ef3-be29-cd1c3fb69d31 0.121140 3.037764e-07 \n", + "12 53effe7a-14f9-462d-911c-5cb736146e4b 0.093765 2.213938e-07 \n", + "13 1c67cb8f-cc53-4073-a610-01e94071bf28 0.107141 2.437078e-07 \n", + "\n", + " emissions_rate_kg_sec cpu_power_watt gpu_power_watt ... cpu_count \\\n", + "0 0.000003 42.5 0.0 ... 16.0 \n", + "1 0.000003 42.5 0.0 ... 16.0 \n", + "2 0.000003 42.5 0.0 ... 16.0 \n", + "3 0.000003 42.5 0.0 ... 16.0 \n", + "4 0.000003 42.5 0.0 ... 16.0 \n", + "5 0.000003 42.5 0.0 ... 16.0 \n", + "6 0.000003 42.5 0.0 ... 16.0 \n", + "7 0.000003 42.5 0.0 ... 16.0 \n", + "8 0.000003 42.5 0.0 ... 16.0 \n", + "9 0.000003 42.5 0.0 ... 16.0 \n", + "10 0.000003 42.5 0.0 ... 16.0 \n", + "11 0.000003 42.5 0.0 ... 16.0 \n", + "12 0.000002 42.5 0.0 ... 16.0 \n", + "13 0.000002 42.5 0.0 ... 
16.0  \n", + "\n", + "                              cpu_model gpu_count gpu_model longitude  \\\n", + "0   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "1   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "2   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "3   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "4   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "5   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "6   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "7   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "8   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "9   12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "10  12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "11  12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "12  12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "13  12th Gen Intel(R) Core(TM) i7-1260P      None      None    8.8516   \n", + "\n", + "    latitude  ram_total_size tracking_mode on_cloud  offline_mode  \n", + "0    51.8099       15.474876       process        N             0  \n", + "1    51.8099       15.474876       process        N             0  \n", + "2    51.8099       15.474876       process        N             0  \n", + "3    51.8099       15.474876       process        N             0  \n", + "4    51.8099       15.474876       process        N             0  \n", + "5    51.8099       15.474876       process        N             0  \n", + "6    51.8099       15.474876       process        N             0  \n", + "7    51.8099       15.474876       process        N             0  \n", + "8    51.8099       15.474876       process        N             0  \n", + "9    51.8099       15.474876       process        N             0  \n", + "10   51.8099       15.474876       process        N             0  \n", + "11   51.8099       15.474876       process        N             0  \n", + "12   51.8099       15.474876       process        N             0  \n", + "13   51.8099       15.474876       process        N             0  \n", + "\n", + "[14 rows x 33 columns]" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "experimenter.get_codecarbon_table()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Aggregating CodeCarbon Results\n", + "\n", + "The carbon emission information of `CodeCarbon` can easily be aggregated via `pandas.DataFrame`." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDexperiment_idduration_secondsemissions_kgemissions_rate_kg_seccpu_power_wattgpu_power_wattram_power_wattcpu_energy_kwgpu_energy_kwram_energy_kwenergy_consumed_kwcpu_countram_total_sizeoffline_mode
project_name
codecarbon1051051.8216850.0000050.000036595.00.00.8905050.0000150.02.255914e-080.000015224.0216.648270
\n", + "
" + ], + "text/plain": [ + " ID experiment_id duration_seconds emissions_kg \\\n", + "project_name \n", + "codecarbon 105 105 1.821685 0.000005 \n", + "\n", + " emissions_rate_kg_sec cpu_power_watt gpu_power_watt \\\n", + "project_name \n", + "codecarbon 0.000036 595.0 0.0 \n", + "\n", + " ram_power_watt cpu_energy_kw gpu_energy_kw ram_energy_kw \\\n", + "project_name \n", + "codecarbon 0.890505 0.000015 0.0 2.255914e-08 \n", + "\n", + " energy_consumed_kw cpu_count ram_total_size offline_mode \n", + "project_name \n", + "codecarbon 0.000015 224.0 216.64827 0 " + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "carbon_emissions = experimenter.get_codecarbon_table().groupby(['project_name']).sum(numeric_only = True)\n", + "carbon_emissions" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Printing CodeCarbon Results as LaTex Table\n", + "\n", + "Furthermore, the resulting `pandas.Dataframe` can easily be printed as LaTex table." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\\begin{tabular}{lrr}\n", + " & energy_consumed_kw & emissions_kg \\\\\n", + "project_name & & \\\\\n", + "codecarbon & 0.000015 & 0.000005 \\\\\n", + "\\end{tabular}\n", + "\n" + ] + } + ], + "source": [ + "print(carbon_emissions[['energy_consumed_kw', 'emissions_kg']].style.to_latex())" + ] } ], "metadata": { diff --git a/docs/source/examples/example_logtables.ipynb b/docs/source/examples/example_logtables.ipynb index 15ba38fb..6a9e54b3 100644 --- a/docs/source/examples/example_logtables.ipynb +++ b/docs/source/examples/example_logtables.ipynb @@ -157,14 +157,6 @@ "execution_count": 3, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -208,7 +200,7 @@ " iris\n", " 5\n", " 1\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " created\n", " None\n", " None\n", @@ -224,7 +216,7 @@ " iris\n", " 5\n", " 2\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " created\n", " None\n", " None\n", @@ -240,7 +232,7 @@ " iris\n", " 5\n", " 3\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " created\n", " None\n", " None\n", @@ -256,7 +248,7 @@ " iris\n", " 5\n", " 4\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " created\n", " None\n", " None\n", @@ -272,7 +264,7 @@ " iris\n", " 5\n", " 5\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " created\n", " None\n", " None\n", @@ -287,12 +279,12 @@ "" ], "text/plain": [ - " ID dataset cross_validation_splits seed creation_date status \\\n", - "0 1 iris 5 1 2023-04-28 13:46:31 created \n", - "1 2 iris 5 2 2023-04-28 13:46:31 created \n", - "2 3 iris 5 3 2023-04-28 13:46:31 created \n", - "3 4 iris 5 4 2023-04-28 13:46:31 created \n", - "4 5 iris 5 5 2023-04-28 13:46:31 created \n", + " ID dataset cross_validation_splits seed creation_date status \\\n", + "0 1 iris 5 1 2023-06-17 09:04:48 created \n", + "1 2 iris 5 2 2023-06-17 09:04:48 created \n", + "2 3 iris 5 3 2023-06-17 09:04:48 created \n", + "3 4 iris 5 4 2023-06-17 09:04:48 created \n", + "4 5 iris 5 5 2023-06-17 09:04:48 created \n", "\n", " start_date name machine best_kernel_f1 best_kernel_accuracy end_date error \n", "0 None None None None None None None \n", @@ -321,14 +313,6 @@ "execution_count": 4, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -405,7 +389,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Check results\n", + "## Check Results\n", "The content of all database tables having keyfields and resultfields, as well as every logtable can be easily obtained." ] }, @@ -414,14 +398,6 @@ "execution_count": 6, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -465,14 +441,14 @@ " iris\n", " 5\n", " 1\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " done\n", - " 2023-04-28 13:46:33\n", + " 2023-06-17 09:04:48\n", " example_notebook\n", " Worklaptop\n", " linear\n", " linear\n", - " 2023-04-28 13:46:35\n", + " 2023-06-17 09:04:54\n", " None\n", " \n", " \n", @@ -481,14 +457,14 @@ " iris\n", " 5\n", " 2\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " done\n", - " 2023-04-28 13:46:35\n", + " 2023-06-17 09:04:54\n", " example_notebook\n", " Worklaptop\n", " linear\n", " linear\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " None\n", " \n", " \n", @@ -497,14 +473,14 @@ " iris\n", " 5\n", " 3\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " done\n", - " 2023-04-28 13:46:38\n", + " 2023-06-17 09:05:00\n", " example_notebook\n", " Worklaptop\n", " linear\n", " linear\n", - " 2023-04-28 13:46:40\n", + " 2023-06-17 09:05:06\n", " None\n", " \n", " \n", @@ -513,14 +489,14 @@ " iris\n", " 5\n", " 4\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " done\n", - " 2023-04-28 13:46:40\n", + " 2023-06-17 09:05:06\n", " example_notebook\n", " Worklaptop\n", " linear\n", " linear\n", - " 2023-04-28 13:46:42\n", + " 2023-06-17 09:05:12\n", " None\n", " \n", " \n", @@ -529,14 +505,14 @@ " iris\n", " 5\n", " 5\n", - " 2023-04-28 13:46:31\n", + " 2023-06-17 09:04:48\n", " done\n", - " 2023-04-28 13:46:43\n", + " 2023-06-17 09:05:12\n", " example_notebook\n", " Worklaptop\n", " linear\n", " linear\n", - " 2023-04-28 13:46:45\n", + " 2023-06-17 09:05:18\n", " None\n", " \n", " \n", @@ -544,26 +520,26 @@ "" ], "text/plain": [ - " ID dataset cross_validation_splits seed creation_date status \\\n", - "0 1 iris 5 1 2023-04-28 13:46:31 done \n", - "1 2 iris 5 2 2023-04-28 13:46:31 done \n", - "2 3 iris 5 3 2023-04-28 13:46:31 done \n", - "3 4 iris 5 4 2023-04-28 13:46:31 done \n", - "4 5 iris 5 5 2023-04-28 13:46:31 done \n", + " ID dataset cross_validation_splits seed creation_date status \\\n", + "0 1 iris 5 1 2023-06-17 09:04:48 done \n", + "1 2 iris 5 2 2023-06-17 09:04:48 done \n", + "2 3 iris 5 3 2023-06-17 09:04:48 done \n", + "3 4 iris 5 4 2023-06-17 09:04:48 done \n", + "4 5 iris 5 5 2023-06-17 09:04:48 done \n", "\n", - " start_date name machine best_kernel_f1 \\\n", - "0 2023-04-28 13:46:33 example_notebook Worklaptop linear \n", - "1 2023-04-28 13:46:35 example_notebook Worklaptop linear \n", - "2 2023-04-28 13:46:38 example_notebook Worklaptop linear \n", - "3 2023-04-28 13:46:40 example_notebook Worklaptop linear \n", - "4 2023-04-28 13:46:43 example_notebook Worklaptop linear \n", + " start_date name machine best_kernel_f1 \\\n", + "0 2023-06-17 09:04:48 example_notebook Worklaptop linear \n", + "1 2023-06-17 09:04:54 example_notebook Worklaptop linear \n", + "2 2023-06-17 09:05:00 example_notebook Worklaptop linear \n", + "3 2023-06-17 09:05:06 example_notebook Worklaptop linear \n", + "4 2023-06-17 09:05:12 example_notebook Worklaptop linear \n", "\n", - " best_kernel_accuracy end_date error \n", - "0 linear 2023-04-28 13:46:35 None \n", - "1 linear 2023-04-28 13:46:37 None \n", - "2 linear 2023-04-28 13:46:40 None \n", - "3 linear 2023-04-28 13:46:42 None \n", - "4 linear 2023-04-28 13:46:45 None " + " best_kernel_accuracy end_date error \n", + "0 linear 2023-06-17 09:04:54 None \n", + "1 linear 2023-06-17 09:05:00 None \n", + "2 linear 2023-06-17 09:05:06 None \n", + "3 
linear 2023-06-17 09:05:12 None \n", + "4 linear 2023-06-17 09:05:18 None " ] }, "execution_count": 6, @@ -580,14 +556,6 @@ "execution_count": 7, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -622,7 +590,7 @@ " 0\n", " 1\n", " 1\n", - " 2023-04-28 13:46:33\n", + " 2023-06-17 09:04:54\n", " 0.971667\n", " 0.971667\n", " 'linear'\n", @@ -631,7 +599,7 @@ " 1\n", " 2\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.936667\n", " 0.936667\n", " 'poly'\n", @@ -640,7 +608,7 @@ " 2\n", " 3\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.975000\n", " 0.975000\n", " 'rbf'\n", @@ -649,7 +617,7 @@ " 3\n", " 4\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.896667\n", " 0.896667\n", " 'sigmoid'\n", @@ -658,7 +626,7 @@ " 4\n", " 5\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.971667\n", " 0.971667\n", " 'linear'\n", @@ -667,7 +635,7 @@ " 5\n", " 6\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.936667\n", " 0.936667\n", " 'poly'\n", @@ -676,7 +644,7 @@ " 6\n", " 7\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.975000\n", " 0.975000\n", " 'rbf'\n", @@ -685,7 +653,7 @@ " 7\n", " 8\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.896667\n", " 0.896667\n", " 'sigmoid'\n", @@ -694,7 +662,7 @@ " 8\n", " 9\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.971667\n", " 0.971667\n", " 'linear'\n", @@ -703,7 +671,7 @@ " 9\n", " 10\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.936667\n", " 0.936667\n", " 'poly'\n", @@ -712,7 +680,7 @@ " 10\n", " 11\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.975000\n", " 0.975000\n", " 'rbf'\n", @@ -721,7 +689,7 @@ " 11\n", " 12\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.896667\n", " 0.896667\n", " 'sigmoid'\n", @@ -730,7 +698,7 @@ " 12\n", " 13\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.971667\n", " 0.971667\n", " 'linear'\n", @@ -739,7 +707,7 @@ " 13\n", " 14\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.936667\n", " 0.936667\n", " 'poly'\n", @@ -748,7 +716,7 @@ " 14\n", " 15\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.975000\n", " 0.975000\n", " 'rbf'\n", @@ -757,7 +725,7 @@ " 15\n", " 16\n", " 4\n", - " 2023-04-28 13:46:42\n", + " 2023-06-17 09:05:12\n", " 0.896667\n", " 0.896667\n", " 'sigmoid'\n", @@ -766,7 +734,7 @@ " 16\n", " 17\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:17\n", " 0.971667\n", " 0.971667\n", " 'linear'\n", @@ -775,7 +743,7 @@ " 17\n", " 18\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.936667\n", " 0.936667\n", " 'poly'\n", @@ -784,7 +752,7 @@ " 18\n", " 19\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.975000\n", " 0.975000\n", " 'rbf'\n", @@ -793,7 +761,7 @@ " 19\n", " 20\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.896667\n", " 0.896667\n", " 'sigmoid'\n", @@ -803,27 +771,27 @@ "" ], 
"text/plain": [ - " ID experiment_id timestamp f1 accuracy kernel\n", - "0 1 1 2023-04-28 13:46:33 0.971667 0.971667 'linear'\n", - "1 2 1 2023-04-28 13:46:34 0.936667 0.936667 'poly'\n", - "2 3 1 2023-04-28 13:46:34 0.975000 0.975000 'rbf'\n", - "3 4 1 2023-04-28 13:46:34 0.896667 0.896667 'sigmoid'\n", - "4 5 2 2023-04-28 13:46:36 0.971667 0.971667 'linear'\n", - "5 6 2 2023-04-28 13:46:36 0.936667 0.936667 'poly'\n", - "6 7 2 2023-04-28 13:46:37 0.975000 0.975000 'rbf'\n", - "7 8 2 2023-04-28 13:46:37 0.896667 0.896667 'sigmoid'\n", - "8 9 3 2023-04-28 13:46:39 0.971667 0.971667 'linear'\n", - "9 10 3 2023-04-28 13:46:39 0.936667 0.936667 'poly'\n", - "10 11 3 2023-04-28 13:46:39 0.975000 0.975000 'rbf'\n", - "11 12 3 2023-04-28 13:46:39 0.896667 0.896667 'sigmoid'\n", - "12 13 4 2023-04-28 13:46:41 0.971667 0.971667 'linear'\n", - "13 14 4 2023-04-28 13:46:41 0.936667 0.936667 'poly'\n", - "14 15 4 2023-04-28 13:46:41 0.975000 0.975000 'rbf'\n", - "15 16 4 2023-04-28 13:46:42 0.896667 0.896667 'sigmoid'\n", - "16 17 5 2023-04-28 13:46:44 0.971667 0.971667 'linear'\n", - "17 18 5 2023-04-28 13:46:44 0.936667 0.936667 'poly'\n", - "18 19 5 2023-04-28 13:46:44 0.975000 0.975000 'rbf'\n", - "19 20 5 2023-04-28 13:46:44 0.896667 0.896667 'sigmoid'" + " ID experiment_id timestamp f1 accuracy kernel\n", + "0 1 1 2023-06-17 09:04:54 0.971667 0.971667 'linear'\n", + "1 2 1 2023-06-17 09:04:54 0.936667 0.936667 'poly'\n", + "2 3 1 2023-06-17 09:04:54 0.975000 0.975000 'rbf'\n", + "3 4 1 2023-06-17 09:04:54 0.896667 0.896667 'sigmoid'\n", + "4 5 2 2023-06-17 09:05:00 0.971667 0.971667 'linear'\n", + "5 6 2 2023-06-17 09:05:00 0.936667 0.936667 'poly'\n", + "6 7 2 2023-06-17 09:05:00 0.975000 0.975000 'rbf'\n", + "7 8 2 2023-06-17 09:05:00 0.896667 0.896667 'sigmoid'\n", + "8 9 3 2023-06-17 09:05:06 0.971667 0.971667 'linear'\n", + "9 10 3 2023-06-17 09:05:06 0.936667 0.936667 'poly'\n", + "10 11 3 2023-06-17 09:05:06 0.975000 0.975000 'rbf'\n", + "11 12 3 2023-06-17 09:05:06 0.896667 0.896667 'sigmoid'\n", + "12 13 4 2023-06-17 09:05:12 0.971667 0.971667 'linear'\n", + "13 14 4 2023-06-17 09:05:12 0.936667 0.936667 'poly'\n", + "14 15 4 2023-06-17 09:05:12 0.975000 0.975000 'rbf'\n", + "15 16 4 2023-06-17 09:05:12 0.896667 0.896667 'sigmoid'\n", + "16 17 5 2023-06-17 09:05:17 0.971667 0.971667 'linear'\n", + "17 18 5 2023-06-17 09:05:18 0.936667 0.936667 'poly'\n", + "18 19 5 2023-06-17 09:05:18 0.975000 0.975000 'rbf'\n", + "19 20 5 2023-06-17 09:05:18 0.896667 0.896667 'sigmoid'" ] }, "execution_count": 7, @@ -840,14 +808,6 @@ "execution_count": 8, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -880,140 +840,140 @@ " 0\n", " 1\n", " 1\n", - " 2023-04-28 13:46:33\n", + " 2023-06-17 09:04:54\n", " 0.966667\n", " \n", " \n", " 1\n", " 2\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.933333\n", " \n", " \n", " 2\n", " 3\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.966667\n", " \n", " \n", " 3\n", " 4\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.893333\n", " \n", " \n", " 4\n", " 5\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.966667\n", " \n", " \n", " 5\n", " 6\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.933333\n", " \n", " \n", " 6\n", " 7\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.966667\n", " \n", " \n", " 7\n", " 8\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.893333\n", " \n", " \n", " 8\n", " 9\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.966667\n", " \n", " \n", " 9\n", " 10\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.933333\n", " \n", " \n", " 10\n", " 11\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.966667\n", " \n", " \n", " 11\n", " 12\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.893333\n", " \n", " \n", " 12\n", " 13\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.966667\n", " \n", " \n", " 13\n", " 14\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.933333\n", " \n", " \n", " 14\n", " 15\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.966667\n", " \n", " \n", " 15\n", " 16\n", " 4\n", - " 2023-04-28 13:46:42\n", + " 2023-06-17 09:05:12\n", " 0.893333\n", " \n", " \n", " 16\n", " 17\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:17\n", " 0.966667\n", " \n", " \n", " 17\n", " 18\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.933333\n", " \n", " \n", " 18\n", " 19\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.966667\n", " \n", " \n", " 19\n", " 20\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.893333\n", " \n", " \n", @@ -1021,27 +981,27 @@ "" ], "text/plain": [ - " ID experiment_id timestamp test_f1\n", - "0 1 1 2023-04-28 13:46:33 0.966667\n", - "1 2 1 2023-04-28 13:46:34 0.933333\n", - "2 3 1 2023-04-28 13:46:34 0.966667\n", - "3 4 1 2023-04-28 13:46:34 0.893333\n", - "4 5 2 2023-04-28 13:46:36 0.966667\n", - "5 6 2 2023-04-28 13:46:36 0.933333\n", - "6 7 2 2023-04-28 13:46:37 0.966667\n", - "7 8 2 2023-04-28 13:46:37 0.893333\n", - "8 9 3 2023-04-28 13:46:39 0.966667\n", - "9 10 3 2023-04-28 13:46:39 0.933333\n", - "10 11 3 2023-04-28 13:46:39 0.966667\n", - "11 12 3 2023-04-28 13:46:39 0.893333\n", - "12 13 4 2023-04-28 13:46:41 0.966667\n", - "13 14 4 2023-04-28 13:46:41 0.933333\n", - "14 15 4 2023-04-28 13:46:41 0.966667\n", - "15 16 4 2023-04-28 13:46:42 0.893333\n", - "16 17 5 2023-04-28 13:46:44 0.966667\n", - "17 18 5 2023-04-28 13:46:44 0.933333\n", - "18 19 5 2023-04-28 13:46:44 0.966667\n", - "19 20 5 2023-04-28 13:46:44 0.893333" + " ID experiment_id timestamp test_f1\n", + "0 1 1 2023-06-17 09:04:54 0.966667\n", + "1 2 1 2023-06-17 09:04:54 0.933333\n", + "2 3 1 2023-06-17 09:04:54 0.966667\n", + "3 4 1 2023-06-17 09:04:54 0.893333\n", + "4 5 2 2023-06-17 09:05:00 0.966667\n", + "5 6 2 
2023-06-17 09:05:00 0.933333\n", + "6 7 2 2023-06-17 09:05:00 0.966667\n", + "7 8 2 2023-06-17 09:05:00 0.893333\n", + "8 9 3 2023-06-17 09:05:06 0.966667\n", + "9 10 3 2023-06-17 09:05:06 0.933333\n", + "10 11 3 2023-06-17 09:05:06 0.966667\n", + "11 12 3 2023-06-17 09:05:06 0.893333\n", + "12 13 4 2023-06-17 09:05:12 0.966667\n", + "13 14 4 2023-06-17 09:05:12 0.933333\n", + "14 15 4 2023-06-17 09:05:12 0.966667\n", + "15 16 4 2023-06-17 09:05:12 0.893333\n", + "16 17 5 2023-06-17 09:05:17 0.966667\n", + "17 18 5 2023-06-17 09:05:18 0.933333\n", + "18 19 5 2023-06-17 09:05:18 0.966667\n", + "19 20 5 2023-06-17 09:05:18 0.893333" ] }, "execution_count": 8, @@ -1058,14 +1018,6 @@ "execution_count": 9, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/lukas/development/code_projects/py_experimenter/py_experimenter/database_connector.py:320: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.\n", - " df = pd.read_sql(query, connection)\n" - ] - }, { "data": { "text/html": [ @@ -1098,140 +1050,140 @@ " 0\n", " 1\n", " 1\n", - " 2023-04-28 13:46:33\n", + " 2023-06-17 09:04:54\n", " 0.966667\n", " \n", " \n", " 1\n", " 2\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.933333\n", " \n", " \n", " 2\n", " 3\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.966667\n", " \n", " \n", " 3\n", " 4\n", " 1\n", - " 2023-04-28 13:46:34\n", + " 2023-06-17 09:04:54\n", " 0.893333\n", " \n", " \n", " 4\n", " 5\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.966667\n", " \n", " \n", " 5\n", " 6\n", " 2\n", - " 2023-04-28 13:46:36\n", + " 2023-06-17 09:05:00\n", " 0.933333\n", " \n", " \n", " 6\n", " 7\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.966667\n", " \n", " \n", " 7\n", " 8\n", " 2\n", - " 2023-04-28 13:46:37\n", + " 2023-06-17 09:05:00\n", " 0.893333\n", " \n", " \n", " 8\n", " 9\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.966667\n", " \n", " \n", " 9\n", " 10\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.933333\n", " \n", " \n", " 10\n", " 11\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.966667\n", " \n", " \n", " 11\n", " 12\n", " 3\n", - " 2023-04-28 13:46:39\n", + " 2023-06-17 09:05:06\n", " 0.893333\n", " \n", " \n", " 12\n", " 13\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.966667\n", " \n", " \n", " 13\n", " 14\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.933333\n", " \n", " \n", " 14\n", " 15\n", " 4\n", - " 2023-04-28 13:46:41\n", + " 2023-06-17 09:05:12\n", " 0.966667\n", " \n", " \n", " 15\n", " 16\n", " 4\n", - " 2023-04-28 13:46:42\n", + " 2023-06-17 09:05:12\n", " 0.893333\n", " \n", " \n", " 16\n", " 17\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:17\n", " 0.966667\n", " \n", " \n", " 17\n", " 18\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.933333\n", " \n", " \n", " 18\n", " 19\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.966667\n", " \n", " \n", " 19\n", " 20\n", " 5\n", - " 2023-04-28 13:46:44\n", + " 2023-06-17 09:05:18\n", " 0.893333\n", " \n", " \n", @@ -1239,27 +1191,27 @@ "" ], "text/plain": [ - " ID experiment_id timestamp test_accuracy\n", - "0 1 1 2023-04-28 13:46:33 0.966667\n", - 
"1 2 1 2023-04-28 13:46:34 0.933333\n", - "2 3 1 2023-04-28 13:46:34 0.966667\n", - "3 4 1 2023-04-28 13:46:34 0.893333\n", - "4 5 2 2023-04-28 13:46:36 0.966667\n", - "5 6 2 2023-04-28 13:46:36 0.933333\n", - "6 7 2 2023-04-28 13:46:37 0.966667\n", - "7 8 2 2023-04-28 13:46:37 0.893333\n", - "8 9 3 2023-04-28 13:46:39 0.966667\n", - "9 10 3 2023-04-28 13:46:39 0.933333\n", - "10 11 3 2023-04-28 13:46:39 0.966667\n", - "11 12 3 2023-04-28 13:46:39 0.893333\n", - "12 13 4 2023-04-28 13:46:41 0.966667\n", - "13 14 4 2023-04-28 13:46:41 0.933333\n", - "14 15 4 2023-04-28 13:46:41 0.966667\n", - "15 16 4 2023-04-28 13:46:42 0.893333\n", - "16 17 5 2023-04-28 13:46:44 0.966667\n", - "17 18 5 2023-04-28 13:46:44 0.933333\n", - "18 19 5 2023-04-28 13:46:44 0.966667\n", - "19 20 5 2023-04-28 13:46:44 0.893333" + " ID experiment_id timestamp test_accuracy\n", + "0 1 1 2023-06-17 09:04:54 0.966667\n", + "1 2 1 2023-06-17 09:04:54 0.933333\n", + "2 3 1 2023-06-17 09:04:54 0.966667\n", + "3 4 1 2023-06-17 09:04:54 0.893333\n", + "4 5 2 2023-06-17 09:05:00 0.966667\n", + "5 6 2 2023-06-17 09:05:00 0.933333\n", + "6 7 2 2023-06-17 09:05:00 0.966667\n", + "7 8 2 2023-06-17 09:05:00 0.893333\n", + "8 9 3 2023-06-17 09:05:06 0.966667\n", + "9 10 3 2023-06-17 09:05:06 0.933333\n", + "10 11 3 2023-06-17 09:05:06 0.966667\n", + "11 12 3 2023-06-17 09:05:06 0.893333\n", + "12 13 4 2023-06-17 09:05:12 0.966667\n", + "13 14 4 2023-06-17 09:05:12 0.933333\n", + "14 15 4 2023-06-17 09:05:12 0.966667\n", + "15 16 4 2023-06-17 09:05:12 0.893333\n", + "16 17 5 2023-06-17 09:05:17 0.966667\n", + "17 18 5 2023-06-17 09:05:18 0.933333\n", + "18 19 5 2023-06-17 09:05:18 0.966667\n", + "19 20 5 2023-06-17 09:05:18 0.893333" ] }, "execution_count": 9, @@ -1270,6 +1222,15 @@ "source": [ "experimenter.get_logtable('test_accuracy')" ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CodeCarbon\n", + "Note that `CodeCarbon` is activated by default, collecting information about the carbon emissions of each experiment. Have a look at our [general usage example](https://tornede.github.io/py_experimenter/examples/example_general_usage.html) and the according [documentation of CodeCarbon fields](https://tornede.github.io/py_experimenter/usage.html#codecarbon-fields) for more information." + ] } ], "metadata": { diff --git a/docs/source/index.rst b/docs/source/index.rst index 91eee82a..37a327b2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -8,7 +8,7 @@ Welcome to PyExperimenter's documentation! ========================================== ``PyExperimenter`` is a tool to facilitate the setup, documentation, execution, and subsequent evaluation of results from an empirical study of algorithms and in particular is designed to reduce the involved manual effort significantly. -It is intended to be used by researchers in the field of artificial intelligence, but is not limited to those. +It is intended to be used by researchers in the field of artificial intelligence, but is not limited to those. Addtionally, we support tracking carbon emissions using :ref:`CodeCarbon `. The empirical analysis of algorithms is often accompanied by the execution of algorithms for different inputs and variants of the algorithms (specified via parameters) and the measurement of non-functional properties. Since the individual evaluations are usually independent, the evaluation can be performed in a distributed manner on an HPC system. 
diff --git a/docs/source/usage/execution.rst index f9a38240..e5186650 100644 --- a/docs/source/usage/execution.rst +++ b/docs/source/usage/execution.rst @@ -16,6 +16,7 @@ The actual execution of ``PyExperimenter`` only needs a few lines of code. Pleas The above code will execute all experiments defined in the :ref:`experiment configuration file `. If you want to do something different, e.g. :ref:`fill the database table with specific rows `, or :ref:`reset experiments `, check out the following sections. +.. _execution_creating_pyexperimenter: ------------------------- Creating a PyExperimenter ------------------------- @@ -33,6 +34,7 @@ Additionally, further information can be given to ``PyExperimenter``: - ``database_credential_file_path``: The path of the :ref:`database credential file `. Default: ``config/database_credentials.cfg`` - ``database_name``: The name of the database to manage the experiments. If given, it will overwrite the database name given in the `experiment_configuration_file_path`. - ``table_name``: The name of the database table to manage the experiments. If given, it will overwrite the table name given in the `experiment_configuration_file_path`. +- ``use_codecarbon``: Specifies whether :ref:`CodeCarbon ` will be used to track experiment emissions. Default: ``True``. - ``name``: The name of the experimenter, which will be added to the database table of each executed experiment. If using the PyExperimenter on an HPC system, this can be used for the job ID, so that the according log file can easily be found. Default: ``PyExperimenter``. @@ -135,3 +137,16 @@ The current content of the database table can be obtained as a ``pandas.DataFram result_table = experimenter.get_table() result_table = result_table.groupby(['dataset']).mean()[['seed']] print(result_table.to_latex(columns=['seed'], index_names=['dataset'])) + + +.. _execution_codecarbon: + +---------- +CodeCarbon +---------- + +Tracking information about the carbon footprint of experiments is supported via :ref:`CodeCarbon `. Tracking is enabled by default, as described in :ref:`how to create a PyExperimenter `. If tracking is enabled, the corresponding information can be found in the database table ``_codecarbon``, which can easily be accessed with the following call: + +.. code-block:: + + experimenter.get_codecarbon_table() diff --git a/docs/source/usage/experiment_configuration_file.rst index d90265de..ce31ccfb 100644 --- a/docs/source/usage/experiment_configuration_file.rst +++ b/docs/source/usage/experiment_configuration_file.rst @@ -30,6 +30,14 @@ The experiment configuration file is primarily used to define the database backe [CUSTOM] path = sample_data + + [codecarbon] + measure_power_secs = 15 + tracking_mode = process + log_level = error + save_to_file = True + output_dir = output/CodeCarbon + offline_mode = False -------------------- Database Information -------------------- @@ -138,3 +146,26 @@ Optionally, custom fields can be defined under the ``CUSTOM`` section, which wil [CUSTOM] path = sample_data + + +.. _experiment_configuration_file_codecarbon: + +---------- +CodeCarbon +---------- + +Tracking information about the carbon footprint of experiments is supported via `CodeCarbon `_. It is enabled by default; if you want to deactivate it completely, please check the :ref:`documentation on how to execute PyExperimenter `.
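+The deactivation itself happens when constructing the ``PyExperimenter`` via its ``use_codecarbon`` flag. A minimal sketch, assuming an experiment configuration file at an illustrative path:
+
+.. code-block:: python
+
+    from py_experimenter.experimenter import PyExperimenter
+
+    # With use_codecarbon=False, no emissions are tracked and no
+    # <table_name>_codecarbon table is created in the database.
+    experimenter = PyExperimenter(
+        experiment_configuration_file_path='config/example.cfg',
+        use_codecarbon=False,
+    )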
+ +By default, ``CodeCarbon`` will track the carbon footprint of the whole machine, including the execution of the experiment function. It measures the power consumption every 15 seconds and estimates the carbon emissions based on the region of the device. The resulting information is saved to a file in the ``output/CodeCarbon`` directory as well as written into its own table in the database, called ``_codecarbon``. A description of how to access the data can be found in the :ref:`CodeCarbon explanation of the execution of PyExperimenter `. + +``CodeCarbon`` can be configured via its own section in the experiment configuration file. The default configuration is shown below, but can be extended by any of the parameters listed in the `CodeCarbon documentation `_. During the execution, the section will be automatically copied into a ``.codecarbon.config`` file in your working directory, as this is required by ``CodeCarbon``. + +.. code-block:: + + [codecarbon] + measure_power_secs = 15 + tracking_mode = machine + log_level = error + save_to_file = True + output_dir = output/CodeCarbon + offline_mode = False diff --git a/docs/source/usage/index.rst b/docs/source/usage/index.rst index 29d63a95..c5f2c0f5 100644 --- a/docs/source/usage/index.rst +++ b/docs/source/usage/index.rst @@ -12,7 +12,8 @@ Once this table has been created, a ``PyExperimenter`` instance can be run on an Each instance automatically pulls open experiments from the database, executes the function provided by the user with the corresponding parameters defining the experiment and writes back the results computed by the function. Errors arising during the execution are logged in the database. In case of failed experiments or if desired otherwise, a subset of the experiments can be reset and restarted easily. -After all experiments are done, results can be jointly exported as a ``Pandas DataFrame`` for further processing, such as generating a LaTeX table averaging results of randomized computations over different seeds. +Additionally, :ref:`CodeCarbon ` is used to track the carbon emissions of each experiment in a separate table. +After finishing all experiments, results can be jointly exported as a ``Pandas DataFrame`` for further processing, such as generating a LaTeX table averaging results of randomized computations over different seeds. .. figure:: ../_static/workflow.png :width: 600px diff --git a/poetry.lock b/poetry.lock index 82cf3c7a..40ea44fc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.1 and should not be changed by hand.
[[package]] name = "aiofiles" @@ -435,6 +435,47 @@ files = [ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "codecarbon" +version = "2.2.1" +description = "" +category = "main" +optional = false +python-versions = ">3.6" +files = [ + {file = "codecarbon-2.2.1-py3-none-any.whl", hash = "sha256:e2f75a695d0c752d9019c0e3473bedbdeec58e66b99a2773b39edb58e0a9406d"}, + {file = "codecarbon-2.2.1.tar.gz", hash = "sha256:44f3257a1814377967a8e9dcdf177583a7a2a1624716d38818410af871d89afd"}, +] + +[package.dependencies] +arrow = "*" +click = "*" +fuzzywuzzy = "*" +pandas = "*" +psutil = "*" +py-cpuinfo = "*" +pynvml = "*" +requests = "*" + +[package.extras] +dashboard = ["dash (>=2.2.0)", "dash-bootstrap-components", "plotly (>=5.6.0)"] +viz = ["dash", "dash-bootstrap-components (<1.0.0)", "fire"] + [[package]] name = "colorama" version = "0.4.6" @@ -606,7 +647,7 @@ files = [ name = "freezegun" version = "1.2.2" description = "Let your Python tests travel through time" -category = "main" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -617,6 +658,21 @@ files = [ [package.dependencies] python-dateutil = ">=2.7" +[[package]] +name = "fuzzywuzzy" +version = "0.18.0" +description = "Fuzzy string matching in python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "fuzzywuzzy-0.18.0-py2.py3-none-any.whl", hash = "sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993"}, + {file = "fuzzywuzzy-0.18.0.tar.gz", hash = "sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8"}, +] + +[package.extras] +speedup = ["python-levenshtein (>=0.12)"] + [[package]] name = "idna" version = "3.4" @@ -1833,6 +1889,18 @@ files = [ [package.extras] tests = ["pytest"] +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +description = "Get CPU info with pure Python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + [[package]] name = "pycparser" version = "2.21" @@ -1860,6 +1928,18 @@ files = [ [package.extras] plugins = ["importlib-metadata"] +[[package]] +name = "pynvml" +version = "11.5.0" +description = "Python Bindings for the NVIDIA Management Library" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pynvml-11.5.0-py3-none-any.whl", hash = "sha256:5cce014ac01b098d08f06178f86c37be409b80b2e903a5a03ce15eed60f55e25"}, + {file = "pynvml-11.5.0.tar.gz", hash = "sha256:d027b21b95b1088b9fc278117f9f61b7c67f8e33a787e9f83f735f0f71ac32d0"}, +] + [[package]] name = "pyrsistent" version = "0.19.3" @@ -2898,4 +2978,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] 
lock-version = "2.0" python-versions = "^3.9" -content-hash = "ab9c75c7818c382e78b02cd9b0177d863c5bb4222a937dc0ea15478fe37409f6" +content-hash = "16c92f7978756a8f9d58a649c3730656aaae25e63853eb14202d570dc1b58f47" diff --git a/py_experimenter/database_connector.py b/py_experimenter/database_connector.py index ae3f8451..ed3d9970 100644 --- a/py_experimenter/database_connector.py +++ b/py_experimenter/database_connector.py @@ -15,14 +15,16 @@ class DatabaseConnector(abc.ABC): - def __init__(self, config: ConfigParser): + def __init__(self, config: ConfigParser, use_codecarbon: bool, codecarbon_config: ConfigParser): self.config = config + self.codecarbon_config = codecarbon_config self.table_name = self.config.get('PY_EXPERIMENTER', 'table') self.database_name = self.config.get('PY_EXPERIMENTER', 'database') self.database_credentials = self._extract_credentials() self.timestamp_on_result_fields = utils.timestamps_for_result_fields(self.config) + self.use_codecarbon = use_codecarbon self._test_connection() @abc.abstractmethod @@ -89,12 +91,16 @@ def create_table_if_not_existing(self) -> None: self._create_table(cursor, columns, self.table_name) for logtable_name, logtable_columns in utils.extract_logtables(self.config, self.table_name).items(): - self._create_table(cursor, logtable_columns, logtable_name, logtable=True) + self._create_table(cursor, logtable_columns, logtable_name, table_type='logtable') + + if self.use_codecarbon: + codecarbon_columns = utils.extract_codecarbon_columns() + self._create_table(cursor, codecarbon_columns, f"{self.table_name}_codecarbon", table_type='codecarbon') self.close_connection(connection) @abc.abstractmethod - def _table_exists(self, cursor): + def _table_exists(self, cursor, table_name: str): pass @staticmethod @@ -119,21 +125,25 @@ def _exclude_fixed_columns(self, columns: List[str]) -> List[str]: return columns[1:amount_of_keyfields + 1] + columns[-amount_of_result_fields - 2:-2] - def _create_table(self, cursor, columns: List[Tuple['str']], table_name: str, logtable: bool = False): - query = self._get_create_table_query(columns, table_name, logtable) + def _create_table(self, cursor, columns: List[Tuple['str']], table_name: str, table_type: str = 'standard'): + query = self._get_create_table_query(columns, table_name, table_type) try: self.execute(cursor, query) except Exception as err: raise CreatingTableError(f'Error when creating table: {err}') - def _get_create_table_query(self, columns: List[Tuple['str']], table_name: str, logtable: bool): + def _get_create_table_query(self, columns: List[Tuple['str']], table_name: str, table_type: str = 'standard'): columns = ['%s %s DEFAULT NULL' % (field, datatype) for field, datatype in columns] columns = ','.join(columns) query = f"CREATE TABLE {table_name} (ID INTEGER PRIMARY KEY {self.get_autoincrement()}" - if logtable: + if table_type == 'standard': + query += f", {columns}" + elif table_type == 'logtable': query += f", experiment_id INTEGER, timestamp DATETIME, {columns}, FOREIGN KEY (experiment_id) REFERENCES {self.table_name}(ID) ON DELETE CASCADE" + elif table_type == 'codecarbon': + query += f", experiment_id INTEGER, {columns}, FOREIGN KEY (experiment_id) REFERENCES {self.table_name}(ID) ON DELETE CASCADE" else: - query += f", {columns}" + raise ValueError(f"Unknown table type: {table_type}") return query + ');' @abc.abstractstaticmethod @@ -308,12 +318,18 @@ def delete_table(self) -> None: cursor = self.cursor(connection) for logtable_name in utils.extract_logtables(self.config, 
self.table_name).keys(): self.execute(cursor, f'DROP TABLE IF EXISTS {logtable_name}') + if self.use_codecarbon: + self.execute(cursor, f'DROP TABLE IF EXISTS {self.table_name}_codecarbon') + self.execute(cursor, f'DROP TABLE IF EXISTS {self.table_name}') self.commit(connection) def get_logtable(self, logtable_name: str) -> pd.DataFrame: return self.get_table(f'{self.table_name}__{logtable_name}') + def get_codecarbon_table(self) -> pd.DataFrame: + return self.get_table(f'{self.table_name}_codecarbon') + def get_table(self, table_name: Optional[str] = None) -> pd.DataFrame: connection = self.connect() query = f"SELECT * FROM {self.table_name}" if table_name is None else f"SELECT * FROM {table_name}" diff --git a/py_experimenter/database_connector_mysql.py b/py_experimenter/database_connector_mysql.py index 039fb6cf..ec032c26 100644 --- a/py_experimenter/database_connector_mysql.py +++ b/py_experimenter/database_connector_mysql.py @@ -6,20 +6,20 @@ from mysql.connector import Error, connect from py_experimenter.database_connector import DatabaseConnector -from py_experimenter.exceptions import CreatingTableError, DatabaseConnectionError, DatabaseCreationError +from py_experimenter.exceptions import DatabaseConnectionError, DatabaseCreationError from py_experimenter.utils import load_config class DatabaseConnectorMYSQL(DatabaseConnector): _prepared_statement_placeholder = '%s' - def __init__(self, experiment_configuration_file_path: ConfigParser, database_credential_file_path): + def __init__(self, experiment_configuration: ConfigParser, use_codecarbon: bool, codecarbon_config: ConfigParser, database_credential_file_path: str): database_credentials = load_config(database_credential_file_path) self.host = database_credentials.get('CREDENTIALS', 'host') self.user = database_credentials.get('CREDENTIALS', 'user') self.password = database_credentials.get('CREDENTIALS', 'password') - super().__init__(experiment_configuration_file_path) + super().__init__(experiment_configuration, use_codecarbon, codecarbon_config) self._create_database_if_not_existing() @@ -64,8 +64,9 @@ def connect(self, credentials=None): def _start_transaction(self, connection, readonly=False): connection.start_transaction(readonly=readonly) - def _table_exists(self, cursor): - self.execute(cursor, f"SHOW TABLES LIKE '{self.table_name}'") + def _table_exists(self, cursor, table_name: str = None) -> bool: + table_name = table_name if table_name is not None else self.table_name + self.execute(cursor, f"SHOW TABLES LIKE '{table_name}'") return self.fetchall(cursor) @staticmethod @@ -79,7 +80,7 @@ def _table_has_correct_structure(self, cursor, typed_fields): columns = self._exclude_fixed_columns([k[0] for k in self.fetchall(cursor)]) config_columns = [k[0] for k in typed_fields] - return set(columns) == set(config_columns) + return set(columns) == set(config_columns) def _pull_open_experiment(self) -> Tuple[int, List, List]: try: diff --git a/py_experimenter/experimenter.py b/py_experimenter/experimenter.py index d0cbe537..620a1a3e 100644 --- a/py_experimenter/experimenter.py +++ b/py_experimenter/experimenter.py @@ -1,11 +1,12 @@ -import configparser import logging import os import socket import traceback +from configparser import ConfigParser from typing import Callable, Dict, List, Tuple import pandas as pd +from codecarbon import EmissionsTracker, OfflineEmissionsTracker from joblib import Parallel, delayed from py_experimenter import utils @@ -26,6 +27,7 @@ def __init__(self, database_credential_file_path: str =
os.path.join('config', 'database_credentials.cfg'), table_name: str = None, database_name: str = None, + use_codecarbon: bool = True, name='PyExperimenter'): """ Initializes the PyExperimenter with the given information. @@ -44,6 +46,8 @@ def __init__(self, `experiment_configuration_file_path`. If None, the database name is taken from the experiment configuration file. Defaults to None. :type database_name: str, optional + :param use_codecarbon: If True, the carbon emissions are tracked and stored in the database. Defaults to True. + :type use_codecarbon: bool, optional :param name: The name of the PyExperimenter, which will be logged in the according column in the database table. Defaults to 'PyExperimenter'. :type name: str, optional @@ -51,6 +55,16 @@ def __init__(self, :raises ValueError: If an unsupported or unknown database connection provider is given. """ self.config = utils.load_config(experiment_configuration_file_path) + + self.use_codecarbon = use_codecarbon + self.config, self.codecarbon_config = utils.extract_codecarbon_config(self.config) + if self.codecarbon_config.has_option('codecarbon', 'offline_mode'): + self.codecarbon_offline_mode = self.codecarbon_config['codecarbon']['offline_mode'] == 'True' + else: + self.codecarbon_offline_mode = False + self.codecarbon_config.set('codecarbon', 'offline_mode', 'False') + utils.write_codecarbon_config(self.codecarbon_config) + self.database_credential_file_path = database_credential_file_path if not PyExperimenter._is_valid_configuration(self.config, database_credential_file_path): raise InvalidConfigError('Invalid configuration') @@ -65,9 +79,9 @@ def __init__(self, self.timestamp_on_result_fields = utils.timestamps_for_result_fields(self.config) if self.config['PY_EXPERIMENTER']['provider'] == 'sqlite': - self.dbconnector = DatabaseConnectorLITE(self.config) + self.dbconnector = DatabaseConnectorLITE(self.config, self.use_codecarbon, self.codecarbon_config) elif self.config['PY_EXPERIMENTER']['provider'] == 'mysql': - self.dbconnector = DatabaseConnectorMYSQL(self.config, database_credential_file_path) + self.dbconnector = DatabaseConnectorMYSQL(self.config, self.use_codecarbon, self.codecarbon_config, database_credential_file_path) else: raise ValueError('The provider indicated in the config file is not supported') @@ -138,41 +152,41 @@ def has_option(self, section_name: str, key: str) -> bool: return self.config.has_option(section_name, key) @ staticmethod - def _is_valid_configuration(_config: configparser, database_credential_file_path: str = None) -> bool: + def _is_valid_configuration(config: ConfigParser, database_credential_file_path: str = None) -> bool: """ Checks whether the given experiment configuration is valid, i.e., it contains all necessary fields, the database provider is either mysql or sqlite, and in case of a mysql database provider, that the database credentials are available. - :param _config: The experiment configuration. - :type _config: configparser + :param config: The experiment configuration. + :type config: ConfigParser :param database_credential_file_path: The path to the database configuration file, i.e., the file defining the host, user and password. Defaults to None. :type database_credential_file_path: str, optional :return: True if the experiment configuration contains all necessary fields. 
:rtype: bool """ - if not _config.has_section('PY_EXPERIMENTER'): + if not config.has_section('PY_EXPERIMENTER'): return False - if set(_config.keys()) > {'PY_EXPERIMENTER', 'CUSTOM', 'DEFAULT'}: + if set(config.keys()) > {'PY_EXPERIMENTER', 'CUSTOM', 'DEFAULT'}: return False - if not {'provider', 'database', 'table'}.issubset(set(_config.options('PY_EXPERIMENTER'))): + if not {'provider', 'database', 'table'}.issubset(set(config.options('PY_EXPERIMENTER'))): logging.error('Error in config file: DATABASE section must contain provider, database, and table') return False - if _config['PY_EXPERIMENTER']['provider'] not in ['sqlite', 'mysql']: + if config['PY_EXPERIMENTER']['provider'] not in ['sqlite', 'mysql']: logging.error('Error in config file: DATABASE provider must be either sqlite or mysql') return False - if _config['PY_EXPERIMENTER']['provider'] == 'mysql': + if config['PY_EXPERIMENTER']['provider'] == 'mysql': credentials = utils.load_config(database_credential_file_path) if not {'host', 'user', 'password'}.issubset(set(credentials.options('CREDENTIALS'))): logging.error( - f'Error in config file: DATABASE section must contain host, user, and password since provider is {_config["DATABASE"]["provider"]}') + f'Error in config file: DATABASE section must contain host, user, and password since provider is {config["DATABASE"]["provider"]}') return False - if not {'keyfields', 'resultfields'}.issubset(set(_config.options('PY_EXPERIMENTER'))): + if not {'keyfields', 'resultfields'}.issubset(set(config.options('PY_EXPERIMENTER'))): return False return True @@ -302,7 +316,8 @@ def execute(self, experiment_function: Callable[[Dict, Dict, ResultProcessor], N if max_experiments == -1: parallel(delayed(self._worker)(experiment_function) for _ in range(n_jobs)) else: - parallel(delayed(self._execution_wrapper)(experiment_function) for _ in range(max_experiments)) + parallel(delayed(self._execution_wrapper)(experiment_function) + for _ in range(max_experiments)) logging.info("All configured executions finished.") def _worker(self, experiment_function: Callable[[Dict, Dict, ResultProcessor], None]) -> None: @@ -318,19 +333,6 @@ def _worker(self, experiment_function: Callable[[Dict, Dict, ResultProcessor], N except NoExperimentsLeftException: break - def _execution_worker(self, experiment_function: Callable[[Dict, Dict, ResultProcessor], None]) -> None: - """ - Worker that repeatedly pulls open experiments from the database table and executes them. - - :param experiment_function: The function that should be executed with the different parametrizations. - :type experiment_function: Callable[[Dict, Dict, ResultProcessor], None] - """ - while True: - try: - self._execution_wrapper(experiment_function) - except NoExperimentsLeftException: - break - def _execution_wrapper(self, experiment_function: Callable[[dict, dict, ResultProcessor], None]) -> None: """ @@ -343,9 +345,9 @@ def _execution_wrapper(self, * `error` if an exception was raised during the execution of the experiment. * `done` if the execution of the experiment has finished successfully. - Errors raised during the execution of `experiment_function` are logged to the `error` column in the database table. - Note that only errors raised within `experiment_function` are logged in to the database table. Therefore all errors - raised before or after the execution of `experiment_function` are logged according to the local logging configuration + Errors raised during the execution of `experiment_function` are logged to the `error` column in the database table. + Note that only errors raised within `experiment_function` are logged into the database table.
Therefore, all errors + raised before or after the execution of `experiment_function` are logged according to the local logging configuration and do not appear in the table. :param experiment_function: The function that should be executed with the different parametrizations. @@ -359,13 +361,24 @@ def _execution_wrapper(self, custom_fields = dict(self.config.items('CUSTOM')) if self.has_section('CUSTOM') else None table_name = self.get_config_value('PY_EXPERIMENTER', 'table') - result_processor = ResultProcessor(self.config, self.database_credential_file_path, table_name=table_name, + result_processor = ResultProcessor(self.config, self.use_codecarbon, self.codecarbon_config, self.database_credential_file_path, table_name=table_name, result_fields=result_field_names, experiment_id=experiment_id) result_processor._set_name(self.name) result_processor._set_machine(socket.gethostname()) + if self.use_codecarbon: + if self.codecarbon_offline_mode: + if not self.codecarbon_config.has_option('codecarbon', 'country_iso_code'): + raise InvalidConfigError(('CodeCarbon offline mode requires a `country_iso_code` in the config file. ' + 'For more information see `https://mlco2.github.io/codecarbon/index.html`.')) + tracker = OfflineEmissionsTracker() + else: + tracker = EmissionsTracker() + try: logging.debug(f"Start of experiment_function on process {socket.gethostname()}") + if self.use_codecarbon: + tracker.start() experiment_function(keyfield_values, result_processor, custom_fields) except Exception: error_msg = traceback.format_exc() @@ -374,6 +387,11 @@ result_processor._change_status(ExperimentStatus.ERROR.value) else: result_processor._change_status(ExperimentStatus.DONE.value) + finally: + if self.use_codecarbon: + tracker.stop() + emission_data = tracker._prepare_emissions_data().values + result_processor._write_emissions(emission_data, self.codecarbon_offline_mode) def reset_experiments(self, *states: Tuple['str']) -> None: """ @@ -404,7 +422,7 @@ def get_table(self) -> pd.DataFrame: """ return self.dbconnector.get_table() - def get_logtable(self, table_name: str) -> pd.DataFrame: + def get_logtable(self, logtable_name: str) -> pd.DataFrame: """ Returns the log table as `Pandas.DataFrame`. @@ -413,4 +431,17 @@ def get_logtable(self, table_name: str) -> pd.DataFrame: :return: The log table as `Pandas.DataFrame`. :rtype: pd.DataFrame """ - return self.dbconnector.get_logtable(table_name) + return self.dbconnector.get_logtable(logtable_name) + + def get_codecarbon_table(self) -> pd.DataFrame: + """ + Returns the CodeCarbon table as `pandas.DataFrame`. If CodeCarbon is not used in this experiment, an error is raised. + + :raises ValueError: If CodeCarbon is not used in this experiment. + :return: Returns the CodeCarbon table as `pandas.DataFrame`.
+        :rtype: pd.DataFrame
+        """
+        if self.use_codecarbon:
+            return self.dbconnector.get_codecarbon_table()
+        else:
+            raise ValueError('CodeCarbon is not used in this experiment.')
diff --git a/py_experimenter/result_processor.py b/py_experimenter/result_processor.py
index faaea22e..dc8a0b17 100644
--- a/py_experimenter/result_processor.py
+++ b/py_experimenter/result_processor.py
@@ -1,7 +1,10 @@
 import logging
+from configparser import ConfigParser
 from copy import deepcopy
 from typing import Dict, List, Tuple
 
+from codecarbon.output import EmissionsData
+
 import py_experimenter.utils as utils
 from py_experimenter.database_connector import DatabaseConnector
 from py_experimenter.database_connector_lite import DatabaseConnectorLITE
@@ -24,19 +27,22 @@ class ResultProcessor:
     database.
     """
 
-    def __init__(self, _config: dict, credential_path, table_name: str, result_fields: List[str], experiment_id: int):
+    def __init__(self, config: ConfigParser, use_codecarbon: bool, codecarbon_config: ConfigParser, credential_path, table_name: str, result_fields: List[str], experiment_id: int):
         self._table_name = table_name
         self._result_fields = result_fields
-        self._config = _config
+        self._config = config
         self._timestamp_on_result_fields = utils.timestamps_for_result_fields(self._config)
 
         self._experiment_id = experiment_id
         self._experiment_id_condition = f'ID = {self._experiment_id}'
 
-        if _config['PY_EXPERIMENTER']['provider'] == 'sqlite':
-            self._dbconnector: DatabaseConnector = DatabaseConnectorLITE(_config)
-        elif _config['PY_EXPERIMENTER']['provider'] == 'mysql':
-            self._dbconnector: DatabaseConnector = DatabaseConnectorMYSQL(_config, credential_path)
+        self.use_codecarbon = use_codecarbon
+        self._codecarbon_config = codecarbon_config
+
+        if config['PY_EXPERIMENTER']['provider'] == 'sqlite':
+            self._dbconnector: DatabaseConnector = DatabaseConnectorLITE(config, self.use_codecarbon, self._codecarbon_config)
+        elif config['PY_EXPERIMENTER']['provider'] == 'mysql':
+            self._dbconnector: DatabaseConnector = DatabaseConnectorMYSQL(config, self.use_codecarbon, self._codecarbon_config, credential_path)
         else:
             raise InvalidConfigError("Invalid database provider!")
@@ -55,6 +60,16 @@ def process_results(self, results: dict) -> None:
         self._dbconnector.update_database(self._table_name, values=results, condition=self._experiment_id_condition)
 
+    def _write_emissions(self, emission_data: EmissionsData, offline_mode: bool) -> None:
+        emission_data['offline_mode'] = offline_mode
+        emission_data['experiment_id'] = self._experiment_id
+
+        keys = utils.extract_codecarbon_columns(with_type=False)
+        values = emission_data.values()
+        values = [value if value != '' else None for value in values]
+        statement = self._dbconnector.prepare_write_query(f'{self._table_name}_codecarbon', keys)
+        self._dbconnector.execute_queries([(statement, values)])
+
     @staticmethod
     def _add_timestamps_to_results(results: dict) -> List[Tuple[str, object]]:
         time = utils.get_timestamp_representation()
diff --git a/py_experimenter/utils.py b/py_experimenter/utils.py
index 89a3a520..9a10a86d 100644
--- a/py_experimenter/utils.py
+++ b/py_experimenter/utils.py
@@ -1,13 +1,13 @@
 import logging
 from configparser import ConfigParser
 from datetime import datetime
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, Iterable, List, Optional, Tuple
 
 import numpy as np
 
 from py_experimenter.exceptions import ConfigError, NoConfigFileError, ParameterCombinationError
 
 
 def load_config(path):
     """
     Load and return configuration file.
@@ -24,6 +25,58 @@ def load_config(path):
     return config
 
 
+def extract_codecarbon_config(config: ConfigParser) -> Tuple[ConfigParser, ConfigParser]:
+    codecarbon_config = ConfigParser()
+    if config.has_section("codecarbon"):
+        codecarbon_config.read_dict({"codecarbon": dict(config["codecarbon"])})
+        config.remove_section("codecarbon")
+    else:
+        codecarbon_config.read_dict(
+            {
+                "codecarbon": {
+                    "measure_power_secs": "15",
+                    "tracking_mode": "machine",
+                    "log_level": "error",
+                    "save_to_file": "True",
+                    "output_dir": "output/CodeCarbon"
+                }
+            }
+        )
+
+    return config, codecarbon_config
+
+
+def write_codecarbon_config(codecarbon_config: ConfigParser):
+    with open('.codecarbon.config', 'w') as f:
+        codecarbon_config.write(f)
+
+
+def extract_codecarbon_columns(with_type: bool = True):
+    if with_type:
+        return [
+            ('codecarbon_timestamp', 'DATETIME'), ('project_name', 'VARCHAR(255)'), ('run_id', 'VARCHAR(255)'),
+            ('duration_seconds', 'DOUBLE'), ('emissions_kg', 'DOUBLE'), ('emissions_rate_kg_sec', 'DOUBLE'),
+            ('cpu_power_watt', 'DOUBLE'), ('gpu_power_watt', 'DOUBLE'), ('ram_power_watt', 'DOUBLE'),
+            ('cpu_energy_kw', 'DOUBLE'), ('gpu_energy_kw', 'DOUBLE'), ('ram_energy_kw', 'DOUBLE'),
+            ('energy_consumed_kw', 'DOUBLE'), ('country_name', 'VARCHAR(255)'), ('country_iso_code', 'VARCHAR(255)'),
+            ('region', 'VARCHAR(255)'), ('cloud_provider', 'VARCHAR(255)'), ('cloud_region', 'VARCHAR(255)'),
+            ('os', 'VARCHAR(255)'), ('python_version', 'VARCHAR(255)'), ('codecarbon_version', 'VARCHAR(255)'),
+            ('cpu_count', 'DOUBLE'), ('cpu_model', 'VARCHAR(255)'), ('gpu_count', 'DOUBLE'),
+            ('gpu_model', 'VARCHAR(255)'), ('longitude', 'VARCHAR(255)'), ('latitude', 'VARCHAR(255)'),
+            ('ram_total_size', 'DOUBLE'), ('tracking_mode', 'VARCHAR(255)'), ('on_cloud', 'VARCHAR(255)'),
+            ('offline_mode', 'BOOL')
+        ]
+    else:
+        return [
+            'codecarbon_timestamp', 'project_name', 'run_id', 'duration_seconds', 'emissions_kg',
+            'emissions_rate_kg_sec', 'cpu_power_watt', 'gpu_power_watt', 'ram_power_watt', 'cpu_energy_kw',
+            'gpu_energy_kw', 'ram_energy_kw', 'energy_consumed_kw', 'country_name', 'country_iso_code', 'region',
+            'cloud_provider', 'cloud_region', 'os', 'python_version', 'codecarbon_version', 'cpu_count', 'cpu_model',
+            'gpu_count', 'gpu_model', 'longitude', 'latitude', 'ram_total_size', 'tracking_mode', 'on_cloud',
+            'offline_mode', 'experiment_id'
+        ]
+
+
 def get_keyfield_data(config):
     keyfields = get_keyfields(config)
diff --git a/pyproject.toml b/pyproject.toml
index f579280e..81a0216f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ pandas = ">=1.0"
 mysql-connector-python = ">=8.0"
 jupyterlab = "^3.5.0"
 joblib = "^1.2.0"
+codecarbon = "^2.2.1"
 
 [tool.poetry.group.dev.dependencies]
 pytest = ">=7.0"
diff --git a/test/test_codecarbon/test_codecarbon_core_functions_mysql.py b/test/test_codecarbon/test_codecarbon_core_functions_mysql.py
new file mode 100644
index 00000000..895ff739
--- /dev/null
+++ b/test/test_codecarbon/test_codecarbon_core_functions_mysql.py
@@ -0,0 +1,71 @@
+import os
+from unittest.mock import patch
+
+import pandas
+import pytest
+
+from py_experimenter.database_connector import DatabaseConnector
+from py_experimenter.experimenter import PyExperimenter
+
+
+@pytest.fixture(scope='module')
+def experimenter_mysql():
+    # Create config directory if it does not exist
+    if not os.path.exists('config'):
+        os.mkdir('config')
+
+    # Create config file
+    content = """
+    [PY_EXPERIMENTER]
+    provider = mysql
+    database = py_experimenter
+    table = example_logtables
+
+    keyfields = dataset, cross_validation_splits:int, seed:int
+    dataset = iris
+    cross_validation_splits = 5
+    seed = 1,2,3,4,5
+
+    resultfields = best_kernel_f1:VARCHAR(50), best_kernel_accuracy:VARCHAR(50)
+    resultfields.timestamps = false
+
+    logtables = train_scores:log_train_scores, test_f1:DOUBLE, test_accuracy:DOUBLE
+    log_train_scores = f1:DOUBLE, accuracy:DOUBLE, kernel:VARCHAR(50)
+
+    [CUSTOM]
+    path = sample_data
+    """
+    experiment_configuration = os.path.join('config', 'example_logtables.cfg')
+    with open(experiment_configuration, "w") as f:
+        f.write(content)
+
+    experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration, name='example_notebook')
+    yield experimenter
+
+    experimenter.delete_table()
+
+
+def test_delete_table_mysql(experimenter_mysql):
+    with patch.object(DatabaseConnector, 'connect', return_value=None), \
+            patch.object(DatabaseConnector, 'cursor', return_value=None), \
+            patch.object(DatabaseConnector, 'commit', return_value=None):
+
+        with patch.object(DatabaseConnector, 'execute', return_value=None) as mock_execute:
+            experimenter_mysql.delete_table()
+
+            assert mock_execute.call_count == 5
+            assert mock_execute.call_args_list[0][0][1] == 'DROP TABLE IF EXISTS example_logtables__train_scores'
+            assert mock_execute.call_args_list[1][0][1] == 'DROP TABLE IF EXISTS example_logtables__test_f1'
+            assert mock_execute.call_args_list[2][0][1] == 'DROP TABLE IF EXISTS example_logtables__test_accuracy'
+            assert mock_execute.call_args_list[3][0][1] == 'DROP TABLE IF EXISTS example_logtables_codecarbon'
+            assert mock_execute.call_args_list[4][0][1] == 'DROP TABLE IF EXISTS example_logtables'
+
+
+def test_get_codecarbon_table_mysql(experimenter_mysql):
+    with patch.object(DatabaseConnector, 'connect', return_value=None), \
+            patch.object(pandas, 'read_sql', return_value=pandas.DataFrame()), \
+            patch.object(DatabaseConnector, 'close_connection', return_value=None):
+
+        df = experimenter_mysql.get_codecarbon_table()
+
+        assert df.empty
diff --git a/test/test_codecarbon/test_codecarbon_core_functions_sqlite.py b/test/test_codecarbon/test_codecarbon_core_functions_sqlite.py
new file mode 100644
index 00000000..3da5af55
--- /dev/null
+++ b/test/test_codecarbon/test_codecarbon_core_functions_sqlite.py
@@ -0,0 +1,71 @@
+import os
+from unittest.mock import patch
+
+import pandas
+import pytest
+
+from py_experimenter.database_connector import DatabaseConnector
+from py_experimenter.experimenter import PyExperimenter
+
+
+@pytest.fixture(scope='module')
+def experimenter_sqlite():
+    # Create config directory if it does not exist
+    if not os.path.exists('config'):
+        os.mkdir('config')
+
+    # Create config file
+    content = """
+    [PY_EXPERIMENTER]
+    provider = sqlite
+    database = py_experimenter
+    table = example_logtables
+
+    keyfields = dataset, cross_validation_splits:int, seed:int
+    dataset = iris
+    cross_validation_splits = 5
+    seed = 1,2,3,4,5
+
+    resultfields = best_kernel_f1:VARCHAR(50), best_kernel_accuracy:VARCHAR(50)
+    resultfields.timestamps = false
+
+    logtables = train_scores:log_train_scores, test_f1:DOUBLE, test_accuracy:DOUBLE
+    log_train_scores = f1:DOUBLE, accuracy:DOUBLE, kernel:VARCHAR(50)
+
+    [CUSTOM]
+    path = sample_data
+    """
+    experiment_configuration = os.path.join('config', 'example_logtables.cfg')
+    with open(experiment_configuration, "w") as f:
+        f.write(content)
+
+    experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration, name='example_notebook')
+    yield experimenter
+
+    experimenter.delete_table()
+
+
+def test_delete_table_sqlite(experimenter_sqlite):
+    with patch.object(DatabaseConnector, 'connect', return_value=None), \
+            patch.object(DatabaseConnector, 'cursor', return_value=None), \
+            patch.object(DatabaseConnector, 'commit', return_value=None):
+
+        with patch.object(DatabaseConnector, 'execute', return_value=None) as mock_execute:
+            experimenter_sqlite.delete_table()
+
+            assert mock_execute.call_count == 5
+            assert mock_execute.call_args_list[0][0][1] == 'DROP TABLE IF EXISTS example_logtables__train_scores'
+            assert mock_execute.call_args_list[1][0][1] == 'DROP TABLE IF EXISTS example_logtables__test_f1'
+            assert mock_execute.call_args_list[2][0][1] == 'DROP TABLE IF EXISTS example_logtables__test_accuracy'
+            assert mock_execute.call_args_list[3][0][1] == 'DROP TABLE IF EXISTS example_logtables_codecarbon'
+            assert mock_execute.call_args_list[4][0][1] == 'DROP TABLE IF EXISTS example_logtables'
+
+
+def test_get_codecarbon_table_sqlite(experimenter_sqlite):
+    with patch.object(DatabaseConnector, 'connect', return_value=None), \
+            patch.object(pandas, 'read_sql', return_value=pandas.DataFrame()), \
+            patch.object(DatabaseConnector, 'close_connection', return_value=None):
+
+        df = experimenter_sqlite.get_codecarbon_table()
+
+        assert df.empty
diff --git a/test/test_codecarbon/test_integration_mysql.py b/test/test_codecarbon/test_integration_mysql.py
new file mode 100644
index 00000000..6b07fc80
--- /dev/null
+++ b/test/test_codecarbon/test_integration_mysql.py
@@ -0,0 +1,62 @@
+import random
+import tempfile
+
+import numpy as np
+import pytest
+
+from py_experimenter.experimenter import PyExperimenter
+from py_experimenter.result_processor import ResultProcessor
+
+
+@pytest.fixture
+def experimenter():
+    content = """
+    [PY_EXPERIMENTER]
+    provider = mysql
+    database = py_experimenter
+    table = integration_test_mysql
+
+    keyfields = dataset, cross_validation_splits:int, seed:int, kernel
+    dataset = iris
+    cross_validation_splits = 5
+    seed = 2:6:2
+    kernel = linear, poly, rbf, sigmoid
+
+    resultfields = pipeline:LONGTEXT, train_f1:DECIMAL, train_accuracy:DECIMAL, test_f1:DECIMAL, test_accuracy:DECIMAL
+    resultfields.timestamps = false
+
+    [CUSTOM]
+    path = sample_data
+    """
+
+    # Create temporary experiment configuration file
+    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+        f.write(content)
+        experiment_configuration = f.name
+
+    return PyExperimenter(experiment_configuration)
+
+
+def run_ml(parameters: dict, result_processor: ResultProcessor, custom_config: dict):
+    seed = parameters['seed']
+    random.seed(seed)
+    np.random.seed(seed)
+
+    if parameters['dataset'] != 'iris':
+        raise ValueError("Example error")
+
+
+def test_integration(experimenter: PyExperimenter):
+    experimenter.delete_table()
+    experimenter.fill_table_from_config()
+    experimenter.execute(run_ml, -1)
+    table = experimenter.get_codecarbon_table()
+    assert list(table.columns) == [
+        'ID', 'experiment_id', 'codecarbon_timestamp', 'project_name', 'run_id',
+        'duration_seconds', 'emissions_kg', 'emissions_rate_kg_sec', 'cpu_power_watt', 'gpu_power_watt', 'ram_power_watt',
+        'cpu_energy_kw', 'gpu_energy_kw', 'ram_energy_kw', 'energy_consumed_kw', 'country_name',
+        'country_iso_code', 'region', 'cloud_provider', 'cloud_region', 'os', 'python_version',
+        'codecarbon_version', 'cpu_count', 'cpu_model', 'gpu_count', 'gpu_model',
+        'longitude', 'latitude', 'ram_total_size', 'tracking_mode', 'on_cloud', 'offline_mode']
+    assert table.shape == (12, 33)
+    assert list(table['experiment_id']) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
diff --git a/test/test_codecarbon/test_integration_sqlite.py b/test/test_codecarbon/test_integration_sqlite.py
new file mode 100644
index 00000000..2d06f9d5
--- /dev/null
+++ b/test/test_codecarbon/test_integration_sqlite.py
@@ -0,0 +1,66 @@
+import random
+import tempfile
+
+import numpy as np
+import pytest
+
+from py_experimenter.experimenter import PyExperimenter
+from py_experimenter.result_processor import ResultProcessor
+
+
+@pytest.fixture
+def experimenter():
+    content = """
+    [PY_EXPERIMENTER]
+    provider = sqlite
+    database = py_experimenter
+    table = integration_test_sqlite
+
+    keyfields = dataset, cross_validation_splits:int, seed:int, kernel
+    dataset = iris
+    cross_validation_splits = 5
+    seed = 2:6:2
+    kernel = linear, poly, rbf, sigmoid
+
+    resultfields = pipeline:LONGTEXT, train_f1:DECIMAL, train_accuracy:DECIMAL, test_f1:DECIMAL, test_accuracy:DECIMAL
+    resultfields.timestamps = false
+
+    [CUSTOM]
+    path = sample_data
+    """
+
+    # Create temporary experiment configuration file
+    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+        f.write(content)
+        experiment_configuration = f.name
+
+    return PyExperimenter(experiment_configuration)
+
+
+def run_ml(parameters: dict, result_processor: ResultProcessor, custom_config: dict):
+    seed = parameters['seed']
+    random.seed(seed)
+    np.random.seed(seed)
+
+    if parameters['dataset'] != 'iris':
+        raise ValueError("Example error")
+    # Without a small sleep, the test fails on Windows.
+    import time
+    time.sleep(1)
+
+
+def test_integration(experimenter: PyExperimenter):
+    experimenter.delete_table()
+    experimenter.fill_table_from_config()
+    experimenter.execute(run_ml, -1)
+    table = experimenter.get_codecarbon_table()
+    assert list(table.columns) == [
+        'ID', 'experiment_id', 'codecarbon_timestamp', 'project_name', 'run_id',
+        'duration_seconds', 'emissions_kg', 'emissions_rate_kg_sec', 'cpu_power_watt', 'gpu_power_watt', 'ram_power_watt',
+        'cpu_energy_kw', 'gpu_energy_kw', 'ram_energy_kw', 'energy_consumed_kw', 'country_name',
+        'country_iso_code', 'region', 'cloud_provider', 'cloud_region', 'os', 'python_version',
+        'codecarbon_version', 'cpu_count', 'cpu_model', 'gpu_count', 'gpu_model',
+        'longitude', 'latitude', 'ram_total_size', 'tracking_mode', 'on_cloud', 'offline_mode']
+    assert table.shape == (12, 33)
+    assert list(table['experiment_id']) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
+
diff --git a/test/test_database_connector.py b/test/test_database_connector.py
index 03e1f69b..12e0637c 100644
--- a/test/test_database_connector.py
+++ b/test/test_database_connector.py
@@ -50,8 +50,8 @@ def test_create_table_if_not_exists(create_database_if_not_existing_mock, test_c
     table_exists_mock.return_value = True
     table_has_correct_structure_mock.return_value = True
     experiment_configuration_file_path = load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg'))
-    database_connector = DatabaseConnectorMYSQL(experiment_configuration_file_path, database_credential_file_path=os.path.join(
-        'test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg'))
+    database_connector = DatabaseConnectorMYSQL(experiment_configuration_file_path, None, None, database_credential_file_path=os.path.join(
+        'test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg'))  # TODO: add codecarbon config
     database_connector.create_table_if_not_existing()
     create_table_string = ('CREATE TABLE test_table (ID INTEGER
PRIMARY KEY AUTO_INCREMENT, value int DEFAULT NULL,exponent int DEFAULT NULL,' 'creation_date DATETIME DEFAULT NULL,status VARCHAR(255) DEFAULT NULL,start_date DATETIME DEFAULT NULL,' @@ -131,6 +131,8 @@ def test_fill_table( experiment_configuration = load_config(experiment_configuration_file_path) database_connector = DatabaseConnectorMYSQL( experiment_configuration, + None, + None, database_credential_file_path=os.path.join('test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg')) database_connector.fill_table(parameters, fixed_parameter_combination) values, columns = write_to_database_mock.call_args_list[0][0] @@ -166,6 +168,8 @@ def test_delete_experiments_with_condition(commit_mock, execute_mock, cursor_moc experiment_configuration_file_path = load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) database_connector = DatabaseConnectorMYSQL( experiment_configuration_file_path, + None, + None, database_credential_file_path=os.path.join( 'test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg') ) @@ -197,6 +201,8 @@ def test_get_experiments_with_condition(get_structture_from_table_mock, fetchall experiment_configuration_file_path = load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) database_connector = DatabaseConnectorMYSQL( experiment_configuration_file_path, + None, + None, database_credential_file_path=os.path.join( 'test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg') ) @@ -221,6 +227,8 @@ def test_delete_table(commit_mock, execute_mock, cursor_mock, connect_mock, crea experiment_configuration_file_path = load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) database_connector = DatabaseConnectorMYSQL( experiment_configuration_file_path, + False, + None, database_credential_file_path=os.path.join( 'test', 'test_config_files', 'load_config_test_file', 'mysql_fake_credentials.cfg') ) diff --git a/test/test_logtables/test_mysql.py b/test/test_logtables/test_mysql.py index 694ba94b..c15b23fa 100644 --- a/test/test_logtables/test_mysql.py +++ b/test/test_logtables/test_mysql.py @@ -45,7 +45,7 @@ def test_tables_created(execute_mock, close_connection_mock, fetchall_mock, curs def test_logtable_insertion(database_connector_mock): config = ConfigParser() config.read(os.path.join('test', 'test_logtables', 'mysql_logtables.cfg')) - result_processor = ResultProcessor(config, None, None, None, 0) + result_processor = ResultProcessor(config, None, None, None, None, None, 0) result_processor._table_name = 'table_name' table_0_logs = {'test0': 'test', 'test1': 'test'} table_1_logs = {'test0': 'test'} @@ -69,7 +69,7 @@ def test_logtable_insertion(database_connector_mock): def test_delete_logtable(execution_mock, close_connection_mock, commit_mocck, fetchall_mock, cursor_mock, connect_mock, test_connection_mock, create_database_mock): fetchall_mock.return_value = cursor_mock.return_value = connect_mock.return_value = commit_mocck.return_value = None close_connection_mock.return_value = test_connection_mock.return_value = create_database_mock.return_value = execution_mock.return_value = None - experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'mysql_logtables.cfg')) + experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'mysql_logtables.cfg'), use_codecarbon=False) experimenter.delete_table() 
execution_mock.assert_has_calls([call(None, 'DROP TABLE IF EXISTS test_mysql_logtables__test_mysql_log'), call(None, 'DROP TABLE IF EXISTS test_mysql_logtables__test_mysql_log2'), diff --git a/test/test_logtables/test_sqlite.py b/test/test_logtables/test_sqlite.py index c63385ba..4b0a2d3c 100644 --- a/test/test_logtables/test_sqlite.py +++ b/test/test_logtables/test_sqlite.py @@ -41,7 +41,7 @@ def test_tables_created(execute_mock, close_connection_mock, fetchall_mock, curs def test_logtable_insertion(database_connector_mock): config = ConfigParser() config.read(os.path.join('test', 'test_logtables', 'sqlite_logtables.cfg')) - result_processor = ResultProcessor(config, None, None, None, 0) + result_processor = ResultProcessor(config, None, None, None, None, None, 0) result_processor._table_name = 'table_name' table_0_logs = {'test0': 'test', 'test1': 'test'} table_1_logs = {'test0': 'test'} @@ -65,7 +65,7 @@ def test_logtable_insertion(database_connector_mock): def test_delete_logtable(execution_mock, close_connection_mock, commit_mocck, fetchall_mock, cursor_mock, connect_mock, test_connection_mock): fetchall_mock.return_value = cursor_mock.return_value = connect_mock.return_value = commit_mocck.return_value = None close_connection_mock.return_value = test_connection_mock.return_value = execution_mock.return_value = None - experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'sqlite_logtables.cfg')) + experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'sqlite_logtables.cfg'), use_codecarbon=False) experimenter.delete_table() execution_mock.assert_has_calls([call(None, 'DROP TABLE IF EXISTS test_sqlite_logtables__test_sqlite_log'), call(None, 'DROP TABLE IF EXISTS test_sqlite_logtables__test_sqlite_log2'), call(None, 'DROP TABLE IF EXISTS test_sqlite_logtables')]) @@ -86,7 +86,7 @@ def own_function(keyfields: dict, result_processor: ResultProcessor, custom_fiel result_processor.process_logs({'test_sqlite_log': {'test': 2}, 'test_sqlite_log2': {'test': 3}}) def test_integration(): - experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'sqlite_logtables.cfg')) + experimenter = PyExperimenter(os.path.join('test', 'test_logtables', 'sqlite_logtables.cfg'), use_codecarbon=False) try: experimenter.delete_table() except Exception: diff --git a/test/test_result_processor.py b/test/test_result_processor.py index 906cab00..2cf6b26f 100644 --- a/test/test_result_processor.py +++ b/test/test_result_processor.py @@ -37,7 +37,7 @@ def test_init(create_database_if_not_existing_mock, test_connection_mysql, test_ create_database_if_not_existing_mock.return_value = None test_connection_mysql.return_value = None test_connection_sqlite.return_value = None - result_processor = ResultProcessor(config, CREDENTIAL_PATH, table_name, result_fields, 0) + result_processor = ResultProcessor(config, False, None, CREDENTIAL_PATH, table_name, result_fields, 0) assert table_name == result_processor._table_name assert result_fields == result_processor._result_fields @@ -53,7 +53,7 @@ def test_init_raises_error(mock_fn): config = utils.load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) config.set('PY_EXPERIMENTER', 'provider', 'test_provider') with pytest.raises(InvalidConfigError, match='Invalid database provider!'): - ResultProcessor(config, CREDENTIAL_PATH, table_name, condition, result_fields) + ResultProcessor(config, False, None, CREDENTIAL_PATH, table_name, condition, result_fields) 
@patch.object(database_connector_mysql.DatabaseConnectorMYSQL, '_test_connection') @@ -81,7 +81,7 @@ def test_process_results_raises_error(create_database_mock, test_connection_mock table_name = 'test_table' config = utils.load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) - result_processor = ResultProcessor(config, CREDENTIAL_PATH, table_name, result_fields, experiment_id) + result_processor = ResultProcessor(config, False, None, CREDENTIAL_PATH, table_name, result_fields, experiment_id) with pytest.raises(error, match=errorstring): result_processor.process_results(results) @@ -101,7 +101,7 @@ def test_valid_result_fields(create_database_if_not_existing_mock, test_connecti create_database_if_not_existing_mock.return_value = None test_connection_mock.return_value = None mock_config = utils.load_config(os.path.join('test', 'test_config_files', 'load_config_test_file', 'my_sql_test_file.cfg')) - assert subset_boolean == ResultProcessor(mock_config, CREDENTIAL_PATH, 'test_table_name', + assert subset_boolean == ResultProcessor(mock_config, False, None, CREDENTIAL_PATH, 'test_table_name', used_result_fields, 0)._valid_result_fields(existing_result_fields) diff --git a/test/test_run_experiments/test_run_mysql_experiment.py b/test/test_run_experiments/test_run_mysql_experiment.py index 378f02f0..ff4000d5 100644 --- a/test/test_run_experiments/test_run_mysql_experiment.py +++ b/test/test_run_experiments/test_run_mysql_experiment.py @@ -35,7 +35,7 @@ def check_done_entries(experimenter, amount_of_entries): def test_run_all_mqsql_experiments(): experiment_configuration_file_path = os.path.join('test', 'test_run_experiments', 'test_run_mysql_experiment_config.cfg') logging.basicConfig(level=logging.DEBUG) - experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path) + experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path, use_codecarbon=False) try: experimenter.delete_table() except ProgrammingError as e: @@ -53,7 +53,7 @@ def test_run_all_mqsql_experiments(): assert entries_without_metadata == (1, 1, 1, 'done', 'PyExperimenter', '0.8414709848078965', '0.5403023058681398', None) experimenter.dbconnector.close_connection(connection) - experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path) + experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path, use_codecarbon=False) experimenter.fill_table_from_config() experimenter.execute(own_function, -1) check_done_entries(experimenter, 30) @@ -93,7 +93,7 @@ def check_error_entries(experimenter): def test_run_error_experiment(): experiment_configuration_file_path = os.path.join('test', 'test_run_experiments', 'test_run_mysql_experiment_config.cfg') logging.basicConfig(level=logging.DEBUG) - experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path) + experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path, use_codecarbon=False) try: experimenter.delete_table() except ProgrammingError as e: @@ -126,7 +126,7 @@ def own_function_raising_errors(keyfields: dict, result_processor: ResultProcess def test_raising_error_experiment(): experimenter = PyExperimenter(experiment_configuration_file_path=os.path.join('test', 'test_run_experiments', 'test_run_mysql_error_config.cfg'), - name='name') + name='name', use_codecarbon=False) try: 
experimenter.delete_table() diff --git a/test/test_run_experiments/test_run_sqlite_experiment.py b/test/test_run_experiments/test_run_sqlite_experiment.py index bde6e850..38442b02 100644 --- a/test/test_run_experiments/test_run_sqlite_experiment.py +++ b/test/test_run_experiments/test_run_sqlite_experiment.py @@ -36,7 +36,7 @@ def check_done_entries(experimenter, amount_of_entries): def test_run_all_sqlite_experiments(): logging.basicConfig(level=logging.DEBUG) experimenter = PyExperimenter(experiment_configuration_file_path=os.path.join( - 'test', 'test_run_experiments', 'test_run_sqlite_experiment_config.cfg')) + 'test', 'test_run_experiments', 'test_run_sqlite_experiment_config.cfg'), use_codecarbon=False) try: experimenter.delete_table() except Exception: @@ -55,7 +55,7 @@ def test_run_all_sqlite_experiments(): experimenter.dbconnector.close_connection(connection) experimenter = PyExperimenter(experiment_configuration_file_path=os.path.join( - 'test', 'test_run_experiments', 'test_run_sqlite_experiment_config.cfg')) + 'test', 'test_run_experiments', 'test_run_sqlite_experiment_config.cfg'), use_codecarbon=False) experimenter.fill_table_from_config() experimenter.execute(own_function, -1) check_done_entries(experimenter, 30) @@ -95,7 +95,7 @@ def check_error_entries(experimenter): def test_run_error_experiment(): experiment_configuration_file_path = os.path.join('test', 'test_run_experiments', 'test_run_sqlite_experiment_config.cfg') logging.basicConfig(level=logging.DEBUG) - experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path) + experimenter = PyExperimenter(experiment_configuration_file_path=experiment_configuration_file_path, use_codecarbon=False) try: experimenter.delete_table() except ProgrammingError as e: @@ -131,7 +131,7 @@ def own_function_raising_errors(keyfields: dict, result_processor: ResultProcess def test_raising_error_experiment(): experimenter = PyExperimenter(experiment_configuration_file_path=os.path.join('test', 'test_run_experiments', 'test_run_sqlite_error_config.cfg'), - name='name') + name='name', use_codecarbon=False) try: experimenter.delete_table() diff --git a/test/test_utils.py b/test/test_utils.py index 9b8c1056..b2dea398 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -1,13 +1,15 @@ import os import re +import tempfile from configparser import ConfigParser from typing import Dict import pytest from py_experimenter.exceptions import ConfigError, MissingLogTableError, NoConfigFileError, ParameterCombinationError -from py_experimenter.utils import (_generate_int_data, add_timestep_result_columns, combine_fill_table_parameters, extract_columns, extract_logtables, - get_keyfield_data, get_keyfield_names, get_keyfields, get_resultfields, load_config, timestamps_for_result_fields) +from py_experimenter.utils import (_generate_int_data, add_timestep_result_columns, combine_fill_table_parameters, extract_codecarbon_config, + extract_columns, extract_logtables, get_keyfield_data, get_keyfield_names, get_keyfields, get_resultfields, + load_config, timestamps_for_result_fields) @pytest.mark.parametrize( @@ -367,7 +369,7 @@ def test_combine_fill_table_parameters(keyfield_names, parameters, fixed_paramet 'logtables': 'table1:Table1, table2:Table2', 'Table1': 'a:FLOAT, b:FLOAT', 'Table2': 'a:FLOAT, b'}}, - {'some_table_name__table1': [('a', 'FLOAT'), ('b', 'FLOAT')], + {'some_table_name__table1': [('a', 'FLOAT'), ('b', 'FLOAT')], 'some_table_name__table2': [('a', 'FLOAT'), ('b', 'VARCHAR(255)')]}, id='logtables 
with two tables' ), @@ -384,7 +386,7 @@ def test_combine_fill_table_parameters(keyfield_names, parameters, fixed_paramet ), ] ) -def test_extract_logtables(table_name:str, configuration_dict: Dict[str, Dict[str, str]], expected_logtables: Dict[str, Dict[str, str]]): +def test_extract_logtables(table_name: str, configuration_dict: Dict[str, Dict[str, str]], expected_logtables: Dict[str, Dict[str, str]]): config = ConfigParser() config.read_dict(configuration_dict) logtables = extract_logtables(config, table_name) @@ -401,15 +403,117 @@ def test_extract_logtables(table_name:str, configuration_dict: Dict[str, Dict[st [], 'No parameter combination found!' ), - ( - ['keyfield_name_1', 'keyfield_name_2'], - {'keyfield_name_1': [1, 2], 'keyfield_name_2': [4, 5]}, - [{'keyfield_name_2': [7]}], - 'There is at least one key that is used more than once!' - ), - - ] + ], ) -def test_combine_fill_table_parameters_raises_error(keyfield_names, parameters, fixed_parameter_combinations, error_msg): +def test_combine_fill_table_parameters_raises_error( + keyfield_names, parameters, fixed_parameter_combinations, error_msg +): with pytest.raises(ParameterCombinationError, match=error_msg): - combine_fill_table_parameters(keyfield_names, parameters, fixed_parameter_combinations) + combine_fill_table_parameters( + keyfield_names, parameters, fixed_parameter_combinations + ) + + +@pytest.fixture +def temp_config_file_no_codecarbon_section(): + config_data = """ + [PY_EXPERIMENTER] + provider=mysql + database=py_experimenter + table=test_table_mysql_with_wrong_syntax + n_jobs = 5 + + keyfields = value:int, exponent:int, + resultfields = sin, cos + + value=1,2,3,4,5,6,7,8,9,10 + exponent=1,2,3 + """ + with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file: + temp_file.write(config_data) + temp_file.close() + yield temp_file.name + # Clean up the temporary file after the test + os.remove(temp_file.name) + + +def test_extract_codecarbon_config_no_codecarbon_section( + temp_config_file_no_codecarbon_section, +): + config = ConfigParser() + expected_codecarbon_config = ConfigParser() + config.read(temp_config_file_no_codecarbon_section) + + config, codecarbon_config = extract_codecarbon_config(config) + + # Check if 'codecarbon' section is removed from the config + assert not config.has_section("codecarbon") + + # Check if the extracted 'codecarbon' config is correct + expected_codecarbon_config.read_dict( + { + "codecarbon": { + "measure_power_secs": "15", + "tracking_mode": "machine", + "log_level": "error", + "save_to_file": "True", + "output_dir": "output/CodeCarbon", + } + } + ) + assert codecarbon_config == expected_codecarbon_config + + +@pytest.fixture +def temp_config_file(): + config_data = """ + [PY_EXPERIMENTER] + provider=mysql + database=py_experimenter + table=test_table_mysql_with_wrong_syntax + n_jobs = 5 + + keyfields = value:int, exponent:int, + resultfields = sin, cos + + value=1,2,3,4,5,6,7,8,9,10 + exponent=1,2,3 + + [codecarbon] + measure_power_secs = 30 + tracking_mode = process + log_level = warning + save_to_file = False + output_dir = output/CodeCarbon + """ + with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file: + temp_file.write(config_data) + temp_file.close() + yield temp_file.name + # Clean up the temporary file after the test + os.remove(temp_file.name) + + +def test_extract_codecarbon_config(temp_config_file): + config = ConfigParser() + expected_codecarbon_config = ConfigParser() + config.read(temp_config_file) + + config, codecarbon_config = 
extract_codecarbon_config(config) + + # Check if 'codecarbon' section is removed from the config + assert not config.has_section("codecarbon") + + # Check if the extracted 'codecarbon' config matches the expected values + expected_codecarbon_config.read_dict( + { + "codecarbon": { + "measure_power_secs": "30", + "tracking_mode": "process", + "log_level": "warning", + "save_to_file": "False", + "output_dir": "output/CodeCarbon", + } + } + ) + assert codecarbon_config == expected_codecarbon_config \ No newline at end of file From 9eec23663a050d23b34a53ff1eddcbba103b4078 Mon Sep 17 00:00:00 2001 From: Lukas Fehring Date: Wed, 21 Jun 2023 17:16:25 +0200 Subject: [PATCH 2/2] Adapt tracking mode --- docs/source/usage/experiment_configuration_file.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/usage/experiment_configuration_file.rst b/docs/source/usage/experiment_configuration_file.rst index ce31ccfb..7c0d1bd8 100644 --- a/docs/source/usage/experiment_configuration_file.rst +++ b/docs/source/usage/experiment_configuration_file.rst @@ -33,7 +33,7 @@ The experiment configuration file is primarily used to define the database backe [codecarbon] offline_mode = False measure_power_secs = 15 - tracking_mode = process + tracking_mode = machine log_level = error save_to_file = True output_dir = output/CodeCarbon @@ -164,7 +164,7 @@ Per default, ``CodeCarbon`` will track the carbon footprint of the whole machine [codecarbon] measure_power_secs = 15 - tracking_mode = process + tracking_mode = machine log_level = error save_to_file = True output_dir = output/CodeCarbon
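
For reference, a minimal end-to-end sketch of the CodeCarbon integration introduced by this patch: it exercises the optional [codecarbon] config section handled by extract_codecarbon_config(), the tracker that _execution_wrapper() starts and stops around each experiment_function call, and the new get_codecarbon_table() accessor. The snippet is illustrative rather than part of the diff: the config path, table name, and keyfield values are invented for the example, and it assumes use_codecarbon keeps its default of being enabled, which the integration tests above rely on.

    import os

    from py_experimenter.experimenter import PyExperimenter
    from py_experimenter.result_processor import ResultProcessor

    # Illustrative config. Omitting the [codecarbon] section entirely would make
    # extract_codecarbon_config() fall back to its defaults (measure_power_secs=15,
    # tracking_mode=machine, log_level=error, save_to_file=True).
    CONTENT = """
    [PY_EXPERIMENTER]
    provider = sqlite
    database = py_experimenter
    table = codecarbon_demo

    keyfields = value:int
    value = 1, 2, 3
    resultfields = square:int

    [codecarbon]
    offline_mode = False
    measure_power_secs = 15
    tracking_mode = machine
    log_level = error
    save_to_file = True
    output_dir = output/CodeCarbon
    """

    os.makedirs('config', exist_ok=True)
    with open(os.path.join('config', 'codecarbon_demo.cfg'), 'w') as f:
        f.write(CONTENT)

    def run_experiment(keyfields: dict, result_processor: ResultProcessor, custom_fields: dict):
        # This function runs between tracker.start() and tracker.stop() inside
        # _execution_wrapper, so its emissions land in codecarbon_demo_codecarbon.
        result_processor.process_results({'square': keyfields['value'] ** 2})

    # use_codecarbon is left at its default here; pass use_codecarbon=False to opt out.
    experimenter = PyExperimenter(experiment_configuration_file_path=os.path.join('config', 'codecarbon_demo.cfg'),
                                  name='codecarbon_demo')
    experimenter.fill_table_from_config()
    experimenter.execute(run_experiment, -1)

    # One emissions row per executed experiment, linked to the main table by experiment_id.
    emissions = experimenter.get_codecarbon_table()
    print(emissions[['experiment_id', 'duration_seconds', 'emissions_kg']])

Because _write_emissions() stores the experiment_id alongside every measurement, per-experiment energy figures can be joined back to the keyfields and results with an ordinary merge on that column.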