From 47fe9213d8a91d265ec21c67ce97acacaa3265af Mon Sep 17 00:00:00 2001
From: LukasFehring <72503857+LukasFehring@users.noreply.github.com>
Date: Wed, 17 Apr 2024 08:47:02 +0200
Subject: [PATCH] Extend documentation for distributed usage (#188)

---
 CHANGELOG.rst                               |  9 ++++
 docs/source/usage/distributed_execution.rst | 50 ++++++++++++++++++++++
 docs/source/usage/index.rst                 |  1 +
 pyproject.toml                              |  2 +-
 4 files changed, 61 insertions(+), 1 deletion(-)
 create mode 100644 docs/source/usage/distributed_execution.rst

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index e8770f2c..d6e51c3d 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,15 @@
 Changelog
 =========
 
+
+v1.4.2 (??.??.2024)
+===================
+
+Feature
+-------
+
+- Added documentation on how to execute PyExperimenter on distributed machines.
+
 v1.4.1 (11.03.2024)
 ===================
 
diff --git a/docs/source/usage/distributed_execution.rst b/docs/source/usage/distributed_execution.rst
new file mode 100644
index 00000000..dbb01d13
--- /dev/null
+++ b/docs/source/usage/distributed_execution.rst
@@ -0,0 +1,50 @@
+.. _distributed_execution:
+
+=====================
+Distributed Execution
+=====================
+To distribute the execution of experiments across multiple machines, you can follow the standard :ref:`procedure of using PyExperimenter `, with the following additional considerations.
+
+--------------
+Database Setup
+--------------
+You need a shared database that is accessible from all machines and supports concurrent access. ``SQLite`` is not suited for this purpose, which is why we recommend using a ``MySQL`` database instead.
+
+--------
+Workflow
+--------
+While it is theoretically possible for multiple jobs to create new experiments, this introduces the risk of creating the same experiment multiple times. To prevent this, we recommend the following workflow, in which each process acts either as the ``database handler``, responsible for creating/resetting experiments, or as an ``experiment executor``, which actually executes experiments.
+
+.. note::
+    Make sure to use the same :ref:`experiment configuration file ` and :ref:`database credential file ` for both types of processes.
+
+
+Database Handling
+-----------------
+
+The ``database handler`` process creates/resets the experiments and stores them in the database once in advance.
+
+.. code-block:: python
+
+    from py_experimenter.experimenter import PyExperimenter
+
+    experimenter = PyExperimenter(
+        experiment_configuration_file_path = "path/to/file",
+        database_credential_file_path = "path/to/file"
+    )
+    experimenter.fill_table_from_config()
+
+
+Experiment Execution
+--------------------
+
+Multiple ``experiment executor`` processes execute the experiments in parallel on different machines, all using the same code. In a typical HPC context, each job starts a single ``experiment executor`` process on a different node. Each process first creates its own ``PyExperimenter`` instance, using the same configuration as above, and then calls ``execute`` with the experiment function.
+
+.. code-block:: python
+
+    from py_experimenter.experimenter import PyExperimenter
+
+    experimenter = PyExperimenter(  # same configuration and credential files as above
+        experiment_configuration_file_path = "path/to/file",
+        database_credential_file_path = "path/to/file"
+    )
+    experimenter.execute(experiment_function, max_experiments=1)
diff --git a/docs/source/usage/index.rst b/docs/source/usage/index.rst
index c5f2c0f5..204a5a5e 100644
--- a/docs/source/usage/index.rst
+++ b/docs/source/usage/index.rst
@@ -38,3 +38,4 @@ The following steps are necessary to execute the ``PyExperimenter``.
    ./database_credential_file
    ./experiment_function
    ./execution
+   ./distributed_execution
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 15fe9a29..8764fd00 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "py-experimenter"
-version = "1.4.1"
+version = "1.4.2a0"
 description = "The PyExperimenter is a tool for the automatic execution of experiments, e.g. for machine learning (ML), capturing corresponding results in a unified manner in a database."
 authors = [
     "Tanja Tornede ",