From 73ced76fad70dd19914119998856afe9b3c51ddb Mon Sep 17 00:00:00 2001 From: ghylander <74593034+ghylander@users.noreply.github.com> Date: Tue, 5 Jan 2021 14:03:53 +0100 Subject: [PATCH 1/3] update documentation Added an example of using the built-in Keras callback and of how to declare a new callback to track specific metrics --- README.md | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/README.md b/README.md index c452889..56ad88b 100644 --- a/README.md +++ b/README.md @@ -119,6 +119,55 @@ def train_dogs_vs_cats(exp): # Get Experiment object as argument to function. model.fit() exp.metric(model.accuracy()) ``` + + +### Keras Callbacks + +Hyperdash has an included keras callback module: +```python +from hyperdash import Experiment +exp = Experiment("Keras callback showcase") + +callbacks=[exp.callbacks.keras] + +history = training_model.fit(train_dataset, + epochs=epochs, + callbacks=callbacks, + validation_data=validation_dataset, + validation_steps=steps + ) +``` +This callback will monitor the validation loss (val_loss) and the validation accuracy (val_accuracy). 
+ +If you want to monitor other metrics, you will have to define a callback yourself: +```python +from hyperdash import Experiment +from tensorflow.keras.callbacks import Callback + +class Hyperdash(Callback): + def __init__(self, entries, exp): + super(Hyperdash, self).__init__() + self.entries = entries + self.exp = exp + def on_epoch_end(self, epoch, logs=None): + for entry in self.entries: + log = logs.get(entry) + if log is not None: + self.exp.metric(entry, log) + +callback_hd = Hyperdash(['accuracy', 'loss', 'val_accuracy', 'val_loss'], exp) # Here you list the metrics you want to track + +callbacks = [callback_hd] + +history = training_model.fit(train_dataset, + epochs=epochs, + callbacks=callbacks, + validation_data=validation_dataset, + validation_steps=steps + ) +``` + + ## API Keys ### Storage From b40e498ea7d25ec5c00b9636cecdafc09b3b8068 Mon Sep 17 00:00:00 2001 From: ghylander <74593034+ghylander@users.noreply.github.com> Date: Tue, 5 Jan 2021 14:17:39 +0100 Subject: [PATCH 2/3] update experiment.py Needs confirmation: it seems the accuracy metric was renamed from 'acc' to 'accuracy'. 
Hence, when using the built in callback, the val_accuracy will not be tracked --- hyperdash/experiment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hyperdash/experiment.py b/hyperdash/experiment.py index 79f022e..2ea6dc8 100644 --- a/hyperdash/experiment.py +++ b/hyperdash/experiment.py @@ -217,11 +217,11 @@ def __init__(self, exp): def on_epoch_end(self, epoch, logs=None): if not logs: logs = {} - val_acc = logs.get("val_acc") + val_acc = logs.get("val_accuracy") val_loss = logs.get("val_loss") if val_acc is not None: - self._exp.metric("val_acc", val_acc) + self._exp.metric("val_accuracy", val_acc) if val_loss is not None: self._exp.metric("val_loss", val_loss) cb = _KerasCallback(self._exp) From b74bd243318a7ea7e8b6315f362934e0134b2b34 Mon Sep 17 00:00:00 2001 From: Gustavo Hylander <74593034+ghylander@users.noreply.github.com> Date: Tue, 5 Oct 2021 10:49:21 +0200 Subject: [PATCH 3/3] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56ad88b..32af372 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Use Hyperdash if you're looking for cloud-based model monitoring that: * Saves your experiment's print output (standard out / error) as a local log file. * Notifies you when a long-running experiment has finished. -Hyperdash is compatible with: **Python 2.7-3.6** +Hyperdash is compatible with: **Python 2.7-3.8** ## Installation *Foreword: We care deeply about making Hyperdash fast and easy to install on Linux, Mac, and Windows. If you find a snag along the way, please let us know at support@hyperdash.io!*