From ec0974ffe6a444ec4c83a54c0ee12a9c6d183ee2 Mon Sep 17 00:00:00 2001 From: Kristjan Roosild Date: Tue, 19 Dec 2017 22:25:21 +0200 Subject: [PATCH 1/2] Fix the Kaggle Dogs vs Cats competition link --- deeplearning1/nbs/lesson1.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeplearning1/nbs/lesson1.ipynb b/deeplearning1/nbs/lesson1.ipynb index 9d3e51651..6041f0a15 100644 --- a/deeplearning1/nbs/lesson1.ipynb +++ b/deeplearning1/nbs/lesson1.ipynb @@ -25,7 +25,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We're going to try to create a model to enter the [Dogs vs Cats](https://www.kaggle.com/c/dogs-vs-cats) competition at Kaggle. There are 25,000 labelled dog and cat photos available for training, and 12,500 in the test set that we have to try to label for this competition. According to the Kaggle web-site, when this competition was launched (end of 2013): *\"**State of the art**: The current literature suggests machine classifiers can score above 80% accuracy on this task\"*. So if we can beat 80%, then we will be at the cutting edge as of 2013!" + "We're going to try to create a model to enter the [Dogs vs Cats](https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition) competition at Kaggle. There are 25,000 labelled dog and cat photos available for training, and 12,500 in the test set that we have to try to label for this competition. According to the Kaggle web-site, when this competition was launched (end of 2013): *\"**State of the art**: The current literature suggests machine classifiers can score above 80% accuracy on this task\"*. So if we can beat 80%, then we will be at the cutting edge as of 2013!" 
] }, { From ddaedf27131a2b78f26f3d9d8c248365400a613e Mon Sep 17 00:00:00 2001 From: kristjanr Date: Sat, 23 Dec 2017 20:27:40 +0200 Subject: [PATCH 2/2] Fix a bug and use epoch variable in the for loop instead of the constant 1 --- deeplearning1/nbs/dogs_cats_redux.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deeplearning1/nbs/dogs_cats_redux.ipynb b/deeplearning1/nbs/dogs_cats_redux.ipynb index ed6fe4c77..8c1a15aaf 100755 --- a/deeplearning1/nbs/dogs_cats_redux.ipynb +++ b/deeplearning1/nbs/dogs_cats_redux.ipynb @@ -428,7 +428,7 @@ "latest_weights_filename = None\n", "for epoch in range(no_of_epochs):\n", " print \"Running epoch: %d\" % epoch\n", - " vgg.fit(batches, val_batches, nb_epoch=1)\n", + " vgg.fit(batches, val_batches, nb_epoch=1)\n", " latest_weights_filename = 'ft%d.h5' % epoch\n", " vgg.model.save_weights(results_path+latest_weights_filename)\n", "print \"Completed %s fit operations\" % no_of_epochs" @@ -1049,13 +1049,13 @@ { "ename": "NameError", "evalue": "name 'isdog' is not defined", - "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m#So to play it safe, we use a sneaky trick to round down our edge predictions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;31m#Swap all ones with .95 and all zeros with .05\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0misdog\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0misdog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmin\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.05\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mmax\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.95\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 'isdog' is not defined" - ] + ], + "output_type": "error" } ], "source": [ @@ -1249,7 +1249,7 @@ "navigate_menu": true, "number_sections": true, "sideBar": true, - "threshold": 6, + "threshold": 6.0, "toc_cell": false, "toc_section_display": "block", "toc_window_display": false