diff --git a/dogs_vs_cats.ipynb b/dogs_vs_cats.ipynb
new file mode 100644
index 0000000..b9b05fc
--- /dev/null
+++ b/dogs_vs_cats.ipynb
@@ -0,0 +1,460 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "toc_visible": true,
+ "authorship_tag": "ABX9TyOdbgeOmsd22ZH45NbCSWnS",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "fil7dKfzZGmX"
+ },
+ "outputs": [],
+ "source": [
+ "!wget https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+        "!unzip -o -q kagglecatsanddogs_5340.zip\n"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "b-pjyr9VrGta",
+ "outputId": "1891054b-423b-406e-9cd6-7646ec3c094f"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Archive: kagglecatsanddogs_5340.zip\n",
+ "replace PetImages/Cat/0.jpg? [y]es, [n]o, [A]ll, [N]one, [r]ename: "
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Import modules"
+ ],
+ "metadata": {
+ "id": "nGN-7wcPrpeE"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "import warnings\n",
+ "import os\n",
+ "import tqdm\n",
+ "import random\n",
+        "from keras.utils import load_img\n",
+ "warnings.filterwarnings('ignore')"
+ ],
+ "metadata": {
+ "id": "Z1eQ6888rvtQ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Create dataframe for input and output"
+ ],
+ "metadata": {
+ "id": "zmwYpOLasNiA"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "input_path = []\n",
+ "label = []\n",
+ "\n",
+ "for class_name in os.listdir(\"PetImages\"):\n",
+ " for path in os.listdir(\"PetImages/\"+class_name):\n",
+ " if class_name == 'Cat':\n",
+ " label.append(0)\n",
+ " else:\n",
+ " label.append(1)\n",
+ " input_path.append(os.path.join(\"PetImages\", class_name, path))\n",
+ "print(input_path[0], label[0])"
+ ],
+ "metadata": {
+ "id": "hKurPC0KsV93"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "df = pd.DataFrame()\n",
+ "df['images'] = input_path\n",
+ "df['label'] = label\n",
+ "df = df.sample(frac=1).reset_index(drop=True)\n",
+ "df.head()"
+ ],
+ "metadata": {
+ "id": "Y_-IjF6Bt2g_"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "for i in df['images']:\n",
+ " if '.jpg' not in i:\n",
+ " print(i)\n"
+ ],
+ "metadata": {
+ "id": "yeNGws-5udOt"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import PIL\n",
+ "l = []\n",
+ "for image in df['images']:\n",
+ " try:\n",
+ " img = PIL.Image.open(image)\n",
+        "    except Exception:\n",
+ " l.append(image)\n",
+ "l"
+ ],
+ "metadata": {
+ "id": "2hWYhKNTudRI"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# delete db files\n",
+ "df = df[df['images']!='PetImages/Dog/Thumbs.db']\n",
+ "df = df[df['images']!='PetImages/Cat/Thumbs.db']\n",
+ "df = df[df['images']!='PetImages/Cat/666.jpg']\n",
+ "df = df[df['images']!='PetImages/Dog/11702.jpg']\n",
+ "len(df)"
+ ],
+ "metadata": {
+ "id": "tfwjbZcyudTu"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Exploratory Data Analysis"
+ ],
+ "metadata": {
+ "id": "s-TMLAuOunuT"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# to display grid of images\n",
+ "plt.figure(figsize=(25,25))\n",
+ "temp = df[df['label']==1]['images']\n",
+        "start = random.randint(0, len(temp) - 25)\n",
+ "files = temp[start:start+25]\n",
+ "\n",
+ "for index, file in enumerate(files):\n",
+ " plt.subplot(5,5, index+1)\n",
+ " img = load_img(file)\n",
+ " img = np.array(img)\n",
+ " plt.imshow(img)\n",
+ " plt.title('Dogs')\n",
+ " plt.axis('off')"
+ ],
+ "metadata": {
+ "id": "l4OvK_SnudWO"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# to display grid of images\n",
+ "plt.figure(figsize=(25,25))\n",
+ "temp = df[df['label']==0]['images']\n",
+        "start = random.randint(0, len(temp) - 25)\n",
+ "files = temp[start:start+25]\n",
+ "\n",
+ "for index, file in enumerate(files):\n",
+ " plt.subplot(5,5, index+1)\n",
+ " img = load_img(file)\n",
+ " img = np.array(img)\n",
+ " plt.imshow(img)\n",
+ " plt.title('Cats')\n",
+ " plt.axis('off')"
+ ],
+ "metadata": {
+ "id": "2v3efyHSudYy"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import seaborn as sns\n",
+        "sns.countplot(x=df['label'])\n"
+ ],
+ "metadata": {
+ "id": "iiKdQQp3uda4"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Create DataGenerator for the Images"
+ ],
+ "metadata": {
+ "id": "-bDltrp0vbBT"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "df['label'] = df['label'].astype('str')"
+ ],
+ "metadata": {
+ "id": "P5ED7VSruddI"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "\n",
+ "df.head()"
+ ],
+ "metadata": {
+ "id": "vPMSiHStudfn"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# input split\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "train, test = train_test_split(df, test_size=0.2, random_state=42)"
+ ],
+ "metadata": {
+ "id": "iz1J5CAiudhw"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from keras.preprocessing.image import ImageDataGenerator\n",
+ "train_generator = ImageDataGenerator(\n",
+ " rescale = 1./255, # normalization of images\n",
+ " rotation_range = 40, # augmention of images to avoid overfitting\n",
+ " shear_range = 0.2,\n",
+ " zoom_range = 0.2,\n",
+ " horizontal_flip = True,\n",
+ " fill_mode = 'nearest'\n",
+ ")\n",
+ "\n",
+ "val_generator = ImageDataGenerator(rescale = 1./255)\n",
+ "\n",
+ "train_iterator = train_generator.flow_from_dataframe(\n",
+ " train,\n",
+ " x_col='images',\n",
+ " y_col='label',\n",
+ " target_size=(128,128),\n",
+ " batch_size=512,\n",
+ " class_mode='binary'\n",
+ ")\n",
+ "\n",
+ "val_iterator = val_generator.flow_from_dataframe(\n",
+ " test,\n",
+ " x_col='images',\n",
+ " y_col='label',\n",
+ " target_size=(128,128),\n",
+ " batch_size=512,\n",
+ " class_mode='binary'\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "ZaLQIUOZudkL"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Model Creation"
+ ],
+ "metadata": {
+ "id": "89ITQEgUvUXB"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from keras import Sequential\n",
+ "from keras.layers import Conv2D, MaxPool2D, Flatten, Dense\n",
+ "\n",
+ "model = Sequential([\n",
+ " Conv2D(16, (3,3), activation='relu', input_shape=(128,128,3)),\n",
+ " MaxPool2D((2,2)),\n",
+ " Conv2D(32, (3,3), activation='relu'),\n",
+ " MaxPool2D((2,2)),\n",
+ " Conv2D(64, (3,3), activation='relu'),\n",
+ " MaxPool2D((2,2)),\n",
+ " Flatten(),\n",
+ " Dense(512, activation='relu'),\n",
+ " Dense(1, activation='sigmoid')\n",
+ "])"
+ ],
+ "metadata": {
+ "id": "PK6v-ZVmudnh"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+ "model.summary()"
+ ],
+ "metadata": {
+ "id": "aeNRSoIxu6bh"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "history = model.fit(train_iterator, epochs=10, validation_data=val_iterator)"
+ ],
+ "metadata": {
+ "id": "mPhWhJ3Eu6eJ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+        "## Visualization of Results"
+ ],
+ "metadata": {
+ "id": "PwAhwxOuvN8R"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "acc = history.history['accuracy']\n",
+ "val_acc = history.history['val_accuracy']\n",
+ "epochs = range(len(acc))\n",
+ "\n",
+ "plt.plot(epochs, acc, 'b', label='Training Accuracy')\n",
+ "plt.plot(epochs, val_acc, 'r', label='Validation Accuracy')\n",
+ "plt.title('Accuracy Graph')\n",
+ "plt.legend()\n",
+ "plt.figure()\n",
+ "\n",
+ "loss = history.history['loss']\n",
+ "val_loss = history.history['val_loss']\n",
+ "plt.plot(epochs, loss, 'b', label='Training Loss')\n",
+ "plt.plot(epochs, val_loss, 'r', label='Validation Loss')\n",
+ "plt.title('Loss Graph')\n",
+ "plt.legend()\n",
+ "plt.show()"
+ ],
+ "metadata": {
+ "id": "Lss4cGXJu6hf"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "\n",
+        "## Test with Real Image"
+ ],
+ "metadata": {
+ "id": "7H8Swv98vGhP"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "image_path = \"test.jpg\" # path of the image\n",
+ "img = load_img(image_path, target_size=(128, 128))\n",
+ "img = np.array(img)\n",
+ "img = img / 255.0 # normalize the image\n",
+ "img = img.reshape(1, 128, 128, 3) # reshape for prediction\n",
+ "pred = model.predict(img)\n",
+        "if pred[0][0] > 0.5:\n",
+ " label = 'Dog'\n",
+ "else:\n",
+ " label = 'Cat'\n",
+ "print(label)"
+ ],
+ "metadata": {
+ "id": "0vjWGKIevCHO"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/index.html b/index.html
index 5522981..ed801f7 100644
--- a/index.html
+++ b/index.html
@@ -26,8 +26,17 @@