       "cell_type": "code",
       "execution_count": 29,
       "metadata": {},
       "outputs": [
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "10000/10000 [==============================] - 0s 49us/step\n",
          "The [loss, accuracy] on test dataset are:  [0.1360580855519511, 0.9597]\n"
         ]
        }
       ],
       "source": [
        "print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))"
       ]
      },
      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "### Optional exercise: Run the model again with a validation dataset, plot the accuracy as a function of epochs, play with the number of epochs, and observe what happens."
       ]
      },
      {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {},
       "outputs": [],
       "source": [
        "# Code here"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {},
       "outputs": [],
       "source": [
        "# Solution:\n",
        "num_epochs = 20\n",
        "model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs,\n",
        "                      batch_size=512, validation_data=(X_test_prep, y_test_onehot))\n",
        "# Evaluating the model on test dataset\n",
        "#print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))\n",
        "history_model = model_run.history\n",
        "print(\"The history has the following data: \", history_model.keys())\n",
        "\n",
        "# Plotting the training and validation accuracy during the training\n",
        "plt.plot(np.arange(1, num_epochs+1), history_model[\"acc\"], \"blue\", label=\"training accuracy\")\n",
        "plt.plot(np.arange(1, num_epochs+1), history_model[\"val_acc\"], \"red\", label=\"validation accuracy\")\n",
        "plt.xlabel(\"epoch\")\n",
        "plt.ylabel(\"accuracy\")\n",
        "plt.legend()"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": 32,
       "metadata": {},
       "outputs": [
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "Epoch 1/50\n",
          "60000/60000 [==============================] - 3s 53us/step - loss: 1.5752 - acc: 0.8318\n",
          "Epoch 2/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.8282 - acc: 0.9022\n",
          "Epoch 3/50\n",
          "60000/60000 [==============================] - 1s 21us/step - loss: 0.6734 - acc: 0.9108\n",
          "Epoch 4/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.5963 - acc: 0.9154\n",
          "Epoch 5/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.5494 - acc: 0.9201\n",
          "Epoch 6/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.5158 - acc: 0.9233\n",
          "Epoch 7/50\n",
          "60000/60000 [==============================] - 2s 26us/step - loss: 0.4909 - acc: 0.9267\n",
          "Epoch 8/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.4721 - acc: 0.9300\n",
          "Epoch 9/50\n",
          "60000/60000 [==============================] - 1s 21us/step - loss: 0.4544 - acc: 0.9332\n",
          "Epoch 10/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.4393 - acc: 0.9356\n",
          "Epoch 11/50\n",
          "60000/60000 [==============================] - 1s 21us/step - loss: 0.4266 - acc: 0.9376\n",
          "Epoch 12/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.4142 - acc: 0.9395\n",
          "Epoch 13/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.4044 - acc: 0.9411\n",
          "Epoch 14/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.3966 - acc: 0.9423\n",
          "Epoch 15/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.3868 - acc: 0.9440\n",
          "Epoch 16/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.3792 - acc: 0.9449\n",
          "Epoch 17/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.3711 - acc: 0.9454\n",
          "Epoch 18/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.3640 - acc: 0.9474\n",
          "Epoch 19/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.3578 - acc: 0.9488\n",
          "Epoch 20/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.3535 - acc: 0.9488\n",
          "Epoch 21/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.3472 - acc: 0.9498\n",
          "Epoch 22/50\n",
          "60000/60000 [==============================] - 2s 25us/step - loss: 0.3399 - acc: 0.9514\n",
          "Epoch 23/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.3356 - acc: 0.9519\n",
          "Epoch 24/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.3325 - acc: 0.9531\n",
          "Epoch 25/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.3278 - acc: 0.9530\n",
          "Epoch 26/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.3234 - acc: 0.9532\n",
          "Epoch 27/50\n",
          "60000/60000 [==============================] - 1s 25us/step - loss: 0.3199 - acc: 0.9544\n",
          "Epoch 28/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.3153 - acc: 0.9549\n",
          "Epoch 29/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.3129 - acc: 0.9555\n",
          "Epoch 30/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.3087 - acc: 0.9565\n",
          "Epoch 31/50\n",
          "60000/60000 [==============================] - 2s 25us/step - loss: 0.3054 - acc: 0.9569\n",
          "Epoch 32/50\n",
          "60000/60000 [==============================] - 2s 25us/step - loss: 0.3028 - acc: 0.9571\n",
          "Epoch 33/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2991 - acc: 0.9573\n",
          "Epoch 34/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.2964 - acc: 0.9579\n",
          "Epoch 35/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.2932 - acc: 0.9579\n",
          "Epoch 36/50\n",
          "60000/60000 [==============================] - 2s 26us/step - loss: 0.2911 - acc: 0.9588\n",
          "Epoch 37/50\n",
          "60000/60000 [==============================] - 2s 27us/step - loss: 0.2895 - acc: 0.9585\n",
          "Epoch 38/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.2875 - acc: 0.9594\n",
          "Epoch 39/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2840 - acc: 0.9601\n",
          "Epoch 40/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.2819 - acc: 0.9596\n",
          "Epoch 41/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.2805 - acc: 0.9603\n",
          "Epoch 42/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2769 - acc: 0.9603\n",
          "Epoch 43/50\n",
          "60000/60000 [==============================] - 1s 25us/step - loss: 0.2755 - acc: 0.9616\n",
          "Epoch 44/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.2734 - acc: 0.9612\n",
          "Epoch 45/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2711 - acc: 0.9618\n",
          "Epoch 46/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2699 - acc: 0.9616\n",
          "Epoch 47/50\n",
          "60000/60000 [==============================] - 1s 22us/step - loss: 0.2698 - acc: 0.9616\n",
          "Epoch 48/50\n",
          "60000/60000 [==============================] - 1s 25us/step - loss: 0.2676 - acc: 0.9613\n",
          "Epoch 49/50\n",
          "60000/60000 [==============================] - 1s 24us/step - loss: 0.2648 - acc: 0.9621\n",
          "Epoch 50/50\n",
          "60000/60000 [==============================] - 1s 23us/step - loss: 0.2644 - acc: 0.9629\n"
         ]
        }
       ],
       "source": [
        "# Adding some regularization\n",
        "# Building the Keras model\n",
        "from keras.models import Sequential\n",
        "from keras.layers import Dense\n",
        "from keras.regularizers import l2\n",
        "\n",
        "def mnist_model():\n",
        "    \n",
        "    model = Sequential()\n",
        "\n",
        "    model.add(Dense(64, input_shape=(28*28,), activation=\"relu\", \n",
        "                   kernel_regularizer=l2(0.01)))\n",
        "\n",
        "    model.add(Dense(64, activation=\"relu\", \n",
        "                   kernel_regularizer=l2(0.01)))\n",
        "\n",
        "    model.add(Dense(10, activation=\"softmax\"))\n",
        "\n",
        "    model.compile(loss=\"categorical_crossentropy\",\n",
        "                  optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
        "    return model\n",
        "\n",
        "num_epochs = 50\n",
        "\n",
        "# Build a fresh model that includes the L2 regularization\n",
        "model = mnist_model()\n",
        "model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs,\n",
        "                      batch_size=512)"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": 33,
       "metadata": {},
       "outputs": [
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "10000/10000 [==============================] - 1s 128us/step\n",
          "The [loss, accuracy] on test dataset are:  [0.2548498446464539, 0.9646]\n"
         ]
        }
       ],
       "source": [
        "print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))"
       ]
      },
      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "## Network Architecture\n",
        "\n",
        "The neural networks we have seen so far are the simplest kind of neural networks.\n",
        "More sophisticated network architectures exist, designed especially for specific applications.\n",
        "Some of them are as follows:\n",
        "\n",
        "### Convolutional Neural Networks (CNNs)\n",
        "\n",
        "These networks are mostly used for computer-vision tasks.\n",
        "One of the early CNN architectures is shown below.\n",
        "\n",
        "<center>\n",
        "<figure>\n",
        "<img src=\"./images/neuralnets/CNN_lecun.png\" width=\"800\"/>\n",
        "<figcaption>source: LeCun et al., Gradient-based learning applied to document recognition (1998).</figcaption>\n",
        "</figure>\n",
        "</center>\n",
        "\n",
        "CNNs introduce new types of layers, such as convolution and pooling layers (a minimal example is sketched in the code cell after this section).\n",
        "\n",
        "### Recurrent Neural Networks (RNNs)\n",
        "\n",
        "These are used for time-series data, speech recognition, translation, etc.\n",
        "\n",
        "IMAGE HERE\n",
        "\n",
        "### Generative Adversarial Networks (GANs)\n",
        "\n",
        "GANs consist of two parts: a generative network and a discriminative network. The generative network produces data which is then fed to the discriminative network, which judges whether the new data belongs to a specified dataset. Via this feedback loop the generative network becomes better and better at creating data similar to the dataset the discriminative network is judging against. At the same time, the discriminative network gets better and better at identifying **fake** instances which are not from the reference dataset.\n",
        "\n",
        "IMAGE HERE"
       ]
      },
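      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "The cell below is a minimal, illustrative sketch of a small convolutional network in Keras, added only to show how convolution and pooling layers are stacked; it is not part of the exercises above. The layer sizes are arbitrary, and training it would require the images in their original 28x28x1 shape (assumed here to be available, e.g. as `X_train_images`) rather than the flattened vectors used earlier."
       ]
      },
      {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {},
       "outputs": [],
       "source": [
        "# Minimal CNN sketch (illustration only): stacking convolution and pooling layers\n",
        "from keras.models import Sequential\n",
        "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n",
        "\n",
        "cnn = Sequential()\n",
        "# A convolution layer learns small local filters over the image\n",
        "cnn.add(Conv2D(16, kernel_size=(3, 3), activation=\"relu\", input_shape=(28, 28, 1)))\n",
        "# A pooling layer downsamples the resulting feature maps\n",
        "cnn.add(MaxPooling2D(pool_size=(2, 2)))\n",
        "cnn.add(Conv2D(32, kernel_size=(3, 3), activation=\"relu\"))\n",
        "cnn.add(MaxPooling2D(pool_size=(2, 2)))\n",
        "# Flatten the feature maps and classify with dense layers, as before\n",
        "cnn.add(Flatten())\n",
        "cnn.add(Dense(64, activation=\"relu\"))\n",
        "cnn.add(Dense(10, activation=\"softmax\"))\n",
        "\n",
        "cnn.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
        "cnn.summary()\n",
        "\n",
        "# Training would need images of shape (num_samples, 28, 28, 1), e.g. (hypothetical name):\n",
        "# cnn.fit(X_train_images.reshape(-1, 28, 28, 1), y_train_onehot, epochs=5, batch_size=512)"
       ]
      },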
      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "## Will remove the example below."
       ]
      },
      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "**This beer example is not well suited for neural networks: the dataset is far too small.**"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": 3,
       "metadata": {},
       "outputs": [
        {
         "data": {
          "text/plain": [
           "(225, 4)"
          ]
         },
         "execution_count": 3,
         "metadata": {},
         "output_type": "execute_result"
        }
       ],
       "source": [
        "# Revisiting the beer example\n",
        "\n",
        "import pandas as pd\n",
        "from sklearn.model_selection import train_test_split\n",
        "from sklearn.preprocessing import MinMaxScaler\n",
        "from keras.models import Sequential\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "\n",
        "# Loading the beer data\n",
        "beer = pd.read_csv(\"beers.csv\")\n",
        "\n",
        "# Extracting the features and labels\n",
        "#beer_data.describe()\n",
        "features = beer.iloc[:, :-1]\n",
        "labels = beer.iloc[:, -1]\n",
        "features.shape"
       ]
      },
      {
       "cell_type": "code",
       "execution_count": 75,
       "metadata": {},
       "outputs": [],
       "source": [
        "# Revisiting the beer example\n",
        "\n",
        "# Loading and preparing the data\n",
        "\n",
        "import pandas as pd\n",
        "from sklearn.model_selection import train_test_split\n",
        "from sklearn.preprocessing import MinMaxScaler\n",
        "\n",
        "# Loading the beer data\n",
        "beer = pd.read_csv(\"beers.csv\")\n",
        "\n",
        "# Extracting the features and labels\n",
        "#beer_data.describe()\n",
        "features = beer.iloc[:, :-1]\n",
        "labels = beer.iloc[:, -1]\n",
        "\n",
        "# Here we split the dataset into training (70%) and validation sets (30%) \n",
        "#X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=42)\n",
        "X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.3)\n",
        "\n",
        "# Scaling the data\n",
        "# NOTE: The features should be normalized before being fed into the neural network\n",
        "scaling = MinMaxScaler()\n",
        "scaling.fit(X_train)\n",
        "\n",
        "X_train_scaled = scaling.transform(X_train)\n",
        "X_test_scaled = scaling.transform(X_test)"
       ]
      },
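      {
       "cell_type": "markdown",
       "metadata": {},
       "source": [
        "As a rough, illustrative sketch (not the notebook's own model), a very small Keras network for these scaled features could look like the following; the layer sizes, loss, optimizer and number of epochs are arbitrary choices, and the last column is assumed to hold binary 0/1 labels."
       ]
      },
      {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {},
       "outputs": [],
       "source": [
        "# Illustrative sketch: a tiny dense network for the scaled beer features\n",
        "from keras.models import Sequential\n",
        "from keras.layers import Dense\n",
        "\n",
        "beer_model = Sequential()\n",
        "beer_model.add(Dense(8, input_shape=(X_train_scaled.shape[1],), activation=\"relu\"))\n",
        "# Single sigmoid output, assuming binary 0/1 labels\n",
        "beer_model.add(Dense(1, activation=\"sigmoid\"))\n",
        "beer_model.compile(loss=\"binary_crossentropy\", optimizer=\"rmsprop\",\n",
        "                   metrics=[\"accuracy\"])\n",
        "\n",
        "# Train with the held-out split as validation data\n",
        "sketch_run = beer_model.fit(X_train_scaled, y_train, epochs=100, batch_size=16,\n",
        "                            validation_data=(X_test_scaled, y_test))"
       ]
      },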
      {
       "cell_type": "code",
       "execution_count": 82,
       "metadata": {},
       "outputs": [
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "Train on 157 samples, validate on 68 samples\n",
          "Epoch 1/1000\n",
          "157/157 [==============================] - 1s 6ms/step - loss: 0.6730 - acc: 0.5350 - val_loss: 0.6769 - val_acc: 0.5147\n",
          "Epoch 2/1000\n",
          "157/157 [==============================] - 0s 406us/step - loss: 0.6704 - acc: 0.5350 - val_loss: 0.6754 - val_acc: 0.5147\n",
          "Epoch 3/1000\n",
          "157/157 [==============================] - 0s 256us/step - loss: 0.6693 - acc: 0.5350 - val_loss: 0.6740 - val_acc: 0.5147\n",
          "Epoch 4/1000\n",
          "157/157 [==============================] - 0s 215us/step - loss: 0.6679 - acc: 0.5350 - val_loss: 0.6728 - val_acc: 0.5147\n",
          "Epoch 5/1000\n",
          "157/157 [==============================] - 0s 168us/step - loss: 0.6668 - acc: 0.5350 - val_loss: 0.6716 - val_acc: 0.5147\n",
          "Epoch 6/1000\n",
          "157/157 [==============================] - 0s 107us/step - loss: 0.6658 - acc: 0.5350 - val_loss: 0.6704 - val_acc: 0.5147\n",
          "Epoch 7/1000\n",
          "157/157 [==============================] - 0s 303us/step - loss: 0.6652 - acc: 0.5350 - val_loss: 0.6693 - val_acc: 0.5147\n",
          "Epoch 8/1000\n",
          "157/157 [==============================] - 0s 98us/step - loss: 0.6637 - acc: 0.5350 - val_loss: 0.6682 - val_acc: 0.5147\n",
          "Epoch 9/1000\n",
          "157/157 [==============================] - 0s 92us/step - loss: 0.6626 - acc: 0.5350 - val_loss: 0.6670 - val_acc: 0.5147\n",
          "Epoch 10/1000\n",
          "157/157 [==============================] - 0s 90us/step - loss: 0.6616 - acc: 0.5350 - val_loss: 0.6657 - val_acc: 0.5147\n",
          "Epoch 11/1000\n",
          "157/157 [==============================] - 0s 92us/step - loss: 0.6605 - acc: 0.5350 - val_loss: 0.6644 - val_acc: 0.5147\n",
          "Epoch 12/1000\n",
          "157/157 [==============================] - 0s 305us/step - loss: 0.6596 - acc: 0.5350 - val_loss: 0.6633 - val_acc: 0.5147\n",
          "Epoch 13/1000\n",
          "157/157 [==============================] - 0s 142us/step - loss: 0.6587 - acc: 0.5350 - val_loss: 0.6622 - val_acc: 0.5147\n",
          "Epoch 14/1000\n",
          "157/157 [==============================] - 0s 144us/step - loss: 0.6578 - acc: 0.5350 - val_loss: 0.6612 - val_acc: 0.5147\n",
          "Epoch 15/1000\n",
          "157/157 [==============================] - 0s 137us/step - loss: 0.6567 - acc: 0.5350 - val_loss: 0.6601 - val_acc: 0.5147\n",
          "Epoch 16/1000\n",
          "157/157 [==============================] - 0s 179us/step - loss: 0.6558 - acc: 0.5350 - val_loss: 0.6591 - val_acc: 0.5147\n",
          "Epoch 17/1000\n",
          "157/157 [==============================] - 0s 98us/step - loss: 0.6551 - acc: 0.5350 - val_loss: 0.6580 - val_acc: 0.5147\n",
          "Epoch 18/1000\n",
          "157/157 [==============================] - 0s 106us/step - loss: 0.6540 - acc: 0.5350 - val_loss: 0.6570 - val_acc: 0.5147\n",
          "Epoch 19/1000\n",
          "157/157 [==============================] - 0s 97us/step - loss: 0.6531 - acc: 0.5350 - val_loss: 0.6559 - val_acc: 0.5147\n",
          "Epoch 20/1000\n",
          "157/157 [==============================] - 0s 131us/step - loss: 0.6523 - acc: 0.5350 - val_loss: 0.6549 - val_acc: 0.5147\n",
          "Epoch 21/1000\n",
          "157/157 [==============================] - 0s 141us/step - loss: 0.6512 - acc: 0.5350 - val_loss: 0.6537 - val_acc: 0.5147\n",
          "Epoch 22/1000\n",
          "157/157 [==============================] - 0s 288us/step - loss: 0.6506 - acc: 0.5350 - val_loss: 0.6527 - val_acc: 0.5147\n",
          "Epoch 23/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.6496 - acc: 0.5414 - val_loss: 0.6517 - val_acc: 0.5147\n",
          "Epoch 24/1000\n",
          "157/157 [==============================] - 0s 257us/step - loss: 0.6486 - acc: 0.5414 - val_loss: 0.6506 - val_acc: 0.5147\n",
          "Epoch 25/1000\n",
          "157/157 [==============================] - 0s 95us/step - loss: 0.6477 - acc: 0.5478 - val_loss: 0.6495 - val_acc: 0.5147\n",
          "Epoch 26/1000\n",
          "157/157 [==============================] - 0s 112us/step - loss: 0.6466 - acc: 0.5414 - val_loss: 0.6483 - val_acc: 0.5147\n",
          "Epoch 27/1000\n",
          "157/157 [==============================] - 0s 168us/step - loss: 0.6458 - acc: 0.5541 - val_loss: 0.6472 - val_acc: 0.5147\n",
          "Epoch 28/1000\n",
          "157/157 [==============================] - 0s 257us/step - loss: 0.6447 - acc: 0.5541 - val_loss: 0.6461 - val_acc: 0.5147\n",
          "Epoch 29/1000\n",
          "157/157 [==============================] - 0s 134us/step - loss: 0.6437 - acc: 0.5541 - val_loss: 0.6449 - val_acc: 0.5147\n",
          "Epoch 30/1000\n",
          "157/157 [==============================] - 0s 111us/step - loss: 0.6427 - acc: 0.5669 - val_loss: 0.6438 - val_acc: 0.5147\n",
          "Epoch 31/1000\n",
          "157/157 [==============================] - 0s 153us/step - loss: 0.6417 - acc: 0.5669 - val_loss: 0.6426 - val_acc: 0.5147\n",
          "Epoch 32/1000\n",
          "157/157 [==============================] - 0s 103us/step - loss: 0.6407 - acc: 0.5669 - val_loss: 0.6414 - val_acc: 0.5147\n",
          "Epoch 33/1000\n",
          "157/157 [==============================] - 0s 269us/step - loss: 0.6394 - acc: 0.5732 - val_loss: 0.6401 - val_acc: 0.5294\n",
          "Epoch 34/1000\n",
          "157/157 [==============================] - 0s 119us/step - loss: 0.6384 - acc: 0.5732 - val_loss: 0.6387 - val_acc: 0.5294\n",
          "Epoch 35/1000\n",
          "157/157 [==============================] - 0s 92us/step - loss: 0.6371 - acc: 0.5732 - val_loss: 0.6373 - val_acc: 0.5294\n",
          "Epoch 36/1000\n",
          "157/157 [==============================] - 0s 298us/step - loss: 0.6361 - acc: 0.5796 - val_loss: 0.6360 - val_acc: 0.5294\n",
          "Epoch 37/1000\n",
          "157/157 [==============================] - 0s 193us/step - loss: 0.6349 - acc: 0.5860 - val_loss: 0.6347 - val_acc: 0.5441\n",
          "Epoch 38/1000\n",
          "157/157 [==============================] - 0s 122us/step - loss: 0.6336 - acc: 0.5860 - val_loss: 0.6333 - val_acc: 0.5441\n",
          "Epoch 39/1000\n",
          "157/157 [==============================] - 0s 194us/step - loss: 0.6323 - acc: 0.5860 - val_loss: 0.6318 - val_acc: 0.5441\n",
          "Epoch 40/1000\n",
          "157/157 [==============================] - 0s 321us/step - loss: 0.6310 - acc: 0.5860 - val_loss: 0.6302 - val_acc: 0.5441\n",
          "Epoch 41/1000\n",
          "157/157 [==============================] - 0s 151us/step - loss: 0.6297 - acc: 0.5924 - val_loss: 0.6286 - val_acc: 0.5441\n",
          "Epoch 42/1000\n",
          "157/157 [==============================] - 0s 229us/step - loss: 0.6285 - acc: 0.5924 - val_loss: 0.6273 - val_acc: 0.5441\n",
          "Epoch 43/1000\n",
          "157/157 [==============================] - 0s 201us/step - loss: 0.6271 - acc: 0.5924 - val_loss: 0.6258 - val_acc: 0.5441\n",
          "Epoch 44/1000\n",
          "157/157 [==============================] - 0s 129us/step - loss: 0.6260 - acc: 0.5924 - val_loss: 0.6243 - val_acc: 0.5441\n",
          "Epoch 45/1000\n",
          "157/157 [==============================] - 0s 149us/step - loss: 0.6245 - acc: 0.5987 - val_loss: 0.6228 - val_acc: 0.5588\n",
          "Epoch 46/1000\n",
          "157/157 [==============================] - 0s 113us/step - loss: 0.6234 - acc: 0.6051 - val_loss: 0.6213 - val_acc: 0.5588\n",
          "Epoch 47/1000\n",
          "157/157 [==============================] - 0s 537us/step - loss: 0.6218 - acc: 0.6178 - val_loss: 0.6197 - val_acc: 0.5588\n",
          "Epoch 48/1000\n",
          "157/157 [==============================] - 0s 117us/step - loss: 0.6205 - acc: 0.6178 - val_loss: 0.6181 - val_acc: 0.5588\n",
          "Epoch 49/1000\n",
          "157/157 [==============================] - 0s 146us/step - loss: 0.6191 - acc: 0.6178 - val_loss: 0.6164 - val_acc: 0.5735\n",
          "Epoch 50/1000\n",
          "157/157 [==============================] - 0s 200us/step - loss: 0.6176 - acc: 0.6178 - val_loss: 0.6146 - val_acc: 0.5882\n",
          "Epoch 51/1000\n",
          "157/157 [==============================] - 0s 286us/step - loss: 0.6165 - acc: 0.6178 - val_loss: 0.6130 - val_acc: 0.5882\n",
          "Epoch 52/1000\n",
          "157/157 [==============================] - 0s 254us/step - loss: 0.6152 - acc: 0.6242 - val_loss: 0.6116 - val_acc: 0.6029\n",
          "Epoch 53/1000\n",
          "157/157 [==============================] - 0s 156us/step - loss: 0.6136 - acc: 0.6242 - val_loss: 0.6100 - val_acc: 0.6029\n",
          "Epoch 54/1000\n",
          "157/157 [==============================] - 0s 202us/step - loss: 0.6127 - acc: 0.6242 - val_loss: 0.6085 - val_acc: 0.6029\n",
          "Epoch 55/1000\n",
          "157/157 [==============================] - 0s 108us/step - loss: 0.6114 - acc: 0.6242 - val_loss: 0.6070 - val_acc: 0.6029\n",
          "Epoch 56/1000\n",
          "157/157 [==============================] - 0s 157us/step - loss: 0.6098 - acc: 0.6242 - val_loss: 0.6053 - val_acc: 0.6029\n",
          "Epoch 57/1000\n",
          "157/157 [==============================] - 0s 118us/step - loss: 0.6085 - acc: 0.6242 - val_loss: 0.6036 - val_acc: 0.6029\n",
          "Epoch 58/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.6070 - acc: 0.6242 - val_loss: 0.6018 - val_acc: 0.6029\n",
          "Epoch 59/1000\n",
          "157/157 [==============================] - 0s 165us/step - loss: 0.6057 - acc: 0.6242 - val_loss: 0.6001 - val_acc: 0.6029\n",
          "Epoch 60/1000\n",
          "157/157 [==============================] - 0s 263us/step - loss: 0.6039 - acc: 0.6242 - val_loss: 0.5982 - val_acc: 0.6176\n",
          "Epoch 61/1000\n",
          "157/157 [==============================] - 0s 244us/step - loss: 0.6023 - acc: 0.6242 - val_loss: 0.5963 - val_acc: 0.6176\n",
          "Epoch 62/1000\n",
          "157/157 [==============================] - 0s 409us/step - loss: 0.6006 - acc: 0.6306 - val_loss: 0.5943 - val_acc: 0.6176\n",
          "Epoch 63/1000\n",
          "157/157 [==============================] - 0s 104us/step - loss: 0.5991 - acc: 0.6306 - val_loss: 0.5922 - val_acc: 0.6324\n",
          "Epoch 64/1000\n",
          "157/157 [==============================] - 0s 193us/step - loss: 0.5981 - acc: 0.6369 - val_loss: 0.5906 - val_acc: 0.6324\n",
          "Epoch 65/1000\n",
          "157/157 [==============================] - 0s 104us/step - loss: 0.5958 - acc: 0.6433 - val_loss: 0.5889 - val_acc: 0.6471\n",
          "Epoch 66/1000\n",
          "157/157 [==============================] - 0s 172us/step - loss: 0.5945 - acc: 0.6433 - val_loss: 0.5871 - val_acc: 0.6471\n",
          "Epoch 67/1000\n",
          "157/157 [==============================] - 0s 378us/step - loss: 0.5929 - acc: 0.6433 - val_loss: 0.5852 - val_acc: 0.6471\n",
          "Epoch 68/1000\n",
          "157/157 [==============================] - 0s 193us/step - loss: 0.5917 - acc: 0.6497 - val_loss: 0.5836 - val_acc: 0.6471\n",
          "Epoch 69/1000\n",
          "157/157 [==============================] - 0s 155us/step - loss: 0.5901 - acc: 0.6497 - val_loss: 0.5816 - val_acc: 0.6471\n",
          "Epoch 70/1000\n",
          "157/157 [==============================] - 0s 180us/step - loss: 0.5885 - acc: 0.6497 - val_loss: 0.5797 - val_acc: 0.6765\n",
          "Epoch 71/1000\n",
          "157/157 [==============================] - 0s 208us/step - loss: 0.5867 - acc: 0.6561 - val_loss: 0.5778 - val_acc: 0.6765\n",
          "Epoch 72/1000\n",
          "157/157 [==============================] - 0s 200us/step - loss: 0.5850 - acc: 0.6561 - val_loss: 0.5755 - val_acc: 0.6765\n",
          "Epoch 73/1000\n",
          "157/157 [==============================] - 0s 279us/step - loss: 0.5831 - acc: 0.6624 - val_loss: 0.5733 - val_acc: 0.6765\n",
          "Epoch 74/1000\n",
          "157/157 [==============================] - 0s 263us/step - loss: 0.5812 - acc: 0.6688 - val_loss: 0.5712 - val_acc: 0.6912\n",
          "Epoch 75/1000\n",
          "157/157 [==============================] - 0s 263us/step - loss: 0.5791 - acc: 0.6752 - val_loss: 0.5688 - val_acc: 0.7059\n",
          "Epoch 76/1000\n",
          "157/157 [==============================] - 0s 223us/step - loss: 0.5771 - acc: 0.6752 - val_loss: 0.5665 - val_acc: 0.7059\n",
          "Epoch 77/1000\n",
          "157/157 [==============================] - 0s 252us/step - loss: 0.5750 - acc: 0.6879 - val_loss: 0.5643 - val_acc: 0.7059\n",
          "Epoch 78/1000\n",
          "157/157 [==============================] - 0s 217us/step - loss: 0.5728 - acc: 0.6879 - val_loss: 0.5619 - val_acc: 0.7059\n",
          "Epoch 79/1000\n",
          "157/157 [==============================] - 0s 123us/step - loss: 0.5708 - acc: 0.6943 - val_loss: 0.5596 - val_acc: 0.7059\n",
          "Epoch 80/1000\n",
          "157/157 [==============================] - 0s 149us/step - loss: 0.5687 - acc: 0.7006 - val_loss: 0.5570 - val_acc: 0.7206\n",
          "Epoch 81/1000\n",
          "157/157 [==============================] - 0s 181us/step - loss: 0.5666 - acc: 0.7070 - val_loss: 0.5545 - val_acc: 0.7206\n",
          "Epoch 82/1000\n",
          "157/157 [==============================] - 0s 109us/step - loss: 0.5643 - acc: 0.7006 - val_loss: 0.5519 - val_acc: 0.7206\n",
          "Epoch 83/1000\n",
          "157/157 [==============================] - 0s 258us/step - loss: 0.5623 - acc: 0.7134 - val_loss: 0.5495 - val_acc: 0.7206\n",
          "Epoch 84/1000\n",
          "157/157 [==============================] - 0s 123us/step - loss: 0.5600 - acc: 0.7197 - val_loss: 0.5469 - val_acc: 0.7206\n",
          "Epoch 85/1000\n",
          "157/157 [==============================] - 0s 120us/step - loss: 0.5577 - acc: 0.7197 - val_loss: 0.5443 - val_acc: 0.7206\n",
          "Epoch 86/1000\n",
          "157/157 [==============================] - 0s 166us/step - loss: 0.5550 - acc: 0.7197 - val_loss: 0.5411 - val_acc: 0.7353\n",
          "Epoch 87/1000\n",
          "157/157 [==============================] - 0s 134us/step - loss: 0.5529 - acc: 0.7325 - val_loss: 0.5383 - val_acc: 0.7353\n",
          "Epoch 88/1000\n",
          "157/157 [==============================] - 0s 185us/step - loss: 0.5498 - acc: 0.7325 - val_loss: 0.5347 - val_acc: 0.7353\n",
          "Epoch 89/1000\n",
          "157/157 [==============================] - 0s 194us/step - loss: 0.5471 - acc: 0.7516 - val_loss: 0.5314 - val_acc: 0.7647\n",
          "Epoch 90/1000\n",
          "157/157 [==============================] - 0s 163us/step - loss: 0.5451 - acc: 0.7452 - val_loss: 0.5283 - val_acc: 0.7941\n",
          "Epoch 91/1000\n",
          "157/157 [==============================] - 0s 292us/step - loss: 0.5430 - acc: 0.7580 - val_loss: 0.5258 - val_acc: 0.8088\n",
          "Epoch 92/1000\n",
          "157/157 [==============================] - 0s 137us/step - loss: 0.5399 - acc: 0.7580 - val_loss: 0.5234 - val_acc: 0.8088\n",
          "Epoch 93/1000\n",
          "157/157 [==============================] - 0s 193us/step - loss: 0.5383 - acc: 0.7643 - val_loss: 0.5210 - val_acc: 0.8088\n",
          "Epoch 94/1000\n",
          "157/157 [==============================] - 0s 231us/step - loss: 0.5356 - acc: 0.7643 - val_loss: 0.5184 - val_acc: 0.8088\n",
          "Epoch 95/1000\n",
          "157/157 [==============================] - 0s 96us/step - loss: 0.5334 - acc: 0.7643 - val_loss: 0.5158 - val_acc: 0.8235\n",
          "Epoch 96/1000\n",
          "157/157 [==============================] - 0s 196us/step - loss: 0.5309 - acc: 0.7707 - val_loss: 0.5128 - val_acc: 0.8235\n",
          "Epoch 97/1000\n",
          "157/157 [==============================] - 0s 214us/step - loss: 0.5291 - acc: 0.7898 - val_loss: 0.5100 - val_acc: 0.8235\n",
          "Epoch 98/1000\n",
          "157/157 [==============================] - 0s 179us/step - loss: 0.5263 - acc: 0.7898 - val_loss: 0.5074 - val_acc: 0.8235\n",
          "Epoch 99/1000\n",
          "157/157 [==============================] - 0s 182us/step - loss: 0.5243 - acc: 0.7962 - val_loss: 0.5044 - val_acc: 0.8088\n",
          "Epoch 100/1000\n",
          "157/157 [==============================] - 0s 144us/step - loss: 0.5234 - acc: 0.7834 - val_loss: 0.5024 - val_acc: 0.8088\n",
          "Epoch 101/1000\n",
          "157/157 [==============================] - 0s 154us/step - loss: 0.5198 - acc: 0.8025 - val_loss: 0.5002 - val_acc: 0.8088\n",
          "Epoch 102/1000\n",
          "157/157 [==============================] - 0s 320us/step - loss: 0.5184 - acc: 0.7962 - val_loss: 0.4978 - val_acc: 0.8088\n",
          "Epoch 103/1000\n",
          "157/157 [==============================] - 0s 142us/step - loss: 0.5162 - acc: 0.8025 - val_loss: 0.4956 - val_acc: 0.8088\n",
          "Epoch 104/1000\n",
          "157/157 [==============================] - 0s 131us/step - loss: 0.5136 - acc: 0.8025 - val_loss: 0.4932 - val_acc: 0.8088\n",
          "Epoch 105/1000\n",
          "157/157 [==============================] - 0s 142us/step - loss: 0.5115 - acc: 0.7962 - val_loss: 0.4903 - val_acc: 0.8235\n",
          "Epoch 106/1000\n",
          "157/157 [==============================] - 0s 144us/step - loss: 0.5091 - acc: 0.8025 - val_loss: 0.4877 - val_acc: 0.8382\n",
          "Epoch 107/1000\n",
          "157/157 [==============================] - 0s 351us/step - loss: 0.5065 - acc: 0.8089 - val_loss: 0.4851 - val_acc: 0.8382\n",
          "Epoch 108/1000\n",
          "157/157 [==============================] - 0s 370us/step - loss: 0.5041 - acc: 0.8025 - val_loss: 0.4822 - val_acc: 0.8529\n",
          "Epoch 109/1000\n",
          "157/157 [==============================] - 0s 345us/step - loss: 0.5016 - acc: 0.8089 - val_loss: 0.4795 - val_acc: 0.8529\n",
          "Epoch 110/1000\n",
          "157/157 [==============================] - 0s 121us/step - loss: 0.4996 - acc: 0.8025 - val_loss: 0.4765 - val_acc: 0.8529\n",
          "Epoch 111/1000\n",
          "157/157 [==============================] - 0s 135us/step - loss: 0.4972 - acc: 0.8089 - val_loss: 0.4739 - val_acc: 0.8529\n",
          "Epoch 112/1000\n",
          "157/157 [==============================] - 0s 266us/step - loss: 0.4944 - acc: 0.8280 - val_loss: 0.4716 - val_acc: 0.8529\n",
          "Epoch 113/1000\n",
          "157/157 [==============================] - 0s 218us/step - loss: 0.4918 - acc: 0.8153 - val_loss: 0.4686 - val_acc: 0.8529\n",
          "Epoch 114/1000\n",
          "157/157 [==============================] - 0s 174us/step - loss: 0.4894 - acc: 0.8471 - val_loss: 0.4656 - val_acc: 0.8529\n",
          "Epoch 115/1000\n",
          "157/157 [==============================] - 0s 157us/step - loss: 0.4869 - acc: 0.8408 - val_loss: 0.4624 - val_acc: 0.8676\n",
          "Epoch 116/1000\n",
          "157/157 [==============================] - 0s 276us/step - loss: 0.4846 - acc: 0.8089 - val_loss: 0.4592 - val_acc: 0.8676\n",
          "Epoch 117/1000\n",
          "157/157 [==============================] - 0s 146us/step - loss: 0.4818 - acc: 0.8408 - val_loss: 0.4565 - val_acc: 0.8676\n",
          "Epoch 118/1000\n",
          "157/157 [==============================] - 0s 246us/step - loss: 0.4792 - acc: 0.8535 - val_loss: 0.4539 - val_acc: 0.8676\n",
          "Epoch 119/1000\n",
          "157/157 [==============================] - 0s 116us/step - loss: 0.4768 - acc: 0.8408 - val_loss: 0.4506 - val_acc: 0.8676\n"
         ]
        },
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "Epoch 120/1000\n",
          "157/157 [==============================] - 0s 229us/step - loss: 0.4756 - acc: 0.8471 - val_loss: 0.4482 - val_acc: 0.8676\n",
          "Epoch 121/1000\n",
          "157/157 [==============================] - 0s 204us/step - loss: 0.4726 - acc: 0.8599 - val_loss: 0.4463 - val_acc: 0.8676\n",
          "Epoch 122/1000\n",
          "157/157 [==============================] - 0s 162us/step - loss: 0.4708 - acc: 0.8471 - val_loss: 0.4438 - val_acc: 0.8676\n",
          "Epoch 123/1000\n",
          "157/157 [==============================] - 0s 245us/step - loss: 0.4682 - acc: 0.8599 - val_loss: 0.4415 - val_acc: 0.8676\n",
          "Epoch 124/1000\n",
          "157/157 [==============================] - 0s 200us/step - loss: 0.4658 - acc: 0.8535 - val_loss: 0.4390 - val_acc: 0.8676\n",
          "Epoch 125/1000\n",
          "157/157 [==============================] - 0s 178us/step - loss: 0.4635 - acc: 0.8599 - val_loss: 0.4361 - val_acc: 0.8824\n",
          "Epoch 126/1000\n",
          "157/157 [==============================] - 0s 156us/step - loss: 0.4614 - acc: 0.8535 - val_loss: 0.4332 - val_acc: 0.8824\n",
          "Epoch 127/1000\n",
          "157/157 [==============================] - 0s 327us/step - loss: 0.4584 - acc: 0.8726 - val_loss: 0.4307 - val_acc: 0.8824\n",
          "Epoch 128/1000\n",
          "157/157 [==============================] - 0s 181us/step - loss: 0.4571 - acc: 0.8535 - val_loss: 0.4279 - val_acc: 0.8824\n",
          "Epoch 129/1000\n",
          "157/157 [==============================] - 0s 268us/step - loss: 0.4550 - acc: 0.8726 - val_loss: 0.4258 - val_acc: 0.8824\n",
          "Epoch 130/1000\n",
          "157/157 [==============================] - 0s 176us/step - loss: 0.4517 - acc: 0.8599 - val_loss: 0.4230 - val_acc: 0.8824\n",
          "Epoch 131/1000\n",
          "157/157 [==============================] - 0s 281us/step - loss: 0.4497 - acc: 0.8726 - val_loss: 0.4204 - val_acc: 0.8824\n",
          "Epoch 132/1000\n",
          "157/157 [==============================] - 0s 149us/step - loss: 0.4476 - acc: 0.8662 - val_loss: 0.4178 - val_acc: 0.8824\n",
          "Epoch 133/1000\n",
          "157/157 [==============================] - 0s 177us/step - loss: 0.4456 - acc: 0.8726 - val_loss: 0.4153 - val_acc: 0.8824\n",
          "Epoch 134/1000\n",
          "157/157 [==============================] - 0s 137us/step - loss: 0.4433 - acc: 0.8790 - val_loss: 0.4131 - val_acc: 0.8824\n",
          "Epoch 135/1000\n",
          "157/157 [==============================] - 0s 121us/step - loss: 0.4409 - acc: 0.8854 - val_loss: 0.4108 - val_acc: 0.8824\n",
          "Epoch 136/1000\n",
          "157/157 [==============================] - 0s 167us/step - loss: 0.4381 - acc: 0.8726 - val_loss: 0.4082 - val_acc: 0.8824\n",
          "Epoch 137/1000\n",
          "157/157 [==============================] - 0s 272us/step - loss: 0.4357 - acc: 0.8854 - val_loss: 0.4053 - val_acc: 0.8824\n",
          "Epoch 138/1000\n",
          "157/157 [==============================] - 0s 286us/step - loss: 0.4338 - acc: 0.8726 - val_loss: 0.4025 - val_acc: 0.8824\n",
          "Epoch 139/1000\n",
          "157/157 [==============================] - 0s 164us/step - loss: 0.4308 - acc: 0.8726 - val_loss: 0.3994 - val_acc: 0.8824\n",
          "Epoch 140/1000\n",
          "157/157 [==============================] - 0s 160us/step - loss: 0.4286 - acc: 0.8790 - val_loss: 0.3968 - val_acc: 0.8824\n",
          "Epoch 141/1000\n",
          "157/157 [==============================] - 0s 196us/step - loss: 0.4266 - acc: 0.8726 - val_loss: 0.3944 - val_acc: 0.8824\n",
          "Epoch 142/1000\n",
          "157/157 [==============================] - 0s 285us/step - loss: 0.4241 - acc: 0.8790 - val_loss: 0.3924 - val_acc: 0.8824\n",
          "Epoch 143/1000\n",
          "157/157 [==============================] - 0s 136us/step - loss: 0.4224 - acc: 0.8726 - val_loss: 0.3902 - val_acc: 0.8824\n",
          "Epoch 144/1000\n",
          "157/157 [==============================] - 0s 243us/step - loss: 0.4204 - acc: 0.8726 - val_loss: 0.3882 - val_acc: 0.8824\n",
          "Epoch 145/1000\n",
          "157/157 [==============================] - 0s 155us/step - loss: 0.4177 - acc: 0.8726 - val_loss: 0.3860 - val_acc: 0.8824\n",
          "Epoch 146/1000\n",
          "157/157 [==============================] - 0s 210us/step - loss: 0.4167 - acc: 0.8854 - val_loss: 0.3840 - val_acc: 0.8824\n",
          "Epoch 147/1000\n",
          "157/157 [==============================] - 0s 155us/step - loss: 0.4133 - acc: 0.8726 - val_loss: 0.3815 - val_acc: 0.8824\n",
          "Epoch 148/1000\n",
          "157/157 [==============================] - 0s 150us/step - loss: 0.4112 - acc: 0.8790 - val_loss: 0.3791 - val_acc: 0.8824\n",
          "Epoch 149/1000\n",
          "157/157 [==============================] - 0s 274us/step - loss: 0.4098 - acc: 0.8854 - val_loss: 0.3771 - val_acc: 0.8824\n",
          "Epoch 150/1000\n",
          "157/157 [==============================] - 0s 162us/step - loss: 0.4075 - acc: 0.8726 - val_loss: 0.3743 - val_acc: 0.8824\n",
          "Epoch 151/1000\n",
          "157/157 [==============================] - 0s 141us/step - loss: 0.4047 - acc: 0.8854 - val_loss: 0.3721 - val_acc: 0.8824\n",
          "Epoch 152/1000\n",
          "157/157 [==============================] - 0s 282us/step - loss: 0.4033 - acc: 0.8726 - val_loss: 0.3694 - val_acc: 0.8824\n",
          "Epoch 153/1000\n",
          "157/157 [==============================] - 0s 167us/step - loss: 0.4013 - acc: 0.9108 - val_loss: 0.3680 - val_acc: 0.8824\n",
          "Epoch 154/1000\n",
          "157/157 [==============================] - 0s 313us/step - loss: 0.3985 - acc: 0.8854 - val_loss: 0.3655 - val_acc: 0.8824\n",
          "Epoch 155/1000\n",
          "157/157 [==============================] - 0s 150us/step - loss: 0.3970 - acc: 0.8981 - val_loss: 0.3635 - val_acc: 0.8824\n",
          "Epoch 156/1000\n",
          "157/157 [==============================] - 0s 156us/step - loss: 0.3944 - acc: 0.8981 - val_loss: 0.3613 - val_acc: 0.8824\n",
          "Epoch 157/1000\n",
          "157/157 [==============================] - 0s 124us/step - loss: 0.3928 - acc: 0.8981 - val_loss: 0.3594 - val_acc: 0.8824\n",
          "Epoch 158/1000\n",
          "157/157 [==============================] - 0s 163us/step - loss: 0.3903 - acc: 0.8917 - val_loss: 0.3567 - val_acc: 0.8824\n",
          "Epoch 159/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.3881 - acc: 0.8981 - val_loss: 0.3543 - val_acc: 0.8824\n",
          "Epoch 160/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.3871 - acc: 0.8917 - val_loss: 0.3523 - val_acc: 0.8824\n",
          "Epoch 161/1000\n",
          "157/157 [==============================] - 0s 123us/step - loss: 0.3840 - acc: 0.9108 - val_loss: 0.3503 - val_acc: 0.8824\n",
          "Epoch 162/1000\n",
          "157/157 [==============================] - 0s 133us/step - loss: 0.3833 - acc: 0.8854 - val_loss: 0.3481 - val_acc: 0.8971\n",
          "Epoch 163/1000\n",
          "157/157 [==============================] - 0s 222us/step - loss: 0.3810 - acc: 0.8917 - val_loss: 0.3463 - val_acc: 0.8971\n",
          "Epoch 164/1000\n",
          "157/157 [==============================] - 0s 210us/step - loss: 0.3785 - acc: 0.9236 - val_loss: 0.3449 - val_acc: 0.8824\n",
          "Epoch 165/1000\n",
          "157/157 [==============================] - 0s 278us/step - loss: 0.3774 - acc: 0.9045 - val_loss: 0.3431 - val_acc: 0.8971\n",
          "Epoch 166/1000\n",
          "157/157 [==============================] - 0s 163us/step - loss: 0.3751 - acc: 0.8917 - val_loss: 0.3406 - val_acc: 0.8971\n",
          "Epoch 167/1000\n",
          "157/157 [==============================] - 0s 183us/step - loss: 0.3735 - acc: 0.8981 - val_loss: 0.3388 - val_acc: 0.8971\n",
          "Epoch 168/1000\n",
          "157/157 [==============================] - 0s 144us/step - loss: 0.3711 - acc: 0.9172 - val_loss: 0.3368 - val_acc: 0.8971\n",
          "Epoch 169/1000\n",
          "157/157 [==============================] - 0s 304us/step - loss: 0.3701 - acc: 0.9108 - val_loss: 0.3346 - val_acc: 0.8971\n",
          "Epoch 170/1000\n",
          "157/157 [==============================] - 0s 162us/step - loss: 0.3674 - acc: 0.9236 - val_loss: 0.3330 - val_acc: 0.8971\n",
          "Epoch 171/1000\n",
          "157/157 [==============================] - 0s 287us/step - loss: 0.3666 - acc: 0.9172 - val_loss: 0.3312 - val_acc: 0.8971\n",
          "Epoch 172/1000\n",
          "157/157 [==============================] - 0s 189us/step - loss: 0.3638 - acc: 0.9108 - val_loss: 0.3291 - val_acc: 0.8971\n",
          "Epoch 173/1000\n",
          "157/157 [==============================] - 0s 154us/step - loss: 0.3617 - acc: 0.9236 - val_loss: 0.3275 - val_acc: 0.8971\n",
          "Epoch 174/1000\n",
          "157/157 [==============================] - 0s 136us/step - loss: 0.3595 - acc: 0.9236 - val_loss: 0.3257 - val_acc: 0.8971\n",
          "Epoch 175/1000\n",
          "157/157 [==============================] - 0s 154us/step - loss: 0.3579 - acc: 0.9236 - val_loss: 0.3240 - val_acc: 0.8971\n",
          "Epoch 176/1000\n",
          "157/157 [==============================] - 0s 129us/step - loss: 0.3565 - acc: 0.9172 - val_loss: 0.3219 - val_acc: 0.8971\n",
          "Epoch 177/1000\n",
          "157/157 [==============================] - 0s 191us/step - loss: 0.3540 - acc: 0.9236 - val_loss: 0.3205 - val_acc: 0.8971\n",
          "Epoch 178/1000\n",
          "157/157 [==============================] - 0s 374us/step - loss: 0.3529 - acc: 0.9108 - val_loss: 0.3181 - val_acc: 0.8971\n",
          "Epoch 179/1000\n",
          "157/157 [==============================] - 0s 307us/step - loss: 0.3500 - acc: 0.9236 - val_loss: 0.3156 - val_acc: 0.8971\n",
          "Epoch 180/1000\n",
          "157/157 [==============================] - 0s 319us/step - loss: 0.3485 - acc: 0.9236 - val_loss: 0.3136 - val_acc: 0.8971\n",
          "Epoch 181/1000\n",
          "157/157 [==============================] - 0s 166us/step - loss: 0.3467 - acc: 0.9236 - val_loss: 0.3116 - val_acc: 0.8971\n",
          "Epoch 182/1000\n",
          "157/157 [==============================] - 0s 186us/step - loss: 0.3450 - acc: 0.9236 - val_loss: 0.3103 - val_acc: 0.8971\n",
          "Epoch 183/1000\n",
          "157/157 [==============================] - 0s 282us/step - loss: 0.3439 - acc: 0.9172 - val_loss: 0.3084 - val_acc: 0.8971\n",
          "Epoch 184/1000\n",
          "157/157 [==============================] - 0s 287us/step - loss: 0.3413 - acc: 0.9172 - val_loss: 0.3064 - val_acc: 0.8971\n",
          "Epoch 185/1000\n",
          "157/157 [==============================] - 0s 153us/step - loss: 0.3405 - acc: 0.9108 - val_loss: 0.3047 - val_acc: 0.9118\n",
          "Epoch 186/1000\n",
          "157/157 [==============================] - 0s 238us/step - loss: 0.3376 - acc: 0.9236 - val_loss: 0.3028 - val_acc: 0.9118\n",
          "Epoch 187/1000\n",
          "157/157 [==============================] - 0s 291us/step - loss: 0.3358 - acc: 0.9299 - val_loss: 0.3014 - val_acc: 0.9118\n",
          "Epoch 188/1000\n",
          "157/157 [==============================] - 0s 191us/step - loss: 0.3347 - acc: 0.9236 - val_loss: 0.2989 - val_acc: 0.9118\n",
          "Epoch 189/1000\n",
          "157/157 [==============================] - 0s 231us/step - loss: 0.3334 - acc: 0.9299 - val_loss: 0.2972 - val_acc: 0.9118\n",
          "Epoch 190/1000\n",
          "157/157 [==============================] - 0s 208us/step - loss: 0.3302 - acc: 0.9299 - val_loss: 0.2961 - val_acc: 0.8971\n",
          "Epoch 191/1000\n",
          "157/157 [==============================] - 0s 213us/step - loss: 0.3284 - acc: 0.9299 - val_loss: 0.2943 - val_acc: 0.8971\n",
          "Epoch 192/1000\n",
          "157/157 [==============================] - 0s 184us/step - loss: 0.3265 - acc: 0.9299 - val_loss: 0.2917 - val_acc: 0.9118\n",
          "Epoch 193/1000\n",
          "157/157 [==============================] - 0s 369us/step - loss: 0.3259 - acc: 0.9299 - val_loss: 0.2908 - val_acc: 0.8971\n",
          "Epoch 194/1000\n",
          "157/157 [==============================] - 0s 218us/step - loss: 0.3226 - acc: 0.9299 - val_loss: 0.2889 - val_acc: 0.8971\n",
          "Epoch 195/1000\n",
          "157/157 [==============================] - 0s 203us/step - loss: 0.3237 - acc: 0.9236 - val_loss: 0.2873 - val_acc: 0.8971\n",
          "Epoch 196/1000\n",
          "157/157 [==============================] - 0s 207us/step - loss: 0.3194 - acc: 0.9236 - val_loss: 0.2857 - val_acc: 0.8971\n",
          "Epoch 197/1000\n",
          "157/157 [==============================] - 0s 291us/step - loss: 0.3173 - acc: 0.9236 - val_loss: 0.2830 - val_acc: 0.9118\n",
          "Epoch 198/1000\n",
          "157/157 [==============================] - 0s 235us/step - loss: 0.3165 - acc: 0.9299 - val_loss: 0.2819 - val_acc: 0.9118\n",
          "Epoch 199/1000\n",
          "157/157 [==============================] - 0s 160us/step - loss: 0.3166 - acc: 0.9236 - val_loss: 0.2805 - val_acc: 0.8971\n",
          "Epoch 200/1000\n",
          "157/157 [==============================] - 0s 308us/step - loss: 0.3128 - acc: 0.9236 - val_loss: 0.2790 - val_acc: 0.9118\n",
          "Epoch 201/1000\n",
          "157/157 [==============================] - 0s 149us/step - loss: 0.3109 - acc: 0.9299 - val_loss: 0.2772 - val_acc: 0.9118\n",
          "Epoch 202/1000\n",
          "157/157 [==============================] - 0s 189us/step - loss: 0.3092 - acc: 0.9236 - val_loss: 0.2755 - val_acc: 0.9118\n",
          "Epoch 203/1000\n",
          "157/157 [==============================] - 0s 230us/step - loss: 0.3076 - acc: 0.9236 - val_loss: 0.2736 - val_acc: 0.9118\n",
          "Epoch 204/1000\n",
          "157/157 [==============================] - 0s 123us/step - loss: 0.3056 - acc: 0.9236 - val_loss: 0.2724 - val_acc: 0.9118\n",
          "Epoch 205/1000\n",
          "157/157 [==============================] - 0s 118us/step - loss: 0.3046 - acc: 0.9236 - val_loss: 0.2703 - val_acc: 0.9118\n",
          "Epoch 206/1000\n",
          "157/157 [==============================] - 0s 319us/step - loss: 0.3018 - acc: 0.9299 - val_loss: 0.2682 - val_acc: 0.9118\n",
          "Epoch 207/1000\n",
          "157/157 [==============================] - 0s 156us/step - loss: 0.2998 - acc: 0.9427 - val_loss: 0.2670 - val_acc: 0.9118\n",
          "Epoch 208/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.2988 - acc: 0.9299 - val_loss: 0.2651 - val_acc: 0.9118\n",
          "Epoch 209/1000\n",
          "157/157 [==============================] - 0s 188us/step - loss: 0.2970 - acc: 0.9299 - val_loss: 0.2626 - val_acc: 0.9118\n",
          "Epoch 210/1000\n",
          "157/157 [==============================] - 0s 141us/step - loss: 0.2945 - acc: 0.9427 - val_loss: 0.2626 - val_acc: 0.8971\n",
          "Epoch 211/1000\n",
          "157/157 [==============================] - 0s 152us/step - loss: 0.2932 - acc: 0.9299 - val_loss: 0.2599 - val_acc: 0.9118\n",
          "Epoch 212/1000\n",
          "157/157 [==============================] - 0s 317us/step - loss: 0.2919 - acc: 0.9427 - val_loss: 0.2590 - val_acc: 0.8971\n",
          "Epoch 213/1000\n",
          "157/157 [==============================] - 0s 241us/step - loss: 0.2898 - acc: 0.9236 - val_loss: 0.2560 - val_acc: 0.9118\n",
          "Epoch 214/1000\n",
          "157/157 [==============================] - 0s 396us/step - loss: 0.2892 - acc: 0.9427 - val_loss: 0.2547 - val_acc: 0.9118\n",
          "Epoch 215/1000\n",
          "157/157 [==============================] - 0s 317us/step - loss: 0.2863 - acc: 0.9427 - val_loss: 0.2529 - val_acc: 0.9118\n",
          "Epoch 216/1000\n",
          "157/157 [==============================] - 0s 254us/step - loss: 0.2870 - acc: 0.9363 - val_loss: 0.2518 - val_acc: 0.9118\n",
          "Epoch 217/1000\n",
          "157/157 [==============================] - 0s 255us/step - loss: 0.2839 - acc: 0.9363 - val_loss: 0.2511 - val_acc: 0.9118\n",
          "Epoch 218/1000\n",
          "157/157 [==============================] - 0s 144us/step - loss: 0.2816 - acc: 0.9363 - val_loss: 0.2490 - val_acc: 0.9118\n",
          "Epoch 219/1000\n",
          "157/157 [==============================] - 0s 228us/step - loss: 0.2807 - acc: 0.9427 - val_loss: 0.2484 - val_acc: 0.9118\n",
          "Epoch 220/1000\n",
          "157/157 [==============================] - 0s 140us/step - loss: 0.2789 - acc: 0.9427 - val_loss: 0.2471 - val_acc: 0.9118\n",
          "Epoch 221/1000\n",
          "157/157 [==============================] - 0s 267us/step - loss: 0.2770 - acc: 0.9363 - val_loss: 0.2438 - val_acc: 0.9118\n",
          "Epoch 222/1000\n",
          "157/157 [==============================] - 0s 251us/step - loss: 0.2760 - acc: 0.9427 - val_loss: 0.2423 - val_acc: 0.9118\n",
          "Epoch 223/1000\n",
          "157/157 [==============================] - 0s 298us/step - loss: 0.2745 - acc: 0.9299 - val_loss: 0.2407 - val_acc: 0.9118\n",
          "Epoch 224/1000\n",
          "157/157 [==============================] - 0s 218us/step - loss: 0.2726 - acc: 0.9490 - val_loss: 0.2411 - val_acc: 0.9118\n",
          "Epoch 225/1000\n",
          "157/157 [==============================] - 0s 293us/step - loss: 0.2707 - acc: 0.9363 - val_loss: 0.2380 - val_acc: 0.9118\n",
          "Epoch 226/1000\n",
          "157/157 [==============================] - 0s 157us/step - loss: 0.2703 - acc: 0.9427 - val_loss: 0.2386 - val_acc: 0.9118\n",
          "Epoch 227/1000\n",
          "157/157 [==============================] - 0s 213us/step - loss: 0.2681 - acc: 0.9490 - val_loss: 0.2374 - val_acc: 0.9118\n",
          "Epoch 228/1000\n",
          "157/157 [==============================] - 0s 149us/step - loss: 0.2680 - acc: 0.9363 - val_loss: 0.2365 - val_acc: 0.9118\n",
          "Epoch 229/1000\n",
          "157/157 [==============================] - 0s 156us/step - loss: 0.2668 - acc: 0.9236 - val_loss: 0.2342 - val_acc: 0.9118\n",
          "Epoch 230/1000\n",
          "157/157 [==============================] - 0s 213us/step - loss: 0.2652 - acc: 0.9363 - val_loss: 0.2324 - val_acc: 0.9118\n",
          "Epoch 231/1000\n",
          "157/157 [==============================] - 0s 170us/step - loss: 0.2634 - acc: 0.9490 - val_loss: 0.2320 - val_acc: 0.9118\n",
          "Epoch 232/1000\n",
          "157/157 [==============================] - 0s 258us/step - loss: 0.2624 - acc: 0.9427 - val_loss: 0.2310 - val_acc: 0.9118\n",
          "Epoch 233/1000\n",
          "157/157 [==============================] - 0s 245us/step - loss: 0.2627 - acc: 0.9427 - val_loss: 0.2299 - val_acc: 0.9118\n",
          "Epoch 234/1000\n",
          "157/157 [==============================] - 0s 396us/step - loss: 0.2597 - acc: 0.9490 - val_loss: 0.2293 - val_acc: 0.9118\n",
          "Epoch 235/1000\n",
          "157/157 [==============================] - 0s 192us/step - loss: 0.2584 - acc: 0.9490 - val_loss: 0.2292 - val_acc: 0.9118\n",
          "Epoch 236/1000\n",
          "157/157 [==============================] - 0s 294us/step - loss: 0.2579 - acc: 0.9427 - val_loss: 0.2271 - val_acc: 0.9118\n",
          "Epoch 237/1000\n",
          "157/157 [==============================] - 0s 200us/step - loss: 0.2564 - acc: 0.9427 - val_loss: 0.2262 - val_acc: 0.9118\n",
          "Epoch 238/1000\n"
         ]
        },
        {
         "name": "stdout",
         "output_type": "stream",
         "text": [
          "157/157 [==============================] - 0s 251us/step - loss: 0.2542 - acc: 0.9490 - val_loss: 0.2261 - val_acc: 0.9118\n",
          "Epoch 239/1000\n",
          "157/157 [==============================] - 0s 183us/step - loss: 0.2552 - acc: 0.9363 - val_loss: 0.2241 - val_acc: 0.9118\n",
          "Epoch 240/1000\n",
          "157/157 [==============================] - 0s 281us/step - loss: 0.2531 - acc: 0.9490 - val_loss: 0.2243 - val_acc: 0.9118\n",
          "Epoch 241/1000\n",
          "157/157 [==============================] - 0s 158us/step - loss: 0.2508 - acc: 0.9490 - val_loss: 0.2222 - val_acc: 0.9118\n",
          "Epoch 242/1000\n",
          "157/157 [==============================] - 0s 171us/step - loss: 0.2530 - acc: 0.9427 - val_loss: 0.2201 - val_acc: 0.9118\n",
          "Epoch 243/1000\n",
          "157/157 [==============================] - 0s 185us/step - loss: 0.2502 - acc: 0.9554 - val_loss: 0.2198 - val_acc: 0.9118\n",
          "Epoch 244/1000\n",
          "157/157 [==============================] - 0s 125us/step - loss: 0.2478 - acc: 0.9490 - val_loss: 0.2190 - val_acc: 0.9118\n",
          "Epoch 245/1000\n",
          "157/157 [==============================] - 0s 247us/step - loss: 0.2477 - acc: 0.9490 - val_loss: 0.2185 - val_acc: 0.9118\n",
          "Epoch 246/1000\n",
          "157/157 [==============================] - 0s 164us/step - loss: 0.2458 - acc: 0.9490 - val_loss: 0.2167 - val_acc: 0.9118\n",
          "Epoch 247/1000\n",
          "157/157 [==============================] - 0s 188us/step - loss: 0.2445 - acc: 0.9490 - val_loss: 0.2152 - val_acc: 0.9118\n",
          "Epoch 248/1000\n",
          "157/157 [==============================] - 0s 215us/step - loss: 0.2437 - acc: 0.9299 - val_loss: 0.2130 - val_acc: 0.9118\n",
          "Epoch 249/1000\n",
          "157/157 [==============================] - 0s 228us/step - loss: 0.2420 - acc: 0.9554 - val_loss: 0.2124 - val_acc: 0.9118\n",
          "Epoch 250/1000\n",
          "157/157 [==============================] - 0s 305us/step - loss: 0.2404 - acc: 0.9490 - val_loss: 0.2109 - val_acc: 0.9118\n",
          "Epoch 251/1000\n",
          "157/157 [==============================] - 0s 237us/step - loss: 0.2428 - acc: 0.9618 - val_loss: 0.2121 - val_acc: 0.9118\n",
          "Epoch 252/1000\n",
          "157/157 [==============================] - 0s 129us/step - loss: 0.2383 - acc: 0.9490 - val_loss: 0.2106 - val_acc: 0.9118\n",
          "Epoch 253/1000\n",
          "157/157 [==============================] - 0s 235us/step - loss: 0.2370 - acc: 0.9618 - val_loss: 0.2111 - val_acc: 0.9118\n",
          "Epoch 254/1000\n",
          "157/157 [==============================] - 0s 196us/step - loss: 0.2370 - acc: 0.9490 - val_loss: 0.2096 - val_acc: 0.9118\n",
          "Epoch 255/1000\n",
          "157/157 [==============================] - 0s 137us/step - loss: 0.2390 - acc: 0.9172 - val_loss: 0.2082 - val_acc: 0.9118\n",
          "Epoch 256/1000\n",
          "157/157 [==============================] - 0s 151us/step - loss: 0.2338 - acc: 0.9490 - val_loss: 0.2063 - val_acc: 0.9118\n",
          "Epoch 257/1000\n",
          "157/157 [==============================] - 0s 153us/step - loss: 0.2332 - acc: 0.9554 - val_loss: 0.2063 - val_acc: 0.9118\n",
          "Epoch 258/1000\n",
          "157/157 [==============================] - 0s 135us/step - loss: 0.2319 - acc: 0.9490 - val_loss: 0.2060 - val_acc: 0.9118\n",
          "Epoch 259/1000\n",
          "157/157 [==============================] - 0s 214us/step - loss: 0.2329 - acc: 0.9299 - val_loss: 0.2034 - val_acc: 0.9118\n",
          "Epoch 260/1000\n",
          "157/157 [==============================] - 0s 194us/step - loss: 0.2304 - acc: 0.9490 - val_loss: 0.2044 - val_acc: 0.9118\n",
          "Epoch 261/1000\n",
          "157/157 [==============================] - 0s 151us/step - loss: 0.2307 - acc: 0.9554 - val_loss: 0.2025 - val_acc: 0.9118\n",
          "Epoch 262/1000\n",
          "157/157 [==============================] - 0s 205us/step - loss: 0.2277 - acc: 0.9554 - val_loss: 0.2018 - val_acc: 0.9118\n",
          "Epoch 263/1000\n",
          "157/157 [==============================] - 0s 118us/step - loss: 0.2265 - acc: 0.9554 - val_loss: 0.2022 - val_acc: 0.9118\n",
          "Epoch 264/1000\n",
          "157/157 [==============================] - 0s 112us/step - loss: 0.2261 - acc: 0.9490 - val_loss: 0.2007 - val_acc: 0.9118\n",
          "Epoch 265/1000\n",
          "157/157 [==============================] - 0s 183us/step - loss: 0.2256 - acc: 0.9554 - val_loss: 0.1985 - val_acc: 0.9118\n",
          "Epoch 266/1000\n",
          "157/157 [==============================] - 0s 125us/step - loss: 0.2233 - acc: 0.9618 - val_loss: 0.1982 - val_acc: 0.9118\n",
          "Epoch 267/1000\n",
          "157/157 [==============================] - 0s 202us/step - loss: 0.2220 - acc: 0.9554 - val_loss: 0.1957 - val_acc: 0.9118\n",
          "Epoch 268/1000\n",
          "157/157 [==============================] - 0s 125us/step - loss: 0.2226 - acc: 0.9554 - val_loss: 0.1957 - val_acc: 0.9118\n",
          "Epoch 269/1000\n",
          "157/157 [==============================] - 0s 131us/step - loss: 0.2213 - acc: 0.9554 - val_loss: 0.1935 - val_acc: 0.9118\n",
          "Epoch 270/1000\n",
          "157/157 [==============================] - 0s 140us/step - loss: 0.2214 - acc: 0.9554 - val_loss: 0.1968 - val_acc: 0.9118\n",
          "Epoch 271/1000\n",
          "157/157 [==============================] - 0s 165us/step - loss: 0.2187 - acc: 0.9554 - val_loss: 0.1965 - val_acc: 0.9118\n",
          "Epoch 272/1000\n",
          "157/157 [==============================] - 0s 222us/step - loss: 0.2174 - acc: 0.9490 - val_loss: 0.1925 - val_acc: 0.9118\n",
          "Epoch 273/1000\n",
          "157/157 [==============================] - 0s 130us/step - loss: 0.2188 - acc: 0.9618 - val_loss: 0.1925 - val_acc: 0.9118\n",
          "Epoch 274/1000\n",
          "157/157 [==============================] - 0s 112us/step - loss: 0.2157 - acc: 0.9554 - val_loss: 0.1923 - val_acc: 0.9118\n",
          "Epoch 275/1000\n",
          "157/157 [==============================] - 0s 120us/step - loss: 0.2170 - acc: 0.9490 - val_loss: 0.1908 - val_acc: 0.9118\n",
          "Epoch 276/1000\n",
          "157/157 [==============================] - 0s 201us/step - loss: 0.2149 - acc: 0.9618 - val_loss: 0.1918 - val_acc: 0.9118\n",
          "Epoch 277/1000\n",
          "157/157 [==============================] - 0s 128us/step - loss: 0.2140 - acc: 0.9618 - val_loss: 0.1924 - val_acc: 0.9118\n",
          "Epoch 278/1000\n",
          "157/157 [==============================] - 0s 121us/step - loss: 0.2128 - acc: 0.9554 - val_loss: 0.1899 - val_acc: 0.9118\n",
          "Epoch 279/1000\n",
          "157/157 [==============================] - 0s 205us/step - loss: 0.2123 - acc: 0.9618 - val_loss: 0.1881 - val_acc: 0.9118\n",
          "Epoch 280/1000\n",
          "157/157 [==============================] - 0s 146us/step - loss: 0.2115 - acc: 0.9554 - val_loss: 0.1889 - val_acc: 0.9118\n",
          "Epoch 281/1000\n",
          "157/157 [==============================] - 0s 117us/step - loss: 0.2115 - acc: 0.9490 - val_loss: 0.1863 - val_acc: 0.9118\n",
          "Epoch 282/1000\n",
          "157/157 [==============================] - 0s 235us/step - loss: 0.2100 - acc: 0.9554 - val_loss: 0.1854 - val_acc: 0.9118\n",
          "Epoch 283/1000\n",
          "157/157 [==============================] - 0s 127us/step - loss: 0.2099 - acc: 0.9618 - val_loss: 0.1872 - val_acc: 0.9118\n",
          "Epoch 284/1000\n",
          "157/157 [==============================] - 0s 108us/step - loss: 0.2085 - acc: 0.9618 - val_loss: 0.1867 - val_acc: 0.9118\n",
          "Epoch 285/1000\n",
          "157/157 [==============================] - 0s 216us/step - loss: 0.2070 - acc: 0.9618 - val_loss: 0.1862 - val_acc: 0.9118\n",
          "Epoch 286/1000\n",
          "157/157 [==============================] - 0s 142us/step - loss: 0.2061 - acc: 0.9618 - val_loss: 0.1858 - val_acc: 0.9118\n",
          "Epoch 287/1000\n",
          "157/157 [==============================] - 0s 115us/step - loss: 0.2074 - acc: 0.9554 - val_loss: 0.1866 - val_acc: 0.9118\n",
          "Epoch 288/1000\n",
          "157/157 [==============================] - 0s 134us/step - loss: 0.2052 - acc: 0.9554 - val_loss: 0.1864 - val_acc: 0.9118\n",
          "Epoch 289/1000\n",
          "157/157 [==============================] - 0s 155us/step - loss: 0.2045 - acc: 0.9554 - val_loss: 0.1839 - val_acc: 0.9118\n",
          "Epoch 290/1000\n",
          "157/157 [==============================] - 0s 246us/step - loss: 0.2035 - acc: 0.9618 - val_loss: 0.1817 - val_acc: 0.9118\n",
          "Epoch 291/1000\n",
          "157/157 [==============================] - 0s 127us/step - loss: 0.2043 - acc: 0.9618 - val_loss: 0.1828 - val_acc: 0.9118\n",
          "Epoch 292/1000\n",
          "157/157 [==============================] - 0s 137us/step - loss: 0.2014 - acc: 0.9618 - val_loss: 0.1832 - val_acc: 0.9118\n",
          "Epoch 293/1000\n",
          "157/157 [==============================] - 0s 165us/step - loss: 0.2014 - acc: 0.9554 - val_loss: 0.1829 - val_acc: 0.9118\n",
          "Epoch 294/1000\n",
          "157/157 [==============================] - 0s 198us/step - loss: 0.2003 - acc: 0.9618 - val_loss: 0.1822 - val_acc: 0.9118\n",
          "Epoch 295/1000\n",
          "157/157 [==============================] - 0s 155us/step - loss: 0.2019 - acc: 0.9618 - val_loss: 0.1799 - val_acc: 0.9118\n",
          "Epoch 296/1000\n",
          "157/157 [==============================] - 0s 165us/step - loss: 0.1995 - acc: 0.9554 - val_loss: 0.1778 - val_acc: 0.9118\n",
    
    chadhat's avatar
    chadhat committed
          "Epoch 297/1000\n",
    
          "157/157 [==============================] - 0s 165us/step - loss: 0.1990 - acc: 0.9618 - val_loss: 0.1810 - val_acc: 0.9118\n",
          "Epoch 298/1000\n",
          "157/157 [==============================] - 0s 189us/step - loss: 0.1975 - acc: 0.9618 - val_loss: 0.1822 - val_acc: 0.9118\n",
          "Epoch 299/1000\n",
          "157/157 [==============================] - 0s 169us/step - loss: 0.1975 - acc: 0.9490 - val_loss: 0.1800 - val_acc: 0.9118\n",
          "Epoch 300/1000\n",
          "157/157 [==============================] - 0s 270us/step - loss: 0.1964 - acc: 0.9618 - val_loss: 0.1784 - val_acc: 0.9118\n",
          "Epoch 301/1000\n",
          "157/157 [==============================] - 0s 249us/step - loss: 0.1957 - acc: 0.9618 - val_loss: 0.1755 - val_acc: 0.9118\n",
          "Epoch 302/1000\n",
          "157/157 [==============================] - 0s 368us/step - loss: 0.1977 - acc: 0.9618 - val_loss: 0.1741 - val_acc: 0.9118\n",
          "Epoch 303/1000\n",
          "157/157 [==============================] - 0s 214us/step - loss: 0.1941 - acc: 0.9554 - val_loss: 0.1766 - val_acc: 0.9118\n",
          "Epoch 304/1000\n",
          "157/157 [==============================] - 0s 283us/step - loss: 0.1930 - acc: 0.9618 - val_loss: 0.1742 - val_acc: 0.9118\n",
          "Epoch 305/1000\n",
          "157/157 [==============================] - 0s 299us/step - loss: 0.1932 - acc: 0.9618 - val_loss: 0.1752 - val_acc: 0.9118\n",
          "Epoch 306/1000\n",
          "157/157 [==============================] - 0s 284us/step - loss: 0.1930 - acc: 0.9618 - val_loss: 0.1766 - val_acc: 0.9118\n",
          "Epoch 307/1000\n",
          "157/157 [==============================] - 0s 217us/step - loss: 0.1914 - acc: 0.9618 - val_loss: 0.1746 - val_acc: 0.9118\n",
          "Epoch 308/1000\n",
          "157/157 [==============================] - 0s 303us/step - loss: 0.1918 - acc: 0.9490 - val_loss: 0.1736 - val_acc: 0.9118\n",
          "Epoch 309/1000\n",
          "157/157 [==============================] - 0s 561us/step - loss: 0.1892 - acc: 0.9618 - val_loss: 0.1723 - val_acc: 0.9118\n",
          "Epoch 310/1000\n",
          "157/157 [==============================] - 0s 379us/step - loss: 0.1897 - acc: 0.9618 - val_loss: 0.1725 - val_acc: 0.9118\n",
          "Epoch 311/1000\n",
          "157/157 [==============================] - 0s 219us/step - loss: 0.1880 - acc: 0.9618 - val_loss: 0.1721 - val_acc: 0.9118\n",
          "Epoch 312/1000\n",
          "157/157 [==============================] - 0s 181us/step - loss: 0.1872 - acc: 0.9618 - val_loss: 0.1693 - val_acc: 0.9118\n",
          "Epoch 313/1000\n",
          "157/157 [==============================] - 0s 206us/step - loss: 0.1880 - acc: 0.9554 - val_loss: 0.1679 - val_acc: 0.9118\n",
          "Epoch 314/1000\n",
          "157/157 [==============================] - 0s 168us/step - loss: 0.1857 - acc: 0.9618 - val_loss: 0.1690 - val_acc: 0.9118\n",
          "Epoch 315/1000\n",
          "157/157 [==============================] - 0s 579us/step - loss: 0.1847 - acc: 0.9554 - val_loss: 0.1694 - val_acc: 0.9118\n",
          "Epoch 316/1000\n",
          "157/157 [==============================] - 0s 199us/step - loss: 0.1843 - acc: 0.9618 - val_loss: 0.1727 - val_acc: 0.9118\n",
          "Epoch 317/1000\n",
          "157/157 [==============================] - 0s 244us/step - loss: 0.1853 - acc: 0.9554 - val_loss: 0.1714 - val_acc: 0.9118\n",
          "Epoch 318/1000\n",
          "157/157 [==============================] - 0s 228us/step - loss: 0.1843 - acc: 0.9618 - val_loss: 0.1680 - val_acc: 0.9118\n",
          "Epoch 319/1000\n",
          "157/157 [==============================] - 0s 249us/step - loss: 0.1815 - acc: 0.9554 - val_loss: 0.1686 - val_acc: 0.9118\n",
          "Epoch 320/1000\n",
          "157/157 [==============================] - 0s 171us/step - loss: 0.1828 - acc: 0.9618 - val_loss: 0.1669 - val_acc: 0.9118\n",
          "Epoch 321/1000\n",
          "157/157 [==============================] - 0s 143us/step - loss: 0.1807 - acc: 0.9618 - val_loss: 0.1646 - val_acc: 0.9118\n",