diff --git a/neural_nets_intro.ipynb b/neural_nets_intro.ipynb
index cc102d09e1158292da4f306d0169bcb42d2bcead..a3425dd8a44cc6ae6295564c4939553b22e1ab9f 100644
--- a/neural_nets_intro.ipynb
+++ b/neural_nets_intro.ipynb
@@ -137,9 +137,6 @@
     "\n",
     "## History of Neural networks\n",
     "\n",
-    "<div class=\"alert alert-block alert-danger\"><p>\n",
-    " <strong>TODO</strong>: Make it more complete and format properly\n",
-    "</p></div>\n",
     "\n",
     "1943 - Threshold Logic\n",
     "\n",
@@ -147,17 +144,19 @@
     "\n",
     "1958 - Perceptron\n",
     "\n",
-    "1975 - Backpropagation\n",
-    "\n",
     "1980s - Neocognitron\n",
     "\n",
     "1982 - Hopfield Network\n",
     "\n",
-    "1986 - Convolutional Neural Networks\n",
+    "1989 - CNN kernels trained via backpropagation\n",
     "\n",
     "1997 - Long-short term memory (LSTM) model\n",
     "\n",
-    "2014 - Gated Recurrent Units, Generative Adversarial Networks(Check)?"
+    "1998 - LeNet-5\n",
+    "\n",
+    "2014 - Gated Recurrent Units (GRU), Generative Adversarial Networks (GAN)\n",
+    "\n",
+    "2015 - ResNet"
    ]
   },
   {
@@ -2633,7 +2632,7 @@
    "source": [
     "<div class=\"alert alert-block alert-warning\">\n",
     "<p><i class=\"fa fa-warning\"></i> \n",
-    "Another way to add regularization and to make the network more robust we can add something called **Dropout**. When we add dropout to a layer a specified percentage of units in that layer are switched off. \n",
+    "Another way to add regularization and to make the network more robust is to apply **Dropout**. When we add dropout to a layer, a specified percentage of that layer's units is randomly switched off during training. \n",
     " \n",
     "Both L2 regularization and Dropout make the model simpler and thus reducing overfitting.\n",
     "</p>\n",
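The reworded Dropout note in the last hunk describes the mechanism only in prose. Below is a minimal sketch of how L2 regularization and a Dropout layer are commonly combined in a Keras `Sequential` model; the notebook's actual framework, layer sizes, dropout rate, and loss are not shown in this diff, so every name and value here is an illustrative assumption rather than the notebook's code.

```python
# Illustrative sketch only: assumes TensorFlow/Keras, a 20-feature input, and a
# binary-classification head; none of these appear in the diff above.
from tensorflow import keras
from tensorflow.keras import layers, regularizers

model = keras.Sequential([
    keras.Input(shape=(20,)),
    # L2 regularization: adds a penalty on large weights in this Dense layer.
    layers.Dense(64, activation="relu",
                 kernel_regularizer=regularizers.l2(1e-4)),
    # Dropout: randomly switches off 30% of the previous layer's units on each
    # training step; it is automatically disabled at inference time.
    layers.Dropout(0.3),
    layers.Dense(1, activation="sigmoid"),
])

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
```

Both techniques constrain the effective capacity of the network, which is why the cell can describe them together as making the model simpler and less prone to overfitting.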