"git@sissource.ethz.ch:sispub/openbis.git" did not exist on "a83dd65c40cb928382b8319b8a6d0d65a44d655b"
Newer
Older
" xmin, ymin = features_2d.min(axis=0)\n",
" xmax, ymax = features_2d.max(axis=0)\n",
" x = np.linspace(xmin, xmax, N)\n",
" y = np.linspace(ymin, ymax, N)\n",
" points = np.array(np.meshgrid(x, y)).T.reshape(-1, 2)\n",
" if preproc is not None:\n",
" points_for_classifier = preproc.fit_transform(points)\n",
" features_2d = preproc.fit_transform(features_2d)\n",
" else:\n",
" points_for_classifier = points\n",
"\n",
" classifier.fit(features_2d, labels, verbose=0)\n",
" predicted = classifier.predict(features_2d)\n",
" \n",
" if name == \"Neural Net\":\n",
" predicted = list_flatten(predicted)\n",
" \n",
" \n",
" if preproc is not None:\n",
" name += \" (w/ preprocessing)\"\n",
" print(name + \":\\t\", sum(predicted == labels), \"/\", len(labels), \"correct\")\n",
" \n",
" if name == \"Neural Net\":\n",
" classes = np.array(list_flatten(classifier.predict(points_for_classifier)), dtype=bool)\n",
" else:\n",
" classes = np.array(classifier.predict(points_for_classifier), dtype=bool)\n",
" plt.plot(\n",
" points[~classes][:, 0],\n",
" points[~classes][:, 1],\n",
" \"o\",\n",
" color=\"steelblue\",\n",
" markersize=1,\n",
" alpha=0.01,\n",
" )\n",
" plt.plot(\n",
" points[classes][:, 0],\n",
" points[classes][:, 1],\n",
" \"o\",\n",
" color=\"chocolate\",\n",
" markersize=1,\n",
" alpha=0.04,\n",
" )"
]
},
{
"cell_type": "code",
"_, ax = plt.subplots(figsize=(6, 6))\n",
"train_and_plot_decision_surface(\"Neural Net\", model_scikit, features, labels, plt=ax)\n",
"plot_points(plt=ax)"
"source": [
"# Applying K-fold cross-validation\n",
"# Here we pass the whole dataset, i.e. features and labels, instead of splitting it.\n",
"num_folds = 5\n",
"cross_validation = cross_val_score(\n",
" model_scikit, features, labels, cv=num_folds, verbose=0)\n",
"\n",
"print(\"The acuracy on the \", num_folds, \" validation folds:\", cross_validation)\n",
"print(\"The Average acuracy on the \", num_folds, \" validation folds:\", np.mean(cross_validation))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### NOTE: The above code took quiet long even though we used only 5 CV folds and the neural network and data size are very small!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Hyperparameter optimization"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We know from chapter 6 that there are 2 types of parameters which need to be tuned for a machine learning model.\n",
"* Internal model parameters (weights) which can be learned for e.g. by gradient-descent\n",
"* Hyperparameters\n",
"\n",
"In the model which we created above we made some arbitrary choices like which optimizer we use, what is its learning rate, number of hidden units and so on ...\n",
"\n",
"Now that we have the keras model wrapped as a scikit model we can use the grid search functions we have seen in chapter 6."
]
},
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import GridSearchCV\n",
"# Just to remember\n",
"model_scikit = KerasClassifier(\n",
" build_fn=a_simple_NN, **{\"epochs\": num_epochs, \"verbose\": 0})"
"execution_count": null,
"metadata": {},
"outputs": [],
"search = GridSearchCV(estimator=model_scikit, param_grid=HP_grid)\n",
"search.fit(features, labels)\n",
"print(search.best_score_, search.best_params_)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"HP_grid = {'epochs' : [10, 15, 30], \n",
" 'batch_size' : [10, 20, 30] }\n",
"search = GridSearchCV(estimator=model_scikit, param_grid=HP_grid)\n",
"search.fit(features, labels)\n",
"print(search.best_score_, search.best_params_)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A more general model for further Hyperparameter optimization\n",
"from keras import optimizers\n",
"\n",
"def a_simple_NN(activation='relu', num_hidden_neurons=[4, 4], learning_rate=0.01):\n",
"\n",
" model = Sequential()\n",
"\n",
" model.add(Dense(num_hidden_neurons[0],\n",
" input_shape=(2,), activation=activation))\n",
"\n",
" model.add(Dense(num_hidden_neurons[1], activation=activation))\n",
"\n",
" model.add(Dense(1, activation=\"sigmoid\"))\n",
"\n",
" model.compile(loss=\"binary_crossentropy\", optimizer=optimizers.rmsprop(\n",
" lr=learning_rate), metrics=[\"accuracy\"])\n",
"\n",
" return model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Exercise: \n",
"* Look at the model above and choose a couple of hyperparameters to optimize. \n",
"* **(OPTIONAL:)** What function from SciKit learn other than GridSearchCV can we use for hyperparameter optimization? Use it."
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Code here"
]
},
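{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible solution sketch (not the only way): we wrap the more general `a_simple_NN` with `KerasClassifier` and use `RandomizedSearchCV` from scikit-learn to sample a few combinations of its `activation` and `learning_rate` arguments. The candidate values below are just example choices."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Solution sketch: randomized search over two hyperparameters of a_simple_NN\n",
"from sklearn.model_selection import RandomizedSearchCV\n",
"\n",
"# Wrap the more general model; epochs kept small so the search stays fast\n",
"model_scikit_general = KerasClassifier(\n",
"    build_fn=a_simple_NN, **{\"epochs\": 10, \"verbose\": 0})\n",
"\n",
"# Candidate values to sample from (example choices)\n",
"HP_distributions = {'activation': ['relu', 'tanh', 'sigmoid'],\n",
"                    'learning_rate': [0.05, 0.01, 0.005]}\n",
"\n",
"random_search = RandomizedSearchCV(estimator=model_scikit_general,\n",
"                                   param_distributions=HP_distributions,\n",
"                                   n_iter=5)\n",
"random_search.fit(features, labels)\n",
"print(random_search.best_score_, random_search.best_params_)"
]
},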
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Exercise: Create a neural network to classify the 2d points example from chapter 2 learned \n",
"(Optional: As you create the model read a bit on the different keras commands we have used)"
]
},
{
"cell_type": "code",
"circle = pd.read_csv(\"2d_points.csv\")\n",
"# Using x and y coordinates as featues\n",
"features = circle.iloc[:, :-1]\n",
"# Convert boolean to integer values (True->1 and False->0)\n",
"labels = circle.iloc[:, -1].astype(int)\n",
"colors = [[\"steelblue\", \"chocolate\"][i] for i in circle[\"label\"]]\n",
"plt.figure(figsize=(5, 5))\n",
"plt.xlim([-2, 2])\n",
"plt.ylim([-2, 2])\n",
"\n",
"plt.scatter(features[\"x\"], features[\"y\"], color=colors, marker=\"o\");\n"
]
},
{
"cell_type": "code",
"metadata": {},
"outputs": [],
"source": [
"# Insert Code here"
]
},
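{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible solution sketch, following the same pattern as `a_simple_NN` above. The layer sizes and number of epochs are just example choices."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Solution sketch: a small network for the 2d circle data (layer sizes are example choices)\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"\n",
"circle_model = Sequential()\n",
"# Two input features: the x and y coordinates\n",
"circle_model.add(Dense(8, input_shape=(2,), activation=\"relu\"))\n",
"circle_model.add(Dense(8, activation=\"relu\"))\n",
"# One output neuron with sigmoid activation for the binary label\n",
"circle_model.add(Dense(1, activation=\"sigmoid\"))\n",
"circle_model.compile(loss=\"binary_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
"\n",
"circle_run = circle_model.fit(features.values, labels.values,\n",
"                              epochs=100, batch_size=32, validation_split=0.2, verbose=0)\n",
"print(\"Final validation accuracy:\", circle_run.history[\"val_acc\"][-1])"
]
},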
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### The examples above are not the ideal use problems one should use neural networks for. They are too simple and can be easily solved by classical machine learning algorithms. Below we show examples which are the more common applications of Neural Networks."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Handwritten Digits Classification\n",
"MNIST datasets is a very common dataset used in machine learning. It is widely used to train and validate models.\n",
">The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a >test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size->normalized and centered in a fixed-size image.\n",
">It is a good database for people who want to try learning techniques and pattern recognition methods on real-world >data while spending minimal efforts on preprocessing and formatting.\n",
">source: http://yann.lecun.com/exdb/mnist/\n",
"The problem we want to solve using this dataset is: multi-class classification (FIRST TIME)\n",
"This dataset consists of images of handwritten digits between 0-9 and their corresponsing labels. We want to train a neural network which is able to predict the correct digit on the image. "
]
},
{
"cell_type": "code",
"metadata": {},
"outputs": [],
"source": [
"# Loading the dataset in keras\n",
"# Later you can explore and play with other datasets with come with Keras\n",
"from keras.datasets import mnist\n",
"# Loading the train and test data\n",
"(X_train, y_train), (X_test, y_test) = mnist.load_data()"
]
},
{
"cell_type": "code",
"source": [
"# Looking at the dataset\n",
"print(X_train.shape)"
]
},
{
"cell_type": "code",
"# We can see that the training set consists of 60,000 images of size 28x28 pixels\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"i=np.random.randint(0,X_train.shape[0])\n",
"plt.imshow(X_train[i], cmap=\"gray_r\") ;\n",
"print(\"This digit is: \" , y_train[i])"
"# Look at the data values for a couple of images\n",
"print(X_train[0].min(), X_train[1].max())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The data consists of values between 0-255 representing the **grayscale level**"
]
},
{
"cell_type": "code",
"# The labels are the digit on the image\n",
"print(y_train.shape)"
"metadata": {},
"outputs": [],
"source": [
"# Scaling the data\n",
"# It is important to normalize the input data to (0-1) before providing it to a neural net\n",
"# We could use the previously introduced function from SciKit learn. However, here it is sufficient to\n",
"# just divide the input data by 255\n",
"X_train_norm = X_train/255.\n",
"X_test_norm = X_test/255.\n",
"\n",
"# Also we need to reshape the input data such that each sample is a vector and not a 2D matrix\n",
"X_train_prep = X_train_norm.reshape(X_train_norm.shape[0],28*28)\n",
"X_test_prep = X_test_norm.reshape(X_test_norm.shape[0],28*28)"
"**TODO: Better frame the explaination**\n",
"In such problems the labels are provided as something called **One-hot encodings**. What this does is to convert a categorical label to a vector.\n",
"For the MNIST problem where we have **10 categories** one-hot encoding will create a vector of length 10 for each of the labels. All the entries of this vector will be zero **except** for the index which is equal to the integer value of the label.\n",
"For example:\n",
"if label is 4. The one-hot vector will look like **[0 0 0 0 1 0 0 0 0 0]**\n",
"Fortunately, we don't have to code this ourselves because Keras has a built-in function for this."
"from keras.utils.np_utils import to_categorical\n",
"y_train_onehot = to_categorical(y_train, num_classes=10)\n",
"y_test_onehot = to_categorical(y_test, num_classes=10)\n",
"# Building the keras model\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"def mnist_model():\n",
" model = Sequential()\n",
" model.add(Dense(64, input_shape=(28*28,), activation=\"relu\"))\n",
" model.add(Dense(10, activation=\"softmax\"))\n",
" model.compile(loss=\"categorical_crossentropy\",\n",
" optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
" return model\n",
"model = mnist_model()\n",
"\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=20,\n",
" batch_size=512)"
"print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Optional exercise: Run the model again with validation dataset, plot the accuracy as a function of epochs, play with number of epochs and observe what is happening."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Code here"
]
},
{
"cell_type": "code",
"source": [
"# Solution:\n",
"num_epochs = 20\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs,\n",
" batch_size=512, validation_data=(X_test_prep, y_test_onehot))\n",
"# Evaluating the model on test dataset\n",
"#print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))\n",
"history_model = model_run.history\n",
"print(\"The history has the following data: \", history_model.keys())\n",
"\n",
"# Plotting the training and validation accuracy during the training\n",
"plt.plot(np.arange(1, num_epochs+1), history_model[\"acc\"], \"blue\")\n",
"\n",
"plt.plot(np.arange(1, num_epochs+1), history_model[\"val_acc\"], \"red\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Adding regularization"
]
},
"execution_count": null,
"metadata": {},
"outputs": [],
"# Building the keras model\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"from keras.regularizers import l2\n",
"\n",
"def mnist_model():\n",
" \n",
" model = Sequential()\n",
"\n",
" model.add(Dense(64, input_shape=(28*28,), activation=\"relu\", \n",
" kernel_regularizer=l2(0.01)))\n",
"\n",
" model.add(Dense(64, activation=\"relu\", \n",
" kernel_regularizer=l2(0.01)))\n",
"\n",
" model.add(Dense(10, activation=\"softmax\"))\n",
"\n",
" model.compile(loss=\"categorical_crossentropy\",\n",
" optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
" return model\n",
"\n",
"num_epochs = 50\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs,\n",
" batch_size=512)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))"
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Another way to add regularization and to make the network more robust we can add something called \"Dropout\". When we add dropout to a layer a specified percentage of units in that layer are switched off. \n",
"\n",
"### Exercise: Add dropout instead of l2 regularization in the network above"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Adding dropout is easy in keras\n",
"# We import a layer called Dropout and add as follows\n",
"# model.add(Dropout(0.5)) to randomly drop 50% of the hidden units\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Solution\n",
"# Adding Dropout\n",
"# Building the keras model\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout\n",
"\n",
"def mnist_model():\n",
" \n",
" model = Sequential()\n",
"\n",
" model.add(Dense(64, input_shape=(28*28,), activation=\"relu\"))\n",
" \n",
" model.add(Dropout(0.4))\n",
"\n",
" model.add(Dense(64, activation=\"relu\"))\n",
"\n",
" model.add(Dense(10, activation=\"softmax\"))\n",
"\n",
" model.compile(loss=\"categorical_crossentropy\",\n",
" optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
" \n",
" return model\n",
"\n",
"model = mnist_model()\n",
"\n",
"num_epochs = 50\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs,\n",
" batch_size=512)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"The [loss, accuracy] on test dataset are: \" , model.evaluate(X_test_prep, y_test_onehot))"
]
},
"## Network Architecture\n",
"\n",
"The neural networks which we have seen till now are the simplest kind of neural networks.\n",
"There exist more sophisticated network architectures especially designed for specific applications.\n",
"Some of them are as follows:\n",
"\n",
"### Convolution Neural Networks (CNNs)\n",
"\n",
"These networks are used mostly for computer vision (EXAMPLES) like tasks. \n",
"One of the old CNN networks is shown below.\n",
"\n",
"<center>\n",
"<figure>\n",
"<img src=\"./images/neuralnets/CNN_lecun.png\" width=\"800\"/>\n",
"<figcaption>source: LeCun et al., Gradient-based learning applied to document recognition (1998).</figcaption>\n",
"</figure>\n",
"</center>\n",
"\n",
"CNNs consist of new type of layers like convolution layer and pooling layers.\n",
"\n",
"### Recurrent Neural Networks (RNNs)\n",
"\n",
"These are used for time-series data, speech recognition, translation etc.\n",
"IMAGE HERE\n",
"\n",
"### Generative adversarial networks (GANs)\n",
"\n",
"GANs consist of 2 parts, a generative network and a discriminative network. The generative network produces data which is then fed to the discriminative network which judges if the new data belongs to a specified dataset. Then via feedback loops the generative network becomes better and better at creating images similar to the dataset the discriminative network is judging against. At the same time the discriminative network get better and better at identifyig **fake** instances which are not from the reference dataset. \n",
"\n",
"IMAGE HERE"
]
},
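{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small illustration of the recurrent idea, the sketch below builds a tiny Keras model with an `LSTM` layer (one common recurrent layer type). The sequence length, feature count and layer size are arbitrary example values; the model is only compiled and summarized here, not trained."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: a minimal recurrent model (example sizes, not trained here)\n",
"from keras.models import Sequential\n",
"from keras.layers import LSTM, Dense\n",
"\n",
"rnn_model = Sequential()\n",
"# Input: sequences of 20 time steps with 1 feature per step (example values)\n",
"rnn_model.add(LSTM(32, input_shape=(20, 1)))\n",
"rnn_model.add(Dense(1, activation=\"sigmoid\"))\n",
"rnn_model.compile(loss=\"binary_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
"rnn_model.summary()"
]
},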
{
"cell_type": "markdown",
"metadata": {},
"source": [
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For this example we will work with a dataset called fashion-MNIST which is quite similar to the MNIST data above.\n",
"> Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. We intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits.\n",
"source: https://github.com/zalandoresearch/fashion-mnist\n",
"| Label| Item |\n",
"| --- | --- |\n",
"| 0 |\tT-shirt/top |\n",
"| 1\t| Trouser |\n",
"|2|\tPullover|\n",
"|3|\tDress|\n",
"|4|\tCoat|\n",
"|5|\tSandal|\n",
"|6|\tShirt|\n",
"|7|\tSneaker|\n",
"|8|\tBag|\n",
"|9|\tAnkle boot|"
]
},
{
"cell_type": "code",
"metadata": {},
"outputs": [],
"source": [
"# Loading the dataset in keras\n",
"# Later you can explore and play with other datasets with come with Keras\n",
"from keras.datasets import fashion_mnist\n",
"(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()\n",
"items =['T-shirt/top', 'Trouser', \n",
" 'Pullover', 'Dress', \n",
" 'Coat', 'Sandal', \n",
" 'Shirt', 'Sneaker',\n",
" 'Bag', 'Ankle boot']"
]
},
{
"cell_type": "code",
"source": [
"# We can see that the training set consists of 60,000 images of size 28x28 pixels\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"i=np.random.randint(0,X_train.shape[0])\n",
"plt.imshow(X_train[i], cmap=\"gray_r\") ; \n",
"print(\"This item is a: \" , items[y_train[i]])"
]
},
{
"cell_type": "code",
"source": [
"# Also we need to reshape the input data such that each sample is a 4D matrix of dimension\n",
"# (num_samples, width, height, channels). Even though these images are grayscale we need to add\n",
"# channel dimension as this is expected by the Conv function\n",
"X_train_prep = X_train.reshape(X_train.shape[0],28,28,1)/255.\n",
"X_test_prep = X_test.reshape(X_test.shape[0],28,28,1)/255.\n",
"\n",
"from keras.utils.np_utils import to_categorical\n",
"\n",
"y_train_onehot = to_categorical(y_train, num_classes=10)\n",
"y_test_onehot = to_categorical(y_test, num_classes=10)\n",
"\n",
"print(y_train_onehot.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"# Creating a CNN similar to the one shown in the figure from LeCun paper\n",
"# In the original implementation Average pooling was used. However, we will use maxpooling as this \n",
"# is what us used in the more recent architectures and is found to be a better choice\n",
"# Convolution -> Pooling -> Convolution -> Pooling -> Flatten -> Dense -> Dense -> Output layer\n",
"from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization\n",
"def simple_CNN():\n",
" \n",
" model = Sequential()\n",
" \n",
" model.add(Conv2D(6, (3,3), input_shape=(28,28,1), activation='relu'))\n",
" \n",
" model.add(MaxPool2D((2,2)))\n",
" \n",
" model.add(Conv2D(16, (3,3), activation='relu'))\n",
" \n",
" model.add(MaxPool2D((2,2)))\n",
" \n",
" model.add(Flatten())\n",
" \n",
" model.add(Dense(120, activation='relu'))\n",
" \n",
" model.add(Dense(84, activation='relu'))\n",
" \n",
" model.add(Dense(10, activation='softmax'))\n",
" \n",
" model.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
" \n",
" return model\n",
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"num_epochs = 10\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=num_epochs, \n",
" batch_size=64, validation_data=(X_test_prep, y_test_onehot))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Exercise: Use the above model or improve it (change number of filters, add more layers etc. on the MNIST example and see if you can get a better accuracy than what we achieved with a vanilla neural network)"
]
},
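{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible starting point (sketch): reload MNIST, preprocess it the same way as the fashion-MNIST data, and train `simple_CNN` on it. The number of epochs and batch size are just example values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Solution sketch: train simple_CNN on MNIST (epochs/batch size are example values)\n",
"from keras.datasets import mnist\n",
"from keras.utils.np_utils import to_categorical\n",
"\n",
"(X_train, y_train), (X_test, y_test) = mnist.load_data()\n",
"\n",
"# Scale to 0-1 and add the channel dimension expected by Conv2D\n",
"X_train_prep = X_train.reshape(X_train.shape[0], 28, 28, 1)/255.\n",
"X_test_prep = X_test.reshape(X_test.shape[0], 28, 28, 1)/255.\n",
"y_train_onehot = to_categorical(y_train, num_classes=10)\n",
"y_test_onehot = to_categorical(y_test, num_classes=10)\n",
"\n",
"model = simple_CNN()\n",
"model_run = model.fit(X_train_prep, y_train_onehot, epochs=5,\n",
"                      batch_size=64, validation_data=(X_test_prep, y_test_onehot))"
]
},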
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Exercise: Load and play with the CIFAR10 dataset also included with Keras and build+train a simple CNN using it"
}
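{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible starting point (sketch): CIFAR10 images are 32x32 pixels with 3 color channels, so the convolutional input shape changes to (32, 32, 3). The filter counts, layer sizes and number of epochs below are example choices only."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Solution sketch: a small CNN on CIFAR10 (all sizes below are example choices)\n",
"from keras.datasets import cifar10\n",
"from keras.utils.np_utils import to_categorical\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n",
"\n",
"(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n",
"\n",
"# CIFAR10 images are 32x32 pixels with 3 color channels; scale values to 0-1\n",
"X_train_prep = X_train/255.\n",
"X_test_prep = X_test/255.\n",
"y_train_onehot = to_categorical(y_train, num_classes=10)\n",
"y_test_onehot = to_categorical(y_test, num_classes=10)\n",
"\n",
"cifar_model = Sequential()\n",
"cifar_model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), activation=\"relu\"))\n",
"cifar_model.add(MaxPool2D((2, 2)))\n",
"cifar_model.add(Conv2D(32, (3, 3), activation=\"relu\"))\n",
"cifar_model.add(MaxPool2D((2, 2)))\n",
"cifar_model.add(Flatten())\n",
"cifar_model.add(Dense(64, activation=\"relu\"))\n",
"cifar_model.add(Dense(10, activation=\"softmax\"))\n",
"cifar_model.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n",
"\n",
"cifar_run = cifar_model.fit(X_train_prep, y_train_onehot, epochs=5,\n",
"                            batch_size=64, validation_data=(X_test_prep, y_test_onehot))"
]
}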
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
},
"latex_envs": {
"LaTeX_envs_menu_present": true,
"autoclose": false,
"autocomplete": true,
"bibliofile": "biblio.bib",
"cite_by": "apalike",
"current_citInitial": 1,
"eqLabelWithNumbers": true,
"eqNumInitial": 1,
"hotkeys": {
"equation": "Ctrl-E",
"itemize": "Ctrl-I"
},
"labels_anchors": false,
"latex_user_defs": false,
"report_style_numbering": false,
"user_envs_cfg": false