From 658639d54b456a0afd743eb9d81a3a496c243fc8 Mon Sep 17 00:00:00 2001
From: Zeynep Hakguder <zhakguder@cse.unl.edu>
Date: Fri, 1 Jun 2018 15:17:38 -0500
Subject: [PATCH] pa1

---
 .../ProgrammingAssignment1.ipynb    | 66 ++++++++++---------
 ProgrammingAssignment_1/model.ipynb | 15 ++---
 2 files changed, 42 insertions(+), 39 deletions(-)

diff --git a/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb b/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb
index d101f79..bb66aac 100644
--- a/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb
+++ b/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb
@@ -6,20 +6,23 @@
   "source": [
    "# *k*-Nearest Neighbor\n",
    "\n",
-    "We'll implement *k*-Nearest Neighbor (*k*-NN) algorithm for this assignment. We recommend using [Madelon](https://archive.ics.uci.edu/ml/datasets/Madelon) dataset, although it is not mandatory. If you choose to use a different dataset, it should meet the following criteria:\n",
-    "* dependent variable should be binary (suited for binary classification)\n",
-    "* number of features (attributes) should be at least 50\n",
-    "* number of examples (instances) should be between 1,000 - 5,000\n",
+    "We'll implement the *k*-Nearest Neighbor (*k*-NN) algorithm for this assignment. You can use data available in machine learning repositories such as the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) or a dataset related to your research. Your dataset should\n",
+    "* have labels (suited for classification)\n",
+    "* ideally have between 1,000 and 5,000 examples\n",
    "\n",
    "A skeleton of a general supervised learning model is provided in \"model.ipynb\". The functions that will be implemented there will be indicated in this notebook. \n",
    "\n",
    "### Assignment Goals:\n",
    "In this assignment, we will:\n",
-    "* we'll implement 'Euclidean' and 'Manhattan' distance metrics \n",
+    "* implement 'Euclidean' and 'Manhattan' distance metrics \n",
    "* use the validation dataset to find a good value for *k*\n",
    "* evaluate our model with respect to performance measures:\n",
    "    * accuracy, generalization error and ROC curve\n",
-    "* try to assess if *k*-NN is suitable for the dataset you used\n"
+    "* try to assess if *k*-NN is suitable for the dataset you used\n",
+    "\n",
+    "### Note:\n",
+    "\n",
+    "You are not required to follow this exact template. You can change what parameters your functions take or partition the tasks across functions differently. However, make sure there are outputs and implementations for the items listed in the rubric for each task. Also, indicate with comments in your code which task you are attempting."
   ]
  },
  {
@@ -52,7 +55,7 @@
    "|---|----------------|-----|-----|\n",
    "| 7 | Assess suitability of *k*-NN | 10 | 10 |\n",
    "\n",
-    "Points are broken down further below in Rubric sections. The **first** score is for 478, the **second** is for 878 students. There a total of 100 points in this assignment and extra 20 bonus points for 478 students and 10 bonus points for 878 students."
+    "Points are broken down further below in Rubric sections. The **first** score is for 478, the **second** is for 878 students. There are a total of 100 points in this assignment and an extra 20 bonus points for 478 students and 10 bonus points for 878 students."
   ]
  },
  {
@@ -165,19 +168,21 @@
    "    Inherits Model class.\n",
    "    Implements the k-NN algorithm for classification.\n",
    "    '''\n",
    "    \n",
-    "    def fit(self, training_features, training_labels, k, distance_f,**kwargs):\n",
+    "    def fit(self, training_features, training_labels, classes, k, distance_f, **kwargs):\n",
    "        '''\n",
    "        Fit the model. This is pretty straightforward for k-NN.\n",
    "        Args:\n",
    "            training_features: ndarray\n",
    "            training_labels: ndarray\n",
+    "            classes: ndarray\n",
+    "                1D array containing unique classes in the dataset\n",
    "            k: int\n",
    "            distance_f: function\n",
    "            kwargs: dict\n",
    "                Contains keyword arguments that will be passed to distance_f\n",
    "        '''\n",
    "        # TODO\n",
-    "        # set self.train_features, self.train_labels, self.k, self.distance_f, self.distance_metric\n",
+    "        # set self.train_features, self.train_labels, self.classes, self.k, self.distance_f, self.distance_metric\n",
    "        \n",
    "        raise NotImplementedError\n",
    "\n",
@@ -201,13 +206,10 @@
    "            # use your implementation of distance function\n",
    "            # distance_f(..., distance_metric)\n",
    "            # to find the labels of k-nearest neighbors. \n",
-    "            \n",
-    "            # Find the ratio of the positive labels\n",
-    "            # and append to pred with pred.append(ratio).\n",
-    "            \n",
-    "            # when calculating learning curve you can make use of\n",
-    "            # self.learning_curve and self.training_proportion\n",
    "\n",
+    "            # you'll need the proportion of the dominant class\n",
+    "            # among the k nearest neighbors\n",
+    "            \n",
    "        return np.array(pred)\n",
    "    "
   ]
  },
  {
@@ -246,6 +248,8 @@
    "my_model = kNN()\n",
    "# obtain features and labels from files\n",
    "features, labels = preprocess(feature_file=..., label_file=...)\n",
+    "# get class names (unique entries in labels)\n",
+    "classes = np.unique(labels)\n",
    "# partition the data set\n",
    "val_indices, test_indices, train_indices = partition(size=..., t = 0.3, v = 0.1)"
   ]
  },
  {
@@ -265,7 +269,7 @@
    "# pass the training features and labels to the fit method\n",
    "kwargs_f = {'metric': 'Euclidean'}\n",
-    "my_model.fit(training_features=..., training_labels-..., k=10, distance_f=..., **kwargs_f)"
+    "my_model.fit(training_features=..., training_labels=..., classes=classes, k=10, distance_f=..., **kwargs_f)"
   ]
  },
  {
@@ -285,17 +289,17 @@
    "# TODO\n",
    "\n",
    "# get model predictions\n",
-    "pred_ratios = my_model.predict(my_model.features[my_model.test_indices])\n",
+    "pred_ratios = my_model.predict(features[test_indices])\n",
    "\n",
-    "# For now, we will consider a data point as predicted in the positive class if more than 0.5 \n",
-    "# of its k-neighbors are positive.\n",
+    "# For now, we will consider a data point as predicted in a class if more than 0.5 \n",
+    "# of its k nearest neighbors are in that class.\n",
    "threshold = 0.5\n",
    "# convert predicted ratios to predicted labels\n",
    "pred_labels = None\n",
    "\n",
-    "# obtain true positive, true negative,\n",
-    "#false positive and false negative counts using conf_matrix\n",
-    "tp,tn, fp, fn = conf_matrix(...)"
+    "# show the distribution of predicted and true labels in a confusion matrix\n",
+    "confusion = conf_matrix(...)\n",
+    "confusion"
   ]
  },
  {
@@ -347,7 +351,7 @@
-    "# train using %10, %20, %30, ..., 100% of training data\n",
+    "# train using 10%, 20%, 30%, ..., 100% of the training data\n",
    "training_proportions = np.arange(0.10, 1.01, 0.10)\n",
    "train_size = len(train_indices)\n",
-    "training_sizes = np.int(np.ceil(size*proportion))\n",
+    "training_sizes = np.ceil(train_size * training_proportions).astype(int)\n",
    "\n",
    "# TODO\n",
    "error_train = []\n",
    "error_val = []\n",
    "\n",
    "# For each size in training_sizes\n",
    "for size in training_sizes:\n",
-    "    # fit the model using \"size\" data porint\n",
+    "    # fit the model using \"size\" data points\n",
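+    "    # e.g., a minimal sketch, assuming train_indices is already shuffled:\n",
+    "    #   subset = train_indices[:size]\n",
+    "    #   my_model.fit(training_features=features[subset],\n",
+    "    #                training_labels=labels[subset],\n",
+    "    #                classes=classes, k=10, distance_f=..., **kwargs_f)\n",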
" # Calculate error for training and validation sets\n", " # populate error_train and error_val arrays. \n", " # Each entry in these arrays\n", " # should correspond to each entry in training_sizes.\n", "\n", + "# plot the learning curve\n", "plt.plot(training_sizes, error_train, 'r', label = 'training_error')\n", "plt.plot(training_sizes, error_val, 'g', label = 'validation_error')\n", "plt.legend()\n", @@ -379,8 +384,8 @@ "metadata": {}, "source": [ "### Rubric:\n", - "* Increased accuracy with new *k* +5, +5\n", - "* Improved confusion matrix +5, +5" + "* Accuracies reported with various *k* values +5, +5\n", + "* Confusion matrices shown for various *k* values +5, +5" ] }, { @@ -418,15 +423,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### ROC curve and confusion matrix for the final model\n", - "ROC curves are a good way to visualize sensitivity vs. 1-specificity for varying cut off points. Now, implement, in \"model.ipynb\", a \"ROC\" function that predicts the labels of the test set examples using different *threshold* values in \"predict\" and plot the ROC curve. \"ROC\" takes a list containing different *threshold* parameter values to try and returns two arrays; one where each entry is the sensitivity at a given threshold and the other where entries are 1-specificities." + "ROC curves are a good way to visualize sensitivity vs. 1-specificity for varying cut off points. Now, implement, in *model.ipynb*, a \"ROC\" function. \"ROC\" takes a list containing different threshold values to try and returns two arrays; one where each entry is the sensitivity at a given threshold and the other where entries are 1-specificities." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We can finally create the confusion matrix and plot the ROC curve for our optimal *k*-NN classifier. Use the *k* value you found above, if you completed TASK 5, else use *k* = 10. We'll plot the ROC curve for values between 0.1 and 1.0." + "Use the *k* value you found above, if you completed TASK 5, else use *k* = 10 to plot the ROC curve for values between 0.1 and 1.0." ] }, { @@ -437,7 +441,7 @@ "source": [ "# TODO\n", "# ROC curve\n", - "roc_sens, roc_spec_ = ROC(my_model, my_model.test_indices, np.arange(0.1, 1.0, 0.1))\n", + "roc_sens, roc_spec_ = ROC(true_labels=..., preds=..., np.arange(0.1, 1.0, 0.1))\n", "plt.plot(roc_sens, roc_spec_)\n", "plt.show()" ] @@ -453,7 +457,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Use this cell to write about your understanding of why *k*-NN performed well if it did or why not if it didn't. What properties of the dataset affect the performance of the algorithm?" + "Use this cell to write about your understanding of why *k*-NN performed well if it did or why not if it didn't. What properties of the dataset could have affected the performance of the algorithm?" 
   ]
  },
 ],
diff --git a/ProgrammingAssignment_1/model.ipynb b/ProgrammingAssignment_1/model.ipynb
index 1613f49..a35f752 100644
--- a/ProgrammingAssignment_1/model.ipynb
+++ b/ProgrammingAssignment_1/model.ipynb
@@ -166,24 +166,24 @@
   "source": [
    "# TODO: Programming Assignment 1\n",
    "\n",
-    "def conf_matrix(true, pred):\n",
+    "def conf_matrix(true, pred, n_classes):\n",
    "    '''\n",
    "    Args: \n",
    "        true: ndarray\n",
    "            nx1 array of true labels for test set\n",
    "        pred: ndarray \n",
    "            nx1 array of predicted labels for test set\n",
+    "        n_classes: int\n",
    "    Returns:\n",
-    "        ndarray\n",
+    "        result: ndarray\n",
+    "            n_classes x n_classes confusion matrix\n",
    "    '''\n",
    "    raise NotImplementedError\n",
+    "    result = np.zeros((n_classes, n_classes), dtype=int)\n",
    "    \n",
-    "    tp = tn = fp = fn = 0\n",
-    "    # calculate true positives (tp), true negatives(tn)\n",
-    "    # false positives (fp) and false negatives (fn)\n",
    "    \n",
    "    # returns the confusion matrix as numpy.ndarray\n",
-    "    return np.array([tp,tn, fp, fn])"
+    "    return result"
   ]
  },
  {
@@ -207,7 +207,7 @@
    "        true_labels: ndarray\n",
    "            1D array containing true labels\n",
    "        preds: ndarray\n",
-    "            1D array containing thresholded value (e.g. proportion of positive neighbors in kNN)\n",
+    "            1D array containing values to threshold (e.g. proportion of the dominant class among the k nearest neighbors in kNN)\n",
    "        value_list: ndarray\n",
    "            1D array containing different threshold values\n",
    "    Returns:\n",
-    "        \n",
-    "            1D array containing 1-specifities\n",
+    "            1D array containing sensitivities\n",
+    "            1D array containing 1-specificities\n",
    "    '''\n",
    "    \n",
-    "    # use conf_matrix to calculate tp, tn, fp, fn\n",
    "    # calculate sensitivity, 1-specificity\n",
    "    # return two arrays\n",
    "    \n",
-- 
GitLab
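
For reference, below is a minimal sketch of a distance function compatible with the calls in the notebook above (`distance_f(..., distance_metric)` and `kwargs_f = {'metric': 'Euclidean'}`). The name `distance` and its exact signature are illustrative assumptions, not part of the provided skeleton:

    import numpy as np

    def distance(x, y, metric):
        # Hypothetical helper; the skeleton only requires that distance_f
        # accept the 'metric' keyword forwarded from kwargs_f.
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        if metric == 'Euclidean':
            # L2 distance: square root of the sum of squared differences
            return np.sqrt(np.sum((x - y) ** 2))
        elif metric == 'Manhattan':
            # L1 distance: sum of absolute differences
            return np.sum(np.abs(x - y))
        raise ValueError('unknown metric: %s' % metric)

    # usage, e.g.: distance(features[0], features[1], metric='Euclidean')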