diff --git a/ProgrammingAssignment_0/.ipynb_checkpoints/GettingFamiliar-checkpoint.ipynb b/ProgrammingAssignment_0/.ipynb_checkpoints/GettingFamiliar-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c71781b1c0d5407ea98856be54163c2d8a114b92 --- /dev/null +++ b/ProgrammingAssignment_0/.ipynb_checkpoints/GettingFamiliar-checkpoint.ipynb @@ -0,0 +1,404 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JUPYTER NOTEBOOK TIPS\n", + "\n", + "Each rectangular box is called a cell. \n", + "* Ctrl+ENTER evaluates the current cell; if it contains Python code, it runs the code, and if it contains Markdown, it renders the text.\n", + "* Alt+ENTER evaluates the current cell and adds a new cell below it.\n", + "* If you click to the left of a cell, you'll notice the frame changes color to blue. You can erase a cell by hitting 'dd' (that's two \"d\"s in a row) when the frame is blue." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# GRADING\n", + "\n", + "You will be graded on the parts that are marked with **\\#TODO** comments. Read the comments in the code to make sure you don't miss any.\n", + "\n", + "### Mandatory for 478 & 878:\n", + "\n", + "| Tasks | 478 | 878 |\n", + "|----------------------------|-----|-----|\n", + "| Implement `preprocess` | 10 | 5 |\n", + "| Implement `partition` | 10 | 5 |\n", + "| Putting the model together | 5 | 5 |\n", + "\n", + "\n", + "### Mandatory for 878, bonus for 478\n", + "\n", + "| Tasks | 478 | 878 |\n", + "|---------------------------------------|-----|-----|\n", + "| Modify `preprocess` for normalization | 5 | 10 |\n", + "\n", + "\n", + "Points are broken down further below in the Rubric sections. The **first** score is for 478, the **second** is for 878 students. There are a total of 25 points in this assignment, plus an extra 5 bonus points for 478 students." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Supervised Learning Model Skeleton\n", + "\n", + "We'll use this skeleton for implementing different supervised learning algorithms. For this first assignment, we'll read and partition the [\"madelon\" dataset](http://archive.ics.uci.edu/ml/datasets/madelon). Features and labels for the first two examples are listed below. Please complete the \"preprocess\" and \"partition\" methods.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The 500 features in the \"madelon\" dataset have integer values:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "../data/madelon.data\r\n", + " 1-) 485 477 537 479 452 471 491 476 475 473 455 500 456 507 478 491 447 422 480 482 515 482 464 484 477 496 509 491 459 482 483 505 508 458 509 517 479 487 473 472 474 531 485 508 517 489 507 515 440 465 550 532 450 483 460 469 507 485 479 458 516 480 460 479 648 480 561 481 474 474 544 484 490 451 494 480 486 459 521 500 466 457 494 492 488 497 477 461 473 464 476 471 481 507 474 500 481 536 464 501 479 480 483 462 470 181 510 470 431 482 496 481 469 539 491 482 481 476 533 495 474 485 479 495 465 541 493 488 452 481 491 501 477 479 503 529 540 504 482 463 477 530 508 488 488 474 479 506 478 511 501 474 483 575 478 482 461 480 543 415 527 477 487 486 511 474 477 482 476 516 466 492 561 479 472 457 497 475 452 491 477 454 461 472 481 490 526 490 459 478 461 516 511 544 519 487 485 475 477 476 478 470 493 581 484 476 521 474 492 459 487 504 464 485 478 465 603 475 481 491 555 424 528 511 384 525 459 478 477 539 479 508 471 517 482 518 473 478 506 476 507 434 466 480 547 518 516 476 492 454 463 497 477 531 472 495 532 496 492 480 480 479 517 470 470 500 468 477 486 553 490 499 450 469 466 479 476 401 491 551 477 517 492 475 537 516 472 451 484 471 469 523 496 482 458 487 477 457 458 493 458 517 478 482 474 517 482 488 490 485 440 455 464 531 483 467 494 488 414 491 494 497 501 476 481 485 478 476 491 492 523 492 476 464 496 473 658 507 628 484 468 448 502 618 438 486 496 535 452 497 490 485 504 477 481 473 517 476 479 483 482 458 464 466 473 482 497 479 497 495 489 483 500 490 479 471 468 496 419 513 475 471 514 479 480 486 480 477 494 454 480 539 477 441 482 461 484 510 475 485 480 474 474 442 477 502 402 478 504 476 484 475 488 486 524 506 480 451 512 498 478 485 495 476 496 485 496 485 486 482 505 528 496 533 504 512 474 646 526 485 541 487 568 492 467 479 483 479 546 476 457 463 517 471 482 630 481 494 440 509 507 512 496 488 462 498 480 511 500 437 537 470 515 476 467 401 485 499 495 490 508 463 487 531 515 476 482 463 467 479 477 481 477 485 511 485 481 479 475 496 \r\n", + " 2-) 483 458 460 487 587 475 526 479 485 469 434 483 465 503 472 478 469 518 495 491 478 530 462 494 549 469 516 487 475 486 478 514 542 406 469 452 483 498 480 476 474 504 478 493 472 461 521 521 499 458 466 519 487 485 489 485 551 516 435 487 525 481 529 486 488 513 415 463 481 481 491 504 496 433 475 416 481 482 493 536 483 416 553 460 554 447 477 499 470 527 476 480 507 522 474 485 478 479 468 397 482 469 477 476 553 431 489 447 535 487 488 557 485 515 484 497 479 494 436 470 477 468 480 587 503 429 496 502 473 485 522 484 481 486 519 455 442 499 470 483 508 510 481 494 483 473 481 510 480 447 538 497 475 404 479 519 486 492 520 519 500 482 486 487 533 487 476 480 475 459 470 522 489 477 447 519 484 472 458 510 529 539 456 478 490 509 481 524 530 478 495 507 459 467 494 470 480 491 476 503 485 475 508 488 495 477 507 482 447 482 483 455 485 474 478 579 540 484 508 480 492 517 490 547 510 465 495 477 475 497 477 442 489 507 466 504 493 471 478 467 530 551 476 470 575 477 510 486 473 504 451 450 477 506 480 506 575 502 486 489 485 479 488 524 465 516 443 503 517 498 482 467 454 407 484 479 475 498 514 492 477 435 491 475 503 480 506 512 482 477 504 527 454 483 458 473 484 542 469 459 
462 503 477 492 469 467 475 483 491 464 466 475 477 502 483 506 474 494 469 524 483 434 488 463 495 483 468 481 493 489 538 469 477 480 460 495 469 469 528 544 497 497 462 478 494 481 493 461 482 483 471 422 493 511 471 497 523 476 462 453 471 502 475 536 481 389 491 464 500 553 467 497 489 486 490 540 487 488 526 477 480 462 523 483 488 475 485 479 492 452 479 441 475 442 476 475 484 500 570 482 481 428 477 456 477 546 502 477 516 467 512 469 498 501 503 539 493 505 543 556 486 483 514 476 457 507 475 448 479 481 486 500 489 442 509 479 500 517 489 488 494 496 463 460 472 478 457 487 420 463 484 474 459 311 479 582 480 495 538 487 537 488 485 483 500 487 476 526 449 363 466 478 465 479 482 549 470 506 481 494 492 448 492 447 598 507 478 483 492 485 463 478 487 338 513 486 483 492 510 517 \r\n" + ] + } + ], + "source": [ + "! echo '../data/madelon.data'; head -n 2 ../data/madelon.data | nl -s '-) '" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Labels are either positive (1) or negative (-1):" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "../data/madelon.labels\r\n", + " 1-) -1\r\n", + " 2-) -1\r\n" + ] + } + ], + "source": [ + "! echo '../data/madelon.labels'; head -n 2 ../data/madelon.labels | nl -s '-) '" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TASK 1: Implement `preprocess`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This step is for reading the dataset and for extracting features and labels. The \"preprocess\" function should return an $n \\times d$ \"features\" array, and an $n \\times 1$ \"labels\" array, where $n$ is the number of examples and $d$ is the number of features in the dataset. In cases where there is a big difference between the scales of features, we want to normalize the features to have values in the same range [0,1]. Since this is not the case with this dataset, we will not do normalization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess(feature_file, label_file):\n", + " '''\n", + " Args:\n", + " feature_file: str \n", + " file containing features\n", + " label_file: str\n", + " file containing labels\n", + " Returns:\n", + " features: ndarray\n", + " nxd features\n", + " labels: ndarray\n", + " nx1 labels\n", + " '''\n", + " # You might find np.genfromtxt useful for reading in the file. Be careful with the file delimiter, \n", + " # e.g. for comma-separated files use delimiter=',' argument.\n", + " \n", + " # TODO \n", + " \n", + " raise NotImplementedError\n", + "\n", + " \n", + " return features, labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rubric:\n", + "* Correct features size +5, +2.5\n", + "* Correct labels size +5, +2.5" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Test `preprocess`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "features, labels = preprocess(feature_file = ..., label_file = ...)\n", + "# TODO: Output the dimension of both features and labels." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TASK 2: Implement `partition`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, you'll need to split your dataset into training, validation and test sets. 
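Before moving on to the details of "partition" below, here is a minimal reference sketch of how both pieces could look. This is only one possible shape, not the required solution; it assumes the madelon files are whitespace-delimited (as the preview above suggests) and uses the test/validation proportions `t` and `v` described next. The `_sketch` suffixes are hypothetical names chosen to avoid clashing with the functions you are asked to implement.

```python
import numpy as np

def preprocess_sketch(feature_file, label_file):
    # np.genfromtxt splits on whitespace by default
    features = np.genfromtxt(feature_file)
    labels = np.genfromtxt(label_file).reshape(-1, 1)  # n x 1
    return features, labels

def partition_sketch(size, t, v=0):
    # shuffle all indices once, then carve off the test and validation blocks
    shuffled = np.random.permutation(size)
    n_test = int(np.ceil(size * t))
    n_val = int(np.ceil(size * v))
    test_indices = shuffled[:n_test]
    val_indices = shuffled[n_test:n_test + n_val]
    return test_indices, val_indices
```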
The \"partition\" function should take as input the size of the whole dataset and randomly sample a proportion $t$ of the dataset as test partition and a proportion of $v$ as validation partition. The remaining will be used as training data. For example, to keep 30% of the examples as test and %10 as validation, set $t=0.3$ and $v=0.1$. You should choose these values according to the size of the data available to you. The \"split\" function should return indices of the training, validation and test sets. These will be used to index into the whole training set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def partition(size, t, v = 0):\n", + " '''\n", + " Args:\n", + " size: int\n", + " number of examples in the whole dataset\n", + " t: float\n", + " proportion kept for test\n", + " v: float\n", + " proportion kept for validation\n", + " Returns:\n", + " test_indices: ndarray\n", + " 1D array containing test set indices\n", + " val_indices: ndarray\n", + " 1D array containing validation set indices\n", + " '''\n", + " \n", + " # np.random.permutation might come in handy. Do not sample with replacement!\n", + " # Be sure not to use the same indices in test and validation sets!\n", + " \n", + " # use the first np.ceil(size*t) for test, \n", + " # the following np.ceil(size*v) for validation set.\n", + " \n", + " # TODO\n", + " \n", + " raise NotImplementedError\n", + " \n", + " return test_indices, val_indices" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rubric:\n", + "* Correct length of test indices +5, +2.5\n", + "* Correct length of validation indices +5, +2.5" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Test `partition`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO\n", + "# Pass the correct size argument (number of examples in the whole dataset)\n", + "test_indices, val_indices = partition(size=..., t = 0.3, v = 0.1)\n", + "\n", + "# Output the size of both features and labels." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TASK 3: Putting things together" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model definition is given below. We'll extend this class for different supervised classification algorithms. Specifically, we'll implement \"fit\" and \"predict\" methods for these algorithms. For this assignment, you are not asked to implement these methods. Run the cells below and make sure each piece of code fits together and works as expected." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class Model:\n", + " # preprocess_f and partition_f expect functions\n", + " # use kwargs to pass arguments to preprocessor_f and partition_f\n", + " # kwargs is a dictionary and should contain t, v, feature_file, label_file\n", + " # e.g. 
{'t': 0.3, 'v': 0.1, 'feature_file': 'some_file_name', 'label_file': 'some_file_name'}\n", + " \n", + " def __init__(self, preprocessor_f, partition_f, **kwargs):\n", + " \n", + " self.features, self.labels = preprocessor_f(kwargs['feature_file'], kwargs['label_file'])\n", + " self.size = len(self.labels) # number of examples in dataset \n", + " self.feat_dim = self.features.shape[1] # number of features\n", + " \n", + " # partition_f returns (test_indices, val_indices); keep the assignment order consistent\n", + " self.test_indices, self.val_indices = partition_f(self.size, kwargs['t'], kwargs['v'])\n", + " self.val_size = len(self.val_indices)\n", + " self.test_size = len(self.test_indices)\n", + " \n", + " self.train_indices = np.delete(np.arange(self.size), np.append(self.test_indices, self.val_indices), 0)\n", + " self.train_size = len(self.train_indices)\n", + " \n", + " def fit(self):\n", + " \n", + " raise NotImplementedError\n", + " \n", + " def predict(self, indices):\n", + " raise NotImplementedError" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rubric:\n", + "* Correct training size +5, +5" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Test `Model`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use a keyword arguments dictionary that conveniently passes arguments to functions that are themselves passed as arguments during object initialization. Please do not change these calls in this and the following assignments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO\n", + "# pass the correct arguments to preprocessor_f and partition_f\n", + "kwargs = {'t': 0.3, 'v': 0.1, 'feature_file': ..., 'label_file': ...}\n", + "my_model = Model(preprocessor_f=..., partition_f=..., **kwargs)\n", + "# Output size of the training partition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TASK 4: Normalization" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Modify the `preprocess` function such that the output features take values in the range [0, 1]. Initialize a new model with this function and check the values of the features."
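A minimal sketch of one common way to do this is min-max scaling of each feature column. This assumes the `preprocess` you wrote for Task 1; `normalized_preprocess_sketch` is a hypothetical name chosen to avoid clashing with the skeleton below:

```python
import numpy as np

def normalized_preprocess_sketch(feature_file, label_file):
    # same I/O contract as preprocess, with features min-max scaled to [0, 1]
    features, labels = preprocess(feature_file, label_file)
    f_min = features.min(axis=0)
    f_max = features.max(axis=0)
    # guard constant columns so we never divide by zero
    span = np.where(f_max > f_min, f_max - f_min, 1.0)
    return (features - f_min) / span, labels
```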
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Rubric:\n", + "* Correct range for feature values +5, +10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO\n", + "# args is a placeholder for the parameters of the function\n", + "# Args and Returns are as in \"preprocess\"\n", + "def normalized_preprocess(args=...):\n", + " raise NotImplementedError" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO\n", + "\n", + "kwargs = {'t': 0.3, 'v': 0.1, 'feature_file': ..., 'label_file': ...}\n", + "my_model = Model(preprocessor_f=..., partition_f=..., **kwargs)\n", + "\n", + "# Check that each feature in the training set lies in the range [0, 1]" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ProgrammingAssignment_0/madelon.data b/ProgrammingAssignment_0/madelon.data new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ProgrammingAssignment_0/madelon.labels b/ProgrammingAssignment_0/madelon.labels new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ProgrammingAssignment_1/ProgrammingAssignment1-Solution.ipynb b/ProgrammingAssignment_1/ProgrammingAssignment1-Solution.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c2796624f7a5e2521efb8e4626be188f339d3e49 --- /dev/null +++ b/ProgrammingAssignment_1/ProgrammingAssignment1-Solution.ipynb @@ -0,0 +1,279 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# k-Nearest Neighbor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can use numpy for array operations and matplotlib for plotting for this assignment. Please do not add other libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 247, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code makes the Model class and relevant functions available from model.ipynb." + ] + }, + { + "cell_type": "code", + "execution_count": 256, + "metadata": {}, + "outputs": [], + "source": [ + "%run 'model-Solution.ipynb'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Choice of distance metric plays an important role in the performance of kNN. Let's start by implementing a distance method in the \"distance\" function below. It should take two data points and the name of the metric and return a scalar value."
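The solution cell that follows implements the Euclidean case. For reference, a sketch covering a Hamming metric as well might look like this (hypothetical `distance_sketch` name; the Hamming branch assumes binary/categorical vectors and is not required by this notebook):

```python
import numpy as np

def distance_sketch(x, y, metric):
    if metric == 'Euclidean':
        # L2 norm of the difference vector
        return np.sqrt(np.sum(np.square(x - y)))
    elif metric == 'Hamming':
        # number of coordinates in which x and y disagree
        return np.sum(x != y)
    else:
        raise ValueError('{} is not a valid metric.'.format(metric))
```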
+ ] + }, + { + "cell_type": "code", + "execution_count": 257, + "metadata": {}, + "outputs": [], + "source": [ + "def distance(x, y, metric):\n", + " '''\n", + " x: a 1xd array\n", + " y: a 1xd array\n", + " metric: Euclidean, Hamming, etc.\n", + " '''\n", + " if metric == 'Euclidean':\n", + " dist = np.sqrt(np.sum(np.square((x-y))))\n", + " \n", + " ####################################\n", + " return dist # scalar distance btw x and y" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now implement our kNN classifier. The kNN class inherits from the Model class. Implement the \"fit\" and \"predict\" methods. Use the \"distance\" function you defined above. The \"fit\" method takes $k$ as an argument. \"predict\" takes as input the indices of the test points and outputs, for each one, the proportion of positive labels among its $k$ nearest neighbors." + ] + }, + { + "cell_type": "code", + "execution_count": 283, + "metadata": {}, + "outputs": [], + "source": [ + "class kNN(Model):\n", + "\n", + " def fit(self, k, distance_f, **kwargs):\n", + " \n", + " self.k = k\n", + " self.distance_f = distance_f\n", + " self.distance_metric = kwargs['metric']\n", + " \n", + " #######################\n", + " return\n", + " # vary the threshold value for ROC analysis\n", + " def predict(self, test_indices):\n", + " \n", + " chosen_labels = []\n", + " for test_point in self.features[test_indices]:\n", + " distances = []\n", + " labels = []\n", + " for index in self.train_indices:\n", + " dist = self.distance_f(self.features[index], test_point, self.distance_metric)\n", + " distances.append(dist)\n", + " labels.append(self.labels[index])\n", + " # argsort gives indices in order of increasing distance;\n", + " # keep the labels of the k nearest neighbors\n", + " a_order = np.argsort(distances)\n", + " tmp_labels = list(np.array(labels)[a_order[:self.k]])\n", + " b = tmp_labels.count(1)\n", + " chosen_labels.append(b/self.k)\n", + " \n", + " ##########################\n", + " # return, for each test point, the ratio:\n", + " # number of the k nearest neighbors labeled positive / k\n", + " return np.array(chosen_labels)\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's time to build and evaluate our model now. Remember you need to provide values to the $p$, $v$ parameters of the \"partition\" function and to $file\_path$ for the \"preprocess\" function." + ] + }, + { + "cell_type": "code", + "execution_count": 284, + "metadata": {}, + "outputs": [], + "source": [ + "# populate the keyword arguments dictionary kwargs\n", + "kwargs = {'p': 0.3, 'v': 0.1, 'seed': 123, 'file_path': 'madelon_train'}\n", + "# initialize the model\n", + "my_model = kNN(preprocessor_f=preprocess, partition_f=partition, **kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Assign a value to $k$ and fit the kNN model. You do not need to change the value of the $threshold$ parameter yet." + ] + }, + { + "cell_type": "code", + "execution_count": 285, + "metadata": {}, + "outputs": [], + "source": [ + "kwargs_f = {'metric': 'Euclidean'}\n", + "my_model.fit(k = 10, distance_f=distance, **kwargs_f)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluate your model on the test data and report your accuracy. Also, calculate and report the confidence interval on the generalization error estimate."
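A sketch of one standard way to compute this uses the normal approximation to the binomial (z = 1.96 for a 95% interval). `error_with_ci` is a hypothetical helper, and it assumes 1D arrays of ±1 true labels and the positive-neighbor ratios returned by "predict":

```python
import numpy as np

def error_with_ci(true_labels, pred_ratios, threshold=0.5, z=1.96):
    # threshold the k-NN ratios into hard {-1, +1} predictions
    preds = np.where(pred_ratios > threshold, 1, -1)
    n = len(true_labels)
    error = np.mean(preds != true_labels)
    # normal-approximation interval: error +- z * sqrt(error * (1 - error) / n)
    half_width = z * np.sqrt(error * (1 - error) / n)
    return error, (error - half_width, error + half_width)
```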
+ ] + }, + { + "cell_type": "code", + "execution_count": 286, + "metadata": {}, + "outputs": [], + "source": [ + "final_labels = my_model.predict(my_model.test_indices)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have the true labels and the predicted ones from our model, we can build a confusion matrix and see how accurate our model is. Implement the \"conf_matrix\" function that takes as input an array of true labels ($true$) and an array of predicted labels ($pred$). It should output a numpy.ndarray. " + ] + }, + { + "cell_type": "code", + "execution_count": 289, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([196, 106, 193, 105])" + ] + }, + "execution_count": 289, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# You should see array([ 196, 106, 193, 105]) with seed 123\n", + "conf_matrix(my_model.labels[my_model.test_indices], final_labels, threshold= 0.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ROC curves are a good way to visualize sensitivity vs. 1-specificity for varying cut off points. Now, implement a \"ROC\" function that predicts the labels of the test set examples using different $threshold$ values in \"fit\" and plot the ROC curve. \"ROC\" takes a list containing different $threshold$ parameter values to try and returns (sensitivity, 1-specificity) pair for each $parameter$ value." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def ROC(true, pred, value_list):\n", + " '''\n", + " true: nx1 array of true labels for test set\n", + " pred: nx1 array of predicted labels for test set\n", + " Calculate sensitivity and 1-specificity for each point in value_list\n", + " Return two nX1 arrays: sens (for sensitivities) and spec_ (for 1-specificities)\n", + " '''\n", + " \n", + " \n", + " return sens, spec_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can finally create the confusion matrix and plot the ROC curve for our kNN classifier." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# confusion matrix\n", + "conf_matrix(true_classes, predicted_classes)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ROC curve\n", + "roc_sens, roc_spec_ = ROC(true_classes, predicted_classes, np.arange(0.1, 1.0, 0.1))\n", + "plt.plot(roc_sens, roc_spec_)\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb b/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..da45e04933867f555cea589193027d67a511e628 --- /dev/null +++ b/ProgrammingAssignment_1/ProgrammingAssignment1.ipynb @@ -0,0 +1,336 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# $k$-Nearest Neighbor\n", + "\n", + "We'll implement $k$-Nearest Neighbor ($k$-NN) algorithm for this assignment. 
We recommend using the [Madelon](https://archive.ics.uci.edu/ml/datasets/Madelon) dataset, although it is not mandatory. If you choose to use a different dataset, it should meet the following criteria:\n", + "* dependent variable should be binary (suited for binary classification)\n", + "* number of features (attributes) should be at least 50\n", + "* number of examples (instances) should be between 1,000 and 5,000\n", + "\n", + "A skeleton of a general supervised learning model is provided in \"model.ipynb\". Please look through it and complete the \"preprocess\" and \"partition\" methods. \n", + "\n", + "### Assignment Goals:\n", + "In this assignment, we will:\n", + "* learn to split a dataset into training/validation/test partitions \n", + "* use the validation dataset to find a good value for $k$\n", + "* having found the \"best\" $k$, obtain final performance measures:\n", + " * accuracy, generalization error and ROC curve\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can use numpy for array operations and matplotlib for plotting for this assignment. Please do not add other libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code makes the Model class and relevant functions available from model.ipynb." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%run 'model.ipynb'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Choice of distance metric plays an important role in the performance of $k$-NN. Let's start by implementing a distance method in the \"distance\" function below. It should take two data points and the name of the metric and return a scalar value." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def distance(x, y, metric):\n", + " '''\n", + " Args:\n", + " x: ndarray \n", + " 1D array containing coordinates for a point\n", + " y: ndarray\n", + " 1D array containing coordinates for a point\n", + " metric: str\n", + " Euclidean, Hamming \n", + " Returns:\n", + " dist: float\n", + " scalar distance between x and y\n", + " '''\n", + " if metric == 'Euclidean':\n", + " raise NotImplementedError\n", + " elif metric == 'Hamming':\n", + " raise NotImplementedError\n", + " else:\n", + " raise ValueError('{} is not a valid metric.'.format(metric))\n", + " return dist # scalar distance btw x and y" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### $k$-NN Class Methods" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can start implementing our $k$-NN classifier. The $k$-NN class inherits from the Model class. You'll need to implement the \"fit\" and \"predict\" methods. Use the \"distance\" function you defined above. The \"fit\" method takes $k$ as an argument. \"predict\" takes as input an $m x d$ array containing $m$ $d$-dimensional feature vectors for examples and outputs the predicted class and the ratio of positive examples in the $k$ nearest neighbors." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class kNN(Model):\n", + " '''\n", + " Inherits Model class. Implements the k-NN algorithm for classification.\n", + " '''\n", + " \n", + " def fit(self, k, distance_f, **kwargs):\n", + " '''\n", + " Fit the model.
This is pretty straightforward for k-NN.\n", + " '''\n", + " # set self.k, self.distance_f, self.distance_metric\n", + " raise NotImplementedError\n", + " \n", + " return\n", + " \n", + " \n", + " def predict(self, test_indices):\n", + " \n", + " raise NotImplementedError\n", + " \n", + " pred = []\n", + " # for each point in test points\n", + " # use your implementation of distance function\n", + " # distance_f(..., distance_metric)\n", + " # to find the labels of k-nearest neighbors. \n", + " \n", + " # Find the ratio of the positive labels\n", + " # and append to pred with pred.append(ratio).\n", + " \n", + "\n", + " return np.array(pred)\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Build and Evaluate the Model (Accuracy, Confidence Interval, Confusion Matrix)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's time to build and evaluate our model now. Remember you need to provide values to the $p$, $v$ parameters of the \"partition\" function and to $file\_path$ for the \"preprocess\" function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# populate the keyword arguments dictionary kwargs\n", + "kwargs = {'p': 0.3, 'v': 0.1, 'seed': 123, 'file_path': 'madelon_train'}\n", + "# initialize the model\n", + "my_model = kNN(preprocessor_f=preprocess, partition_f=partition, **kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Assign a value to $k$ and fit the kNN model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "kwargs_f = {'metric': 'Euclidean'}\n", + "my_model.fit(k = 10, distance_f=distance, **kwargs_f)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Evaluate your model on the test data and report your **accuracy**. Also, calculate and report the confidence interval on the generalization **error** estimate." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "final_labels = my_model.predict(my_model.test_indices)\n", + "\n", + "# For now, we will consider a data point as predicted in the positive class if more than 0.5 \n", + "# of its k-neighbors are positive.\n", + "threshold = 0.5\n", + "# Calculate accuracy and generalization error with confidence interval here." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " ### Plotting a learning curve\n", + " \n", + "A learning curve shows how error changes as the training set size increases. For more information, see [learning curves](https://www.dataquest.io/blog/learning-curves-machine-learning/).\n", + "We'll plot the error values for training and validation data while varying the size of the training set. Report a training set size that gives a good balance between bias and variance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# try sizes 0, 100, 200, 300, ..., up to the largest multiple of 100 <= train_size\n", + "training_sizes = np.arange(0, my_model.train_size + 1, 100)\n", + "\n", + "# Calculate error for each entry in training_sizes\n", + "# for training and validation sets and populate\n", + "# error_train and error_val arrays.
Each entry in these arrays\n", + " should correspond to each entry in training_sizes.\n", + "\n", + "plt.plot(training_sizes, error_train, 'r', label = 'training_error')\n", + "plt.plot(training_sizes, error_val, 'g', label = 'validation_error')\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Computing the confusion matrix for $k = 10$\n", + "Now that we have the true labels and the predicted ones from our model, we can build a confusion matrix and see how accurate our model is. Implement the \"conf_matrix\" function (in model.ipynb) that takes as input an array of true labels ($true$) and an array of predicted labels ($pred$). It should output a numpy.ndarray. You do not need to change the value of the threshold parameter yet." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "conf_matrix(my_model.labels[my_model.test_indices], final_labels, threshold = 0.5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Finding a good value for $k$\n", + "\n", + "We can use the validation set to come up with a $k$ value that results in better performance in terms of accuracy. Additionally, in some cases, predicting examples from a certain class correctly is more critical than for other classes. In those cases, we can use the confusion matrix to find a good trade-off between correct and wrong predictions, allowing more wrong predictions in some classes in order to predict more examples correctly in a critical class.\n", + "\n", + "Below, calculate the accuracies for different values of $k$ using the validation set. Report a good $k$ value and use it in the analyses that follow this section." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Try different values of k.\n", + "# Calculate accuracies for the validation set.\n", + "# Report a good k value that you'll use in the following analyses." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ROC curve and confusion matrix for the final model\n", + "ROC curves are a good way to visualize sensitivity vs. 1-specificity for varying cutoff points. Now, implement, in \"model.ipynb\", a \"ROC\" function that predicts the labels of the test set examples using different $threshold$ values in \"predict\" and plot the ROC curve. \"ROC\" takes a list containing different $threshold$ parameter values to try and returns two arrays: one where each entry is the sensitivity at a given threshold and the other where entries are 1-specificities." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can finally create the confusion matrix and plot the ROC curve for our optimal $k$-NN classifier. (Use the $k$ value you found above.) We'll plot the ROC curve for values between 0.1 and 1.0."
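For reference, a sketch of how such a function could be implemented (hypothetical `ROC_sketch` name and a `(true, pred, value_list)` signature; assumes 1D arrays of ±1 true labels and the positive-neighbor ratios from "predict"):

```python
import numpy as np

def ROC_sketch(true, pred, value_list):
    # sweep the threshold; at each value compute sensitivity and 1 - specificity
    sens, spec_ = [], []
    for threshold in value_list:
        hard = np.where(pred > threshold, 1, -1)
        tp = np.sum((hard == 1) & (true == 1))
        fn = np.sum((hard == -1) & (true == 1))
        fp = np.sum((hard == 1) & (true == -1))
        tn = np.sum((hard == -1) & (true == -1))
        sens.append(tp / (tp + fn))   # sensitivity = TP / (TP + FN)
        spec_.append(fp / (fp + tn))  # 1 - specificity = FP / (FP + TN)
    return np.array(sens), np.array(spec_)
```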
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# confusion matrix\n", + "conf_matrix(true_classes, predicted_classes)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ROC curve\n", + "roc_sens, roc_spec_ = ROC(my_model, my_model.test_indices, np.arange(0.1, 1.0, 0.1))\n", + "plt.plot(roc_sens, roc_spec_)\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/ProgrammingAssignment_2/ProgrammingAssignment2.ipynb b/ProgrammingAssignment_2/ProgrammingAssignment2.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..b92ee64c0185b3f5f3da3fd2c7d984f4e2581d72 --- /dev/null +++ b/ProgrammingAssignment_2/ProgrammingAssignment2.ipynb @@ -0,0 +1,451 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Linear Regression & Naive Bayes\n", + "\n", + "We'll implement linear regression & Naive Bayes algorithms for this assignment. Please modify the \"preprocess\" method in this notebook and the \"partition\" method in \"model.ipynb\" to suit your datasets for this assignment. In the linear regression part of this assignment, we have a small dataset available to us. We won't have examples to spare for a validation set; instead, we'll use cross-validation to tune hyperparameters. In our Naive Bayes implementation, we will not use a validation set or cross-validation.\n", + "\n", + "### Assignment Goals:\n", + "In this assignment, we will:\n", + "* implement linear regression\n", + " * use gradient descent for optimization\n", + " * use residuals to decide if we need a polynomial model\n", + " * change our model to quadratic/cubic regression and use cross-validation to find the \"best\" polynomial degree\n", + " * implement regularization techniques\n", + " * $l_1$/$l_2$ regularization\n", + " * use cross-validation to find a good regularization parameter $\lambda$\n", + " \n", + "* implement Naive Bayes\n", + " * address sparse data problem with **pseudocounts** (**$m$-estimate**)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can use numpy for array operations and matplotlib for plotting for this assignment. Please do not add other libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code makes the Model class and relevant functions available from \"model.ipynb\"." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%run 'model.ipynb'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll implement the \"preprocess\" function and the \"kfold\" function for $k$-fold cross-validation in \"model.ipynb\". 5 and 10 are commonly used values for $k$. You can use either one of them."
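As a reference, here is a sketch of one way "kfold" could be structured (hypothetical `kfold_sketch` name; it assumes the function returns a dict mapping fold number to that fold's held-out indices, which is one reading of how `data_dict` is indexed in the class below):

```python
import numpy as np

def kfold_sketch(indices, k):
    # shuffle once, then split into k roughly equal folds
    shuffled = np.random.permutation(indices)
    folds = np.array_split(shuffled, k)
    # map fold number -> held-out indices
    return {i: fold for i, fold in enumerate(folds)}
```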
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess(file_path):\n", + " '''\n", + " file_path: where to read the dataset from\n", + " Returns:\n", + " features: ndarray\n", + " nxd array containing `float` feature values\n", + " labels: ndarray\n", + " 1D array containing `float` labels\n", + " '''\n", + " # You might find np.genfromtxt useful for reading in the file. Be careful with the file delimiter, \n", + " # e.g. for comma-separated files use delimiter=',' argument.\n", + " \n", + " raise NotImplementedError\n", + "\n", + " \n", + " return features, labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll need to use mean squared error (mse) for linear regression. Next, implement the \"mse\" function that takes predicted and true y values, and returns the \"mse\" between them." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def mse(y_pred, y_true):\n", + " '''\n", + " Args:\n", + " y_pred: ndarray \n", + " 1D array containing data with `float` type. Values predicted by our method\n", + " y_true: ndarray\n", + " 1D array containing data with `float` type. True y values\n", + " Returns:\n", + " cost: float\n", + " A single value. Mean squared error between y_pred and y_true.\n", + " \n", + " '''\n", + " raise NotImplementedError\n", + "\n", + " return cost\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can define our linear_regression model class now. Implement the \"fit\" and \"predict\" methods. Keep the default values for now; later we'll change the $polynomial\_degree$. If your \"kfold\" implementation works as it should, each call to \"fit\" and \"predict\" will advance to the next fold." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class linear_regression(Model):\n", + " def __init__(self, preprocessor_f, partition_f, **kwargs):\n", + " super().__init__(preprocessor_f, partition_f, **kwargs)\n", + " # k_fold is passed through kwargs at initialization\n", + " if kwargs.get('k_fold', False):\n", + " self.data_dict = kfold(self.train_indices, k = kwargs['k'])\n", + " # counter for train fold\n", + " self.i = 0\n", + " # counter for test fold\n", + " self.j = 0 \n", + " \n", + " # You can disregard polynomial_degree and regularizer in your first pass\n", + " def fit(self, learning_rate = 0.001, epochs = 1000, regularizer=None, polynomial_degree=1, **kwargs):\n", + " \n", + " train_features = self.train_features[self.data_dict[self.i]]\n", + " train_labels = self.train_labels[self.data_dict[self.i]]\n", + " \n", + " # initialize theta_curr randomly\n", + " \n", + " # for each epoch\n", + " # compute model predictions for training examples\n", + " y_hat = None\n", + " \n", + " if regularizer is None:\n", + " \n", + " # use mse function to find the cost\n", + " cost = None\n", + " # calculate gradients wrt theta\n", + " grad_theta = None\n", + " # update theta\n", + " theta_curr = None\n", + " raise NotImplementedError\n", + " \n", + " else:\n", + " # take regularization into account\n", + " raise NotImplementedError\n", + " \n", + " # update the model parameters to be used in predict method\n", + " self.theta = theta_curr\n", + " # increment counter for next fold\n", + " self.i += 1\n", + " \n", + " def predict(self, indices):\n", + " \n", + " # obtain test features for current fold\n", + " \n", + " test_features = self.train_features[self.data_dict[self.j]]\n", + " raise NotImplementedError\n", + " \n", + " # increment counter for
next fold\n", + " self.j += 1\n", + " return y_hat\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# populate the keyword arguments dictionary kwargs\n", + "# p: proportion for test data\n", + "# k: parameter for k-fold cross-validation\n", + "kwargs = {'p': 0.3, 'v': 0.1, 'file_path': 'madelon', 'k': 1}\n", + "# initialize the model\n", + "my_model = linear_regression(preprocessor_f=preprocess, partition_f=partition, k_fold=True, **kwargs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# use fit_kwargs to pass arguments to regularization function\n", + "# fit_kwargs is empty for now since we are not applying \n", + "# regularization yet\n", + "fit_kwargs = {}\n", + "my_model.fit(**fit_kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Residuals are the differences between the predicted value $y_{hat}$ and the true value $y$ for each example. Predict $y_{hat}$ for the validation set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "y_hat_val = my_model.predict(my_model.val_indices)\n", + "residuals = my_model.labels[my_model.val_indices] - y_hat_val\n", + "plt.plot(residuals)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the data is better suited for quadratic/cubic regression, regions of positive and negative residuals will alternate in the plot. Regardless, modify \"fit\" and \"predict\" in the class definition to raise the feature values to $polynomial\_degree$. You can directly make the modification in the above definition; do not repeat the definition. Use the validation set to find the degree of polynomial that results in the lowest _mse_." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "kwargs = {'p': 0.3, 'file_path': 'madelon', 'k': 5}\n", + "# initialize the model\n", + "my_model = linear_regression(preprocessor_f=preprocess, partition_f=partition, k_fold=True, **kwargs)\n", + "\n", + "fit_kwargs = {}\n", + "\n", + "# calculate mse for each of the linear, quadratic and cubic models\n", + "# and append to mses_for_models\n", + "\n", + "mses_for_models = []\n", + "\n", + "for i in range(1,4):\n", + " kfold_mse = 0\n", + " for k in range(5):\n", + " my_model.fit(polynomial_degree=i, **fit_kwargs)\n", + " pred = my_model.predict(my_model.val_indices)\n", + " kfold_mse += mse(pred, my_model.labels[my_model.val_indices])\n", + " # average over the 5 folds\n", + " mses_for_models.append(kfold_mse/5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define the \"regularization\" function, which implements $l_1$ and $l_2$ regularization. You'll use this function in the \"fit\" method of the \"linear_regression\" class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def regularization(weights, method):\n", + " '''\n", + " Args:\n", + " weights: ndarray\n", + " 1D array with `float` entries\n", + " method: str\n", + " Returns:\n", + " value: float\n", + " A single value.
Regularization term that will be used in the cost function in \"fit\".\n", + " '''\n", + " if method == \"l1\":\n", + " value = None\n", + " raise NotImplementedError\n", + " elif method == \"l2\":\n", + " value = None\n", + " raise NotImplementedError\n", + " return value" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using cross-validation and the value of $polynomial\_degree$ you found above, try different values of $\lambda$ to find a good value that results in low _mse_. Report the best values you found for the hyperparameters and the resulting _mse_." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Naive Bayes Spam Classifier\n", + "\n", + "This part is independent of the above part. We will use the Enron spam/ham dataset. You will need to decompress the provided \"enron.tar.gz\" folder. The two subfolders contain spam and ham emails.\n", + "\n", + "The features for the Naive Bayes algorithm will be word counts. The number of features will be equal to the number of unique words seen in the whole dataset. The \"preprocess\" function will be more involved this time. You'll need to remove punctuation marks (you may find string.punctuation useful), tokenize the text into words (remember to lowercase everything) and count the number of words." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess_bayes(folder_path):\n", + " '''\n", + " Args:\n", + " folder_path: str\n", + " Where to read the dataset from.\n", + " Returns:\n", + " features: ndarray\n", + " nxd array with n emails, d words. features_ij is the count of word_j in email_i\n", + " labels: ndarray\n", + " 1D array of labels (1: spam, 0: ham)\n", + " '''\n", + " # remove punctuation marks\n", + " # tokenize, lowercase\n", + " # count number of words in each email\n", + " \n", + " raise NotImplementedError\n", + "\n", + " \n", + " return features, labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Implement the \"fit\" and \"predict\" methods for Naive Bayes. Use the $m$-estimate to address missing attribute values (also called **Laplace smoothing** when $m$ = 1). In general, $m$ values should be small. We'll use $m$ = 1."
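With word counts, the $m$-estimate of the likelihood of word $i$ in class $c$ is $(n_{i,c} + m) / (n_c + m \cdot d)$, where $n_{i,c}$ is the count of word $i$ in class $c$, $n_c$ is the total word count in class $c$, and $d$ is the vocabulary size. A sketch of how this could be computed (hypothetical `m_estimate` helper; names mirror the class skeleton below):

```python
import numpy as np

def m_estimate(word_counts, m=1):
    # word_counts: 1D array of per-word counts for one class
    n_c = word_counts.sum()   # total words (counting repeats) in the class
    d = len(word_counts)      # vocabulary size
    return (word_counts + m) / (n_c + m * d)

# e.g. self.ham_likelihood = m_estimate(self.ham_word_counts, m)
```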
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class naive_bayes(Model):\n", + " def __init__(self, preprocessor_f, partition_f, **kwargs):\n", + " super().__init__(preprocessor_f, partition_f, **kwargs)\n", + " \n", + " def fit(self, m, **kwargs):\n", + " \n", + " self.ham_word_counts = np.zeros(self.feat_dim)\n", + " self.spam_word_counts = np.zeros(self.feat_dim)\n", + " \n", + " # find class prior probabilities\n", + " self.ham_prior = None\n", + " self.spam_prior = None\n", + " # find the number of words (counting repeats) summed across all emails in a class\n", + " n = None\n", + " # find the number of each word summed across all emails in a class\n", + " # populate self.ham_word_counts and self.spam_word_counts\n", + " \n", + " # find the likelihood of a word_i in each class\n", + " # 1D ndarray\n", + " self.ham_likelihood = None\n", + " self.spam_likelihood = None\n", + " \n", + " \n", + " def predict(self, indices):\n", + " '''\n", + " Returns:\n", + " preds: ndarray\n", + " 1D binary array containing predicted labels\n", + " '''\n", + " raise NotImplementedError\n", + " \n", + " return preds\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can fit our model and see how accurately it predicts spam emails now. We won't use a validation set or cross-validation this time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# populate the keyword arguments dictionary kwargs\n", + "# p: proportion for test data\n", + "kwargs = {'p': 0.3, 'file_path': 'enron'}\n", + "# initialize the model\n", + "my_model = naive_bayes(preprocessor_f=preprocess_bayes, partition_f=partition, **kwargs)\n", + "# fit with m = 1 (Laplace smoothing)\n", + "my_model.fit(m = 1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use the \"conf_matrix\" function we defined before to see how the error is distributed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preds = my_model.predict(my_model.test_indices)\n", + "tp, tn, fp, fn = conf_matrix(true = my_model.labels[my_model.test_indices], pred = preds)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/enron.tar.gz b/enron.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..11982f4502552ca13381ef89804e253065b415c0 Binary files /dev/null and b/enron.tar.gz differ