From 0da80ae22d8a8fd4774cb3f695ea8e106a92a40a Mon Sep 17 00:00:00 2001 From: xiaoruiliu Date: Thu, 29 Aug 2024 15:20:30 -0700 Subject: [PATCH] comment out future notes, only leave introduction lecture notes student facing --- _quarto.yml | 50 +- docs/index.html | 150 ---- docs/intro_lec/introduction.html | 154 ---- docs/search.json | 1310 ------------------------------ 4 files changed, 25 insertions(+), 1639 deletions(-) diff --git a/_quarto.yml b/_quarto.yml index 5d0b0949c..c06d8eac2 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -17,32 +17,32 @@ book: chapters: - index.md - intro_lec/introduction.qmd - - pandas_1/pandas_1.qmd - - pandas_2/pandas_2.qmd - - pandas_3/pandas_3.qmd - - eda/eda.qmd - - regex/regex.qmd - - visualization_1/visualization_1.qmd - - visualization_2/visualization_2.qmd - - sampling/sampling.qmd - - intro_to_modeling/intro_to_modeling.qmd - - constant_model_loss_transformations/loss_transformations.qmd - - ols/ols.qmd - - gradient_descent/gradient_descent.qmd - - feature_engineering/feature_engineering.qmd - - case_study_HCE/case_study_HCE.qmd - - cv_regularization/cv_reg.qmd - - probability_1/probability_1.qmd - - probability_2/probability_2.qmd - - inference_causality/inference_causality.qmd + # - pandas_1/pandas_1.qmd + # - pandas_2/pandas_2.qmd + # - pandas_3/pandas_3.qmd + # - eda/eda.qmd + # - regex/regex.qmd + # - visualization_1/visualization_1.qmd + # - visualization_2/visualization_2.qmd + # - sampling/sampling.qmd + # - intro_to_modeling/intro_to_modeling.qmd + # - constant_model_loss_transformations/loss_transformations.qmd + # - ols/ols.qmd + # - gradient_descent/gradient_descent.qmd + # - feature_engineering/feature_engineering.qmd + # - case_study_HCE/case_study_HCE.qmd + # - cv_regularization/cv_reg.qmd + # - probability_1/probability_1.qmd + # - probability_2/probability_2.qmd + # - inference_causality/inference_causality.qmd # - case_study_climate/case_study_climate.qmd - - sql_I/sql_I.qmd - - sql_II/sql_II.qmd - - logistic_regression_1/logistic_reg_1.qmd - - logistic_regression_2/logistic_reg_2.qmd - - pca_1/pca_1.qmd - - pca_2/pca_2.qmd - - clustering/clustering.qmd + # - sql_I/sql_I.qmd + # - sql_II/sql_II.qmd + # - logistic_regression_1/logistic_reg_1.qmd + # - logistic_regression_2/logistic_reg_2.qmd + # - pca_1/pca_1.qmd + # - pca_2/pca_2.qmd + # - clustering/clustering.qmd sidebar: logo: "data100_logo.png" diff --git a/docs/index.html b/docs/index.html index a6e30b14f..70659ad0a 100644 --- a/docs/index.html +++ b/docs/index.html @@ -132,156 +132,6 @@ 1  Introduction - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/intro_lec/introduction.html b/docs/intro_lec/introduction.html index 9e123bb15..9765b7773 100644 --- a/docs/intro_lec/introduction.html +++ b/docs/intro_lec/introduction.html @@ -30,7 +30,6 @@ - @@ -123,156 +122,6 @@ 1  Introduction - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -963,9 +812,6 @@

- - 2  Pandas I - diff --git a/docs/search.json b/docs/search.json index 3fb205f9e..ed56e2191 100644 --- a/docs/search.json +++ b/docs/search.json @@ -48,1315 +48,5 @@ "crumbs": [ "1  Introduction" ] - }, - { - "objectID": "pandas_1/pandas_1.html", - "href": "pandas_1/pandas_1.html", - "title": "2  Pandas I", - "section": "", - "text": "2.1 Tabular Data\nData scientists work with data stored in a variety of formats. This class focuses primarily on tabular data — data that is stored in a table.\nTabular data is one of the most common systems that data scientists use to organize data. This is in large part due to the simplicity and flexibility of tables. Tables allow us to represent each observation, or instance of collecting data from an individual, as its own row. We can record each observation’s distinct characteristics, or features, in separate columns.\nTo see this in action, we’ll explore the elections dataset, which stores information about political candidates who ran for president of the United States in previous years.\nIn the elections dataset, each row (blue box) represents one instance of a candidate running for president in a particular year. For example, the first row represents Andrew Jackson running for president in the year 1824. Each column (yellow box) represents one characteristic piece of information about each presidential candidate. For example, the column named “Result” stores whether or not the candidate won the election.\nYour work in Data 8 helped you grow very familiar with using and interpreting data stored in a tabular format. Back then, you used the Table class of the datascience library, a special programming library created specifically for Data 8 students.\nIn Data 100, we will be working with the programming library pandas, which is generally accepted in the data science community as the industry- and academia-standard tool for manipulating tabular data (as well as the inspiration for Petey, our panda bear mascot).\nUsing pandas, we can", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_1/pandas_1.html#tabular-data", - "href": "pandas_1/pandas_1.html#tabular-data", - "title": "2  Pandas I", - "section": "", - "text": "Arrange data in a tabular format.\nExtract useful information filtered by specific conditions.\nOperate on data to gain new insights.\nApply NumPy functions to our data (our friends from Data 8).\nPerform vectorized computations to speed up our analysis (Lab 1).", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_1/pandas_1.html#series-dataframes-and-indices", - "href": "pandas_1/pandas_1.html#series-dataframes-and-indices", - "title": "2  Pandas I", - "section": "2.2 Series, DataFrames, and Indices", - "text": "2.2 Series, DataFrames, and Indices\nTo begin our work in pandas, we must first import the library into our Python environment. This will allow us to use pandas data structures and methods in our code.\n\n# `pd` is the conventional alias for Pandas, as `np` is for NumPy\nimport pandas as pd\n\nThere are three fundamental data structures in pandas:\n\nSeries: 1D labeled array data; best thought of as columnar data.\nDataFrame: 2D tabular data with rows and columns.\nIndex: A sequence of row/column labels.\n\nDataFrames, Series, and Indices can be represented visually in the following diagram, which considers the first few rows of the elections dataset.\n\n\n\nNotice how the DataFrame is a two-dimensional object — it contains both rows and columns. 
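In code, the same picture looks roughly like this — a small stand-in sketch (not part of the original notes; the name mini is ours), with values copied from the first five rows of the elections table shown later in this chapter:\n\n# A tiny stand-in for the first rows of elections (pandas was imported above as pd)\nmini = pd.DataFrame({\"Candidate\": [\"Andrew Jackson\", \"John Quincy Adams\", \"Andrew Jackson\", \"John Quincy Adams\", \"Andrew Jackson\"], \"Result\": [\"loss\", \"win\", \"win\", \"loss\", \"win\"]})\nmini[\"Result\"] # a Series: one column of the DataFrame\nmini.index # the shared Index: RangeIndex(start=0, stop=5, step=1)\n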
The Series above is a singular column of this DataFrame, namely the Result column. Both contain an Index, or a shared list of row labels (the integers from 0 to 4, inclusive).\n\n2.2.1 Series\nA Series represents a column of a DataFrame; more generally, it can be any 1-dimensional array-like object. It contains both:\n\nA sequence of values of the same type.\nA sequence of data labels called the index.\n\nIn the cell below, we create a Series named s.\n\ns = pd.Series([\"welcome\", \"to\", \"data 100\"])\ns\n\n0 welcome\n1 to\n2 data 100\ndtype: object\n\n\n\n # Accessing data values within the Series\n s.values\n\narray(['welcome', 'to', 'data 100'], dtype=object)\n\n\n\n # Accessing the Index of the Series\n s.index\n\nRangeIndex(start=0, stop=3, step=1)\n\n\nBy default, the index of a Series is a sequential list of integers beginning from 0. Optionally, a manually specified list of desired indices can be passed to the index argument.\n\ns = pd.Series([-1, 10, 2], index = [\"a\", \"b\", \"c\"])\ns\n\na -1\nb 10\nc 2\ndtype: int64\n\n\n\ns.index\n\nIndex(['a', 'b', 'c'], dtype='object')\n\n\nIndices can also be changed after initialization.\n\ns.index = [\"first\", \"second\", \"third\"]\ns\n\nfirst -1\nsecond 10\nthird 2\ndtype: int64\n\n\n\ns.index\n\nIndex(['first', 'second', 'third'], dtype='object')\n\n\n\n2.2.1.1 Selection in Series\nMuch like when working with NumPy arrays, we can select a single value or a set of values from a Series. To do so, there are three primary methods:\n\nA single label.\nA list of labels.\nA filtering condition.\n\nTo demonstrate this, let’s define the Series ser.\n\nser = pd.Series([4, -2, 0, 6], index = [\"a\", \"b\", \"c\", \"d\"])\nser\n\na 4\nb -2\nc 0\nd 6\ndtype: int64\n\n\n\n2.2.1.1.1 A Single Label\n\n# We return the value stored at the index label \"a\"\nser[\"a\"] \n\n4\n\n\n\n\n2.2.1.1.2 A List of Labels\n\n# We return a Series of the values stored at the index labels \"a\" and \"c\"\nser[[\"a\", \"c\"]] \n\na 4\nc 0\ndtype: int64\n\n\n\n\n2.2.1.1.3 A Filtering Condition\nPerhaps the most interesting (and useful) method of selecting data from a Series is by using a filtering condition.\nFirst, we apply a boolean operation to the Series. This creates a new Series of boolean values.\n\n# Filter condition: select all elements greater than 0\nser > 0 \n\na True\nb False\nc False\nd True\ndtype: bool\n\n\nWe then use this boolean condition to index into our original Series. pandas will select only the entries in the original Series that satisfy the condition.\n\nser[ser > 0] \n\na 4\nd 6\ndtype: int64\n\n\n\n\n\n\n2.2.2 DataFrames\nTypically, we will work with Series using the perspective that they are columns in a DataFrame. We can think of a DataFrame as a collection of Series that all share the same Index.\nIn Data 8, you encountered the Table class of the datascience library, which represented tabular data. In Data 100, we’ll be using the DataFrame class of the pandas library.\n\n2.2.2.1 Creating a DataFrame\nThere are many ways to create a DataFrame. Here, we will cover the most popular approaches:\n\nFrom a CSV file.\nUsing a list and column name(s).\nFrom a dictionary.\nFrom a Series.\n\nMore generally, the syntax for creating a DataFrame is:\n pandas.DataFrame(data, index, columns)\n\n2.2.2.1.1 From a CSV file\nIn Data 100, our data are typically stored in a CSV (comma-separated values) file format. We can import a CSV file into a DataFrame by passing the data path as an argument to the following pandas function.  
pd.read_csv(\"filename.csv\")\nWith our new understanding of pandas in hand, let’s return to the elections dataset from before. Now, we can recognize that it is represented as a pandas DataFrame.\n\nelections = pd.read_csv(\"data/elections.csv\")\nelections\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\n\n\n...\n...\n...\n...\n...\n...\n...\n\n\n177\n2016\nJill Stein\nGreen\n1457226\nloss\n1.073699\n\n\n178\n2020\nJoseph Biden\nDemocratic\n81268924\nwin\n51.311515\n\n\n179\n2020\nDonald Trump\nRepublican\n74216154\nloss\n46.858542\n\n\n180\n2020\nJo Jorgensen\nLibertarian\n1865724\nloss\n1.177979\n\n\n181\n2020\nHoward Hawkins\nGreen\n405035\nloss\n0.255731\n\n\n\n\n182 rows × 6 columns\n\n\n\nThis code stores our DataFrame object in the elections variable. Upon inspection, our elections DataFrame has 182 rows and 6 columns (Year, Candidate, Party, Popular Vote, Result, %). Each row represents a single record — in our example, a presidential candidate from some particular year. Each column represents a single attribute or feature of the record.\n\n\n2.2.2.1.2 Using a List and Column Name(s)\nWe’ll now explore creating a DataFrame with data of our own.\nConsider the following examples. The first code cell creates a DataFrame with a single column Numbers.\n\ndf_list = pd.DataFrame([1, 2, 3], columns=[\"Numbers\"])\ndf_list\n\n\n\n\n\n\n\n\nNumbers\n\n\n\n\n0\n1\n\n\n1\n2\n\n\n2\n3\n\n\n\n\n\n\n\nThe second creates a DataFrame with the columns Numbers and Description. Notice how a 2D list of values is required to initialize the second DataFrame — each nested list represents a single row of data.\n\ndf_list = pd.DataFrame([[1, \"one\"], [2, \"two\"]], columns = [\"Number\", \"Description\"])\ndf_list\n\n\n\n\n\n\n\n\nNumber\nDescription\n\n\n\n\n0\n1\none\n\n\n1\n2\ntwo\n\n\n\n\n\n\n\n\n\n2.2.2.1.3 From a Dictionary\nA third (and more common) way to create a DataFrame is with a dictionary. The dictionary keys represent the column names, and the dictionary values represent the column values.\nBelow are two ways of implementing this approach. The first is based on specifying the columns of the DataFrame, whereas the second is based on specifying the rows of the DataFrame.\n\ndf_dict = pd.DataFrame({\n \"Fruit\": [\"Strawberry\", \"Orange\"], \n \"Price\": [5.49, 3.99]\n})\ndf_dict\n\n\n\n\n\n\n\n\nFruit\nPrice\n\n\n\n\n0\nStrawberry\n5.49\n\n\n1\nOrange\n3.99\n\n\n\n\n\n\n\n\ndf_dict = pd.DataFrame(\n [\n {\"Fruit\":\"Strawberry\", \"Price\":5.49}, \n {\"Fruit\": \"Orange\", \"Price\":3.99}\n ]\n)\ndf_dict\n\n\n\n\n\n\n\n\nFruit\nPrice\n\n\n\n\n0\nStrawberry\n5.49\n\n\n1\nOrange\n3.99\n\n\n\n\n\n\n\n\n\n2.2.2.1.4 From a Series\nEarlier, we explained how a Series was synonymous to a column in a DataFrame. It follows, then, that a DataFrame is equivalent to a collection of Series, which all share the same Index.\nIn fact, we can initialize a DataFrame by merging two or more Series. 
Consider the Series s_a and s_b.\n\n# Notice how our indices, or row labels, are the same\n\ns_a = pd.Series([\"a1\", \"a2\", \"a3\"], index = [\"r1\", \"r2\", \"r3\"])\ns_b = pd.Series([\"b1\", \"b2\", \"b3\"], index = [\"r1\", \"r2\", \"r3\"])\n\nWe can turn individual Series into a DataFrame using two common methods (shown below):\n\npd.DataFrame(s_a)\n\n\n\n\n\n\n\n\n0\n\n\n\n\nr1\na1\n\n\nr2\na2\n\n\nr3\na3\n\n\n\n\n\n\n\n\ns_b.to_frame()\n\n\n\n\n\n\n\n\n0\n\n\n\n\nr1\nb1\n\n\nr2\nb2\n\n\nr3\nb3\n\n\n\n\n\n\n\nTo merge the two Series and specify their column names, we use the following syntax:\n\npd.DataFrame({\n \"A-column\": s_a, \n \"B-column\": s_b\n})\n\n\n\n\n\n\n\n\nA-column\nB-column\n\n\n\n\nr1\na1\nb1\n\n\nr2\na2\nb2\n\n\nr3\na3\nb3\n\n\n\n\n\n\n\n\n\n\n\n2.2.3 Indices\nOn a more technical note, an index doesn’t have to be an integer, nor does it have to be unique. For example, we can set the index of the elections DataFrame to be the name of presidential candidates.\n\n# Creating a DataFrame from a CSV file and specifying the index column\nelections = pd.read_csv(\"data/elections.csv\", index_col = \"Candidate\")\nelections\n\n\n\n\n\n\n\n\nYear\nParty\nPopular vote\nResult\n%\n\n\nCandidate\n\n\n\n\n\n\n\n\n\nAndrew Jackson\n1824\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\nJohn Quincy Adams\n1824\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\nAndrew Jackson\n1828\nDemocratic\n642806\nwin\n56.203927\n\n\nJohn Quincy Adams\n1828\nNational Republican\n500897\nloss\n43.796073\n\n\nAndrew Jackson\n1832\nDemocratic\n702735\nwin\n54.574789\n\n\n...\n...\n...\n...\n...\n...\n\n\nJill Stein\n2016\nGreen\n1457226\nloss\n1.073699\n\n\nJoseph Biden\n2020\nDemocratic\n81268924\nwin\n51.311515\n\n\nDonald Trump\n2020\nRepublican\n74216154\nloss\n46.858542\n\n\nJo Jorgensen\n2020\nLibertarian\n1865724\nloss\n1.177979\n\n\nHoward Hawkins\n2020\nGreen\n405035\nloss\n0.255731\n\n\n\n\n182 rows × 5 columns\n\n\n\nWe can also select a new column and set it as the index of the DataFrame. For example, we can set the index of the elections DataFrame to represent the candidate’s party.\n\nelections.reset_index(inplace = True) # Resetting the index so we can set it again\n# This sets the index to the \"Party\" column\nelections.set_index(\"Party\")\n\n\n\n\n\n\n\n\nCandidate\nYear\nPopular vote\nResult\n%\n\n\nParty\n\n\n\n\n\n\n\n\n\nDemocratic-Republican\nAndrew Jackson\n1824\n151271\nloss\n57.210122\n\n\nDemocratic-Republican\nJohn Quincy Adams\n1824\n113142\nwin\n42.789878\n\n\nDemocratic\nAndrew Jackson\n1828\n642806\nwin\n56.203927\n\n\nNational Republican\nJohn Quincy Adams\n1828\n500897\nloss\n43.796073\n\n\nDemocratic\nAndrew Jackson\n1832\n702735\nwin\n54.574789\n\n\n...\n...\n...\n...\n...\n...\n\n\nGreen\nJill Stein\n2016\n1457226\nloss\n1.073699\n\n\nDemocratic\nJoseph Biden\n2020\n81268924\nwin\n51.311515\n\n\nRepublican\nDonald Trump\n2020\n74216154\nloss\n46.858542\n\n\nLibertarian\nJo Jorgensen\n2020\n1865724\nloss\n1.177979\n\n\nGreen\nHoward Hawkins\n2020\n405035\nloss\n0.255731\n\n\n\n\n182 rows × 5 columns\n\n\n\nAnd, if we’d like, we can revert the index back to the default list of integers.\n\n# This resets the index to be the default list of integer\nelections.reset_index(inplace=True) \nelections.index\n\nRangeIndex(start=0, stop=182, step=1)\n\n\nIt is also important to note that the row labels that constitute an index don’t have to be unique. 
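To make this concrete, here is a small sketch (not from the original notes; dup is just an illustrative name) of a Series whose index repeats a label:\n\n# \"a\" appears twice in the index — this is perfectly legal\ndup = pd.Series([1, 2, 3], index=[\"a\", \"a\", \"b\"])\ndup[\"a\"] # returns both entries labeled \"a\"\n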
While index values can be unique and numeric, acting as a row number, they can also be named and non-unique.\nHere we see unique and numeric index values.\n\n\n\nHowever, here the index values are not unique.", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_1/pandas_1.html#dataframe-attributes-index-columns-and-shape", - "href": "pandas_1/pandas_1.html#dataframe-attributes-index-columns-and-shape", - "title": "2  Pandas I", - "section": "2.3 DataFrame Attributes: Index, Columns, and Shape", - "text": "2.3 DataFrame Attributes: Index, Columns, and Shape\nOn the other hand, column names in a DataFrame are almost always unique. Looking back to the elections dataset, it wouldn’t make sense to have two columns named \"Candidate\". Sometimes, you’ll want to extract these different values, in particular, the list of row and column labels.\nFor index/row labels, use DataFrame.index:\n\nelections.set_index(\"Party\", inplace = True)\nelections.index\n\nIndex(['Democratic-Republican', 'Democratic-Republican', 'Democratic',\n 'National Republican', 'Democratic', 'National Republican',\n 'Anti-Masonic', 'Whig', 'Democratic', 'Whig',\n ...\n 'Constitution', 'Republican', 'Independent', 'Libertarian',\n 'Democratic', 'Green', 'Democratic', 'Republican', 'Libertarian',\n 'Green'],\n dtype='object', name='Party', length=182)\n\n\nFor column labels, use DataFrame.columns:\n\nelections.columns\n\nIndex(['index', 'Candidate', 'Year', 'Popular vote', 'Result', '%'], dtype='object')\n\n\nAnd for the shape of the DataFrame, we can use DataFrame.shape to get the number of rows followed by the number of columns:\n\nelections.shape\n\n(182, 6)", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_1/pandas_1.html#slicing-in-dataframes", - "href": "pandas_1/pandas_1.html#slicing-in-dataframes", - "title": "2  Pandas I", - "section": "2.4 Slicing in DataFrames", - "text": "2.4 Slicing in DataFrames\nNow that we’ve learned more about DataFrames, let’s dive deeper into their capabilities.\nThe API (Application Programming Interface) for the DataFrame class is enormous. 
In this section, we’ll discuss several methods of the DataFrame API that allow us to extract subsets of data.\nThe simplest way to manipulate a DataFrame is to extract a subset of rows and columns, known as slicing.\nCommon ways we may want to extract data are grabbing:\n\nThe first or last n rows in the DataFrame.\nData with a certain label.\nData at a certain position.\n\nWe will do so with four primary methods of the DataFrame class:\n\n.head and .tail\n.loc\n.iloc\n[]\n\n\n2.4.1 Extracting data with .head and .tail\nThe simplest scenario in which we want to extract data is when we simply want to select the first or last few rows of the DataFrame.\nTo extract the first n rows of a DataFrame df, we use the syntax df.head(n).\n\n\nCode\nelections = pd.read_csv(\"data/elections.csv\")\n\n\n\n# Extract the first 5 rows of the DataFrame\nelections.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\n\n\n\n\n\n\n\nSimilarly, calling df.tail(n) allows us to extract the last n rows of the DataFrame.\n\n# Extract the last 5 rows of the DataFrame\nelections.tail(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n177\n2016\nJill Stein\nGreen\n1457226\nloss\n1.073699\n\n\n178\n2020\nJoseph Biden\nDemocratic\n81268924\nwin\n51.311515\n\n\n179\n2020\nDonald Trump\nRepublican\n74216154\nloss\n46.858542\n\n\n180\n2020\nJo Jorgensen\nLibertarian\n1865724\nloss\n1.177979\n\n\n181\n2020\nHoward Hawkins\nGreen\n405035\nloss\n0.255731\n\n\n\n\n\n\n\n\n\n2.4.2 Label-based Extraction: Indexing with .loc\nFor the more complex task of extracting data with specific column or index labels, we can use .loc. The .loc accessor allows us to specify the labels of rows and columns we wish to extract. The labels (commonly referred to as the indices) are the bold text on the far left of a DataFrame, while the column labels are the column names found at the top of a DataFrame.\n\n\n\nTo grab data with .loc, we must specify the row and column label(s) where the data exists. The row labels are the first argument to the .loc function; the column labels are the second.\nArguments to .loc can be:\n\nA single value.\nA slice.\nA list.\n\nFor example, to select a single value, we can select the row labeled 0 and the column labeled Candidate from the elections DataFrame.\n\nelections.loc[0, 'Candidate']\n\n'Andrew Jackson'\n\n\nKeep in mind that passing in just one argument as a single value will produce a Series. Below, we’ve extracted a subset of the \"Popular vote\" column as a Series.\n\nelections.loc[[87, 25, 179], \"Popular vote\"]\n\n87 15761254\n25 848019\n179 74216154\nName: Popular vote, dtype: int64\n\n\nTo select multiple rows and columns, we can use Python slice notation. Here, we select the rows from labels 0 to 3 and the columns from labels \"Year\" to \"Popular vote\". 
Notice that unlike Python slicing, .loc is inclusive of the right upper bound.\n\nelections.loc[0:3, 'Year':'Popular vote']\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\n\n\n\n\n\n\n\nSuppose that instead, we want to extract all column values for the first four rows in the elections DataFrame. The shorthand : is useful for this.\n\nelections.loc[0:3, :]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n\n\n\n\n\nWe can use the same shorthand to extract all rows.\n\nelections.loc[:, [\"Year\", \"Candidate\", \"Result\"]]\n\n\n\n\n\n\n\n\nYear\nCandidate\nResult\n\n\n\n\n0\n1824\nAndrew Jackson\nloss\n\n\n1\n1824\nJohn Quincy Adams\nwin\n\n\n2\n1828\nAndrew Jackson\nwin\n\n\n3\n1828\nJohn Quincy Adams\nloss\n\n\n4\n1832\nAndrew Jackson\nwin\n\n\n...\n...\n...\n...\n\n\n177\n2016\nJill Stein\nloss\n\n\n178\n2020\nJoseph Biden\nwin\n\n\n179\n2020\nDonald Trump\nloss\n\n\n180\n2020\nJo Jorgensen\nloss\n\n\n181\n2020\nHoward Hawkins\nloss\n\n\n\n\n182 rows × 3 columns\n\n\n\nThere are a couple of things we should note. Firstly, unlike conventional Python, pandas allows us to slice string values (in our example, the column labels). Secondly, slicing with .loc is inclusive. Notice how our resulting DataFrame includes every row and column between and including the slice labels we specified.\nEquivalently, we can use a list to obtain multiple rows and columns in our elections DataFrame.\n\nelections.loc[[0, 1, 2, 3], ['Year', 'Candidate', 'Party', 'Popular vote']]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\n\n\n\n\n\n\n\nLastly, we can interchange list and slicing notation.\n\nelections.loc[[0, 1, 2, 3], :]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n\n\n\n\n\n\n\n2.4.3 Integer-based Extraction: Indexing with .iloc\nSlicing with .iloc works similarly to .loc. However, .iloc uses the index positions of rows and columns rather than the labels (think to yourself: loc uses lables; iloc uses indices). The arguments to the .iloc function also behave similarly — single values, lists, indices, and any combination of these are permitted.\nLet’s begin reproducing our results from above. We’ll begin by selecting the first presidential candidate in our elections DataFrame:\n\n# elections.loc[0, \"Candidate\"] - Previous approach\nelections.iloc[0, 1]\n\n'Andrew Jackson'\n\n\nNotice how the first argument to both .loc and .iloc are the same. 
This is because the row with a label of 0 is conveniently in the \\(0^{\\text{th}}\\) (equivalently, the first position) of the elections DataFrame. Generally, this is true of any DataFrame where the row labels are incremented in ascending order from 0.\nAnd, as before, if we were to pass in only one single value argument, our result would be a Series.\n\nelections.iloc[[1,2,3],1]\n\n1 John Quincy Adams\n2 Andrew Jackson\n3 John Quincy Adams\nName: Candidate, dtype: object\n\n\nHowever, when we select the first four rows and columns using .iloc, we notice something.\n\n# elections.loc[0:3, 'Year':'Popular vote'] - Previous approach\nelections.iloc[0:4, 0:4]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\n\n\n\n\n\n\n\nSlicing is no longer inclusive in .iloc — it’s exclusive. In other words, the right end of a slice is not included when using .iloc. This is one of the subtleties of pandas syntax; you will get used to it with practice.\nList behavior works just as expected.\n\n#elections.loc[[0, 1, 2, 3], ['Year', 'Candidate', 'Party', 'Popular vote']] - Previous Approach\nelections.iloc[[0, 1, 2, 3], [0, 1, 2, 3]]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\n\n\n\n\n\n\n\nAnd just like with .loc, we can use a colon with .iloc to extract all rows or columns.\n\nelections.iloc[:, 0:3]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n\n\n...\n...\n...\n...\n\n\n177\n2016\nJill Stein\nGreen\n\n\n178\n2020\nJoseph Biden\nDemocratic\n\n\n179\n2020\nDonald Trump\nRepublican\n\n\n180\n2020\nJo Jorgensen\nLibertarian\n\n\n181\n2020\nHoward Hawkins\nGreen\n\n\n\n\n182 rows × 3 columns\n\n\n\nThis discussion begs the question: when should we use .loc vs. .iloc? In most cases, .loc is generally safer to use. You can imagine .iloc may return incorrect values when applied to a dataset where the ordering of data can change. However, .iloc can still be useful — for example, if you are looking at a DataFrame of sorted movie earnings and want to get the median earnings for a given year, you can use .iloc to index into the middle.\nOverall, it is important to remember that:\n\n.loc performances label-based extraction.\n.iloc performs integer-based extraction.\n\n\n\n2.4.4 Context-dependent Extraction: Indexing with []\nThe [] selection operator is the most baffling of all, yet the most commonly used. It only takes a single argument, which may be one of the following:\n\nA slice of row numbers.\nA list of column labels.\nA single-column label.\n\nThat is, [] is context-dependent. 
Let’s see some examples.\n\n2.4.4.1 A slice of row numbers\nSay we wanted the first four rows of our elections DataFrame.\n\nelections[0:4]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n\n\n\n\n\n\n\n2.4.4.2 A list of column labels\nSuppose we now want the first four columns.\n\nelections[[\"Year\", \"Candidate\", \"Party\", \"Popular vote\"]]\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\n\n\n...\n...\n...\n...\n...\n\n\n177\n2016\nJill Stein\nGreen\n1457226\n\n\n178\n2020\nJoseph Biden\nDemocratic\n81268924\n\n\n179\n2020\nDonald Trump\nRepublican\n74216154\n\n\n180\n2020\nJo Jorgensen\nLibertarian\n1865724\n\n\n181\n2020\nHoward Hawkins\nGreen\n405035\n\n\n\n\n182 rows × 4 columns\n\n\n\n\n\n2.4.4.3 A single-column label\nLastly, [] allows us to extract only the \"Candidate\" column.\n\nelections[\"Candidate\"]\n\n0 Andrew Jackson\n1 John Quincy Adams\n2 Andrew Jackson\n3 John Quincy Adams\n4 Andrew Jackson\n ... \n177 Jill Stein\n178 Joseph Biden\n179 Donald Trump\n180 Jo Jorgensen\n181 Howard Hawkins\nName: Candidate, Length: 182, dtype: object\n\n\nThe output is a Series! In this course, we’ll become very comfortable with [], especially for selecting columns. In practice, [] is much more common than .loc, especially since it is far more concise.", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_1/pandas_1.html#parting-note", - "href": "pandas_1/pandas_1.html#parting-note", - "title": "2  Pandas I", - "section": "2.5 Parting Note", - "text": "2.5 Parting Note\nThe pandas library is enormous and contains many useful functions. Here is a link to its documentation. We certainly don’t expect you to memorize each and every method of the library, and we will give you a reference sheet for exams.\nThe introductory Data 100 pandas lectures will provide a high-level view of the key data structures and methods that will form the foundation of your pandas knowledge. A goal of this course is to help you build your familiarity with the real-world programming practice of … Googling! Answers to your questions can be found in documentation, Stack Overflow, etc. Being able to search for, read, and implement documentation is an important life skill for any data scientist.\nWith that, we will move on to Pandas II!", - "crumbs": [ - "2  Pandas I" - ] - }, - { - "objectID": "pandas_2/pandas_2.html", - "href": "pandas_2/pandas_2.html", - "title": "3  Pandas II", - "section": "", - "text": "3.1 Conditional Selection\nConditional selection allows us to select a subset of rows in a DataFrame that satisfy some specified condition.\nTo understand how to use conditional selection, we must look at another possible input of the .loc and [] methods – a boolean array, which is simply an array or Series where each element is either True or False. This boolean array must have a length equal to the number of rows in the DataFrame. 
It will return all rows that correspond to a value of True in the array. We used a very similar technique when performing conditional extraction from a Series in the last lecture.\nTo see this in action, let’s select all even-indexed rows in the first 10 rows of our DataFrame.\n# Ask yourself: why is :9 is the correct slice to select the first 10 rows?\nbabynames_first_10_rows = babynames.loc[:9, :]\n\n# Notice how we have exactly 10 elements in our boolean array argument\nbabynames_first_10_rows[[True, False, True, False, True, False, True, False, True, False]]\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n4\nCA\nF\n1910\nFrances\n134\n\n\n6\nCA\nF\n1910\nEvelyn\n126\n\n\n8\nCA\nF\n1910\nVirginia\n101\nWe can perform a similar operation using .loc.\nbabynames_first_10_rows.loc[[True, False, True, False, True, False, True, False, True, False], :]\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n4\nCA\nF\n1910\nFrances\n134\n\n\n6\nCA\nF\n1910\nEvelyn\n126\n\n\n8\nCA\nF\n1910\nVirginia\n101\nThese techniques worked well in this example, but you can imagine how tedious it might be to list out True and Falsefor every row in a larger DataFrame. To make things easier, we can instead provide a logical condition as an input to .loc or [] that returns a boolean array with the necessary length.\nFor example, to return all names associated with F sex:\n# First, use a logical condition to generate a boolean array\nlogical_operator = (babynames[\"Sex\"] == \"F\")\n\n# Then, use this boolean array to filter the DataFrame\nbabynames[logical_operator].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134\nRecall from the previous lecture that .head() will return only the first few rows in the DataFrame. In reality, babynames[logical operator] contains as many rows as there are entries in the original babynames DataFrame with sex \"F\".\nHere, logical_operator evaluates to a Series of boolean values with length 407428.\nCode\nprint(\"There are a total of {} values in 'logical_operator'\".format(len(logical_operator)))\n\n\nThere are a total of 407428 values in 'logical_operator'\nRows starting at row 0 and ending at row 239536 evaluate to True and are thus returned in the DataFrame. Rows from 239537 onwards evaluate to False and are omitted from the output.\nCode\nprint(\"The 0th item in this 'logical_operator' is: {}\".format(logical_operator.iloc[0]))\nprint(\"The 239536th item in this 'logical_operator' is: {}\".format(logical_operator.iloc[239536]))\nprint(\"The 239537th item in this 'logical_operator' is: {}\".format(logical_operator.iloc[239537]))\n\n\nThe 0th item in this 'logical_operator' is: True\nThe 239536th item in this 'logical_operator' is: True\nThe 239537th item in this 'logical_operator' is: False\nPassing a Series as an argument to babynames[] has the same effect as using a boolean array. In fact, the [] selection operator can take a boolean Series, array, and list as arguments. 
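For instance, here is a quick sketch (not part of the original notes; is_female is just an illustrative name) that passes the same condition in all three forms:\n\n# The same filter as a boolean Series, a NumPy array, and a Python list\nis_female = babynames[\"Sex\"] == \"F\"\nbabynames[is_female].head() # boolean Series\nbabynames[is_female.to_numpy()].head() # NumPy array\nbabynames[is_female.tolist()].head() # Python list\n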
These three are used interchangeably throughout the course.\nWe can also use .loc to achieve similar results.\nbabynames.loc[babynames[\"Sex\"] == \"F\"].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134\nBoolean conditions can be combined using various bitwise operators, allowing us to filter results by multiple conditions. In the table below, p and q are boolean arrays or Series.\nWhen combining multiple conditions with logical operators, we surround each individual condition with a set of parenthesis (). This imposes an order of operations on pandas evaluating your logic and can avoid code erroring.\nFor example, if we want to return data on all names with sex \"F\" born before the year 2000, we can write:\nbabynames[(babynames[\"Sex\"] == \"F\") & (babynames[\"Year\"] < 2000)].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134\nNote that we’re working with Series, so using and in place of &, or or in place | will error.\n# This line of code will raise a ValueError\n# babynames[(babynames[\"Sex\"] == \"F\") and (babynames[\"Year\"] < 2000)].head()\nIf we want to return data on all names with sex \"F\" or all born before the year 2000, we can write:\nbabynames[(babynames[\"Sex\"] == \"F\") | (babynames[\"Year\"] < 2000)].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134\nBoolean array selection is a useful tool, but can lead to overly verbose code for complex conditions. In the example below, our boolean condition is long enough to extend for several lines of code.\n# Note: The parentheses surrounding the code make it possible to break the code on to multiple lines for readability\n(\n babynames[(babynames[\"Name\"] == \"Bella\") | \n (babynames[\"Name\"] == \"Alex\") |\n (babynames[\"Name\"] == \"Ani\") |\n (babynames[\"Name\"] == \"Lisa\")]\n).head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n6289\nCA\nF\n1923\nBella\n5\n\n\n7512\nCA\nF\n1925\nBella\n8\n\n\n12368\nCA\nF\n1932\nLisa\n5\n\n\n14741\nCA\nF\n1936\nLisa\n8\n\n\n17084\nCA\nF\n1939\nLisa\n5\nFortunately, pandas provides many alternative methods for constructing boolean filters.\nThe .isin function is one such example. This method evaluates if the values in a Series are contained in a different sequence (list, array, or Series) of values. In the cell below, we achieve equivalent results to the DataFrame above with far more concise code.\nnames = [\"Bella\", \"Alex\", \"Narges\", \"Lisa\"]\nbabynames[\"Name\"].isin(names).head()\n\n0 False\n1 False\n2 False\n3 False\n4 False\nName: Name, dtype: bool\nbabynames[babynames[\"Name\"].isin(names)].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n6289\nCA\nF\n1923\nBella\n5\n\n\n7512\nCA\nF\n1925\nBella\n8\n\n\n12368\nCA\nF\n1932\nLisa\n5\n\n\n14741\nCA\nF\n1936\nLisa\n8\n\n\n17084\nCA\nF\n1939\nLisa\n5\nThe function str.startswith can be used to define a filter based on string values in a Series object. 
It checks to see if string values in a Series start with a particular character.\n# Identify whether names begin with the letter \"N\"\nbabynames[\"Name\"].str.startswith(\"N\").head()\n\n0 False\n1 False\n2 False\n3 False\n4 False\nName: Name, dtype: bool\n# Extracting names that begin with the letter \"N\"\nbabynames[babynames[\"Name\"].str.startswith(\"N\")].head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n76\nCA\nF\n1910\nNorma\n23\n\n\n83\nCA\nF\n1910\nNellie\n20\n\n\n127\nCA\nF\n1910\nNina\n11\n\n\n198\nCA\nF\n1910\nNora\n6\n\n\n310\nCA\nF\n1911\nNellie\n23", - "crumbs": [ - "3  Pandas II" - ] - }, - { - "objectID": "pandas_2/pandas_2.html#conditional-selection", - "href": "pandas_2/pandas_2.html#conditional-selection", - "title": "3  Pandas II", - "section": "", - "text": "Symbol\nUsage\nMeaning\n\n\n\n\n~\n~p\nReturns negation of p\n\n\n|\np | q\np OR q\n\n\n&\np & q\np AND q\n\n\n^\np ^ q\np XOR q (exclusive or)", - "crumbs": [ - "3  Pandas II" - ] - }, - { - "objectID": "pandas_2/pandas_2.html#adding-removing-and-modifying-columns", - "href": "pandas_2/pandas_2.html#adding-removing-and-modifying-columns", - "title": "3  Pandas II", - "section": "3.2 Adding, Removing, and Modifying Columns", - "text": "3.2 Adding, Removing, and Modifying Columns\nIn many data science tasks, we may need to change the columns contained in our DataFrame in some way. Fortunately, the syntax to do so is fairly straightforward.\nTo add a new column to a DataFrame, we use a syntax similar to that used when accessing an existing column. Specify the name of the new column by writing df[\"column\"], then assign this to a Series or array containing the values that will populate this column.\n\n# Create a Series of the length of each name. \nbabyname_lengths = babynames[\"Name\"].str.len()\n\n# Add a column named \"name_lengths\" that includes the length of each name\nbabynames[\"name_lengths\"] = babyname_lengths\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nname_lengths\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n4\n\n\n1\nCA\nF\n1910\nHelen\n239\n5\n\n\n2\nCA\nF\n1910\nDorothy\n220\n7\n\n\n3\nCA\nF\n1910\nMargaret\n163\n8\n\n\n4\nCA\nF\n1910\nFrances\n134\n7\n\n\n\n\n\n\n\nIf we need to later modify an existing column, we can do so by referencing this column again with the syntax df[\"column\"], then re-assigning it to a new Series or array of the appropriate length.\n\n# Modify the “name_lengths” column to be one less than its original value\nbabynames[\"name_lengths\"] = babynames[\"name_lengths\"] - 1\nbabynames.head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nname_lengths\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n3\n\n\n1\nCA\nF\n1910\nHelen\n239\n4\n\n\n2\nCA\nF\n1910\nDorothy\n220\n6\n\n\n3\nCA\nF\n1910\nMargaret\n163\n7\n\n\n4\nCA\nF\n1910\nFrances\n134\n6\n\n\n\n\n\n\n\nWe can rename a column using the .rename() method. It takes in a dictionary that maps old column names to their new ones.\n\n# Rename “name_lengths” to “Length”\nbabynames = babynames.rename(columns={\"name_lengths\":\"Length\"})\nbabynames.head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nLength\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n3\n\n\n1\nCA\nF\n1910\nHelen\n239\n4\n\n\n2\nCA\nF\n1910\nDorothy\n220\n6\n\n\n3\nCA\nF\n1910\nMargaret\n163\n7\n\n\n4\nCA\nF\n1910\nFrances\n134\n6\n\n\n\n\n\n\n\nIf we want to remove a column or row of a DataFrame, we can call the .drop (documentation) method. Use the axis parameter to specify whether a column or row should be dropped. 
Unless otherwise specified, pandas will assume that we are dropping a row by default.\n\n# Drop our new \"Length\" column from the DataFrame\nbabynames = babynames.drop(\"Length\", axis=\"columns\")\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134\n\n\n\n\n\n\n\nNotice that we re-assigned babynames to the result of babynames.drop(...). This is a subtle but important point: pandas table operations do not occur in-place. Calling df.drop(...) will output a copy of df with the row/column of interest removed without modifying the original df table.\nIn other words, if we simply call:\n\n# This creates a copy of `babynames` and removes the column \"Name\"...\nbabynames.drop(\"Name\", axis=\"columns\")\n\n# ...but the original `babynames` is unchanged! \n# Notice that the \"Name\" column is still present\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n\n\n1\nCA\nF\n1910\nHelen\n239\n\n\n2\nCA\nF\n1910\nDorothy\n220\n\n\n3\nCA\nF\n1910\nMargaret\n163\n\n\n4\nCA\nF\n1910\nFrances\n134", - "crumbs": [ - "3  Pandas II" - ] - }, - { - "objectID": "pandas_2/pandas_2.html#useful-utility-functions", - "href": "pandas_2/pandas_2.html#useful-utility-functions", - "title": "3  Pandas II", - "section": "3.3 Useful Utility Functions", - "text": "3.3 Useful Utility Functions\npandas contains an extensive library of functions that can help shorten the process of setting and getting information from its data structures. In the following section, we will give overviews of each of the main utility functions that will help us in Data 100.\nDiscussing all functionality offered by pandas could take an entire semester! We will walk you through the most commonly-used functions and encourage you to explore and experiment on your own.\n\nNumPy and built-in function support\n.shape\n.size\n.describe()\n.sample()\n.value_counts()\n.unique()\n.sort_values()\n\nThe pandas documentation will be a valuable resource in Data 100 and beyond.\n\n3.3.1 NumPy\npandas is designed to work well with NumPy, the framework for array computations you encountered in Data 8. Just about any NumPy function can be applied to pandas DataFrames and Series.\n\n# Pull out the number of babies named Yash each year\nyash_count = babynames[babynames[\"Name\"] == \"Yash\"][\"Count\"]\nyash_count.head()\n\n331824 8\n334114 9\n336390 11\n338773 12\n341387 10\nName: Count, dtype: int64\n\n\n\n# Average number of babies named Yash each year\nnp.mean(yash_count)\n\n17.142857142857142\n\n\n\n# Max number of babies named Yash born in any one year\nnp.max(yash_count)\n\n29\n\n\n\n\n3.3.2 .shape and .size\n.shape and .size are attributes of Series and DataFrames that measure the “amount” of data stored in the structure. Calling .shape returns a tuple containing the number of rows and columns present in the DataFrame or Series. .size is used to find the total number of elements in a structure, equivalent to the number of rows times the number of columns.\nMany functions strictly require the dimensions of the arguments along certain axes to match. 
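As a quick sketch (reusing the babynames DataFrame from above; new_lengths is just an illustrative name), we might confirm that a would-be column lines up with the table before assigning it:\n\n# A new column needs one entry per row, which .shape lets us confirm cheaply\nnew_lengths = babynames[\"Name\"].str.len()\nnew_lengths.shape[0] == babynames.shape[0] # True, so the assignment is safe\n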
Calling these dimension-finding functions is much faster than counting all of the items by hand.\n\n# Return the shape of the DataFrame, in the format (num_rows, num_columns)\nbabynames.shape\n\n(407428, 5)\n\n\n\n# Return the size of the DataFrame, equal to num_rows * num_columns\nbabynames.size\n\n2037140\n\n\n\n\n3.3.3 .describe()\nIf many statistics are required from a DataFrame (minimum value, maximum value, mean value, etc.), then .describe() (documentation) can be used to compute all of them at once.\n\nbabynames.describe()\n\n\n\n\n\n\n\n\nYear\nCount\n\n\n\n\ncount\n407428.000000\n407428.000000\n\n\nmean\n1985.733609\n79.543456\n\n\nstd\n27.007660\n293.698654\n\n\nmin\n1910.000000\n5.000000\n\n\n25%\n1969.000000\n7.000000\n\n\n50%\n1992.000000\n13.000000\n\n\n75%\n2008.000000\n38.000000\n\n\nmax\n2022.000000\n8260.000000\n\n\n\n\n\n\n\nA different set of statistics will be reported if .describe() is called on a Series.\n\nbabynames[\"Sex\"].describe()\n\ncount 407428\nunique 2\ntop F\nfreq 239537\nName: Sex, dtype: object\n\n\n\n\n3.3.4 .sample()\nAs we will see later in the semester, random processes are at the heart of many data science techniques (for example, train-test splits, bootstrapping, and cross-validation). .sample() (documentation) lets us quickly select random entries (a row if called from a DataFrame, or a value if called from a Series).\nBy default, .sample() selects entries without replacement. Pass in the argument replace=True to sample with replacement.\n\n# Sample a single row\nbabynames.sample()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n378062\nCA\nM\n2012\nParth\n6\n\n\n\n\n\n\n\nNaturally, this can be chained with other methods and operators (iloc, etc.).\n\n# Sample 5 random rows, and select all columns after column 2\nbabynames.sample(5).iloc[:, 2:]\n\n\n\n\n\n\n\n\nYear\nName\nCount\n\n\n\n\n378415\n2012\nNeftali\n5\n\n\n374207\n2011\nTayden\n12\n\n\n218433\n2017\nAnnalee\n31\n\n\n349233\n2002\nTobin\n8\n\n\n242648\n1919\nClaud\n7\n\n\n\n\n\n\n\n\n# Randomly sample 4 names from the year 2000, with replacement, and select all columns after column 2\nbabynames[babynames[\"Year\"] == 2000].sample(4, replace = True).iloc[:, 2:]\n\n\n\n\n\n\n\n\nYear\nName\nCount\n\n\n\n\n150968\n2000\nEloise\n11\n\n\n343841\n2000\nLevon\n10\n\n\n149396\n2000\nTanya\n126\n\n\n150438\n2000\nAnthony\n18\n\n\n\n\n\n\n\n\n\n3.3.5 .value_counts()\nThe Series.value_counts() (documentation) method counts the number of occurrence of each unique value in a Series. In other words, it counts the number of times each unique value appears. This is often useful for determining the most or least common entries in a Series.\nIn the example below, we can determine the name with the most years in which at least one person has taken that name by counting the number of times each name appears in the \"Name\" column of babynames. Note that the return value is also a Series.\n\nbabynames[\"Name\"].value_counts().head()\n\nName\nJean 223\nFrancis 221\nGuadalupe 218\nJessie 217\nMarion 214\nName: count, dtype: int64\n\n\n\n\n3.3.6 .unique()\nIf we have a Series with many repeated values, then .unique() (documentation) can be used to identify only the unique values. Here we return an array of all the names in babynames.\n\nbabynames[\"Name\"].unique()\n\narray(['Mary', 'Helen', 'Dorothy', ..., 'Zae', 'Zai', 'Zayvier'],\n dtype=object)\n\n\n\n\n3.3.7 .sort_values()\nOrdering a DataFrame can be useful for isolating extreme values. 
For example, the first 5 entries of a row sorted in descending order (that is, from highest to lowest) are the largest 5 values. .sort_values (documentation) allows us to order a DataFrame or Series by a specified column. We can choose to either receive the rows in ascending order (default) or descending order.\n\n# Sort the \"Count\" column from highest to lowest\nbabynames.sort_values(by=\"Count\", ascending=False).head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n268041\nCA\nM\n1957\nMichael\n8260\n\n\n267017\nCA\nM\n1956\nMichael\n8258\n\n\n317387\nCA\nM\n1990\nMichael\n8246\n\n\n281850\nCA\nM\n1969\nMichael\n8245\n\n\n283146\nCA\nM\n1970\nMichael\n8196\n\n\n\n\n\n\n\nUnlike when calling .value_counts() on a DataFrame, we do not need to explicitly specify the column used for sorting when calling .value_counts() on a Series. We can still specify the ordering paradigm – that is, whether values are sorted in ascending or descending order.\n\n# Sort the \"Name\" Series alphabetically\nbabynames[\"Name\"].sort_values(ascending=True).head()\n\n366001 Aadan\n384005 Aadan\n369120 Aadan\n398211 Aadarsh\n370306 Aaden\nName: Name, dtype: object", - "crumbs": [ - "3  Pandas II" - ] - }, - { - "objectID": "pandas_2/pandas_2.html#parting-note", - "href": "pandas_2/pandas_2.html#parting-note", - "title": "3  Pandas II", - "section": "3.4 Parting Note", - "text": "3.4 Parting Note\nManipulating DataFrames is not a skill that is mastered in just one day. Due to the flexibility of pandas, there are many different ways to get from point A to point B. We recommend trying multiple different ways to solve the same problem to gain even more practice and reach that point of mastery sooner.\nNext, we will start digging deeper into the mechanics behind grouping data.", - "crumbs": [ - "3  Pandas II" - ] - }, - { - "objectID": "pandas_3/pandas_3.html", - "href": "pandas_3/pandas_3.html", - "title": "4  Pandas III", - "section": "", - "text": "4.1 Custom Sorts\nFirst, let’s finish our discussion about sorting. Let’s try to solve a sorting problem using different approaches. Assume we want to find the longest baby names and sort our data accordingly.\nWe’ll start by loading the babynames dataset. 
Note that this dataset is filtered to only contain data from California.\nCode\n# This code pulls census data and loads it into a DataFrame\n# We won't cover it explicitly in this class, but you are welcome to explore it on your own\nimport pandas as pd\nimport numpy as np\nimport urllib.request\nimport os.path\nimport zipfile\n\ndata_url = \"https://www.ssa.gov/oact/babynames/state/namesbystate.zip\"\nlocal_filename = \"data/babynamesbystate.zip\"\nif not os.path.exists(local_filename): # If the data exists don't download again\n with urllib.request.urlopen(data_url) as resp, open(local_filename, 'wb') as f:\n f.write(resp.read())\n\nzf = zipfile.ZipFile(local_filename, 'r')\n\nca_name = 'STATE.CA.TXT'\nfield_names = ['State', 'Sex', 'Year', 'Name', 'Count']\nwith zf.open(ca_name) as fh:\n babynames = pd.read_csv(fh, header=None, names=field_names)\n\nbabynames.tail(10)\n\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n407418\nCA\nM\n2022\nZach\n5\n\n\n407419\nCA\nM\n2022\nZadkiel\n5\n\n\n407420\nCA\nM\n2022\nZae\n5\n\n\n407421\nCA\nM\n2022\nZai\n5\n\n\n407422\nCA\nM\n2022\nZay\n5\n\n\n407423\nCA\nM\n2022\nZayvier\n5\n\n\n407424\nCA\nM\n2022\nZia\n5\n\n\n407425\nCA\nM\n2022\nZora\n5\n\n\n407426\nCA\nM\n2022\nZuriel\n5\n\n\n407427\nCA\nM\n2022\nZylo\n5", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#custom-sorts", - "href": "pandas_3/pandas_3.html#custom-sorts", - "title": "4  Pandas III", - "section": "", - "text": "4.1.1 Approach 1: Create a Temporary Column\nOne method to do this is to first start by creating a column that contains the lengths of the names.\n\n# Create a Series of the length of each name\nbabyname_lengths = babynames[\"Name\"].str.len()\n\n# Add a column named \"name_lengths\" that includes the length of each name\nbabynames[\"name_lengths\"] = babyname_lengths\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nname_lengths\n\n\n\n\n0\nCA\nF\n1910\nMary\n295\n4\n\n\n1\nCA\nF\n1910\nHelen\n239\n5\n\n\n2\nCA\nF\n1910\nDorothy\n220\n7\n\n\n3\nCA\nF\n1910\nMargaret\n163\n8\n\n\n4\nCA\nF\n1910\nFrances\n134\n7\n\n\n\n\n\n\n\nWe can then sort the DataFrame by that column using .sort_values():\n\n# Sort by the temporary column\nbabynames = babynames.sort_values(by=\"name_lengths\", ascending=False)\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nname_lengths\n\n\n\n\n334166\nCA\nM\n1996\nFranciscojavier\n8\n15\n\n\n337301\nCA\nM\n1997\nFranciscojavier\n5\n15\n\n\n339472\nCA\nM\n1998\nFranciscojavier\n6\n15\n\n\n321792\nCA\nM\n1991\nRyanchristopher\n7\n15\n\n\n327358\nCA\nM\n1993\nJohnchristopher\n5\n15\n\n\n\n\n\n\n\nFinally, we can drop the name_length column from babynames to prevent our table from getting cluttered.\n\n# Drop the 'name_length' column\nbabynames = babynames.drop(\"name_lengths\", axis='columns')\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n334166\nCA\nM\n1996\nFranciscojavier\n8\n\n\n337301\nCA\nM\n1997\nFranciscojavier\n5\n\n\n339472\nCA\nM\n1998\nFranciscojavier\n6\n\n\n321792\nCA\nM\n1991\nRyanchristopher\n7\n\n\n327358\nCA\nM\n1993\nJohnchristopher\n5\n\n\n\n\n\n\n\n\n\n4.1.2 Approach 2: Sorting using the key Argument\nAnother way to approach this is to use the key argument of .sort_values(). 
Here we can specify that we want to sort \"Name\" values by their length.\n\nbabynames.sort_values(\"Name\", key=lambda x: x.str.len(), ascending=False).head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n334166\nCA\nM\n1996\nFranciscojavier\n8\n\n\n327472\nCA\nM\n1993\nRyanchristopher\n5\n\n\n337301\nCA\nM\n1997\nFranciscojavier\n5\n\n\n337477\nCA\nM\n1997\nRyanchristopher\n5\n\n\n312543\nCA\nM\n1987\nFranciscojavier\n5\n\n\n\n\n\n\n\n\n\n4.1.3 Approach 3: Sorting using the map Function\nWe can also use the map function on a Series to solve this. Say we want to sort the babynames table by the number of \"dr\"’s and \"ea\"’s in each \"Name\". We’ll define the function dr_ea_count to help us out.\n\n# First, define a function to count the number of times \"dr\" or \"ea\" appear in each name\ndef dr_ea_count(string):\n return string.count('dr') + string.count('ea')\n\n# Then, use `map` to apply `dr_ea_count` to each name in the \"Name\" column\nbabynames[\"dr_ea_count\"] = babynames[\"Name\"].map(dr_ea_count)\n\n# Sort the DataFrame by the new \"dr_ea_count\" column so we can see our handiwork\nbabynames = babynames.sort_values(by=\"dr_ea_count\", ascending=False)\nbabynames.head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\ndr_ea_count\n\n\n\n\n115957\nCA\nF\n1990\nDeandrea\n5\n3\n\n\n101976\nCA\nF\n1986\nDeandrea\n6\n3\n\n\n131029\nCA\nF\n1994\nLeandrea\n5\n3\n\n\n108731\nCA\nF\n1988\nDeandrea\n5\n3\n\n\n308131\nCA\nM\n1985\nDeandrea\n6\n3\n\n\n\n\n\n\n\nWe can drop the dr_ea_count once we’re done using it to maintain a neat table.\n\n# Drop the `dr_ea_count` column\nbabynames = babynames.drop(\"dr_ea_count\", axis = 'columns')\nbabynames.head(5)\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\n\n\n\n\n115957\nCA\nF\n1990\nDeandrea\n5\n\n\n101976\nCA\nF\n1986\nDeandrea\n6\n\n\n131029\nCA\nF\n1994\nLeandrea\n5\n\n\n108731\nCA\nF\n1988\nDeandrea\n5\n\n\n308131\nCA\nM\n1985\nDeandrea\n6", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#aggregating-data-with-.groupby", - "href": "pandas_3/pandas_3.html#aggregating-data-with-.groupby", - "title": "4  Pandas III", - "section": "4.2 Aggregating Data with .groupby", - "text": "4.2 Aggregating Data with .groupby\nUp until this point, we have been working with individual rows of DataFrames. As data scientists, we often wish to investigate trends across a larger subset of our data. For example, we may want to compute some summary statistic (the mean, median, sum, etc.) for a group of rows in our DataFrame. To do this, we’ll use pandas GroupBy objects. Our goal is to group together rows that fall under the same category and perform an operation that aggregates across all rows in the category.\nLet’s say we wanted to aggregate all rows in babynames for a given year.\n\nbabynames.groupby(\"Year\")\n\n<pandas.core.groupby.generic.DataFrameGroupBy object at 0x16b0b0650>\n\n\nWhat does this strange output mean? Calling .groupby (documentation) has generated a GroupBy object. You can imagine this as a set of “mini” sub-DataFrames, where each subframe contains all of the rows from babynames that correspond to a particular year.\nThe diagram below shows a simplified view of babynames to help illustrate this idea.\n\n\n\nWe can’t work with a GroupBy object directly – that is why you saw that strange output earlier rather than a standard view of a DataFrame. To actually manipulate values within these “mini” DataFrames, we’ll need to call an aggregation method. 
This is a method that tells pandas how to aggregate the values within the GroupBy object. Once the aggregation is applied, pandas will return a normal (now grouped) DataFrame.\nThe first aggregation method we’ll consider is .agg. The .agg method takes in a function as its argument; this function is then applied to each column of a “mini” grouped DataFrame. We end up with a new DataFrame with one aggregated row per subframe. Let’s see this in action by finding the sum of all counts for each year in babynames – this is equivalent to finding the number of babies born in each year.\n\nbabynames[[\"Year\", \"Count\"]].groupby(\"Year\").agg(sum).head(5)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/2718070104.py:1: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\n\n\n\n\n\n1910\n9163\n\n\n1911\n9983\n\n\n1912\n17946\n\n\n1913\n22094\n\n\n1914\n26926\n\n\n\n\n\n\n\nWe can relate this back to the diagram we used above. Remember that the diagram uses a simplified version of babynames, which is why we see smaller values for the summed counts.\n\n\n\nPerforming an aggregation\n\n\nCalling .agg has condensed each subframe back into a single row. This gives us our final output: a DataFrame that is now indexed by \"Year\", with a single row for each unique year in the original babynames DataFrame.\nThere are many different aggregation functions we can use, all of which are useful in different applications.\n\nbabynames[[\"Year\", \"Count\"]].groupby(\"Year\").agg(min).head(5)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/86785752.py:1: FutureWarning:\n\nThe provided callable <built-in function min> is currently using DataFrameGroupBy.min. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"min\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\n\n\n\n\n\n1910\n5\n\n\n1911\n5\n\n\n1912\n5\n\n\n1913\n5\n\n\n1914\n5\n\n\n\n\n\n\n\n\nbabynames[[\"Year\", \"Count\"]].groupby(\"Year\").agg(max).head(5)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/3032256904.py:1: FutureWarning:\n\nThe provided callable <built-in function max> is currently using DataFrameGroupBy.max. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"max\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\n\n\n\n\n\n1910\n295\n\n\n1911\n390\n\n\n1912\n534\n\n\n1913\n614\n\n\n1914\n773\n\n\n\n\n\n\n\n\n# Same result, but now we explicitly tell pandas to only consider the \"Count\" column when summing\nbabynames.groupby(\"Year\")[[\"Count\"]].agg(sum).head(5)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/1958904241.py:2: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\n\n\n\n\n\n1910\n9163\n\n\n1911\n9983\n\n\n1912\n17946\n\n\n1913\n22094\n\n\n1914\n26926\n\n\n\n\n\n\n\nThere are many different aggregations that can be applied to the grouped data. 
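As the FutureWarning messages above hint, passing the built-in sum, min, or max callables is slated to change behavior in a future pandas release; a safer, warning-free sketch of the same computations passes the aggregation by its string name instead:\n\n# Sketch: string-named aggregations (same results as above, no FutureWarning)\nbabynames[[\"Year\", \"Count\"]].groupby(\"Year\").agg(\"sum\").head(5)\nbabynames[[\"Year\", \"Count\"]].groupby(\"Year\").agg(\"min\").head(5)\n\n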
The primary requirement is that an aggregation function must:\n\nTake in a Series of data (a single column of the grouped subframe).\nReturn a single value that aggregates this Series.\n\n\n4.2.1 Aggregation Functions\nBecause of this fairly broad requirement, pandas offers many ways of computing an aggregation.\nIn-built Python operations – such as sum, max, and min – are automatically recognized by pandas.\n\n# What is the minimum count for each name in any year?\nbabynames.groupby(\"Name\")[[\"Count\"]].agg(min).head()\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/3244314896.py:2: FutureWarning:\n\nThe provided callable <built-in function min> is currently using DataFrameGroupBy.min. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"min\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nName\n\n\n\n\n\nAadan\n5\n\n\nAadarsh\n6\n\n\nAaden\n10\n\n\nAadhav\n6\n\n\nAadhini\n6\n\n\n\n\n\n\n\n\n# What is the largest single-year count of each name?\nbabynames.groupby(\"Name\")[[\"Count\"]].agg(max).head()\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/3805876622.py:2: FutureWarning:\n\nThe provided callable <built-in function max> is currently using DataFrameGroupBy.max. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"max\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nName\n\n\n\n\n\nAadan\n7\n\n\nAadarsh\n6\n\n\nAaden\n158\n\n\nAadhav\n8\n\n\nAadhini\n6\n\n\n\n\n\n\n\nAs mentioned previously, functions from the NumPy library, such as np.mean, np.max, np.min, and np.sum, are also fair game in pandas.\n\n# What is the average count for each name across all years?\nbabynames.groupby(\"Name\")[[\"Count\"]].agg(np.mean).head()\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/308986604.py:2: FutureWarning:\n\nThe provided callable <function mean at 0x1061a49a0> is currently using DataFrameGroupBy.mean. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"mean\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nName\n\n\n\n\n\nAadan\n6.000000\n\n\nAadarsh\n6.000000\n\n\nAaden\n46.214286\n\n\nAadhav\n6.750000\n\n\nAadhini\n6.000000\n\n\n\n\n\n\n\npandas also offers a number of in-built functions. Functions that are native to pandas can be referenced using their string name within a call to .agg. Some examples include:\n\n.agg(\"sum\")\n.agg(\"max\")\n.agg(\"min\")\n.agg(\"mean\")\n.agg(\"first\")\n.agg(\"last\")\n\nThe latter two entries in this list – \"first\" and \"last\" – are unique to pandas. They return the first or last entry in a subframe column. Why might this be useful? Consider a case where multiple columns in a group share identical information. To represent this information in the grouped output, we can simply grab the first or last entry, which we know will be identical to all other entries.\nLet’s illustrate this with an example. Say we add a new column to babynames that contains the first letter of each name.\n\n# Imagine we had an additional column, \"First Letter\". 
We'll explain this code next week\nbabynames[\"First Letter\"] = babynames[\"Name\"].str[0]\n\n# We construct a simplified DataFrame containing just a subset of columns\nbabynames_new = babynames[[\"Name\", \"First Letter\", \"Year\"]]\nbabynames_new.head()\n\n\n\n\n\n\n\n\nName\nFirst Letter\nYear\n\n\n\n\n115957\nDeandrea\nD\n1990\n\n\n101976\nDeandrea\nD\n1986\n\n\n131029\nLeandrea\nL\n1994\n\n\n108731\nDeandrea\nD\n1988\n\n\n308131\nDeandrea\nD\n1985\n\n\n\n\n\n\n\nIf we form groups for each name in the dataset, \"First Letter\" will be the same for all members of the group. This means that if we simply select the first entry for \"First Letter\" in the group, we’ll represent all data in that group.\nWe can use a dictionary to apply different aggregation functions to each column during grouping.\n\n\n\nAggregating using “first”\n\n\n\nbabynames_new.groupby(\"Name\").agg({\"First Letter\":\"first\", \"Year\":\"max\"}).head()\n\n\n\n\n\n\n\n\nFirst Letter\nYear\n\n\nName\n\n\n\n\n\n\nAadan\nA\n2014\n\n\nAadarsh\nA\n2019\n\n\nAaden\nA\n2020\n\n\nAadhav\nA\n2019\n\n\nAadhini\nA\n2022\n\n\n\n\n\n\n\n\n\n4.2.2 Plotting Birth Counts\nLet’s use .agg to find the total number of babies born in each year. Recall that using .agg with .groupby() follows the format: df.groupby(column_name).agg(aggregation_function). The line of code below gives us the total number of babies born in each year.\n\n\nCode\nbabynames.groupby(\"Year\")[[\"Count\"]].agg(sum).head(5)\n# Alternative 1\n# babynames.groupby(\"Year\")[[\"Count\"]].sum()\n# Alternative 2\n# babynames.groupby(\"Year\").sum(numeric_only=True)\n\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/390646742.py:1: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\n\n\n\n\n\n1910\n9163\n\n\n1911\n9983\n\n\n1912\n17946\n\n\n1913\n22094\n\n\n1914\n26926\n\n\n\n\n\n\n\nHere’s an illustration of the process:\n\nPlotting the Dataframe we obtain tells an interesting story.\n\n\nCode\nimport plotly.express as px\npuzzle2 = babynames.groupby(\"Year\")[[\"Count\"]].agg(sum)\npx.line(puzzle2, y = \"Count\")\n\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/4066413905.py:2: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n \n\n\nA word of warning: we made an enormous assumption when we decided to use this dataset to estimate birth rate. According to this article from the Legistlative Analyst Office, the true number of babies born in California in 2020 was 421,275. 
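To read our dataset's 2020 total directly off the grouped result (a quick sketch reusing the puzzle2 DataFrame computed above):\n\n# Sketch: the Year-indexed result can be indexed with .loc\npuzzle2.loc[2020]\n\n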
However, our plot shows 362,882 babies —— what happened?\n\n\n4.2.3 Summary of the .groupby() Function\nA groupby operation involves some combination of splitting a DataFrame into grouped subframes, applying a function, and combining the results.\nFor some arbitrary DataFrame df below, the code df.groupby(\"year\").agg(sum) does the following:\n\nSplits the DataFrame into sub-DataFrames with rows belonging to the same year.\nApplies the sum function to each column of each sub-DataFrame.\nCombines the results of sum into a single DataFrame, indexed by year.\n\n\n\n\n4.2.4 Revisiting the .agg() Function\n.agg() can take in any function that aggregates several values into one summary value. Some commonly-used aggregation functions can even be called directly, without explicit use of .agg(). For example, we can call .mean() on .groupby():\nbabynames.groupby(\"Year\").mean().head()\nWe can now put this all into practice. Say we want to find the baby name with sex “F” that has fallen in popularity the most in California. To calculate this, we can first create a metric: “Ratio to Peak” (RTP). The RTP is the ratio of babies born with a given name in 2022 to the maximum number of babies born with the name in any year.\nLet’s start with calculating this for one baby, “Jennifer”.\n\n# We filter by babies with sex \"F\" and sort by \"Year\"\nf_babynames = babynames[babynames[\"Sex\"] == \"F\"]\nf_babynames = f_babynames.sort_values([\"Year\"])\n\n# Determine how many Jennifers were born in CA per year\njenn_counts_series = f_babynames[f_babynames[\"Name\"] == \"Jennifer\"][\"Count\"]\n\n# Determine the max number of Jennifers born in a year and the number born in 2022 \n# to calculate RTP\nmax_jenn = max(f_babynames[f_babynames[\"Name\"] == \"Jennifer\"][\"Count\"])\ncurr_jenn = f_babynames[f_babynames[\"Name\"] == \"Jennifer\"][\"Count\"].iloc[-1]\nrtp = curr_jenn / max_jenn\nrtp\n\n0.018796372629843364\n\n\nBy creating a function to calculate RTP and applying it to our DataFrame by using .groupby(), we can easily compute the RTP for all names at once!\n\ndef ratio_to_peak(series):\n return series.iloc[-1] / max(series)\n\n#Using .groupby() to apply the function\nrtp_table = f_babynames.groupby(\"Name\")[[\"Year\", \"Count\"]].agg(ratio_to_peak)\nrtp_table.head()\n\n\n\n\n\n\n\n\nYear\nCount\n\n\nName\n\n\n\n\n\n\nAadhini\n1.0\n1.000000\n\n\nAadhira\n1.0\n0.500000\n\n\nAadhya\n1.0\n0.660000\n\n\nAadya\n1.0\n0.586207\n\n\nAahana\n1.0\n0.269231\n\n\n\n\n\n\n\nIn the rows shown above, we can see that every row shown has a Year value of 1.0.\nThis is the “pandas-ification” of logic you saw in Data 8. Much of the logic you’ve learned in Data 8 will serve you well in Data 100.\n\n\n4.2.5 Nuisance Columns\nNote that you must be careful with which columns you apply the .agg() function to. If we were to apply our function to the table as a whole by doing f_babynames.groupby(\"Name\").agg(ratio_to_peak), executing our .agg() call would result in a TypeError.\n\nWe can avoid this issue (and prevent unintentional loss of data) by explicitly selecting column(s) we want to apply our aggregation function to BEFORE calling .agg(),\n\n\n4.2.6 Renaming Columns After Grouping\nBy default, .groupby will not rename any aggregated columns. As we can see in the table above, the aggregated column is still named Count even though it now represents the RTP. 
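One hedged alternative, shown only as a sketch, is pandas' named-aggregation syntax, which assigns the output column name during the groupby itself (reusing the ratio_to_peak function defined above):\n\n# Sketch: name the aggregated column while aggregating\nf_babynames.groupby(\"Name\").agg(Count_RTP=(\"Count\", ratio_to_peak)).head()\n\nIf we keep the two-column rtp_table computed earlier, renaming after the fact works just as well. 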
For better readability, we can rename Count to Count RTP\n\nrtp_table = rtp_table.rename(columns = {\"Count\": \"Count RTP\"})\nrtp_table\n\n\n\n\n\n\n\n\nYear\nCount RTP\n\n\nName\n\n\n\n\n\n\nAadhini\n1.0\n1.000000\n\n\nAadhira\n1.0\n0.500000\n\n\nAadhya\n1.0\n0.660000\n\n\nAadya\n1.0\n0.586207\n\n\nAahana\n1.0\n0.269231\n\n\n...\n...\n...\n\n\nZyanya\n1.0\n0.466667\n\n\nZyla\n1.0\n1.000000\n\n\nZylah\n1.0\n1.000000\n\n\nZyra\n1.0\n1.000000\n\n\nZyrah\n1.0\n0.833333\n\n\n\n\n13782 rows × 2 columns\n\n\n\n\n\n4.2.7 Some Data Science Payoff\nBy sorting rtp_table, we can see the names whose popularity has decreased the most.\n\nrtp_table = rtp_table.rename(columns = {\"Count\": \"Count RTP\"})\nrtp_table.sort_values(\"Count RTP\").head()\n\n\n\n\n\n\n\n\nYear\nCount RTP\n\n\nName\n\n\n\n\n\n\nDebra\n1.0\n0.001260\n\n\nDebbie\n1.0\n0.002815\n\n\nCarol\n1.0\n0.003180\n\n\nTammy\n1.0\n0.003249\n\n\nSusan\n1.0\n0.003305\n\n\n\n\n\n\n\nTo visualize the above DataFrame, let’s look at the line plot below:\n\n\nCode\nimport plotly.express as px\npx.line(f_babynames[f_babynames[\"Name\"] == \"Debra\"], x = \"Year\", y = \"Count\")\n\n\n \n\n\nWe can get the list of the top 10 names and then plot popularity with the following code:\n\ntop10 = rtp_table.sort_values(\"Count RTP\").head(10).index\npx.line(\n f_babynames[f_babynames[\"Name\"].isin(top10)], \n x = \"Year\", \n y = \"Count\", \n color = \"Name\"\n)\n\n \n\n\nAs a quick exercise, consider what code would compute the total number of babies with each name.\n\n\nCode\nbabynames.groupby(\"Name\")[[\"Count\"]].agg(sum).head()\n# alternative solution: \n# babynames.groupby(\"Name\")[[\"Count\"]].sum()\n\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/1912269730.py:1: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. 
To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nName\n\n\n\n\n\nAadan\n18\n\n\nAadarsh\n6\n\n\nAaden\n647\n\n\nAadhav\n27\n\n\nAadhini\n6", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#groupby-continued", - "href": "pandas_3/pandas_3.html#groupby-continued", - "title": "4  Pandas III", - "section": "4.3 .groupby(), Continued", - "text": "4.3 .groupby(), Continued\nWe’ll work with the elections DataFrame again.\n\n\nCode\nimport pandas as pd\nimport numpy as np\n\nelections = pd.read_csv(\"data/elections.csv\")\nelections.head(5)\n\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\n\n\n\n\n\n\n\n\n4.3.1 Raw GroupBy Objects\nThe result of groupby applied to a DataFrame is a DataFrameGroupBy object, not a DataFrame.\n\ngrouped_by_year = elections.groupby(\"Year\")\ntype(grouped_by_year)\n\npandas.core.groupby.generic.DataFrameGroupBy\n\n\nThere are several ways to look into DataFrameGroupBy objects:\n\ngrouped_by_party = elections.groupby(\"Party\")\ngrouped_by_party.groups\n\n{'American': [22, 126], 'American Independent': [115, 119, 124], 'Anti-Masonic': [6], 'Anti-Monopoly': [38], 'Citizens': [127], 'Communist': [89], 'Constitution': [160, 164, 172], 'Constitutional Union': [24], 'Democratic': [2, 4, 8, 10, 13, 14, 17, 20, 28, 29, 34, 37, 39, 45, 47, 52, 55, 57, 64, 70, 74, 77, 81, 83, 86, 91, 94, 97, 100, 105, 108, 111, 114, 116, 118, 123, 129, 134, 137, 140, 144, 151, 158, 162, 168, 176, 178], 'Democratic-Republican': [0, 1], 'Dixiecrat': [103], 'Farmer–Labor': [78], 'Free Soil': [15, 18], 'Green': [149, 155, 156, 165, 170, 177, 181], 'Greenback': [35], 'Independent': [121, 130, 143, 161, 167, 174], 'Liberal Republican': [31], 'Libertarian': [125, 128, 132, 138, 139, 146, 153, 159, 163, 169, 175, 180], 'National Democratic': [50], 'National Republican': [3, 5], 'National Union': [27], 'Natural Law': [148], 'New Alliance': [136], 'Northern Democratic': [26], 'Populist': [48, 61, 141], 'Progressive': [68, 82, 101, 107], 'Prohibition': [41, 44, 49, 51, 54, 59, 63, 67, 73, 75, 99], 'Reform': [150, 154], 'Republican': [21, 23, 30, 32, 33, 36, 40, 43, 46, 53, 56, 60, 65, 69, 72, 79, 80, 84, 87, 90, 96, 98, 104, 106, 109, 112, 113, 117, 120, 122, 131, 133, 135, 142, 145, 152, 157, 166, 171, 173, 179], 'Socialist': [58, 62, 66, 71, 76, 85, 88, 92, 95, 102], 'Southern Democratic': [25], 'States' Rights': [110], 'Taxpayers': [147], 'Union': [93], 'Union Labor': [42], 'Whig': [7, 9, 11, 12, 16, 19]}\n\n\n\ngrouped_by_party.get_group(\"Socialist\")\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n58\n1904\nEugene V. Debs\nSocialist\n402810\nloss\n2.985897\n\n\n62\n1908\nEugene V. Debs\nSocialist\n420852\nloss\n2.850866\n\n\n66\n1912\nEugene V. Debs\nSocialist\n901551\nloss\n6.004354\n\n\n71\n1916\nAllan L. Benson\nSocialist\n590524\nloss\n3.194193\n\n\n76\n1920\nEugene V. 
Debs\nSocialist\n913693\nloss\n3.428282\n\n\n85\n1928\nNorman Thomas\nSocialist\n267478\nloss\n0.728623\n\n\n88\n1932\nNorman Thomas\nSocialist\n884885\nloss\n2.236211\n\n\n92\n1936\nNorman Thomas\nSocialist\n187910\nloss\n0.412876\n\n\n95\n1940\nNorman Thomas\nSocialist\n116599\nloss\n0.234237\n\n\n102\n1948\nNorman Thomas\nSocialist\n139569\nloss\n0.286312\n\n\n\n\n\n\n\n\n\n4.3.2 Other GroupBy Methods\nThere are many aggregation methods we can use with .agg. Some useful options are:\n\n.mean: creates a new DataFrame with the mean value of each group\n.sum: creates a new DataFrame with the sum of each group\n.max and .min: creates a new DataFrame with the maximum/minimum value of each group\n.first and .last: creates a new DataFrame with the first/last row in each group\n.size: creates a new Series with the number of entries in each group\n.count: creates a new DataFrame with the number of entries, excluding missing values.\n\nLet’s illustrate some examples by creating a DataFrame called df.\n\ndf = pd.DataFrame({'letter':['A','A','B','C','C','C'], \n 'num':[1,2,3,4,np.nan,4], \n 'state':[np.nan, 'tx', 'fl', 'hi', np.nan, 'ak']})\ndf\n\n\n\n\n\n\n\n\nletter\nnum\nstate\n\n\n\n\n0\nA\n1.0\nNaN\n\n\n1\nA\n2.0\ntx\n\n\n2\nB\n3.0\nfl\n\n\n3\nC\n4.0\nhi\n\n\n4\nC\nNaN\nNaN\n\n\n5\nC\n4.0\nak\n\n\n\n\n\n\n\nNote the slight difference between .size() and .count(): while .size() returns a Series and counts the number of entries including the missing values, .count() returns a DataFrame and counts the number of entries in each column excluding missing values.\n\ndf.groupby(\"letter\").size()\n\nletter\nA 2\nB 1\nC 3\ndtype: int64\n\n\n\ndf.groupby(\"letter\").count()\n\n\n\n\n\n\n\n\nnum\nstate\n\n\nletter\n\n\n\n\n\n\nA\n2\n1\n\n\nB\n1\n1\n\n\nC\n2\n2\n\n\n\n\n\n\n\nYou might recall that the value_counts() function in the previous note does something similar. It turns out value_counts() and groupby.size() are the same, except value_counts() sorts the resulting Series in descending order automatically.\n\ndf[\"letter\"].value_counts()\n\nletter\nC 3\nA 2\nB 1\nName: count, dtype: int64\n\n\nThese (and other) aggregation functions are so common that pandas allows for writing shorthand. Instead of explicitly stating the use of .agg, we can call the function directly on the GroupBy object.\nFor example, the following are equivalent:\n\nelections.groupby(\"Candidate\").agg(mean)\nelections.groupby(\"Candidate\").mean()\n\nThere are many other methods that pandas supports. You can check them out on the pandas documentation.\n\n\n4.3.3 Filtering by Group\nAnother common use for GroupBy objects is to filter data by group.\ngroupby.filter takes an argument func, where func is a function that:\n\nTakes a DataFrame object as input\nReturns a single True or False.\n\ngroupby.filter applies func to each group/sub-DataFrame:\n\nIf func returns True for a group, then all rows belonging to the group are preserved.\nIf func returns False for a group, then all rows belonging to that group are filtered out.\n\nIn other words, sub-DataFrames that correspond to True are returned in the final result, whereas those with a False value are not. Importantly, groupby.filter is different from groupby.agg in that an entire sub-DataFrame is returned in the final DataFrame, not just a single row. As a result, groupby.filter preserves the original indices and the column we grouped on does NOT become the index!\n\nTo illustrate how this happens, let’s go back to the elections dataset. 
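First, though, a minimal sketch of the mechanics on the small df defined earlier in this section (the one with letter, num, and state columns): keep only the letter groups whose num values sum to more than 5.\n\n# Sketch: whole groups are kept or dropped, and the original row index is preserved\ndf.groupby(\"letter\").filter(lambda sub: sub[\"num\"].sum() > 5)\n\nOnly the \"C\" rows survive, since that is the only group whose num total exceeds 5. Now, back to elections. 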
Say we want to identify “tight” election years – that is, we want to find all rows that correspond to election years where all candidates in that year won a similar portion of the total vote. Specifically, let’s find all rows corresponding to a year where no candidate won more than 45% of the total vote.\nIn other words, we want to:\n\nFind the years where the maximum % in that year is less than 45%\nReturn all DataFrame rows that correspond to these years\n\nFor each year, we need to find the maximum % among all rows for that year. If this maximum % is lower than 45%, we will tell pandas to keep all rows corresponding to that year.\n\nelections.groupby(\"Year\").filter(lambda sf: sf[\"%\"].max() < 45).head(9)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n23\n1860\nAbraham Lincoln\nRepublican\n1855993\nwin\n39.699408\n\n\n24\n1860\nJohn Bell\nConstitutional Union\n590901\nloss\n12.639283\n\n\n25\n1860\nJohn C. Breckinridge\nSouthern Democratic\n848019\nloss\n18.138998\n\n\n26\n1860\nStephen A. Douglas\nNorthern Democratic\n1380202\nloss\n29.522311\n\n\n66\n1912\nEugene V. Debs\nSocialist\n901551\nloss\n6.004354\n\n\n67\n1912\nEugene W. Chafin\nProhibition\n208156\nloss\n1.386325\n\n\n68\n1912\nTheodore Roosevelt\nProgressive\n4122721\nloss\n27.457433\n\n\n69\n1912\nWilliam Taft\nRepublican\n3486242\nloss\n23.218466\n\n\n70\n1912\nWoodrow Wilson\nDemocratic\n6296284\nwin\n41.933422\n\n\n\n\n\n\n\nWhat’s going on here? In this example, we’ve defined our filtering function, func, to be lambda sf: sf[\"%\"].max() < 45. This filtering function will find the maximum \"%\" value among all entries in the grouped sub-DataFrame, which we call sf. If the maximum value is less than 45, then the filter function will return True and all rows in that grouped sub-DataFrame will appear in the final output DataFrame.\nExamine the DataFrame above. Notice how, in this preview of the first 9 rows, all entries from the years 1860 and 1912 appear. This means that in 1860 and 1912, no candidate in that year won more than 45% of the total vote.\nYou may ask: how is the groupby.filter procedure different to the boolean filtering we’ve seen previously? Boolean filtering considers individual rows when applying a boolean condition. For example, the code elections[elections[\"%\"] < 45] will check the \"%\" value of every single row in elections; if it is less than 45, then that row will be kept in the output. groupby.filter, in contrast, applies a boolean condition across all rows in a group. If not all rows in that group satisfy the condition specified by the filter, the entire group will be discarded in the output.\n\n\n4.3.4 Aggregation with lambda Functions\nWhat if we wish to aggregate our DataFrame using a non-standard function – for example, a function of our own design? We can do so by combining .agg with lambda expressions.\nLet’s first consider a puzzle to jog our memory. We will attempt to find the Candidate from each Party with the highest % of votes.\nA naive approach may be to group by the Party column and aggregate by the maximum.\n\nelections.groupby(\"Party\").agg(max).head(10)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/4278286395.py:1: FutureWarning:\n\nThe provided callable <built-in function max> is currently using DataFrameGroupBy.max. In a future version of pandas, the provided callable will be used directly. 
To keep current behavior pass the string \"max\" instead.\n\n\n\n\n\n\n\n\n\n\nYear\nCandidate\nPopular vote\nResult\n%\n\n\nParty\n\n\n\n\n\n\n\n\n\nAmerican\n1976\nThomas J. Anderson\n873053\nloss\n21.554001\n\n\nAmerican Independent\n1976\nLester Maddox\n9901118\nloss\n13.571218\n\n\nAnti-Masonic\n1832\nWilliam Wirt\n100715\nloss\n7.821583\n\n\nAnti-Monopoly\n1884\nBenjamin Butler\n134294\nloss\n1.335838\n\n\nCitizens\n1980\nBarry Commoner\n233052\nloss\n0.270182\n\n\nCommunist\n1932\nWilliam Z. Foster\n103307\nloss\n0.261069\n\n\nConstitution\n2016\nMichael Peroutka\n203091\nloss\n0.152398\n\n\nConstitutional Union\n1860\nJohn Bell\n590901\nloss\n12.639283\n\n\nDemocratic\n2020\nWoodrow Wilson\n81268924\nwin\n61.344703\n\n\nDemocratic-Republican\n1824\nJohn Quincy Adams\n151271\nwin\n57.210122\n\n\n\n\n\n\n\nThis approach is clearly wrong – the DataFrame claims that Woodrow Wilson won the presidency in 2020.\nWhy is this happening? Here, the max aggregation function is taken over every column independently. Among Democrats, max is computing:\n\nThe most recent Year a Democratic candidate ran for president (2020)\nThe Candidate with the alphabetically “largest” name (“Woodrow Wilson”)\nThe Result with the alphabetically “largest” outcome (“win”)\n\nInstead, let’s try a different approach. We will:\n\nSort the DataFrame so that rows are in descending order of %\nGroup by Party and select the first row of each sub-DataFrame\n\nWhile it may seem unintuitive, sorting elections by descending order of % is extremely helpful. If we then group by Party, the first row of each GroupBy object will contain information about the Candidate with the highest voter %.\n\nelections_sorted_by_percent = elections.sort_values(\"%\", ascending=False)\nelections_sorted_by_percent.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n114\n1964\nLyndon Johnson\nDemocratic\n43127041\nwin\n61.344703\n\n\n91\n1936\nFranklin Roosevelt\nDemocratic\n27752648\nwin\n60.978107\n\n\n120\n1972\nRichard Nixon\nRepublican\n47168710\nwin\n60.907806\n\n\n79\n1920\nWarren Harding\nRepublican\n16144093\nwin\n60.574501\n\n\n133\n1984\nRonald Reagan\nRepublican\n54455472\nwin\n59.023326\n\n\n\n\n\n\n\n\nelections_sorted_by_percent.groupby(\"Party\").agg(lambda x : x.iloc[0]).head(10)\n\n# Equivalent to the below code\n# elections_sorted_by_percent.groupby(\"Party\").agg('first').head(10)\n\n\n\n\n\n\n\n\nYear\nCandidate\nPopular vote\nResult\n%\n\n\nParty\n\n\n\n\n\n\n\n\n\nAmerican\n1856\nMillard Fillmore\n873053\nloss\n21.554001\n\n\nAmerican Independent\n1968\nGeorge Wallace\n9901118\nloss\n13.571218\n\n\nAnti-Masonic\n1832\nWilliam Wirt\n100715\nloss\n7.821583\n\n\nAnti-Monopoly\n1884\nBenjamin Butler\n134294\nloss\n1.335838\n\n\nCitizens\n1980\nBarry Commoner\n233052\nloss\n0.270182\n\n\nCommunist\n1932\nWilliam Z. Foster\n103307\nloss\n0.261069\n\n\nConstitution\n2008\nChuck Baldwin\n199750\nloss\n0.152398\n\n\nConstitutional Union\n1860\nJohn Bell\n590901\nloss\n12.639283\n\n\nDemocratic\n1964\nLyndon Johnson\n43127041\nwin\n61.344703\n\n\nDemocratic-Republican\n1824\nAndrew Jackson\n151271\nloss\n57.210122\n\n\n\n\n\n\n\nHere’s an illustration of the process:\n\nNotice how our code correctly determines that Lyndon Johnson from the Democratic Party has the highest voter %.\nMore generally, lambda functions are used to design custom aggregation functions that aren’t pre-defined by Python. The input parameter x to the lambda function is a GroupBy object. 
Therefore, it should make sense why lambda x : x.iloc[0] selects the first row in each groupby object.\nIn fact, there’s a few different ways to approach this problem. Each approach has different tradeoffs in terms of readability, performance, memory consumption, complexity, etc. We’ve given a few examples below.\nNote: Understanding these alternative solutions is not required. They are given to demonstrate the vast number of problem-solving approaches in pandas.\n\n# Using the idxmax function\nbest_per_party = elections.loc[elections.groupby('Party')['%'].idxmax()]\nbest_per_party.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n22\n1856\nMillard Fillmore\nAmerican\n873053\nloss\n21.554001\n\n\n115\n1968\nGeorge Wallace\nAmerican Independent\n9901118\nloss\n13.571218\n\n\n6\n1832\nWilliam Wirt\nAnti-Masonic\n100715\nloss\n7.821583\n\n\n38\n1884\nBenjamin Butler\nAnti-Monopoly\n134294\nloss\n1.335838\n\n\n127\n1980\nBarry Commoner\nCitizens\n233052\nloss\n0.270182\n\n\n\n\n\n\n\n\n# Using the .drop_duplicates function\nbest_per_party2 = elections.sort_values('%').drop_duplicates(['Party'], keep='last')\nbest_per_party2.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n148\n1996\nJohn Hagelin\nNatural Law\n113670\nloss\n0.118219\n\n\n164\n2008\nChuck Baldwin\nConstitution\n199750\nloss\n0.152398\n\n\n110\n1956\nT. Coleman Andrews\nStates' Rights\n107929\nloss\n0.174883\n\n\n147\n1996\nHoward Phillips\nTaxpayers\n184656\nloss\n0.192045\n\n\n136\n1988\nLenora Fulani\nNew Alliance\n217221\nloss\n0.237804", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#aggregating-data-with-pivot-tables", - "href": "pandas_3/pandas_3.html#aggregating-data-with-pivot-tables", - "title": "4  Pandas III", - "section": "4.4 Aggregating Data with Pivot Tables", - "text": "4.4 Aggregating Data with Pivot Tables\nWe know now that .groupby gives us the ability to group and aggregate data across our DataFrame. The examples above formed groups using just one column in the DataFrame. It’s possible to group by multiple columns at once by passing in a list of column names to .groupby.\nLet’s consider the babynames dataset again. In this problem, we will find the total number of baby names associated with each sex for each year. To do this, we’ll group by both the \"Year\" and \"Sex\" columns.\n\nbabynames.head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nFirst Letter\n\n\n\n\n115957\nCA\nF\n1990\nDeandrea\n5\nD\n\n\n101976\nCA\nF\n1986\nDeandrea\n6\nD\n\n\n131029\nCA\nF\n1994\nLeandrea\n5\nL\n\n\n108731\nCA\nF\n1988\nDeandrea\n5\nD\n\n\n308131\nCA\nM\n1985\nDeandrea\n6\nD\n\n\n\n\n\n\n\n\n# Find the total number of baby names associated with each sex for each \n# year in the data\nbabynames.groupby([\"Year\", \"Sex\"])[[\"Count\"]].agg(sum).head(6)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/3186035650.py:3: FutureWarning:\n\nThe provided callable <built-in function sum> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\n\n\nCount\n\n\nYear\nSex\n\n\n\n\n\n1910\nF\n5950\n\n\nM\n3213\n\n\n1911\nF\n6602\n\n\nM\n3381\n\n\n1912\nF\n9804\n\n\nM\n8142\n\n\n\n\n\n\n\nNotice that both \"Year\" and \"Sex\" serve as the index of the DataFrame (they are both rendered in bold). 
We’ve created a multi-index DataFrame where two different index values, the year and sex, are used to uniquely identify each row.\nThis isn’t the most intuitive way of representing this data – and, because multi-indexed DataFrames have multiple dimensions in their index, they can often be difficult to use.\nAnother strategy to aggregate across two columns is to create a pivot table. You saw these back in Data 8. One set of values is used to create the index of the pivot table; another set is used to define the column names. The values contained in each cell of the table correspond to the aggregated data for each index-column pair.\nHere’s an illustration of the process:\n\nThe best way to understand pivot tables is to see one in action. Let’s return to our original goal of summing the total number of names associated with each combination of year and sex. We’ll call the pandas .pivot_table method to create a new table.\n\n# The `pivot_table` method is used to generate a Pandas pivot table\nimport numpy as np\nbabynames.pivot_table(\n index = \"Year\",\n columns = \"Sex\", \n values = \"Count\", \n aggfunc = np.sum, \n).head(5)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/2548053048.py:3: FutureWarning:\n\nThe provided callable <function sum at 0x106183880> is currently using DataFrameGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string \"sum\" instead.\n\n\n\n\n\n\n\n\n\nSex\nF\nM\n\n\nYear\n\n\n\n\n\n\n1910\n5950\n3213\n\n\n1911\n6602\n3381\n\n\n1912\n9804\n8142\n\n\n1913\n11860\n10234\n\n\n1914\n13815\n13111\n\n\n\n\n\n\n\nLooks a lot better! Now, our DataFrame is structured with clear index-column combinations. Each entry in the pivot table represents the summed count of names for a given combination of \"Year\" and \"Sex\".\nLet’s take a closer look at the code implemented above.\n\nindex = \"Year\" specifies the column name in the original DataFrame that should be used as the index of the pivot table\ncolumns = \"Sex\" specifies the column name in the original DataFrame that should be used to generate the columns of the pivot table\nvalues = \"Count\" indicates what values from the original DataFrame should be used to populate the entry for each index-column combination\naggfunc = np.sum tells pandas what function to use when aggregating the data specified by values. Here, we are summing the name counts for each pair of \"Year\" and \"Sex\"\n\nWe can even include multiple values in the index or columns of our pivot tables.\n\nbabynames_pivot = babynames.pivot_table(\n index=\"Year\", # the rows (turned into index)\n columns=\"Sex\", # the column values\n values=[\"Count\", \"Name\"], \n aggfunc=max, # group operation\n)\nbabynames_pivot.head(6)\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32661/970182367.py:1: FutureWarning:\n\nThe provided callable <built-in function max> is currently using DataFrameGroupBy.max. In a future version of pandas, the provided callable will be used directly. 
To keep current behavior pass the string \"max\" instead.\n\n\n\n\n\n\n\n\n\n\nCount\nName\n\n\nSex\nF\nM\nF\nM\n\n\nYear\n\n\n\n\n\n\n\n\n1910\n295\n237\nYvonne\nWilliam\n\n\n1911\n390\n214\nZelma\nWillis\n\n\n1912\n534\n501\nYvonne\nWoodrow\n\n\n1913\n584\n614\nZelma\nYoshio\n\n\n1914\n773\n769\nZelma\nYoshio\n\n\n1915\n998\n1033\nZita\nYukio\n\n\n\n\n\n\n\nNote that each row provides the number of girls and number of boys having that year’s most common name, and also lists the alphabetically largest girl name and boy name. The counts for number of girls/boys in the resulting DataFrame do not correspond to the names listed. For example, in 1910, the most popular girl name is given to 295 girls, but that name was likely not Yvonne.", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#joining-tables", - "href": "pandas_3/pandas_3.html#joining-tables", - "title": "4  Pandas III", - "section": "4.5 Joining Tables", - "text": "4.5 Joining Tables\nWhen working on data science projects, we’re unlikely to have absolutely all the data we want contained in a single DataFrame – a real-world data scientist needs to grapple with data coming from multiple sources. If we have access to multiple datasets with related information, we can join two or more tables into a single DataFrame.\nTo put this into practice, we’ll revisit the elections dataset.\n\nelections.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\n\n\n\n\n\n\n\nSay we want to understand the popularity of the names of each presidential candidate in 2022. To do this, we’ll need the combined data of babynames and elections.\nWe’ll start by creating a new column containing the first name of each presidential candidate. This will help us join each name in elections to the corresponding name data in babynames.\n\n# This `str` operation splits each candidate's full name at each \n# blank space, then takes just the candidate's first name\nelections[\"First Name\"] = elections[\"Candidate\"].str.split().str[0]\nelections.head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\nFirst Name\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\nAndrew\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\nJohn\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\nAndrew\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\nJohn\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\nAndrew\n\n\n\n\n\n\n\n\n# Here, we'll only consider `babynames` data from 2022\nbabynames_2022 = babynames[babynames[\"Year\"]==2022]\nbabynames_2022.head()\n\n\n\n\n\n\n\n\nState\nSex\nYear\nName\nCount\nFirst Letter\n\n\n\n\n237964\nCA\nF\n2022\nLeandra\n10\nL\n\n\n404916\nCA\nM\n2022\nLeandro\n99\nL\n\n\n405892\nCA\nM\n2022\nAndreas\n14\nA\n\n\n235927\nCA\nF\n2022\nAndrea\n322\nA\n\n\n405695\nCA\nM\n2022\nDeandre\n18\nD\n\n\n\n\n\n\n\nNow, we’re ready to join the two tables. 
pd.merge is the pandas method used to join DataFrames together.\n\nmerged = pd.merge(left = elections, right = babynames_2022, \\\n left_on = \"First Name\", right_on = \"Name\")\nmerged.head()\n# Notice that pandas automatically specifies `Year_x` and `Year_y` \n# when both merged DataFrames have the same column name to avoid confusion\n\n# Second option\n# merged = elections.merge(right = babynames_2022, \\\n # left_on = \"First Name\", right_on = \"Name\")\n\n\n\n\n\n\n\n\nYear_x\nCandidate\nParty\nPopular vote\nResult\n%\nFirst Name\nState\nSex\nYear_y\nName\nCount\nFirst Letter\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.210122\nAndrew\nCA\nM\n2022\nAndrew\n741\nA\n\n\n1\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.203927\nAndrew\nCA\nM\n2022\nAndrew\n741\nA\n\n\n2\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.574789\nAndrew\nCA\nM\n2022\nAndrew\n741\nA\n\n\n3\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.789878\nJohn\nCA\nM\n2022\nJohn\n490\nJ\n\n\n4\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.796073\nJohn\nCA\nM\n2022\nJohn\n490\nJ\n\n\n\n\n\n\n\nLet’s take a closer look at the parameters:\n\nleft and right parameters are used to specify the DataFrames to be joined.\nleft_on and right_on parameters are assigned to the string names of the columns to be used when performing the join. These two on parameters tell pandas what values should act as pairing keys to determine which rows to merge across the DataFrames. We’ll talk more about this idea of a pairing key next lecture.", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "pandas_3/pandas_3.html#parting-note", - "href": "pandas_3/pandas_3.html#parting-note", - "title": "4  Pandas III", - "section": "4.6 Parting Note", - "text": "4.6 Parting Note\nCongratulations! We finally tackled pandas. Don’t worry if you are still not feeling very comfortable with it—you will have plenty of chances to practice over the next few weeks.\nNext, we will get our hands dirty with some real-world datasets and use our pandas knowledge to conduct some exploratory data analysis.", - "crumbs": [ - "4  Pandas III" - ] - }, - { - "objectID": "eda/eda.html", - "href": "eda/eda.html", - "title": "5  Data Cleaning and EDA", - "section": "", - "text": "5.1 Structure\nWe often prefer rectangular data for data analysis. Rectangular structures are easy to manipulate and analyze. A key element of data cleaning is about transforming data to be more rectangular.\nThere are two kinds of rectangular data: tables and matrices. Tables have named columns with different data types and are manipulated using data transformation languages. Matrices contain numeric data of the same type and are manipulated using linear algebra.", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#structure", - "href": "eda/eda.html#structure", - "title": "5  Data Cleaning and EDA", - "section": "", - "text": "5.1.1 File Formats\nThere are many file types for storing structured data: TSV, JSON, XML, ASCII, SAS, etc. We’ll only cover CSV, TSV, and JSON in lecture, but you’ll likely encounter other formats as you work with different datasets. Reading documentation is your best bet for understanding how to process the multitude of different file types.\n\n5.1.1.1 CSV\nCSVs, which stand for Comma-Separated Values, are a common tabular data format. In the past two pandas lectures, we briefly touched on the idea of file format: the way data is encoded in a file for storage. 
Specifically, our elections and babynames datasets were stored and loaded as CSVs:\n\npd.read_csv(\"data/elections.csv\").head(5)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.21\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.79\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.20\n\n\n3\n1828\nJohn Quincy Adams\nNational Republican\n500897\nloss\n43.80\n\n\n4\n1832\nAndrew Jackson\nDemocratic\n702735\nwin\n54.57\n\n\n\n\n\n\n\nTo better understand the properties of a CSV, let’s take a look at the first few rows of the raw data file to see what it looks like before being loaded into a DataFrame. We’ll use the repr() function to return the raw string with its special characters:\n\nwith open(\"data/elections.csv\", \"r\") as table:\n i = 0\n for row in table:\n print(repr(row))\n i += 1\n if i > 3:\n break\n\n'Year,Candidate,Party,Popular vote,Result,%\\n'\n'1824,Andrew Jackson,Democratic-Republican,151271,loss,57.21012204\\n'\n'1824,John Quincy Adams,Democratic-Republican,113142,win,42.78987796\\n'\n'1828,Andrew Jackson,Democratic,642806,win,56.20392707\\n'\n\n\nEach row, or record, in the data is delimited by a newline \\n. Each column, or field, in the data is delimited by a comma , (hence, comma-separated!).\n\n\n5.1.1.2 TSV\nAnother common file type is TSV (Tab-Separated Values). In a TSV, records are still delimited by a newline \\n, while fields are delimited by \\t tab character.\nLet’s check out the first few rows of the raw TSV file. Again, we’ll use the repr() function so that print shows the special characters.\n\nwith open(\"data/elections.txt\", \"r\") as table:\n i = 0\n for row in table:\n print(repr(row))\n i += 1\n if i > 3:\n break\n\n'\\ufeffYear\\tCandidate\\tParty\\tPopular vote\\tResult\\t%\\n'\n'1824\\tAndrew Jackson\\tDemocratic-Republican\\t151271\\tloss\\t57.21012204\\n'\n'1824\\tJohn Quincy Adams\\tDemocratic-Republican\\t113142\\twin\\t42.78987796\\n'\n'1828\\tAndrew Jackson\\tDemocratic\\t642806\\twin\\t56.20392707\\n'\n\n\nTSVs can be loaded into pandas using pd.read_csv. We’ll need to specify the delimiter with parametersep='\\t' (documentation).\n\npd.read_csv(\"data/elections.txt\", sep='\\t').head(3)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.21\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.79\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.20\n\n\n\n\n\n\n\nAn issue with CSVs and TSVs comes up whenever there are commas or tabs within the records. How does pandas differentiate between a comma delimiter vs. a comma within the field itself, for example 8,900? To remedy this, check out the quotechar parameter.\n\n\n5.1.1.3 JSON\nJSON (JavaScript Object Notation) files behave similarly to Python dictionaries. 
A raw JSON is shown below.\n\nwith open(\"data/elections.json\", \"r\") as table:\n i = 0\n for row in table:\n print(row)\n i += 1\n if i > 8:\n break\n\n[\n\n {\n\n \"Year\": 1824,\n\n \"Candidate\": \"Andrew Jackson\",\n\n \"Party\": \"Democratic-Republican\",\n\n \"Popular vote\": 151271,\n\n \"Result\": \"loss\",\n\n \"%\": 57.21012204\n\n },\n\n\n\nJSON files can be loaded into pandas using pd.read_json.\n\npd.read_json('data/elections.json').head(3)\n\n\n\n\n\n\n\n\nYear\nCandidate\nParty\nPopular vote\nResult\n%\n\n\n\n\n0\n1824\nAndrew Jackson\nDemocratic-Republican\n151271\nloss\n57.21\n\n\n1\n1824\nJohn Quincy Adams\nDemocratic-Republican\n113142\nwin\n42.79\n\n\n2\n1828\nAndrew Jackson\nDemocratic\n642806\nwin\n56.20\n\n\n\n\n\n\n\n\n5.1.1.3.1 EDA with JSON: Berkeley COVID-19 Data\nThe City of Berkeley Open Data website has a dataset with COVID-19 Confirmed Cases among Berkeley residents by date. Let’s download the file and save it as a JSON (note the source URL file type is also a JSON). In the interest of reproducible data science, we will download the data programatically. We have defined some helper functions in the ds100_utils.py file that we can reuse these helper functions in many different notebooks.\n\nfrom ds100_utils import fetch_and_cache\n\ncovid_file = fetch_and_cache(\n \"https://data.cityofberkeley.info/api/views/xn6j-b766/rows.json?accessType=DOWNLOAD\",\n \"confirmed-cases.json\",\n force=False)\ncovid_file # a file path wrapper object\n\nUsing cached version that was downloaded (UTC): Wed Aug 28 15:54:12 2024\n\n\nPosixPath('data/confirmed-cases.json')\n\n\n\n5.1.1.3.1.1 File Size\nLet’s start our analysis by getting a rough estimate of the size of the dataset to inform the tools we use to view the data. For relatively small datasets, we can use a text editor or spreadsheet. For larger datasets, more programmatic exploration or distributed computing tools may be more fitting. Here we will use Python tools to probe the file.\nSince there seem to be text files, let’s investigate the number of lines, which often corresponds to the number of records\n\nimport os\n\nprint(covid_file, \"is\", os.path.getsize(covid_file) / 1e6, \"MB\")\n\nwith open(covid_file, \"r\") as f:\n print(covid_file, \"is\", sum(1 for l in f), \"lines.\")\n\ndata/confirmed-cases.json is 0.116367 MB\ndata/confirmed-cases.json is 1110 lines.\n\n\n\n\n5.1.1.3.1.2 Unix Commands\nAs part of the EDA workflow, Unix commands can come in very handy. In fact, there’s an entire book called “Data Science at the Command Line” that explores this idea in depth! In Jupyter/IPython, you can prefix lines with ! 
to execute arbitrary Unix commands, and within those lines, you can refer to Python variables and expressions with the syntax {expr}.\nHere, we use the ls command to list files, using the -lh flags, which request “long format with information in human-readable form.” We also use the wc command for “word count,” but with the -l flag, which asks for line counts instead of words.\nThese two give us the same information as the code above, albeit in a slightly different form:\n\n!ls -lh {covid_file}\n!wc -l {covid_file}\n\n-rw-r--r-- 1 xiaoruiliu staff 114K Aug 28 15:54 data/confirmed-cases.json\n 1109 data/confirmed-cases.json\n\n\n\n\n5.1.1.3.1.3 File Contents\nLet’s explore the data format using Python.\n\nwith open(covid_file, \"r\") as f:\n for i, row in enumerate(f):\n print(repr(row)) # print raw strings\n if i >= 4: break\n\n'{\\n'\n' \"meta\" : {\\n'\n' \"view\" : {\\n'\n' \"id\" : \"xn6j-b766\",\\n'\n' \"name\" : \"COVID-19 Confirmed Cases\",\\n'\n\n\nWe can use the head Unix command (which is where pandas’ head method comes from!) to see the first few lines of the file:\n\n!head -5 {covid_file}\n\n{\n \"meta\" : {\n \"view\" : {\n \"id\" : \"xn6j-b766\",\n \"name\" : \"COVID-19 Confirmed Cases\",\n\n\nIn order to load the JSON file into pandas, Let’s first do some EDA with Oython’s json package to understand the particular structure of this JSON file so that we can decide what (if anything) to load into pandas. Python has relatively good support for JSON data since it closely matches the internal python object model. In the following cell we import the entire JSON datafile into a python dictionary using the json package.\n\nimport json\n\nwith open(covid_file, \"rb\") as f:\n covid_json = json.load(f)\n\nThe covid_json variable is now a dictionary encoding the data in the file:\n\ntype(covid_json)\n\ndict\n\n\nWe can examine what keys are in the top level JSON object by listing out the keys.\n\ncovid_json.keys()\n\ndict_keys(['meta', 'data'])\n\n\nObservation: The JSON dictionary contains a meta key which likely refers to metadata (data about the data). Metadata is often maintained with the data and can be a good source of additional information.\nWe can investigate the metadata further by examining the keys associated with the metadata.\n\ncovid_json['meta'].keys()\n\ndict_keys(['view'])\n\n\nThe meta key contains another dictionary called view. This likely refers to metadata about a particular “view” of some underlying database. We will learn more about views when we study SQL later in the class.\n\ncovid_json['meta']['view'].keys()\n\ndict_keys(['id', 'name', 'assetType', 'attribution', 'averageRating', 'category', 'createdAt', 'description', 'displayType', 'downloadCount', 'hideFromCatalog', 'hideFromDataJson', 'newBackend', 'numberOfComments', 'oid', 'provenance', 'publicationAppendEnabled', 'publicationDate', 'publicationGroup', 'publicationStage', 'rowsUpdatedAt', 'rowsUpdatedBy', 'tableId', 'totalTimesRated', 'viewCount', 'viewLastModified', 'viewType', 'approvals', 'columns', 'grants', 'metadata', 'owner', 'query', 'rights', 'tableAuthor', 'tags', 'flags'])\n\n\nNotice that this a nested/recursive data structure. As we dig deeper we reveal more and more keys and the corresponding data:\nmeta\n|-> data\n | ... (haven't explored yet)\n|-> view\n | -> id\n | -> name\n | -> attribution \n ...\n | -> description\n ...\n | -> columns\n ...\nThere is a key called description in the view sub dictionary. 
This likely contains a description of the data:\n\nprint(covid_json['meta']['view']['description'])\n\nCounts of confirmed COVID-19 cases among Berkeley residents by date.\n\n\n\n\n5.1.1.3.1.4 Examining the Data Field for Records\nWe can look at a few entries in the data field. This is what we’ll load into pandas.\n\nfor i in range(3):\n print(f\"{i:03} | {covid_json['data'][i]}\")\n\n000 | ['row-kzbg.v7my-c3y2', '00000000-0000-0000-0405-CB14DE51DAA7', 0, 1643733903, None, 1643733903, None, '{ }', '2020-02-28T00:00:00', '1', '1']\n001 | ['row-jkyx_9u4r-h2yw', '00000000-0000-0000-F806-86D0DBE0E17F', 0, 1643733903, None, 1643733903, None, '{ }', '2020-02-29T00:00:00', '0', '1']\n002 | ['row-qifg_4aug-y3ym', '00000000-0000-0000-2DCE-4D1872F9B216', 0, 1643733903, None, 1643733903, None, '{ }', '2020-03-01T00:00:00', '0', '1']\n\n\nObservations: * These look like equal-length records, so maybe data is a table! * But what do each of values in the record mean? Where can we find column headers?\nFor that, we’ll need the columns key in the metadata dictionary. This returns a list:\n\ntype(covid_json['meta']['view']['columns'])\n\nlist\n\n\n\n\n5.1.1.3.1.5 Summary of exploring the JSON file\n\nThe above metadata tells us a lot about the columns in the data including column names, potential data anomalies, and a basic statistic.\nBecause of its non-tabular structure, JSON makes it easier (than CSV) to create self-documenting data, meaning that information about the data is stored in the same file as the data.\nSelf-documenting data can be helpful since it maintains its own description and these descriptions are more likely to be updated as data changes.\n\n\n\n5.1.1.3.1.6 Loading COVID Data into pandas\nFinally, let’s load the data (not the metadata) into a pandas DataFrame. In the following block of code we:\n\nTranslate the JSON records into a DataFrame:\n\nfields: covid_json['meta']['view']['columns']\nrecords: covid_json['data']\n\nRemove columns that have no metadata description. This would be a bad idea in general, but here we remove these columns since the above analysis suggests they are unlikely to contain useful information.\nExamine the tail of the table.\n\n\n# Load the data from JSON and assign column titles\ncovid = pd.DataFrame(\n covid_json['data'],\n columns=[c['name'] for c in covid_json['meta']['view']['columns']])\n\ncovid.tail()\n\n\n\n\n\n\n\n\nsid\nid\nposition\ncreated_at\ncreated_meta\nupdated_at\nupdated_meta\nmeta\nDate\nNew Cases\nCumulative Cases\n\n\n\n\n699\nrow-49b6_x8zv.gyum\n00000000-0000-0000-A18C-9174A6D05774\n0\n1643733903\nNone\n1643733903\nNone\n{ }\n2022-01-27T00:00:00\n106\n10694\n\n\n700\nrow-gs55-p5em.y4v9\n00000000-0000-0000-F41D-5724AEABB4D6\n0\n1643733903\nNone\n1643733903\nNone\n{ }\n2022-01-28T00:00:00\n223\n10917\n\n\n701\nrow-3pyj.tf95-qu67\n00000000-0000-0000-BEE3-B0188D2518BD\n0\n1643733903\nNone\n1643733903\nNone\n{ }\n2022-01-29T00:00:00\n139\n11056\n\n\n702\nrow-cgnd.8syv.jvjn\n00000000-0000-0000-C318-63CF75F7F740\n0\n1643733903\nNone\n1643733903\nNone\n{ }\n2022-01-30T00:00:00\n33\n11089\n\n\n703\nrow-qywv_24x6-237y\n00000000-0000-0000-FE92-9789FED3AA20\n0\n1643733903\nNone\n1643733903\nNone\n{ }\n2022-01-31T00:00:00\n42\n11131\n\n\n\n\n\n\n\n\n\n\n\n\n5.1.2 Primary and Foreign Keys\nLast time, we introduced .merge as the pandas method for joining multiple DataFrames together. In our discussion of joins, we touched on the idea of using a “key” to determine what rows should be merged from each table. 
Let’s take a moment to examine this idea more closely.\nThe primary key is the column or set of columns in a table that uniquely determine the values of the remaining columns. It can be thought of as the unique identifier for each individual row in the table. For example, a table of Data 100 students might use each student’s Cal ID as the primary key.\n\n\n\n\n\n\n\n\n\nCal ID\nName\nMajor\n\n\n\n\n0\n3034619471\nOski\nData Science\n\n\n1\n3035619472\nOllie\nComputer Science\n\n\n2\n3025619473\nOrrie\nData Science\n\n\n3\n3046789372\nOllie\nEconomics\n\n\n\n\n\n\n\nThe foreign key is the column or set of columns in a table that reference primary keys in other tables. Knowing a dataset’s foreign keys can be useful when assigning the left_on and right_on parameters of .merge. In the table of office hour tickets below, \"Cal ID\" is a foreign key referencing the previous table.\n\n\n\n\n\n\n\n\n\nOH Request\nCal ID\nQuestion\n\n\n\n\n0\n1\n3034619471\nHW 2 Q1\n\n\n1\n2\n3035619472\nHW 2 Q3\n\n\n2\n3\n3025619473\nLab 3 Q4\n\n\n3\n4\n3035619472\nHW 2 Q7\n\n\n\n\n\n\n\n\n\n5.1.3 Variable Types\nVariables are columns. A variable is a measurement of a particular concept. Variables have two common properties: data type/storage type and variable type/feature type. The data type of a variable indicates how each variable value is stored in memory (integer, floating point, boolean, etc.) and affects which pandas functions are used. The variable type is a conceptualized measurement of information (and therefore indicates what values a variable can take on). Variable type is identified through expert knowledge, exploring the data itself, or consulting the data codebook. The variable type affects how one visualizes and inteprets the data. In this class, “variable types” are conceptual.\nAfter loading data into a file, it’s a good idea to take the time to understand what pieces of information are encoded in the dataset. In particular, we want to identify what variable types are present in our data. Broadly speaking, we can categorize variables into one of two overarching types.\nQuantitative variables describe some numeric quantity or amount. We can divide quantitative data further into:\n\nContinuous quantitative variables: numeric data that can be measured on a continuous scale to arbitrary precision. Continuous variables do not have a strict set of possible values – they can be recorded to any number of decimal places. For example, weights, GPA, or CO2 concentrations.\nDiscrete quantitative variables: numeric data that can only take on a finite set of possible values. For example, someone’s age or the number of siblings they have.\n\nQualitative variables, also known as categorical variables, describe data that isn’t measuring some quantity or amount. The sub-categories of categorical data are:\n\nOrdinal qualitative variables: categories with ordered levels. Specifically, ordinal variables are those where the difference between levels has no consistent, quantifiable meaning. Some examples include levels of education (high school, undergrad, grad, etc.), income bracket (low, medium, high), or Yelp rating.\nNominal qualitative variables: categories with no specific order. For example, someone’s political affiliation or Cal ID number.\n\n\n\n\nClassification of variable types\n\n\nNote that many variables don’t sit neatly in just one of these categories. 
Qualitative variables could have numeric levels, and conversely, quantitative variables could be stored as strings.", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#granularity-scope-and-temporality", - "href": "eda/eda.html#granularity-scope-and-temporality", - "title": "5  Data Cleaning and EDA", - "section": "5.2 Granularity, Scope, and Temporality", - "text": "5.2 Granularity, Scope, and Temporality\nAfter understanding the structure of the dataset, the next task is to determine what exactly the data represents. We’ll do so by considering the data’s granularity, scope, and temporality.\n\n5.2.1 Granularity\nThe granularity of a dataset is what a single row represents. You can also think of it as the level of detail included in the data. To determine the data’s granularity, ask: what does each row in the dataset represent? Fine-grained data contains a high level of detail, with a single row representing a small individual unit. For example, each record may represent one person. Coarse-grained data is encoded such that a single row represents a large individual unit – for example, each record may represent a group of people.\n\n\n5.2.2 Scope\nThe scope of a dataset is the subset of the population covered by the data. If we were investigating student performance in Data Science courses, a dataset with a narrow scope might encompass all students enrolled in Data 100 whereas a dataset with an expansive scope might encompass all students in California.\n\n\n5.2.3 Temporality\nThe temporality of a dataset describes the periodicity over which the data was collected as well as when the data was most recently collected or updated.\nTime and date fields of a dataset could represent a few things:\n\nwhen the “event” happened\nwhen the data was collected, or when it was entered into the system\nwhen the data was copied into the database\n\nTo fully understand the temporality of the data, it also may be necessary to standardize time zones or inspect recurring time-based trends in the data (do patterns recur in 24-hour periods? Over the course of a month? Seasonally?). The convention for standardizing time is the Coordinated Universal Time (UTC), an international time standard measured at 0 degrees latitude that stays consistent throughout the year (no daylight savings). We can represent Berkeley’s time zone, Pacific Standard Time (PST), as UTC-7 (with daylight savings).\n\n5.2.3.1 Temporality with pandas’ dt accessors\nLet’s briefly look at how we can use pandas’ dt accessors to work with dates/times in a dataset using the dataset you’ll see in Lab 3: the Berkeley PD Calls for Service dataset.\n\n\nCode\ncalls = pd.read_csv(\"data/Berkeley_PD_-_Calls_for_Service.csv\")\ncalls.head()\n\n\n\n\n\n\n\n\n\nCASENO\nOFFENSE\nEVENTDT\nEVENTTM\nCVLEGEND\nCVDOW\nInDbDate\nBlock_Location\nBLKADDR\nCity\nState\n\n\n\n\n0\n21014296\nTHEFT MISD. (UNDER $950)\n04/01/2021 12:00:00 AM\n10:58\nLARCENY\n4\n06/15/2021 12:00:00 AM\nBerkeley, CA\\n(37.869058, -122.270455)\nNaN\nBerkeley\nCA\n\n\n1\n21014391\nTHEFT MISD. (UNDER $950)\n04/01/2021 12:00:00 AM\n10:38\nLARCENY\n4\n06/15/2021 12:00:00 AM\nBerkeley, CA\\n(37.869058, -122.270455)\nNaN\nBerkeley\nCA\n\n\n2\n21090494\nTHEFT MISD. 
(UNDER $950)\n04/19/2021 12:00:00 AM\n12:15\nLARCENY\n1\n06/15/2021 12:00:00 AM\n2100 BLOCK HASTE ST\\nBerkeley, CA\\n(37.864908,...\n2100 BLOCK HASTE ST\nBerkeley\nCA\n\n\n3\n21090204\nTHEFT FELONY (OVER $950)\n02/13/2021 12:00:00 AM\n17:00\nLARCENY\n6\n06/15/2021 12:00:00 AM\n2600 BLOCK WARRING ST\\nBerkeley, CA\\n(37.86393...\n2600 BLOCK WARRING ST\nBerkeley\nCA\n\n\n4\n21090179\nBURGLARY AUTO\n02/08/2021 12:00:00 AM\n6:20\nBURGLARY - VEHICLE\n1\n06/15/2021 12:00:00 AM\n2700 BLOCK GARBER ST\\nBerkeley, CA\\n(37.86066,...\n2700 BLOCK GARBER ST\nBerkeley\nCA\n\n\n\n\n\n\n\nLooks like there are three columns with dates/times: EVENTDT, EVENTTM, and InDbDate.\nMost likely, EVENTDT stands for the date when the event took place, EVENTTM stands for the time of day the event took place (in 24-hr format), and InDbDate is the date this call is recorded onto the database.\nIf we check the data type of these columns, we will see they are stored as strings. We can convert them to datetime objects using pandas to_datetime function.\n\ncalls[\"EVENTDT\"] = pd.to_datetime(calls[\"EVENTDT\"])\ncalls.head()\n\n/var/folders/gr/vb80r2qs5td4rqbnv4dn2klh0000gn/T/ipykernel_32707/874729699.py:1: UserWarning:\n\nCould not infer format, so each element will be parsed individually, falling back to `dateutil`. To ensure parsing is consistent and as-expected, please specify a format.\n\n\n\n\n\n\n\n\n\n\nCASENO\nOFFENSE\nEVENTDT\nEVENTTM\nCVLEGEND\nCVDOW\nInDbDate\nBlock_Location\nBLKADDR\nCity\nState\n\n\n\n\n0\n21014296\nTHEFT MISD. (UNDER $950)\n2021-04-01\n10:58\nLARCENY\n4\n06/15/2021 12:00:00 AM\nBerkeley, CA\\n(37.869058, -122.270455)\nNaN\nBerkeley\nCA\n\n\n1\n21014391\nTHEFT MISD. (UNDER $950)\n2021-04-01\n10:38\nLARCENY\n4\n06/15/2021 12:00:00 AM\nBerkeley, CA\\n(37.869058, -122.270455)\nNaN\nBerkeley\nCA\n\n\n2\n21090494\nTHEFT MISD. 
(UNDER $950)\n2021-04-19\n12:15\nLARCENY\n1\n06/15/2021 12:00:00 AM\n2100 BLOCK HASTE ST\\nBerkeley, CA\\n(37.864908,...\n2100 BLOCK HASTE ST\nBerkeley\nCA\n\n\n3\n21090204\nTHEFT FELONY (OVER $950)\n2021-02-13\n17:00\nLARCENY\n6\n06/15/2021 12:00:00 AM\n2600 BLOCK WARRING ST\\nBerkeley, CA\\n(37.86393...\n2600 BLOCK WARRING ST\nBerkeley\nCA\n\n\n4\n21090179\nBURGLARY AUTO\n2021-02-08\n6:20\nBURGLARY - VEHICLE\n1\n06/15/2021 12:00:00 AM\n2700 BLOCK GARBER ST\\nBerkeley, CA\\n(37.86066,...\n2700 BLOCK GARBER ST\nBerkeley\nCA\n\n\n\n\n\n\n\nNow, we can use the dt accessor on this column.\nWe can get the month:\n\ncalls[\"EVENTDT\"].dt.month.head()\n\n0 4\n1 4\n2 4\n3 2\n4 2\nName: EVENTDT, dtype: int32\n\n\nWhich day of the week the date is on:\n\ncalls[\"EVENTDT\"].dt.dayofweek.head()\n\n0 3\n1 3\n2 0\n3 5\n4 0\nName: EVENTDT, dtype: int32\n\n\nCheck the mimimum values to see if there are any suspicious-looking, 70s dates:\n\ncalls.sort_values(\"EVENTDT\").head()\n\n\n\n\n\n\n\n\nCASENO\nOFFENSE\nEVENTDT\nEVENTTM\nCVLEGEND\nCVDOW\nInDbDate\nBlock_Location\nBLKADDR\nCity\nState\n\n\n\n\n2513\n20057398\nBURGLARY COMMERCIAL\n2020-12-17\n16:05\nBURGLARY - COMMERCIAL\n4\n06/15/2021 12:00:00 AM\n600 BLOCK GILMAN ST\\nBerkeley, CA\\n(37.878405,...\n600 BLOCK GILMAN ST\nBerkeley\nCA\n\n\n624\n20057207\nASSAULT/BATTERY MISD.\n2020-12-17\n16:50\nASSAULT\n4\n06/15/2021 12:00:00 AM\n2100 BLOCK SHATTUCK AVE\\nBerkeley, CA\\n(37.871...\n2100 BLOCK SHATTUCK AVE\nBerkeley\nCA\n\n\n154\n20092214\nTHEFT FROM AUTO\n2020-12-17\n18:30\nLARCENY - FROM VEHICLE\n4\n06/15/2021 12:00:00 AM\n800 BLOCK SHATTUCK AVE\\nBerkeley, CA\\n(37.8918...\n800 BLOCK SHATTUCK AVE\nBerkeley\nCA\n\n\n659\n20057324\nTHEFT MISD. (UNDER $950)\n2020-12-17\n15:44\nLARCENY\n4\n06/15/2021 12:00:00 AM\n1800 BLOCK 4TH ST\\nBerkeley, CA\\n(37.869888, -...\n1800 BLOCK 4TH ST\nBerkeley\nCA\n\n\n993\n20057573\nBURGLARY RESIDENTIAL\n2020-12-17\n22:15\nBURGLARY - RESIDENTIAL\n4\n06/15/2021 12:00:00 AM\n1700 BLOCK STUART ST\\nBerkeley, CA\\n(37.857495...\n1700 BLOCK STUART ST\nBerkeley\nCA\n\n\n\n\n\n\n\nDoesn’t look like it! We are good!\nWe can also do many things with the dt accessor like switching time zones and converting time back to UNIX/POSIX time. Check out the documentation on .dt accessor and time series/date functionality.", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#faithfulness", - "href": "eda/eda.html#faithfulness", - "title": "5  Data Cleaning and EDA", - "section": "5.3 Faithfulness", - "text": "5.3 Faithfulness\nAt this stage in our data cleaning and EDA workflow, we’ve achieved quite a lot: we’ve identified how our data is structured, come to terms with what information it encodes, and gained insight as to how it was generated. Throughout this process, we should always recall the original intent of our work in Data Science – to use data to better understand and model the real world. To achieve this goal, we need to ensure that the data we use is faithful to reality; that is, that our data accurately captures the “real world.”\nData used in research or industry is often “messy” – there may be errors or inaccuracies that impact the faithfulness of the dataset. 
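A few quick pandas checks can surface this kind of messiness early. The sketch below is a pattern rather than a recipe: it assumes a hypothetical DataFrame df with made-up column names (counts, birthday, age).

import pandas as pd

def quick_faithfulness_checks(df):
    # Red-flag counts for a hypothetical DataFrame `df`
    print("Duplicated rows:", df.duplicated().sum())
    print("Negative counts:", (df["counts"] < 0).sum())
    birthdays = pd.to_datetime(df["birthday"])
    print("Dates in the future:", (birthdays > pd.Timestamp.today()).sum())
    # Obvious dependency: age should roughly agree with birthday
    implied_age = (pd.Timestamp.today() - birthdays).dt.days // 365
    print("Age/birthday mismatches:", (implied_age != df["age"]).sum())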
Signs that data may not be faithful include:\n\nUnrealistic or “incorrect” values, such as negative counts, locations that don’t exist, or dates set in the future\nViolations of obvious dependencies, like an age that does not match a birthday\nClear signs that data was entered by hand, which can lead to spelling errors or fields that are incorrectly shifted\nSigns of data falsification, such as fake email addresses or repeated use of the same names\nDuplicated records or fields containing the same information\nTruncated data, e.g. Microsoft Excel would limit the number of rows to 655536 and the number of columns to 255\n\nWe often solve some of these more common issues in the following ways:\n\nSpelling errors: apply corrections or drop records that aren’t in a dictionary\nTime zone inconsistencies: convert to a common time zone (e.g. UTC)\nDuplicated records or fields: identify and eliminate duplicates (using primary keys)\nUnspecified or inconsistent units: infer the units and check that values are in reasonable ranges in the data\n\n\n5.3.1 Missing Values\nAnother common issue encountered with real-world datasets is that of missing data. One strategy to resolve this is to simply drop any records with missing values from the dataset. This does, however, introduce the risk of inducing biases – it is possible that the missing or corrupt records may be systemically related to some feature of interest in the data. Another solution is to keep the data as NaN values.\nA third method to address missing data is to perform imputation: infer the missing values using other data available in the dataset. There is a wide variety of imputation techniques that can be implemented; some of the most common are listed below.\n\nAverage imputation: replace missing values with the average value for that field\nHot deck imputation: replace missing values with some random value\nRegression imputation: develop a model to predict missing values and replace with the predicted value from the model.\nMultiple imputation: replace missing values with multiple random values\n\nRegardless of the strategy used to deal with missing data, we should think carefully about why particular records or fields may be missing – this can help inform whether or not the absence of these values is significant or meaningful.", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#eda-demo-1-tuberculosis-in-the-united-states", - "href": "eda/eda.html#eda-demo-1-tuberculosis-in-the-united-states", - "title": "5  Data Cleaning and EDA", - "section": "5.4 EDA Demo 1: Tuberculosis in the United States", - "text": "5.4 EDA Demo 1: Tuberculosis in the United States\nNow, let’s walk through the data-cleaning and EDA workflow to see what can we learn about the presence of Tuberculosis in the United States!\nWe will examine the data included in the original CDC article published in 2021.\n\n5.4.1 CSVs and Field Names\nSuppose Table 1 was saved as a CSV file located in data/cdc_tuberculosis.csv.\nWe can then explore the CSV (which is a text file, and does not contain binary-encoded data) in many ways: 1. Using a text editor like emacs, vim, VSCode, etc. 2. Opening the CSV directly in DataHub (read-only), Excel, Google Sheets, etc. 3. The Python file object 4. pandas, using pd.read_csv()\nTo try out options 1 and 2, you can view or download the Tuberculosis from the lecture demo notebook under the data folder in the left hand menu. 
Notice how the CSV file is a type of rectangular data (i.e., tabular data) stored as comma-separated values.\nNext, let’s try out option 3 using the Python file object. We’ll look at the first four lines:\n\n\nCode\nwith open(\"data/cdc_tuberculosis.csv\", \"r\") as f:\n i = 0\n for row in f:\n print(row)\n i += 1\n if i > 3:\n break\n\n\n,No. of TB cases,,,TB incidence,,\n\nU.S. jurisdiction,2019,2020,2021,2019,2020,2021\n\nTotal,\"8,900\",\"7,173\",\"7,860\",2.71,2.16,2.37\n\nAlabama,87,72,92,1.77,1.43,1.83\n\n\n\nWhoa, why are there blank lines interspaced between the lines of the CSV?\nYou may recall that all line breaks in text files are encoded as the special newline character \\n. Python’s print() prints each string (including the newline), and an additional newline on top of that.\nIf you’re curious, we can use the repr() function to return the raw string with all special characters:\n\n\nCode\nwith open(\"data/cdc_tuberculosis.csv\", \"r\") as f:\n i = 0\n for row in f:\n print(repr(row)) # print raw strings\n i += 1\n if i > 3:\n break\n\n\n',No. of TB cases,,,TB incidence,,\\n'\n'U.S. jurisdiction,2019,2020,2021,2019,2020,2021\\n'\n'Total,\"8,900\",\"7,173\",\"7,860\",2.71,2.16,2.37\\n'\n'Alabama,87,72,92,1.77,1.43,1.83\\n'\n\n\nFinally, let’s try option 4 and use the tried-and-true Data 100 approach: pandas.\n\ntb_df = pd.read_csv(\"data/cdc_tuberculosis.csv\")\ntb_df.head()\n\n\n\n\n\n\n\n\nUnnamed: 0\nNo. of TB cases\nUnnamed: 2\nUnnamed: 3\nTB incidence\nUnnamed: 5\nUnnamed: 6\n\n\n\n\n0\nU.S. jurisdiction\n2019\n2020\n2021\n2019.00\n2020.00\n2021.00\n\n\n1\nTotal\n8,900\n7,173\n7,860\n2.71\n2.16\n2.37\n\n\n2\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\n3\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\n4\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\n\n\n\n\n\nYou may notice some strange things about this table: what’s up with the “Unnamed” column names and the first row?\nCongratulations — you’re ready to wrangle your data! Because of how things are stored, we’ll need to clean the data a bit to name our columns better.\nA reasonable first step is to identify the row with the right header. The pd.read_csv() function (documentation) has the convenient header parameter that we can set to use the elements in row 1 as the appropriate columns:\n\ntb_df = pd.read_csv(\"data/cdc_tuberculosis.csv\", header=1) # row index\ntb_df.head(5)\n\n\n\n\n\n\n\n\nU.S. jurisdiction\n2019\n2020\n2021\n2019.1\n2020.1\n2021.1\n\n\n\n\n0\nTotal\n8,900\n7,173\n7,860\n2.71\n2.16\n2.37\n\n\n1\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\n2\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\n3\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\n4\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\nWait…but now we can’t differentiate betwen the “Number of TB cases” and “TB incidence” year columns. pandas has tried to make our lives easier by automatically adding “.1” to the latter columns, but this doesn’t help us, as humans, understand the data.\nWe can do this manually with df.rename() (documentation):\n\nrename_dict = {'2019': 'TB cases 2019',\n '2020': 'TB cases 2020',\n '2021': 'TB cases 2021',\n '2019.1': 'TB incidence 2019',\n '2020.1': 'TB incidence 2020',\n '2021.1': 'TB incidence 2021'}\ntb_df = tb_df.rename(columns=rename_dict)\ntb_df.head(5)\n\n\n\n\n\n\n\n\nU.S. 
jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n\n\n\n\n0\nTotal\n8,900\n7,173\n7,860\n2.71\n2.16\n2.37\n\n\n1\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\n2\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\n3\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\n4\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\n\n\n5.4.2 Record Granularity\nYou might already be wondering: what’s up with that first record?\nRow 0 is what we call a rollup record, or summary record. It’s often useful when displaying tables to humans. The granularity of record 0 (Totals) vs the rest of the records (States) is different.\nOkay, EDA step two. How was the rollup record aggregated?\nLet’s check if Total TB cases is the sum of all state TB cases. If we sum over all rows, we should get 2x the total cases in each of our TB cases by year (why do you think this is?).\n\n\nCode\ntb_df.sum(axis=0)\n\n\nU.S. jurisdiction TotalAlabamaAlaskaArizonaArkansasCaliforniaCol...\nTB cases 2019 8,9008758183642,111666718245583029973261085237...\nTB cases 2020 7,1737258136591,706525417194122219282169239376...\nTB cases 2021 7,8609258129691,750585443194992281064255127494...\nTB incidence 2019 109.94\nTB incidence 2020 93.09\nTB incidence 2021 102.94\ndtype: object\n\n\nWhoa, what’s going on with the TB cases in 2019, 2020, and 2021? Check out the column types:\n\n\nCode\ntb_df.dtypes\n\n\nU.S. jurisdiction object\nTB cases 2019 object\nTB cases 2020 object\nTB cases 2021 object\nTB incidence 2019 float64\nTB incidence 2020 float64\nTB incidence 2021 float64\ndtype: object\n\n\nSince there are commas in the values for TB cases, the numbers are read as the object datatype, or storage type (close to the Python string datatype), so pandas is concatenating strings instead of adding integers (recall that Python can “sum”, or concatenate, strings together: \"data\" + \"100\" evaluates to \"data100\").\nFortunately read_csv also has a thousands parameter (documentation):\n\n# improve readability: chaining method calls with outer parentheses/line breaks\ntb_df = (\n pd.read_csv(\"data/cdc_tuberculosis.csv\", header=1, thousands=',')\n .rename(columns=rename_dict)\n)\ntb_df.head(5)\n\n\n\n\n\n\n\n\nU.S. jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n\n\n\n\n0\nTotal\n8900\n7173\n7860\n2.71\n2.16\n2.37\n\n\n1\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\n2\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\n3\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\n4\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\n\ntb_df.sum()\n\nU.S. jurisdiction TotalAlabamaAlaskaArizonaArkansasCaliforniaCol...\nTB cases 2019 17800\nTB cases 2020 14346\nTB cases 2021 15720\nTB incidence 2019 109.94\nTB incidence 2020 93.09\nTB incidence 2021 102.94\ndtype: object\n\n\nThe total TB cases look right. Phew!\nLet’s just look at the records with state-level granularity:\n\n\nCode\nstate_tb_df = tb_df[1:]\nstate_tb_df.head(5)\n\n\n\n\n\n\n\n\n\nU.S. jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n\n\n\n\n1\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\n2\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\n3\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\n4\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n5\nCalifornia\n2111\n1706\n1750\n5.35\n4.32\n4.46\n\n\n\n\n\n\n\n\n\n5.4.3 Gather Census Data\nU.S. 
Census population estimates source (2019), source (2020-2021).\nRunning the below cells cleans the data. There are a few new methods here: * df.convert_dtypes() (documentation) conveniently converts all float dtypes into ints and is out of scope for the class. * df.drop_na() (documentation) will be explained in more detail next time.\n\n\nCode\n# 2010s census data\ncensus_2010s_df = pd.read_csv(\"data/nst-est2019-01.csv\", header=3, thousands=\",\")\ncensus_2010s_df = (\n census_2010s_df\n .reset_index()\n .drop(columns=[\"index\", \"Census\", \"Estimates Base\"])\n .rename(columns={\"Unnamed: 0\": \"Geographic Area\"})\n .convert_dtypes() # \"smart\" converting of columns, use at your own risk\n .dropna() # we'll introduce this next time\n)\ncensus_2010s_df['Geographic Area'] = census_2010s_df['Geographic Area'].str.strip('.')\n\n# with pd.option_context('display.min_rows', 30): # shows more rows\n# display(census_2010s_df)\n \ncensus_2010s_df.head(5)\n\n\n\n\n\n\n\n\n\nGeographic Area\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n\n\n\n\n0\nUnited States\n309321666\n311556874\n313830990\n315993715\n318301008\n320635163\n322941311\n324985539\n326687501\n328239523\n\n\n1\nNortheast\n55380134\n55604223\n55775216\n55901806\n56006011\n56034684\n56042330\n56059240\n56046620\n55982803\n\n\n2\nMidwest\n66974416\n67157800\n67336743\n67560379\n67745167\n67860583\n67987540\n68126781\n68236628\n68329004\n\n\n3\nSouth\n114866680\n116006522\n117241208\n118364400\n119624037\n120997341\n122351760\n123542189\n124569433\n125580448\n\n\n4\nWest\n72100436\n72788329\n73477823\n74167130\n74925793\n75742555\n76559681\n77257329\n77834820\n78347268\n\n\n\n\n\n\n\nOccasionally, you will want to modify code that you have imported. To reimport those modifications you can either use python’s importlib library:\nfrom importlib import reload\nreload(utils)\nor use iPython magic which will intelligently import code when files change:\n%load_ext autoreload\n%autoreload 2\n\n\nCode\n# census 2020s data\ncensus_2020s_df = pd.read_csv(\"data/NST-EST2022-POP.csv\", header=3, thousands=\",\")\ncensus_2020s_df = (\n census_2020s_df\n .reset_index()\n .drop(columns=[\"index\", \"Unnamed: 1\"])\n .rename(columns={\"Unnamed: 0\": \"Geographic Area\"})\n .convert_dtypes() # \"smart\" converting of columns, use at your own risk\n .dropna() # we'll introduce this next time\n)\ncensus_2020s_df['Geographic Area'] = census_2020s_df['Geographic Area'].str.strip('.')\n\ncensus_2020s_df.head(5)\n\n\n\n\n\n\n\n\n\nGeographic Area\n2020\n2021\n2022\n\n\n\n\n0\nUnited States\n331511512\n332031554\n333287557\n\n\n1\nNortheast\n57448898\n57259257\n57040406\n\n\n2\nMidwest\n68961043\n68836505\n68787595\n\n\n3\nSouth\n126450613\n127346029\n128716192\n\n\n4\nWest\n78650958\n78589763\n78743364\n\n\n\n\n\n\n\n\n\n5.4.4 Joining Data (Merging DataFrames)\nTime to merge! Here we use the DataFrame method df1.merge(right=df2, ...) on DataFrame df1 (documentation). Contrast this with the function pd.merge(left=df1, right=df2, ...) (documentation). Feel free to use either.\n\n# merge TB DataFrame with two US census DataFrames\ntb_census_df = (\n tb_df\n .merge(right=census_2010s_df,\n left_on=\"U.S. jurisdiction\", right_on=\"Geographic Area\")\n .merge(right=census_2020s_df,\n left_on=\"U.S. jurisdiction\", right_on=\"Geographic Area\")\n)\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nU.S. 
jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\nGeographic Area_x\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\nGeographic Area_y\n2020\n2021\n2022\n\n\n\n\n0\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\nAlabama\n4785437\n4799069\n4815588\n4830081\n4841799\n4852347\n4863525\n4874486\n4887681\n4903185\nAlabama\n5031362\n5049846\n5074296\n\n\n1\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\nAlaska\n713910\n722128\n730443\n737068\n736283\n737498\n741456\n739700\n735139\n731545\nAlaska\n732923\n734182\n733583\n\n\n2\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\nArizona\n6407172\n6472643\n6554978\n6632764\n6730413\n6829676\n6941072\n7044008\n7158024\n7278717\nArizona\n7179943\n7264877\n7359197\n\n\n3\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\nArkansas\n2921964\n2940667\n2952164\n2959400\n2967392\n2978048\n2989918\n3001345\n3009733\n3017804\nArkansas\n3014195\n3028122\n3045637\n\n\n4\nCalifornia\n2111\n1706\n1750\n5.35\n4.32\n4.46\nCalifornia\n37319502\n37638369\n37948800\n38260787\n38596972\n38918045\n39167117\n39358497\n39461588\n39512223\nCalifornia\n39501653\n39142991\n39029342\n\n\n\n\n\n\n\nHaving all of these columns is a little unwieldy. We could either drop the unneeded columns now, or just merge on smaller census DataFrames. Let’s do the latter.\n\n# try merging again, but cleaner this time\ntb_census_df = (\n tb_df\n .merge(right=census_2010s_df[[\"Geographic Area\", \"2019\"]],\n left_on=\"U.S. jurisdiction\", right_on=\"Geographic Area\")\n .drop(columns=\"Geographic Area\")\n .merge(right=census_2020s_df[[\"Geographic Area\", \"2020\", \"2021\"]],\n left_on=\"U.S. jurisdiction\", right_on=\"Geographic Area\")\n .drop(columns=\"Geographic Area\")\n)\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nU.S. jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\n\n\n\n\n0\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n4903185\n5031362\n5049846\n\n\n1\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n731545\n732923\n734182\n\n\n2\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n7278717\n7179943\n7264877\n\n\n3\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n3017804\n3014195\n3028122\n\n\n4\nCalifornia\n2111\n1706\n1750\n5.35\n4.32\n4.46\n39512223\n39501653\n39142991\n\n\n\n\n\n\n\n\n\n5.4.5 Reproducing Data: Compute Incidence\nLet’s recompute incidence to make sure we know where the original CDC numbers came from.\nFrom the CDC report: TB incidence is computed as “Cases per 100,000 persons using mid-year population estimates from the U.S. Census Bureau.”\nIf we define a group as 100,000 people, then we can compute the TB incidence for a given state population as\n\\[\\text{TB incidence} = \\frac{\\text{TB cases in population}}{\\text{groups in population}} = \\frac{\\text{TB cases in population}}{\\text{population}/100000} \\]\n\\[= \\frac{\\text{TB cases in population}}{\\text{population}} \\times 100000\\]\nLet’s try this for 2019:\n\ntb_census_df[\"recompute incidence 2019\"] = tb_census_df[\"TB cases 2019\"]/tb_census_df[\"2019\"]*100000\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nU.S. 
jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\nrecompute incidence 2019\n\n\n\n\n0\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n4903185\n5031362\n5049846\n1.77\n\n\n1\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n731545\n732923\n734182\n7.93\n\n\n2\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n7278717\n7179943\n7264877\n2.51\n\n\n3\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n3017804\n3014195\n3028122\n2.12\n\n\n4\nCalifornia\n2111\n1706\n1750\n5.35\n4.32\n4.46\n39512223\n39501653\n39142991\n5.34\n\n\n\n\n\n\n\nAwesome!!!\nLet’s use a for-loop and Python format strings to compute TB incidence for all years. Python f-strings are just used for the purposes of this demo, but they’re handy to know when you explore data beyond this course (documentation).\n\n# recompute incidence for all years\nfor year in [2019, 2020, 2021]:\n tb_census_df[f\"recompute incidence {year}\"] = tb_census_df[f\"TB cases {year}\"]/tb_census_df[f\"{year}\"]*100000\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nU.S. jurisdiction\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\nrecompute incidence 2019\nrecompute incidence 2020\nrecompute incidence 2021\n\n\n\n\n0\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n4903185\n5031362\n5049846\n1.77\n1.43\n1.82\n\n\n1\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n731545\n732923\n734182\n7.93\n7.91\n7.90\n\n\n2\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n7278717\n7179943\n7264877\n2.51\n1.89\n1.78\n\n\n3\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n3017804\n3014195\n3028122\n2.12\n1.96\n2.28\n\n\n4\nCalifornia\n2111\n1706\n1750\n5.35\n4.32\n4.46\n39512223\n39501653\n39142991\n5.34\n4.32\n4.47\n\n\n\n\n\n\n\nThese numbers look pretty close!!! There are a few errors in the hundredths place, particularly in 2021. It may be useful to further explore reasons behind this discrepancy.\n\ntb_census_df.describe()\n\n\n\n\n\n\n\n\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\nrecompute incidence 2019\nrecompute incidence 2020\nrecompute incidence 2021\n\n\n\n\ncount\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n51.00\n\n\nmean\n174.51\n140.65\n154.12\n2.10\n1.78\n1.97\n6436069.08\n6500225.73\n6510422.63\n2.10\n1.78\n1.97\n\n\nstd\n341.74\n271.06\n286.78\n1.50\n1.34\n1.48\n7360660.47\n7408168.46\n7394300.08\n1.50\n1.34\n1.47\n\n\nmin\n1.00\n0.00\n2.00\n0.17\n0.00\n0.21\n578759.00\n577605.00\n579483.00\n0.17\n0.00\n0.21\n\n\n25%\n25.50\n29.00\n23.00\n1.29\n1.21\n1.23\n1789606.00\n1820311.00\n1844920.00\n1.30\n1.21\n1.23\n\n\n50%\n70.00\n67.00\n69.00\n1.80\n1.52\n1.70\n4467673.00\n4507445.00\n4506589.00\n1.81\n1.52\n1.69\n\n\n75%\n180.50\n139.00\n150.00\n2.58\n1.99\n2.22\n7446805.00\n7451987.00\n7502811.00\n2.58\n1.99\n2.22\n\n\nmax\n2111.00\n1706.00\n1750.00\n7.91\n7.92\n7.92\n39512223.00\n39501653.00\n39142991.00\n7.93\n7.91\n7.90\n\n\n\n\n\n\n\n\n\n5.4.6 Bonus EDA: Reproducing the Reported Statistic\nHow do we reproduce that reported statistic in the original CDC report?\n\nReported TB incidence (cases per 100,000 persons) increased 9.4%, from 2.2 during 2020 to 2.4 during 2021 but was lower than incidence during 2019 (2.7). Increases occurred among both U.S.-born and non–U.S.-born persons.\n\nThis is TB incidence computed across the entire U.S. population! How do we reproduce this? * We need to reproduce the “Total” TB incidences in our rolled record. 
* But our current tb_census_df only has 51 entries (50 states plus Washington, D.C.). There is no rolled record. * What happened…?\nLet’s get exploring!\nBefore we keep exploring, we’ll set all indexes to more meaningful values, instead of just numbers that pertain to some row at some point. This will make our cleaning slightly easier.\n\n\nCode\ntb_df = tb_df.set_index(\"U.S. jurisdiction\")\ntb_df.head(5)\n\n\n\n\n\n\n\n\n\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n\n\nU.S. jurisdiction\n\n\n\n\n\n\n\n\n\n\nTotal\n8900\n7173\n7860\n2.71\n2.16\n2.37\n\n\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\n\ncensus_2010s_df = census_2010s_df.set_index(\"Geographic Area\")\ncensus_2010s_df.head(5)\n\n\n\n\n\n\n\n\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n\n\nGeographic Area\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nUnited States\n309321666\n311556874\n313830990\n315993715\n318301008\n320635163\n322941311\n324985539\n326687501\n328239523\n\n\nNortheast\n55380134\n55604223\n55775216\n55901806\n56006011\n56034684\n56042330\n56059240\n56046620\n55982803\n\n\nMidwest\n66974416\n67157800\n67336743\n67560379\n67745167\n67860583\n67987540\n68126781\n68236628\n68329004\n\n\nSouth\n114866680\n116006522\n117241208\n118364400\n119624037\n120997341\n122351760\n123542189\n124569433\n125580448\n\n\nWest\n72100436\n72788329\n73477823\n74167130\n74925793\n75742555\n76559681\n77257329\n77834820\n78347268\n\n\n\n\n\n\n\n\ncensus_2020s_df = census_2020s_df.set_index(\"Geographic Area\")\ncensus_2020s_df.head(5)\n\n\n\n\n\n\n\n\n2020\n2021\n2022\n\n\nGeographic Area\n\n\n\n\n\n\n\nUnited States\n331511512\n332031554\n333287557\n\n\nNortheast\n57448898\n57259257\n57040406\n\n\nMidwest\n68961043\n68836505\n68787595\n\n\nSouth\n126450613\n127346029\n128716192\n\n\nWest\n78650958\n78589763\n78743364\n\n\n\n\n\n\n\nIt turns out that our merge above only kept state records, even though our original tb_df had the “Total” rolled record:\n\ntb_df.head()\n\n\n\n\n\n\n\n\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n\n\nU.S. 
jurisdiction\n\n\n\n\n\n\n\n\n\n\nTotal\n8900\n7173\n7860\n2.71\n2.16\n2.37\n\n\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n\n\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n\n\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n\n\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\nRecall that merge by default does an inner merge by default, meaning that it only preserves keys that are present in both DataFrames.\nThe rolled records in our census DataFrame have different Geographic Area fields, which was the key we merged on:\n\ncensus_2010s_df.head(5)\n\n\n\n\n\n\n\n\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n\n\nGeographic Area\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nUnited States\n309321666\n311556874\n313830990\n315993715\n318301008\n320635163\n322941311\n324985539\n326687501\n328239523\n\n\nNortheast\n55380134\n55604223\n55775216\n55901806\n56006011\n56034684\n56042330\n56059240\n56046620\n55982803\n\n\nMidwest\n66974416\n67157800\n67336743\n67560379\n67745167\n67860583\n67987540\n68126781\n68236628\n68329004\n\n\nSouth\n114866680\n116006522\n117241208\n118364400\n119624037\n120997341\n122351760\n123542189\n124569433\n125580448\n\n\nWest\n72100436\n72788329\n73477823\n74167130\n74925793\n75742555\n76559681\n77257329\n77834820\n78347268\n\n\n\n\n\n\n\nThe Census DataFrame has several rolled records. The aggregate record we are looking for actually has the Geographic Area named “United States”.\nOne straightforward way to get the right merge is to rename the value itself. Because we now have the Geographic Area index, we’ll use df.rename() (documentation):\n\n# rename rolled record for 2010s\ncensus_2010s_df.rename(index={'United States':'Total'}, inplace=True)\ncensus_2010s_df.head(5)\n\n\n\n\n\n\n\n\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n\n\nGeographic Area\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nTotal\n309321666\n311556874\n313830990\n315993715\n318301008\n320635163\n322941311\n324985539\n326687501\n328239523\n\n\nNortheast\n55380134\n55604223\n55775216\n55901806\n56006011\n56034684\n56042330\n56059240\n56046620\n55982803\n\n\nMidwest\n66974416\n67157800\n67336743\n67560379\n67745167\n67860583\n67987540\n68126781\n68236628\n68329004\n\n\nSouth\n114866680\n116006522\n117241208\n118364400\n119624037\n120997341\n122351760\n123542189\n124569433\n125580448\n\n\nWest\n72100436\n72788329\n73477823\n74167130\n74925793\n75742555\n76559681\n77257329\n77834820\n78347268\n\n\n\n\n\n\n\n\n# same, but for 2020s rename rolled record\ncensus_2020s_df.rename(index={'United States':'Total'}, inplace=True)\ncensus_2020s_df.head(5)\n\n\n\n\n\n\n\n\n2020\n2021\n2022\n\n\nGeographic Area\n\n\n\n\n\n\n\nTotal\n331511512\n332031554\n333287557\n\n\nNortheast\n57448898\n57259257\n57040406\n\n\nMidwest\n68961043\n68836505\n68787595\n\n\nSouth\n126450613\n127346029\n128716192\n\n\nWest\n78650958\n78589763\n78743364\n\n\n\n\n\n\n\n\nNext let’s rerun our merge. 
Note the different chaining, because we are now merging on indexes (df.merge() documentation).\n\ntb_census_df = (\n tb_df\n .merge(right=census_2010s_df[[\"2019\"]],\n left_index=True, right_index=True)\n .merge(right=census_2020s_df[[\"2020\", \"2021\"]],\n left_index=True, right_index=True)\n)\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\n\n\n\n\nTotal\n8900\n7173\n7860\n2.71\n2.16\n2.37\n328239523\n331511512\n332031554\n\n\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n4903185\n5031362\n5049846\n\n\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n731545\n732923\n734182\n\n\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n7278717\n7179943\n7264877\n\n\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n3017804\n3014195\n3028122\n\n\n\n\n\n\n\n\nFinally, let’s recompute our incidences:\n\n# recompute incidence for all years\nfor year in [2019, 2020, 2021]:\n tb_census_df[f\"recompute incidence {year}\"] = tb_census_df[f\"TB cases {year}\"]/tb_census_df[f\"{year}\"]*100000\ntb_census_df.head(5)\n\n\n\n\n\n\n\n\nTB cases 2019\nTB cases 2020\nTB cases 2021\nTB incidence 2019\nTB incidence 2020\nTB incidence 2021\n2019\n2020\n2021\nrecompute incidence 2019\nrecompute incidence 2020\nrecompute incidence 2021\n\n\n\n\nTotal\n8900\n7173\n7860\n2.71\n2.16\n2.37\n328239523\n331511512\n332031554\n2.71\n2.16\n2.37\n\n\nAlabama\n87\n72\n92\n1.77\n1.43\n1.83\n4903185\n5031362\n5049846\n1.77\n1.43\n1.82\n\n\nAlaska\n58\n58\n58\n7.91\n7.92\n7.92\n731545\n732923\n734182\n7.93\n7.91\n7.90\n\n\nArizona\n183\n136\n129\n2.51\n1.89\n1.77\n7278717\n7179943\n7264877\n2.51\n1.89\n1.78\n\n\nArkansas\n64\n59\n69\n2.12\n1.96\n2.28\n3017804\n3014195\n3028122\n2.12\n1.96\n2.28\n\n\n\n\n\n\n\nWe reproduced the total U.S. incidences correctly!\nWe’re almost there. Let’s revisit the quote:\n\nReported TB incidence (cases per 100,000 persons) increased 9.4%, from 2.2 during 2020 to 2.4 during 2021 but was lower than incidence during 2019 (2.7). Increases occurred among both U.S.-born and non–U.S.-born persons.\n\nRecall that percent change from \\(A\\) to \\(B\\) is computed as \\(\\text{percent change} = \\frac{B - A}{A} \\times 100\\).\n\nincidence_2020 = tb_census_df.loc['Total', 'recompute incidence 2020']\nincidence_2020\n\n2.1637257652759883\n\n\n\nincidence_2021 = tb_census_df.loc['Total', 'recompute incidence 2021']\nincidence_2021\n\n2.3672448914298068\n\n\n\ndifference = (incidence_2021 - incidence_2020)/incidence_2020 * 100\ndifference\n\n9.405957511804143", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#eda-demo-2-mauna-loa-co2-data-a-lesson-in-data-faithfulness", - "href": "eda/eda.html#eda-demo-2-mauna-loa-co2-data-a-lesson-in-data-faithfulness", - "title": "5  Data Cleaning and EDA", - "section": "5.5 EDA Demo 2: Mauna Loa CO2 Data – A Lesson in Data Faithfulness", - "text": "5.5 EDA Demo 2: Mauna Loa CO2 Data – A Lesson in Data Faithfulness\nMauna Loa Observatory has been monitoring CO2 concentrations since 1958.\n\nco2_file = \"data/co2_mm_mlo.txt\"\n\nLet’s do some EDA!!\n\n5.5.1 Reading this file into Pandas?\nLet’s instead check out this .txt file. Some questions to keep in mind: Do we trust this file extension? 
What structure is it?\nLines 71-78 (inclusive) are shown below:\nline number | file contents\n\n71 | # decimal average interpolated trend #days\n72 | # date (season corr)\n73 | 1958 3 1958.208 315.71 315.71 314.62 -1\n74 | 1958 4 1958.292 317.45 317.45 315.29 -1\n75 | 1958 5 1958.375 317.50 317.50 314.71 -1\n76 | 1958 6 1958.458 -99.99 317.10 314.85 -1\n77 | 1958 7 1958.542 315.86 315.86 314.98 -1\n78 | 1958 8 1958.625 314.93 314.93 315.94 -1\nNotice how:\n\nThe values are separated by white space, possibly tabs.\nThe data line up down the rows. For example, the month appears in 7th to 8th position of each line.\nThe 71st and 72nd lines in the file contain column headings split over two lines.\n\nWe can use read_csv to read the data into a pandas DataFrame, and we provide several arguments to specify that the separators are white space, there is no header (we will set our own column names), and to skip the first 72 rows of the file.\n\nco2 = pd.read_csv(\n co2_file, header = None, skiprows = 72,\n sep = r'\\s+' #delimiter for continuous whitespace (stay tuned for regex next lecture))\n)\nco2.head()\n\n\n\n\n\n\n\n\n0\n1\n2\n3\n4\n5\n6\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n3\n1958\n6\n1958.46\n-99.99\n317.10\n314.85\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n\n\n\n\n\nCongratulations! You’ve wrangled the data!\n\n…But our columns aren’t named. We need to do more EDA.\n\n\n5.5.2 Exploring Variable Feature Types\nThe NOAA webpage might have some useful tidbits (in this case it doesn’t).\nUsing this information, we’ll rerun pd.read_csv, but this time with some custom column names.\n\nco2 = pd.read_csv(\n co2_file, header = None, skiprows = 72,\n sep = '\\s+', #regex for continuous whitespace (next lecture)\n names = ['Yr', 'Mo', 'DecDate', 'Avg', 'Int', 'Trend', 'Days']\n)\nco2.head()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n3\n1958\n6\n1958.46\n-99.99\n317.10\n314.85\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n\n\n\n\n\n\n\n5.5.3 Visualizing CO2\nScientific studies tend to have very clean data, right…? Let’s jump right in and make a time series plot of CO2 monthly averages.\n\n\nCode\nsns.lineplot(x='DecDate', y='Avg', data=co2);\n\n\n\n\n\n\n\n\n\nThe code above uses the seaborn plotting library (abbreviated sns). We will cover this in the Visualization lecture, but now you don’t need to worry about how it works!\nYikes! Plotting the data uncovered a problem. The sharp vertical lines suggest that we have some missing values. 
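As the next few cells show, this file marks missing data with sentinel codes. One alternative — a sketch, not the path taken below — is to declare those codes when loading, using read_csv's na_values parameter (this assumes pandas is imported as pd and co2_file is defined as above):

# Hypothetical alternative load: treat the documented sentinel codes as NaN up front
co2_alt = pd.read_csv(
    co2_file, header=None, skiprows=72, sep=r'\s+',
    names=['Yr', 'Mo', 'DecDate', 'Avg', 'Int', 'Trend', 'Days'],
    na_values={'Avg': ['-99.99'], 'Days': ['-1']}  # per-column sentinel strings
)
co2_alt['Avg'].isna().sum()  # number of missing monthly averages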
What happened here?\n\nco2.head()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n3\n1958\n6\n1958.46\n-99.99\n317.10\n314.85\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n\n\n\n\n\n\nco2.tail()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n733\n2019\n4\n2019.29\n413.32\n413.32\n410.49\n26\n\n\n734\n2019\n5\n2019.38\n414.66\n414.66\n411.20\n28\n\n\n735\n2019\n6\n2019.46\n413.92\n413.92\n411.58\n27\n\n\n736\n2019\n7\n2019.54\n411.77\n411.77\n411.43\n23\n\n\n737\n2019\n8\n2019.62\n409.95\n409.95\n411.84\n29\n\n\n\n\n\n\n\nSome data have unusual values like -1 and -99.99.\nLet’s check the description at the top of the file again.\n\n-1 signifies a missing value for the number of days Days the equipment was in operation that month.\n-99.99 denotes a missing monthly average Avg\n\nHow can we fix this? First, let’s explore other aspects of our data. Understanding our data will help us decide what to do with the missing values.\n\n\n\n5.5.4 Sanity Checks: Reasoning about the data\nFirst, we consider the shape of the data. How many rows should we have?\n\nIf chronological order, we should have one record per month.\nData from March 1958 to August 2019.\nWe should have $ 12 (2019-1957) - 2 - 4 = 738 $ records.\n\n\nco2.shape\n\n(738, 7)\n\n\nNice!! The number of rows (i.e. records) match our expectations.\nLet’s now check the quality of each feature.\n\n\n5.5.5 Understanding Missing Value 1: Days\nDays is a time field, so let’s analyze other time fields to see if there is an explanation for missing values of days of operation.\nLet’s start with months, Mo.\nAre we missing any records? The number of months should have 62 or 61 instances (March 1957-August 2019).\n\nco2[\"Mo\"].value_counts().sort_index()\n\nMo\n1 61\n2 61\n3 62\n4 62\n5 62\n6 62\n7 62\n8 62\n9 61\n10 61\n11 61\n12 61\nName: count, dtype: int64\n\n\nAs expected Jan, Feb, Sep, Oct, Nov, and Dec have 61 occurrences and the rest 62.\n\nNext let’s explore days Days itself, which is the number of days that the measurement equipment worked.\n\n\nCode\nsns.displot(co2['Days']);\nplt.title(\"Distribution of days feature\"); # suppresses unneeded plotting output\n\n\n\n\n\n\n\n\n\nIn terms of data quality, a handful of months have averages based on measurements taken on fewer than half the days. In addition, there are nearly 200 missing values–that’s about 27% of the data!\n\nFinally, let’s check the last time feature, year Yr.\nLet’s check to see if there is any connection between missing-ness and the year of the recording.\n\n\nCode\nsns.scatterplot(x=\"Yr\", y=\"Days\", data=co2);\nplt.title(\"Day field by Year\"); # the ; suppresses output\n\n\n\n\n\n\n\n\n\nObservations:\n\nAll of the missing data are in the early years of operation.\nIt appears there may have been problems with equipment in the mid to late 80s.\n\nPotential Next Steps:\n\nConfirm these explanations through documentation about the historical readings.\nMaybe drop the earliest recordings? However, we would want to delay such action until after we have examined the time trends and assess whether there are any potential problems.\n\n\n\n\n5.5.6 Understanding Missing Value 2: Avg\nNext, let’s return to the -99.99 values in Avg to analyze the overall quality of the CO2 measurements. 
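Before plotting, a quick count is a useful sanity check on how many sentinel values we are dealing with (a small sketch using the co2 DataFrame loaded above):

# Monthly averages flagged with the negative sentinel code
(co2['Avg'] < 0).sum()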
We’ll plot a histogram of the average CO2 measurements\n\n\nCode\n# Histograms of average CO2 measurements\nsns.displot(co2['Avg']);\n\n\n\n\n\n\n\n\n\nThe non-missing values are in the 300-400 range (a regular range of CO2 levels).\nWe also see that there are only a few missing Avg values (<1% of values). Let’s examine all of them:\n\nco2[co2[\"Avg\"] < 0]\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n3\n1958\n6\n1958.46\n-99.99\n317.10\n314.85\n-1\n\n\n7\n1958\n10\n1958.79\n-99.99\n312.66\n315.61\n-1\n\n\n71\n1964\n2\n1964.12\n-99.99\n320.07\n319.61\n-1\n\n\n72\n1964\n3\n1964.21\n-99.99\n320.73\n319.55\n-1\n\n\n73\n1964\n4\n1964.29\n-99.99\n321.77\n319.48\n-1\n\n\n213\n1975\n12\n1975.96\n-99.99\n330.59\n331.60\n0\n\n\n313\n1984\n4\n1984.29\n-99.99\n346.84\n344.27\n2\n\n\n\n\n\n\n\nThere doesn’t seem to be a pattern to these values, other than that most records also were missing Days data.\n\n\n5.5.7 Drop, NaN, or Impute Missing Avg Data?\nHow should we address the invalid Avg data?\n\nDrop records\nSet to NaN\nImpute using some strategy\n\nRemember we want to fix the following plot:\n\n\nCode\nsns.lineplot(x='DecDate', y='Avg', data=co2)\nplt.title(\"CO2 Average By Month\");\n\n\n\n\n\n\n\n\n\nSince we are plotting Avg vs DecDate, we should just focus on dealing with missing values for Avg.\nLet’s consider a few options: 1. Drop those records 2. Replace -99.99 with NaN 3. Substitute it with a likely value for the average CO2?\nWhat do you think are the pros and cons of each possible action?\nLet’s examine each of these three options.\n\n# 1. Drop missing values\nco2_drop = co2[co2['Avg'] > 0]\nco2_drop.head()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n5\n1958\n8\n1958.62\n314.93\n314.93\n315.94\n-1\n\n\n\n\n\n\n\n\n# 2. Replace NaN with -99.99\nco2_NA = co2.replace(-99.99, np.nan)\nco2_NA.head()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n3\n1958\n6\n1958.46\nNaN\n317.10\n314.85\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n\n\n\n\n\nWe’ll also use a third version of the data.\nFirst, we note that the dataset already comes with a substitute value for the -99.99.\nFrom the file description:\n\nThe interpolated column includes average values from the preceding column (average) and interpolated values where data are missing. Interpolated values are computed in two steps…\n\nThe Int feature has values that exactly match those in Avg, except when Avg is -99.99, and then a reasonable estimate is used instead.\nSo, the third version of our data will use the Int feature instead of Avg.\n\n# 3. 
Use interpolated column which estimates missing Avg values\nco2_impute = co2.copy()\nco2_impute['Avg'] = co2['Int']\nco2_impute.head()\n\n\n\n\n\n\n\n\nYr\nMo\nDecDate\nAvg\nInt\nTrend\nDays\n\n\n\n\n0\n1958\n3\n1958.21\n315.71\n315.71\n314.62\n-1\n\n\n1\n1958\n4\n1958.29\n317.45\n317.45\n315.29\n-1\n\n\n2\n1958\n5\n1958.38\n317.50\n317.50\n314.71\n-1\n\n\n3\n1958\n6\n1958.46\n317.10\n317.10\n314.85\n-1\n\n\n4\n1958\n7\n1958.54\n315.86\n315.86\n314.98\n-1\n\n\n\n\n\n\n\nWhat’s a reasonable estimate?\nTo answer this question, let’s zoom in on a short time period, say the measurements in 1958 (where we know we have two missing values).\n\n\nCode\n# results of plotting data in 1958\n\ndef line_and_points(data, ax, title):\n # assumes single year, hence Mo\n ax.plot('Mo', 'Avg', data=data)\n ax.scatter('Mo', 'Avg', data=data)\n ax.set_xlim(2, 13)\n ax.set_title(title)\n ax.set_xticks(np.arange(3, 13))\n\ndef data_year(data, year):\n return data[data[\"Yr\"] == 1958]\n \n# uses matplotlib subplots\n# you may see more next week; focus on output for now\nfig, axes = plt.subplots(ncols = 3, figsize=(12, 4), sharey=True)\n\nyear = 1958\nline_and_points(data_year(co2_drop, year), axes[0], title=\"1. Drop Missing\")\nline_and_points(data_year(co2_NA, year), axes[1], title=\"2. Missing Set to NaN\")\nline_and_points(data_year(co2_impute, year), axes[2], title=\"3. Missing Interpolated\")\n\nfig.suptitle(f\"Monthly Averages for {year}\")\nplt.tight_layout()\n\n\n\n\n\n\n\n\n\nIn the big picture since there are only 7 Avg values missing (<1% of 738 months), any of these approaches would work.\nHowever there is some appeal to option C, Imputing:\n\nShows seasonal trends for CO2\nWe are plotting all months in our data as a line plot\n\nLet’s replot our original figure with option 3:\n\n\nCode\nsns.lineplot(x='DecDate', y='Avg', data=co2_impute)\nplt.title(\"CO2 Average By Month, Imputed\");\n\n\n\n\n\n\n\n\n\nLooks pretty close to what we see on the NOAA website!\n\n\n5.5.8 Presenting the Data: A Discussion on Data Granularity\nFrom the description:\n\nMonthly measurements are averages of average day measurements.\nThe NOAA GML website has datasets for daily/hourly measurements too.\n\nThe data you present depends on your research question.\nHow do CO2 levels vary by season?\n\nYou might want to keep average monthly data.\n\nAre CO2 levels rising over the past 50+ years, consistent with global warming predictions?\n\nYou might be happier with a coarser granularity of average year data!\n\n\n\nCode\nco2_year = co2_impute.groupby('Yr').mean()\nsns.lineplot(x='Yr', y='Avg', data=co2_year)\nplt.title(\"CO2 Average By Year\");\n\n\n\n\n\n\n\n\n\nIndeed, we see a rise by nearly 100 ppm of CO2 since Mauna Loa began recording in 1958.", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "eda/eda.html#summary", - "href": "eda/eda.html#summary", - "title": "5  Data Cleaning and EDA", - "section": "5.6 Summary", - "text": "5.6 Summary\nWe went over a lot of content this lecture; let’s summarize the most important points:\n\n5.6.1 Dealing with Missing Values\nThere are a few options we can take to deal with missing data:\n\nDrop missing records\nKeep NaN missing values\nImpute using an interpolated column\n\n\n\n5.6.2 EDA and Data Wrangling\nThere are several ways to approach EDA and Data Wrangling:\n\nExamine the data and metadata: what is the date, size, organization, and structure of the data?\nExamine each field/attribute/dimension individually.\nExamine pairs of related dimensions (e.g. 
breaking down grades by major).\nAlong the way, we can:\n\nVisualize or summarize the data.\nValidate assumptions about data and its collection process. Pay particular attention to when the data was collected.\nIdentify and address anomalies.\nApply data transformations and corrections (we’ll cover this in the upcoming lecture).\nRecord everything you do! Developing in Jupyter Notebook promotes reproducibility of your own work!", - "crumbs": [ - "5  Data Cleaning and EDA" - ] - }, - { - "objectID": "regex/regex.html", - "href": "regex/regex.html", - "title": "6  Regular Expressions", - "section": "", - "text": "6.1 Why Work with Text?\nLast lecture, we learned of the difference between quantitative and qualitative variable types. The latter includes string data — the primary focus of lecture 6. In this note, we’ll discuss the necessary tools to manipulate text: Python string manipulation and regular expressions.\nThere are two main reasons for working with text.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#why-work-with-text", - "href": "regex/regex.html#why-work-with-text", - "title": "6  Regular Expressions", - "section": "", - "text": "Canonicalization: Convert data that has multiple formats into a standard form.\n\nBy manipulating text, we can join tables with mismatched string labels.\n\nExtract information into a new feature.\n\nFor example, we can extract date and time features from text.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#python-string-methods", - "href": "regex/regex.html#python-string-methods", - "title": "6  Regular Expressions", - "section": "6.2 Python String Methods", - "text": "6.2 Python String Methods\nFirst, we’ll introduce a few methods useful for string manipulation. The following table includes a number of string operations supported by Python and pandas. The Python functions operate on a single string, while their equivalent in pandas are vectorized — they operate on a Series of string data.\n\n\n\n\n\n\n\n\nOperation\nPython\nPandas (Series)\n\n\n\n\nTransformation\n\ns.lower()\ns.upper()\n\n\nser.str.lower()\nser.str.upper()\n\n\n\nReplacement + Deletion\n\ns.replace(_)\n\n\nser.str.replace(_)\n\n\n\nSplit\n\ns.split(_)\n\n\nser.str.split(_)\n\n\n\nSubstring\n\ns[1:4]\n\n\nser.str[1:4]\n\n\n\nMembership\n\n'_' in s\n\n\nser.str.contains(_)\n\n\n\nLength\n\nlen(s)\n\n\nser.str.len()\n\n\n\n\nWe’ll discuss the differences between Python string functions and pandas Series methods in the following section on canonicalization.\n\n6.2.1 Canonicalization\nAssume we want to merge the given tables.\n\n\nCode\nimport pandas as pd\n\nwith open('data/county_and_state.csv') as f:\n county_and_state = pd.read_csv(f)\n \nwith open('data/county_and_population.csv') as f:\n county_and_pop = pd.read_csv(f)\n\n\n\ndisplay(county_and_state), display(county_and_pop);\n\n\n\n\n\n\n\n\nCounty\nState\n\n\n\n\n0\nDe Witt County\nIL\n\n\n1\nLac qui Parle County\nMN\n\n\n2\nLewis and Clark County\nMT\n\n\n3\nSt John the Baptist Parish\nLS\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nCounty\nPopulation\n\n\n\n\n0\nDeWitt\n16798\n\n\n1\nLac Qui Parle\n8067\n\n\n2\nLewis & Clark\n55716\n\n\n3\nSt. John the Baptist\n43044\n\n\n\n\n\n\n\nLast time, we used a primary key and foreign key to join two tables. While neither of these keys exist in our DataFrames, the \"County\" columns look similar enough. 
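As a first taste of the tools in the table above, here is a rough sketch of how plain Python string methods could start to reconcile one of these county names (just an illustration; the full approach is developed below):

name = "De Witt County"
name.lower().replace(" ", "").replace("county", "")  # evaluates to 'dewitt'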
Can we convert these columns into one standard, canonical form to merge the two tables?\n\n6.2.1.1 Canonicalization with Python String Manipulation\nThe following function uses Python string manipulation to convert a single county name into canonical form. It does so by eliminating whitespace, punctuation, and unnecessary text.\n\ndef canonicalize_county(county_name):\n return (\n county_name\n .lower()\n .replace(' ', '')\n .replace('&', 'and')\n .replace('.', '')\n .replace('county', '')\n .replace('parish', '')\n )\n\ncanonicalize_county(\"St. John the Baptist\")\n\n'stjohnthebaptist'\n\n\nWe will use the pandas map function to apply the canonicalize_county function to every row in both DataFrames. In doing so, we’ll create a new column in each called clean_county_python with the canonical form.\n\ncounty_and_pop['clean_county_python'] = county_and_pop['County'].map(canonicalize_county)\ncounty_and_state['clean_county_python'] = county_and_state['County'].map(canonicalize_county)\ndisplay(county_and_state), display(county_and_pop);\n\n\n\n\n\n\n\n\nCounty\nState\nclean_county_python\n\n\n\n\n0\nDe Witt County\nIL\ndewitt\n\n\n1\nLac qui Parle County\nMN\nlacquiparle\n\n\n2\nLewis and Clark County\nMT\nlewisandclark\n\n\n3\nSt John the Baptist Parish\nLS\nstjohnthebaptist\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nCounty\nPopulation\nclean_county_python\n\n\n\n\n0\nDeWitt\n16798\ndewitt\n\n\n1\nLac Qui Parle\n8067\nlacquiparle\n\n\n2\nLewis & Clark\n55716\nlewisandclark\n\n\n3\nSt. John the Baptist\n43044\nstjohnthebaptist\n\n\n\n\n\n\n\n\n\n6.2.1.2 Canonicalization with Pandas Series Methods\nAlternatively, we can use pandas Series methods to create this standardized column. To do so, we must call the .str attribute of our Series object prior to calling any methods, like .lower and .replace. Notice how these method names match their equivalent built-in Python string functions.\nChaining multiple Series methods in this manner eliminates the need to use the map function (as this code is vectorized).\n\ndef canonicalize_county_series(county_series):\n return (\n county_series\n .str.lower()\n .str.replace(' ', '')\n .str.replace('&', 'and')\n .str.replace('.', '')\n .str.replace('county', '')\n .str.replace('parish', '')\n )\n\ncounty_and_pop['clean_county_pandas'] = canonicalize_county_series(county_and_pop['County'])\ncounty_and_state['clean_county_pandas'] = canonicalize_county_series(county_and_state['County'])\ndisplay(county_and_pop), display(county_and_state);\n\n\n\n\n\n\n\n\nCounty\nPopulation\nclean_county_python\nclean_county_pandas\n\n\n\n\n0\nDeWitt\n16798\ndewitt\ndewitt\n\n\n1\nLac Qui Parle\n8067\nlacquiparle\nlacquiparle\n\n\n2\nLewis & Clark\n55716\nlewisandclark\nlewisandclark\n\n\n3\nSt. John the Baptist\n43044\nstjohnthebaptist\nstjohnthebaptist\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nCounty\nState\nclean_county_python\nclean_county_pandas\n\n\n\n\n0\nDe Witt County\nIL\ndewitt\ndewitt\n\n\n1\nLac qui Parle County\nMN\nlacquiparle\nlacquiparle\n\n\n2\nLewis and Clark County\nMT\nlewisandclark\nlewisandclark\n\n\n3\nSt John the Baptist Parish\nLS\nstjohnthebaptist\nstjohnthebaptist\n\n\n\n\n\n\n\n\n\n\n6.2.2 Extraction\nExtraction explores the idea of obtaining useful information from text data. 
This will be particularily important in model building, which we’ll study in a few weeks.\nSay we want to read some data from a .txt file.\n\nwith open('data/log.txt', 'r') as f:\n log_lines = f.readlines()\n\nlog_lines\n\n['169.237.46.168 - - [26/Jan/2014:10:47:58 -0800] \"GET /stat141/Winter04/ HTTP/1.1\" 200 2585 \"http://anson.ucdavis.edu/courses/\"\\n',\n '193.205.203.3 - - [2/Feb/2005:17:23:6 -0800] \"GET /stat141/Notes/dim.html HTTP/1.0\" 404 302 \"http://eeyore.ucdavis.edu/stat141/Notes/session.html\"\\n',\n '169.237.46.240 - \"\" [3/Feb/2006:10:18:37 -0800] \"GET /stat141/homework/Solutions/hw1Sol.pdf HTTP/1.1\"\\n']\n\n\nSuppose we want to extract the day, month, year, hour, minutes, seconds, and time zone. Unfortunately, these items are not in a fixed position from the beginning of the string, so slicing by some fixed offset won’t work.\nInstead, we can use some clever thinking. Notice how the relevant information is contained within a set of brackets, further separated by / and :. We can hone in on this region of text, and split the data on these characters. Python’s built-in .split function makes this easy.\n\nfirst = log_lines[0] # Only considering the first row of data\n\npertinent = first.split(\"[\")[1].split(']')[0]\nday, month, rest = pertinent.split('/')\nyear, hour, minute, rest = rest.split(':')\nseconds, time_zone = rest.split(' ')\nday, month, year, hour, minute, seconds, time_zone\n\n('26', 'Jan', '2014', '10', '47', '58', '-0800')\n\n\nThere are two problems with this code:\n\nPython’s built-in functions limit us to extract data one record at a time,\n\nThis can be resolved using the map function or pandas Series methods.\n\nThe code is quite verbose.\n\nThis is a larger issue that is trickier to solve\n\n\nIn the next section, we’ll introduce regular expressions - a tool that solves problem 2.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#regex-basics", - "href": "regex/regex.html#regex-basics", - "title": "6  Regular Expressions", - "section": "6.3 RegEx Basics", - "text": "6.3 RegEx Basics\nA regular expression (“RegEx”) is a sequence of characters that specifies a search pattern. They are written to extract specific information from text. Regular expressions are essentially part of a smaller programming language embedded in Python, made available through the re module. As such, they have a stand-alone syntax and methods for various capabilities.\nRegular expressions are useful in many applications beyond data science. For example, Social Security Numbers (SSNs) are often validated with regular expressions.\n\nr\"[0-9]{3}-[0-9]{2}-[0-9]{4}\" # Regular Expression Syntax\n\n# 3 of any digit, then a dash,\n# then 2 of any digit, then a dash,\n# then 4 of any digit\n\n'[0-9]{3}-[0-9]{2}-[0-9]{4}'\n\n\n\nThere are a ton of resources to learn and experiment with regular expressions. 
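Before the pointers to further resources below, here is that SSN pattern in action with Python's built-in re module (a quick sketch; re.fullmatch only succeeds if the entire string matches the pattern):

import re

ssn_pattern = r"[0-9]{3}-[0-9]{2}-[0-9]{4}"
re.fullmatch(ssn_pattern, "123-45-6789") is not None  # True
re.fullmatch(ssn_pattern, "1234-56-789") is not None  # False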
A few are provided below:\n\nOfficial Regex Guide\nData 100 Reference Sheet\nRegex101.com\n\nBe sure to check Python under the category on the left.\n\n\n\n6.3.1 Basics RegEx Syntax\nThere are four basic operations with regular expressions.\n\n\n\n\n\n\n\n\n\n\nOperation\nOrder\nSyntax Example\nMatches\nDoesn’t Match\n\n\n\n\nOr: |\n4\nAA|BAAB\nAA BAAB\nevery other string\n\n\nConcatenation\n3\nAABAAB\nAABAAB\nevery other string\n\n\nClosure: * (zero or more)\n2\nAB*A\nAA ABBBBBBA\nAB ABABA\n\n\nGroup: () (parenthesis)\n1\nA(A|B)AAB (AB)*A\nAAAAB ABAAB A ABABABABA\nevery other string AA ABBA\n\n\n\nNotice how these metacharacter operations are ordered. Rather than being literal characters, these metacharacters manipulate adjacent characters. () takes precedence, followed by *, and finally |. This allows us to differentiate between very different regex commands like AB* and (AB)*. The former reads “A then zero or more copies of B”, while the latter specifies “zero or more copies of AB”.\n\n6.3.1.1 Examples\nQuestion 1: Give a regular expression that matches moon, moooon, etc. Your expression should match any even number of os except zero (i.e. don’t match mn).\nAnswer 1: moo(oo)*n\n\nHardcoding oo before the capture group ensures that mn is not matched.\nA capture group of (oo)* ensures the number of o’s is even.\n\nQuestion 2: Using only basic operations, formulate a regex that matches muun, muuuun, moon, moooon, etc. Your expression should match any even number of us or os except zero (i.e. don’t match mn).\nAnswer 2: m(uu(uu)*|oo(oo)*)n\n\nThe leading m and trailing n ensures that only strings beginning with m and ending with n are matched.\nNotice how the outer capture group surrounds the |.\n\nConsider the regex m(uu(uu)*)|(oo(oo)*)n. This incorrectly matches muu and oooon.\n\nEach OR clause is everything to the left and right of |. The incorrect solution matches only half of the string, and ignores either the beginning m or trailing n.\nA set of parenthesis must surround |. That way, each OR clause is everything to the left and right of | within the group. This ensures both the beginning m and trailing n are matched.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#regex-expanded", - "href": "regex/regex.html#regex-expanded", - "title": "6  Regular Expressions", - "section": "6.4 RegEx Expanded", - "text": "6.4 RegEx Expanded\nProvided below are more complex regular expression functions.\n\n\n\n\n\n\n\n\n\nOperation\nSyntax Example\nMatches\nDoesn’t Match\n\n\n\n\nAny Character: . (except newline)\n.U.U.U.\nCUMULUS JUGULUM\nSUCCUBUS TUMULTUOUS\n\n\nCharacter Class: [] (match one character in [])\n[A-Za-z][a-z]*\nword Capitalized\ncamelCase 4illegal\n\n\nRepeated \"a\" Times: {a}\nj[aeiou]{3}hn\njaoehn jooohn\njhn jaeiouhn\n\n\nRepeated \"from a to b\" Times: {a, b}\nj[ou]{1,2}hn\njohn juohn\njhn jooohn\n\n\nAt Least One: +\njo+hn\njohn joooooohn\njhn jjohn\n\n\nZero or One: ?\njoh?n\njon john\nany other string\n\n\n\nA character class matches a single character in its class. These characters can be hardcoded —— in the case of [aeiou] —— or shorthand can be specified to mean a range of characters. 
Examples include:\n\n[A-Z]: Any capitalized letter\n[a-z]: Any lowercase letter\n[0-9]: Any single digit\n[A-Za-z]: Any capitalized of lowercase letter\n[A-Za-z0-9]: Any capitalized or lowercase letter or single digit\n\n\n6.4.0.1 Examples\nLet’s analyze a few examples of complex regular expressions.\n\n\n\n\n\n\n\nMatches\nDoes Not Match\n\n\n\n\n\n.*SPB.*\n\n\n\n\nRASPBERRY SPBOO\nSUBSPACE SUBSPECIES\n\n\n\n[0-9]{3}-[0-9]{2}-[0-9]{4}\n\n\n\n\n231-41-5121 573-57-1821\n231415121 57-3571821\n\n\n\n[a-z]+@([a-z]+\\.)+(edu|com)\n\n\n\n\nhorse@pizza.com horse@pizza.food.com\nfrank_99@yahoo.com hug@cs\n\n\n\nExplanations\n\n.*SPB.* only matches strings that contain the substring SPB.\n\nThe .* metacharacter matches any amount of non-negative characters. Newlines do not count.\n\n\nThis regular expression matches 3 of any digit, then a dash, then 2 of any digit, then a dash, then 4 of any digit.\n\nYou’ll recognize this as the familiar Social Security Number regular expression.\n\nMatches any email with a com or edu domain, where all characters of the email are letters.\n\nAt least one . must precede the domain name. Including a backslash \\ before any metacharacter (in this case, the .) tells RegEx to match that character exactly.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#convenient-regex", - "href": "regex/regex.html#convenient-regex", - "title": "6  Regular Expressions", - "section": "6.5 Convenient RegEx", - "text": "6.5 Convenient RegEx\nHere are a few more convenient regular expressions.\n\n\n\n\n\n\n\n\n\nOperation\nSyntax Example\nMatches\nDoesn’t Match\n\n\n\n\nbuilt in character class\n\\w+ \\d+ \\s+ \nFawef_03 231123 whitespace\nthis person 423 people non-whitespace\n\n\ncharacter class negation: [^] (everything except the given characters)\n[^a-z]+.\nPEPPERS3982 17211!↑å\nporch CLAmS\n\n\nescape character: \\ (match the literal next character)\ncow\\.com\ncow.com\ncowscom\n\n\nbeginning of line: ^\n^ark\nark two ark o ark\ndark\n\n\nend of line: $\nark$\ndark ark o ark\nark two\n\n\nlazy version of zero or more : *?\n5.*?5\n5005 55\n5005005\n\n\n\n\n6.5.1 Greediness\nIn order to fully understand the last operation in the table, we have to discuss greediness. RegEx is greedy – it will look for the longest possible match in a string. To motivate this with an example, consider the pattern <div>.*</div>. In the sentence below, we would hope that the bolded portions would be matched:\n“This is a <div>example</div> of greediness <div>in</div> regular expressions.”\nHowever, in reality, RegEx captures far more of the sentence. The way RegEx processes the text given that pattern is as follows:\n\n“Look for the exact string <>”\nThen, “look for any character 0 or more times”\nThen, “look for the exact string </div>”\n\nThe result would be all the characters starting from the leftmost <div> and the rightmost </div> (inclusive):\n“This is a <div>example</div> of greediness <div>in</div> regular expressions.”\nWe can fix this by making our pattern non-greedy, <div>.*?</div>. You can read up more in the documentation here.\n\n\n6.5.2 Examples\nLet’s revisit our earlier problem of extracting date/time data from the given .txt files. 
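Before doing so, here is a quick sketch of the greedy-versus-lazy behavior described above, using Python's re module (introduced more formally later in this note):

import re

sentence = "This is a <div>example</div> of greediness <div>in</div> regular expressions."

# Greedy: runs from the first <div> all the way to the last </div>, giving one long match
print(re.findall(r"<div>.*</div>", sentence))

# Lazy (non-greedy): stops at the earliest possible </div>, giving two separate matches
print(re.findall(r"<div>.*?</div>", sentence))

With that distinction in hand, let's return to the log data.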
Here is how the data looked.\n\nlog_lines[0]\n\n'169.237.46.168 - - [26/Jan/2014:10:47:58 -0800] \"GET /stat141/Winter04/ HTTP/1.1\" 200 2585 \"http://anson.ucdavis.edu/courses/\"\\n'\n\n\nQuestion: Give a regular expression that matches everything contained within and including the brackets - the day, month, year, hour, minutes, seconds, and time zone.\nAnswer: \\[.*\\]\n\nNotice how matching the literal [ and ] is necessary. Therefore, an escape character \\ is required before both [ and ] — otherwise these metacharacters will match character classes.\nWe need to match a particular format between [ and ]. For this example, .* will suffice.\n\nAlternative Solution: \\[\\w+/\\w+/\\w+:\\w+:\\w+:\\w+\\s-\\w+\\]\n\nThis solution is much safer.\n\nImagine the data between [ and ] was garbage - .* will still match that.\nThe alternate solution will only match data that follows the correct format.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#regex-in-python-and-pandas-regex-groups", - "href": "regex/regex.html#regex-in-python-and-pandas-regex-groups", - "title": "6  Regular Expressions", - "section": "6.6 Regex in Python and Pandas (RegEx Groups)", - "text": "6.6 Regex in Python and Pandas (RegEx Groups)\n\n6.6.1 Canonicalization\n\n6.6.1.1 Canonicalization with RegEx\nEarlier in this note, we examined the process of canonicalization using python string manipulation and pandas Series methods. However, we mentioned this approach had a major flaw: our code was unnecessarily verbose. Equipped with our knowledge of regular expressions, let’s fix this.\nTo do so, we need to understand a few functions in the re module. The first of these is the substitute function: re.sub(pattern, rep1, text). It behaves similarly to python’s built-in .replace function, and returns text with all instances of pattern replaced by rep1.\nThe regular expression here removes text surrounded by <> (also known as HTML tags).\nIn order, the pattern matches … 1. a single < 2. any character that is not a > : div, td valign…, /td, /div 3. a single >\nAny substring in text that fulfills all three conditions will be replaced by ''.\n\nimport re\n\ntext = \"<div><td valign='top'>Moo</td></div>\"\npattern = r\"<[^>]+>\"\nre.sub(pattern, '', text) \n\n'Moo'\n\n\nNotice the r preceding the regular expression pattern; this specifies the regular expression is a raw string. Raw strings do not recognize escape sequences (i.e., the Python newline metacharacter \\n). This makes them useful for regular expressions, which often contain literal \\ characters.\nIn other words, don’t forget to tag your RegEx with an r.\n\n\n6.6.1.2 Canonicalization with pandas\nWe can also use regular expressions with pandas Series methods. This gives us the benefit of operating on an entire column of data as opposed to a single value. 
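For contrast, the per-value alternative would wrap re.sub in map, just like the earlier canonicalization functions. A minimal sketch (the Series here is hypothetical, for illustration only):

import re
import pandas as pd

ser = pd.Series(["<div>Moo</div>", "<b>Bold text</b>"])
pattern = r"<[^>]+>"

# One re.sub call per element; correct, but not vectorized
ser.map(lambda s: re.sub(pattern, "", s))

The vectorized Series version is preferable.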
The code is simple: ser.str.replace(pattern, repl, regex=True).\nConsider the following DataFrame html_data with a single column.\n\n\nCode\ndata = {\"HTML\": [\"<div><td valign='top'>Moo</td></div>\", \\\n \"<a href='http://ds100.org'>Link</a>\", \\\n \"<b>Bold text</b>\"]}\nhtml_data = pd.DataFrame(data)\n\n\n\nhtml_data\n\n\n\n\n\n\n\n\nHTML\n\n\n\n\n0\n<div><td valign='top'>Moo</td></div>\n\n\n1\n<a href='http://ds100.org'>Link</a>\n\n\n2\n<b>Bold text</b>\n\n\n\n\n\n\n\n\npattern = r\"<[^>]+>\"\nhtml_data['HTML'].str.replace(pattern, '', regex=True)\n\n0 Moo\n1 Link\n2 Bold text\nName: HTML, dtype: object\n\n\n\n\n\n6.6.2 Extraction\n\n6.6.2.1 Extraction with RegEx\nJust like with canonicalization, the re module provides capability to extract relevant text from a string: re.findall(pattern, text). This function returns a list of all matches to pattern.\nUsing the familiar regular expression for Social Security Numbers:\n\ntext = \"My social security number is 123-45-6789 bro, or maybe it’s 321-45-6789.\"\npattern = r\"[0-9]{3}-[0-9]{2}-[0-9]{4}\"\nre.findall(pattern, text) \n\n['123-45-6789', '321-45-6789']\n\n\n\n\n6.6.2.2 Extraction with pandas\npandas similarily provides extraction functionality on a Series of data: ser.str.findall(pattern)\nConsider the following DataFrame ssn_data.\n\n\nCode\ndata = {\"SSN\": [\"987-65-4321\", \"forty\", \\\n \"123-45-6789 bro or 321-45-6789\",\n \"999-99-9999\"]}\nssn_data = pd.DataFrame(data)\n\n\n\nssn_data\n\n\n\n\n\n\n\n\nSSN\n\n\n\n\n0\n987-65-4321\n\n\n1\nforty\n\n\n2\n123-45-6789 bro or 321-45-6789\n\n\n3\n999-99-9999\n\n\n\n\n\n\n\n\nssn_data[\"SSN\"].str.findall(pattern)\n\n0 [987-65-4321]\n1 []\n2 [123-45-6789, 321-45-6789]\n3 [999-99-9999]\nName: SSN, dtype: object\n\n\nThis function returns a list for every row containing the pattern matches in a given string.\nAs you may expect, there are similar pandas equivalents for other re functions as well. Series.str.extract takes in a pattern and returns a DataFrame of each capture group’s first match in the string. In contrast, Series.str.extractall returns a multi-indexed DataFrame of all matches for each capture group. You can see the difference in the outputs below:\n\npattern_cg = r\"([0-9]{3})-([0-9]{2})-([0-9]{4})\"\nssn_data[\"SSN\"].str.extract(pattern_cg)\n\n\n\n\n\n\n\n\n0\n1\n2\n\n\n\n\n0\n987\n65\n4321\n\n\n1\nNaN\nNaN\nNaN\n\n\n2\n123\n45\n6789\n\n\n3\n999\n99\n9999\n\n\n\n\n\n\n\n\nssn_data[\"SSN\"].str.extractall(pattern_cg)\n\n\n\n\n\n\n\n\n\n0\n1\n2\n\n\n\nmatch\n\n\n\n\n\n\n\n0\n0\n987\n65\n4321\n\n\n2\n0\n123\n45\n6789\n\n\n1\n321\n45\n6789\n\n\n3\n0\n999\n99\n9999\n\n\n\n\n\n\n\n\n\n\n6.6.3 Regular Expression Capture Groups\nEarlier we used parentheses ( ) to specify the highest order of operation in regular expressions. However, they have another meaning; parentheses are often used to represent capture groups. Capture groups are essentially, a set of smaller regular expressions that match multiple substrings in text data.\nLet’s take a look at an example.\n\n6.6.3.1 Example 1\n\ntext = \"Observations: 03:04:53 - Horse awakens. \\\n 03:05:14 - Horse goes back to sleep.\"\n\nSay we want to capture all occurences of time data (hour, minute, and second) as separate entities.\n\npattern_1 = r\"(\\d\\d):(\\d\\d):(\\d\\d)\"\nre.findall(pattern_1, text)\n\n[('03', '04', '53'), ('03', '05', '14')]\n\n\nNotice how the given pattern has 3 capture groups, each specified by the regular expression (\\d\\d). 
We then use re.findall to return these capture groups, each as tuples containing 3 matches.\nThese regular expression capture groups can be different. We can use the (\\d{2}) shorthand to extract the same data.\n\npattern_2 = r\"(\\d\\d):(\\d\\d):(\\d{2})\"\nre.findall(pattern_2, text)\n\n[('03', '04', '53'), ('03', '05', '14')]\n\n\n\n\n6.6.3.2 Example 2\nWith the notion of capture groups, convince yourself how the following regular expression works.\n\nfirst = log_lines[0]\nfirst\n\n'169.237.46.168 - - [26/Jan/2014:10:47:58 -0800] \"GET /stat141/Winter04/ HTTP/1.1\" 200 2585 \"http://anson.ucdavis.edu/courses/\"\\n'\n\n\n\npattern = r'\\[(\\d+)\\/(\\w+)\\/(\\d+):(\\d+):(\\d+):(\\d+) (.+)\\]'\nday, month, year, hour, minute, second, time_zone = re.findall(pattern, first)[0]\nprint(day, month, year, hour, minute, second, time_zone)\n\n26 Jan 2014 10 47 58 -0800", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "regex/regex.html#limitations-of-regular-expressions", - "href": "regex/regex.html#limitations-of-regular-expressions", - "title": "6  Regular Expressions", - "section": "6.7 Limitations of Regular Expressions", - "text": "6.7 Limitations of Regular Expressions\nToday, we explored the capabilities of regular expressions in data wrangling with text data. However, there are a few things to be wary of.\nWriting regular expressions is like writing a program.\n\nNeed to know the syntax well.\nCan be easier to write than to read.\nCan be difficult to debug.\n\nRegular expressions are terrible at certain types of problems:\n\nFor parsing a hierarchical structure, such as JSON, use the json.load() parser, not RegEx!\nComplex features (e.g. valid email address).\nCounting (same number of instances of a and b). (impossible)\nComplex properties (palindromes, balanced parentheses). (impossible)\n\nUltimately, the goal is not to memorize all regular expressions. Rather, the aim is to:\n\nUnderstand what RegEx is capable of.\nParse and create RegEx, with a reference table\nUse vocabulary (metacharacter, escape character, groups, etc.) to describe regex metacharacters.\nDifferentiate between (), [], {}\nDesign your own character classes with , , […-…], ^, etc.\nUse python and pandas RegEx methods.", - "crumbs": [ - "6  Regular Expressions" - ] - }, - { - "objectID": "visualization_1/visualization_1.html", - "href": "visualization_1/visualization_1.html", - "title": "7  Visualization I", - "section": "", - "text": "7.1 Visualizations in Data 8 and Data 100 (so far)\nYou’ve likely encountered several forms of data visualizations in your studies. You may remember two such examples from Data 8: line plots, scatter plots, and histograms. Each of these served a unique purpose. 
For example, line plots displayed how numerical quantities changed over time, while histograms were useful in understanding a variable’s distribution.", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#visualizations-in-data-8-and-data-100-so-far", - "href": "visualization_1/visualization_1.html#visualizations-in-data-8-and-data-100-so-far", - "title": "7  Visualization I", - "section": "", - "text": "Line Chart\nScatter Plot\n\n\n\n\n\n\n\n\n\n\n\n\nHistogram", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#goals-of-visualization", - "href": "visualization_1/visualization_1.html#goals-of-visualization", - "title": "7  Visualization I", - "section": "7.2 Goals of Visualization", - "text": "7.2 Goals of Visualization\nVisualizations are useful for a number of reasons. In Data 100, we consider two areas in particular:\n\nTo broaden your understanding of the data. Summarizing trends visually before in-depth analysis is a key part of exploratory data analysis. Creating these graphs is a lightweight, iterative and flexible process that helps us investigate relationships between variables.\nTo communicate results/conclusions to others. These visualizations are highly editorial, selective, and fine-tuned to achieve a communications goal, so be thoughtful and careful about its clarity, accessibility, and necessary context.\n\nAltogether, these goals emphasize the fact that visualizations aren’t a matter of making “pretty” pictures; we need to do a lot of thinking about what stylistic choices communicate ideas most effectively.\nThis course note will focus on the first half of visualization topics in Data 100. The goal here is to understand how to choose the “right” plot depending on different variable types and, secondly, how to generate these plots using code.", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#an-overview-of-distributions", - "href": "visualization_1/visualization_1.html#an-overview-of-distributions", - "title": "7  Visualization I", - "section": "7.3 An Overview of Distributions", - "text": "7.3 An Overview of Distributions\nA distribution describes both the set of values that a single variable can take and the frequency of unique values in a single variable. For example, if we’re interested in the distribution of students across Data 100 discussion sections, the set of possible values is a list of discussion sections (10-11am, 11-12pm, etc.), and the frequency that each of those values occur is the number of students enrolled in each section. In other words, the we’re interested in how a variable is distributed across it’s possible values. 
Therefore, distributions must satisfy two properties:\n\nThe total frequency of all categories must sum to 100%\nTotal count should sum to the total number of datapoints if we’re using raw counts.\n\n\n\n\n\n\n\n\nNot a Valid Distribution\nValid Distribution\n\n\n\n\n\n\n\n\nThis is not a valid distribution since individuals can be associated with more than one category and the bar values demonstrate values in minutes and not probability.\nThis example satisfies the two properties of distributions, so it is a valid distribution.", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#variable-types-should-inform-plot-choice", - "href": "visualization_1/visualization_1.html#variable-types-should-inform-plot-choice", - "title": "7  Visualization I", - "section": "7.4 Variable Types Should Inform Plot Choice", - "text": "7.4 Variable Types Should Inform Plot Choice\nDifferent plots are more or less suited for displaying particular types of variables, laid out in the diagram below:\n\n\n\nThe first step of any visualization is to identify the type(s) of variables we’re working with. From here, we can select an appropriate plot type:", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#qualitative-variables-bar-plots", - "href": "visualization_1/visualization_1.html#qualitative-variables-bar-plots", - "title": "7  Visualization I", - "section": "7.5 Qualitative Variables: Bar Plots", - "text": "7.5 Qualitative Variables: Bar Plots\nA bar plot is one of the most common ways of displaying the distribution of a qualitative (categorical) variable. The length of a bar plot encodes the frequency of a category; the width encodes no useful information. The color could indicate a sub-category, but this is not necessarily the case.\nLet’s contextualize this in an example. 
We will use the World Bank dataset (wb) in our analysis.\n\n\nCode\nimport pandas as pd\nimport numpy as np\n\nwb = pd.read_csv(\"data/world_bank.csv\", index_col=0)\nwb.head()\n\n\n\n\n\n\n\n\n\nContinent\nCountry\nPrimary completion rate: Male: % of relevant age group: 2015\nPrimary completion rate: Female: % of relevant age group: 2015\nLower secondary completion rate: Male: % of relevant age group: 2015\nLower secondary completion rate: Female: % of relevant age group: 2015\nYouth literacy rate: Male: % of ages 15-24: 2005-14\nYouth literacy rate: Female: % of ages 15-24: 2005-14\nAdult literacy rate: Male: % ages 15 and older: 2005-14\nAdult literacy rate: Female: % ages 15 and older: 2005-14\n...\nAccess to improved sanitation facilities: % of population: 1990\nAccess to improved sanitation facilities: % of population: 2015\nChild immunization rate: Measles: % of children ages 12-23 months: 2015\nChild immunization rate: DTP3: % of children ages 12-23 months: 2015\nChildren with acute respiratory infection taken to health provider: % of children under age 5 with ARI: 2009-2016\nChildren with diarrhea who received oral rehydration and continuous feeding: % of children under age 5 with diarrhea: 2009-2016\nChildren sleeping under treated bed nets: % of children under age 5: 2009-2016\nChildren with fever receiving antimalarial drugs: % of children under age 5 with fever: 2009-2016\nTuberculosis: Treatment success rate: % of new cases: 2014\nTuberculosis: Cases detection rate: % of new estimated cases: 2015\n\n\n\n\n0\nAfrica\nAlgeria\n106.0\n105.0\n68.0\n85.0\n96.0\n92.0\n83.0\n68.0\n...\n80.0\n88.0\n95.0\n95.0\n66.0\n42.0\nNaN\nNaN\n88.0\n80.0\n\n\n1\nAfrica\nAngola\nNaN\nNaN\nNaN\nNaN\n79.0\n67.0\n82.0\n60.0\n...\n22.0\n52.0\n55.0\n64.0\nNaN\nNaN\n25.9\n28.3\n34.0\n64.0\n\n\n2\nAfrica\nBenin\n83.0\n73.0\n50.0\n37.0\n55.0\n31.0\n41.0\n18.0\n...\n7.0\n20.0\n75.0\n79.0\n23.0\n33.0\n72.7\n25.9\n89.0\n61.0\n\n\n3\nAfrica\nBotswana\n98.0\n101.0\n86.0\n87.0\n96.0\n99.0\n87.0\n89.0\n...\n39.0\n63.0\n97.0\n95.0\nNaN\nNaN\nNaN\nNaN\n77.0\n62.0\n\n\n5\nAfrica\nBurundi\n58.0\n66.0\n35.0\n30.0\n90.0\n88.0\n89.0\n85.0\n...\n42.0\n48.0\n93.0\n94.0\n55.0\n43.0\n53.8\n25.4\n91.0\n51.0\n\n\n\n\n5 rows × 47 columns\n\n\n\nWe can visualize the distribution of the Continent column using a bar plot. There are a few ways to do this.\n\n7.5.1 Plotting in Pandas\n\nwb['Continent'].value_counts().plot(kind='bar');\n\n\n\n\n\n\n\n\nRecall that .value_counts() returns a Series with the total count of each unique value. We call .plot(kind='bar') on this result to visualize these counts as a bar plot.\nPlotting methods in pandas are the least preferred and not supported in Data 100, as their functionality is limited. Instead, future examples will focus on other libraries built specifically for visualizing data. The most well-known library here is matplotlib.\n\n\n7.5.2 Plotting in Matplotlib\n\nimport matplotlib.pyplot as plt # matplotlib is typically given the alias plt\n\ncontinent = wb['Continent'].value_counts()\nplt.bar(continent.index, continent)\nplt.xlabel('Continent')\nplt.ylabel('Count');\n\n\n\n\n\n\n\n\nWhile more code is required to achieve the same result, matplotlib is often used over pandas for its ability to plot more complex visualizations, some of which are discussed shortly.\nHowever, note how we needed to label the axes with plt.xlabel and plt.ylabel, as matplotlib does not support automatic axis labeling. 
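The same figure can also be written in matplotlib's object-oriented style, which scales better to complex, multi-panel plots. A sketch, assuming the continent value counts computed above:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.bar(continent.index, continent)  # `continent` is the value_counts() Series from above
ax.set_xlabel("Continent")          # axis labels still have to be set by hand
ax.set_ylabel("Count");

Either way, the labeling remains manual.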
To get around these inconveniences, we can use a more efficient plotting library: seaborn.\n\n\n7.5.3 Plotting in Seaborn\n\nimport seaborn as sns # seaborn is typically given the alias sns\nsns.countplot(data = wb, x = 'Continent');\n\n\n\n\n\n\n\n\nIn contrast to matplotlib, the general structure of a seaborn call involves passing in an entire DataFrame, and then specifying what column(s) to plot. seaborn.countplot both counts and visualizes the number of unique values in a given column. This column is specified by the x argument to sns.countplot, while the DataFrame is specified by the data argument.\nFor the vast majority of visualizations, seaborn is far more concise and aesthetically pleasing than matplotlib. However, the color scheme of this particular bar plot is arbitrary - it encodes no additional information about the categories themselves. This is not always true; color may signify meaningful detail in other visualizations. We’ll explore this more in-depth during the next lecture.\nBy now, you’ll have noticed that each of these plotting libraries have a very different syntax. As with pandas, we’ll teach you the important methods in matplotlib and seaborn, but you’ll learn more through documentation.\n\nMatplotlib Documentation\nSeaborn Documentation", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_1/visualization_1.html#distributions-of-quantitative-variables", - "href": "visualization_1/visualization_1.html#distributions-of-quantitative-variables", - "title": "7  Visualization I", - "section": "7.6 Distributions of Quantitative Variables", - "text": "7.6 Distributions of Quantitative Variables\nRevisiting our example with the wb DataFrame, let’s plot the distribution of Gross national income per capita.\n\n\nCode\nwb.head(5)\n\n\n\n\n\n\n\n\n\nContinent\nCountry\nPrimary completion rate: Male: % of relevant age group: 2015\nPrimary completion rate: Female: % of relevant age group: 2015\nLower secondary completion rate: Male: % of relevant age group: 2015\nLower secondary completion rate: Female: % of relevant age group: 2015\nYouth literacy rate: Male: % of ages 15-24: 2005-14\nYouth literacy rate: Female: % of ages 15-24: 2005-14\nAdult literacy rate: Male: % ages 15 and older: 2005-14\nAdult literacy rate: Female: % ages 15 and older: 2005-14\n...\nAccess to improved sanitation facilities: % of population: 1990\nAccess to improved sanitation facilities: % of population: 2015\nChild immunization rate: Measles: % of children ages 12-23 months: 2015\nChild immunization rate: DTP3: % of children ages 12-23 months: 2015\nChildren with acute respiratory infection taken to health provider: % of children under age 5 with ARI: 2009-2016\nChildren with diarrhea who received oral rehydration and continuous feeding: % of children under age 5 with diarrhea: 2009-2016\nChildren sleeping under treated bed nets: % of children under age 5: 2009-2016\nChildren with fever receiving antimalarial drugs: % of children under age 5 with fever: 2009-2016\nTuberculosis: Treatment success rate: % of new cases: 2014\nTuberculosis: Cases detection rate: % of new estimated cases: 
2015\n\n\n\n\n0\nAfrica\nAlgeria\n106.0\n105.0\n68.0\n85.0\n96.0\n92.0\n83.0\n68.0\n...\n80.0\n88.0\n95.0\n95.0\n66.0\n42.0\nNaN\nNaN\n88.0\n80.0\n\n\n1\nAfrica\nAngola\nNaN\nNaN\nNaN\nNaN\n79.0\n67.0\n82.0\n60.0\n...\n22.0\n52.0\n55.0\n64.0\nNaN\nNaN\n25.9\n28.3\n34.0\n64.0\n\n\n2\nAfrica\nBenin\n83.0\n73.0\n50.0\n37.0\n55.0\n31.0\n41.0\n18.0\n...\n7.0\n20.0\n75.0\n79.0\n23.0\n33.0\n72.7\n25.9\n89.0\n61.0\n\n\n3\nAfrica\nBotswana\n98.0\n101.0\n86.0\n87.0\n96.0\n99.0\n87.0\n89.0\n...\n39.0\n63.0\n97.0\n95.0\nNaN\nNaN\nNaN\nNaN\n77.0\n62.0\n\n\n5\nAfrica\nBurundi\n58.0\n66.0\n35.0\n30.0\n90.0\n88.0\n89.0\n85.0\n...\n42.0\n48.0\n93.0\n94.0\n55.0\n43.0\n53.8\n25.4\n91.0\n51.0\n\n\n\n\n5 rows × 47 columns\n\n\n\nHow should we define our categories for this variable? In the previous example, these were a few unique values of the Continent column. If we use similar logic here, our categories are the different numerical values contained in the Gross national income per capita column.\nUnder this assumption, let’s plot this distribution using the seaborn.countplot function.\n\nsns.countplot(data = wb, x = 'Gross national income per capita, Atlas method: $: 2016');\n\n\n\n\n\n\n\n\nWhat happened? A bar plot (either plt.bar or sns.countplot) will create a separate bar for each unique value of a variable. With a continuous variable, we may not have a finite number of possible values, which can lead to situations like above where we would need many, many bars to display each unique value.\nSpecifically, we can say this histogram suffers from overplotting as we are unable to interpret the plot and gain any meaningful insight.\nRather than bar plots, to visualize the distribution of a continuous variable, we use one of the following types of plots:\n\nHistogram\nBox plot\nViolin plot\n\n\n7.6.1 Box Plots and Violin Plots\nBox plots and violin plots are two very similar kinds of visualizations. Both display the distribution of a variable using information about quartiles.\nIn a box plot, the width of the box at any point does not encode meaning. In a violin plot, the width of the plot indicates the density of the distribution at each possible value.\n\nsns.boxplot(data=wb, y='Gross national income per capita, Atlas method: $: 2016');\n\n\n\n\n\n\n\n\n\nsns.violinplot(data=wb, y=\"Gross national income per capita, Atlas method: $: 2016\");\n\n\n\n\n\n\n\n\nA quartile represents a 25% portion of the data. We say that:\n\nThe first quartile (Q1) represents the 25th percentile – 25% of the data is smaller than or equal to the first quartile.\nThe second quartile (Q2) represents the 50th percentile, also known as the median – 50% of the data is smaller than or equal to the second quartile.\nThe third quartile (Q3) represents the 75th percentile – 75% of the data is smaller than or equal to the third quartile.\n\nThis means that the middle 50% of the data lies between the first and third quartiles. This is demonstrated in the histogram below. 
The three quartiles are marked with red vertical bars.\n\n\nCode\ngdp = wb['Gross domestic product: % growth : 2016']\ngdp = gdp[~gdp.isna()]\n\nq1, q2, q3 = np.percentile(gdp, [25, 50, 75])\n\nwb_quartiles = wb.copy()\nwb_quartiles['category'] = None\nwb_quartiles.loc[(wb_quartiles['Gross domestic product: % growth : 2016'] < q1) | (wb_quartiles['Gross domestic product: % growth : 2016'] > q3), 'category'] = 'Outside of the middle 50%'\nwb_quartiles.loc[(wb_quartiles['Gross domestic product: % growth : 2016'] > q1) & (wb_quartiles['Gross domestic product: % growth : 2016'] < q3), 'category'] = 'In the middle 50%'\n\nsns.histplot(wb_quartiles, x=\"Gross domestic product: % growth : 2016\", hue=\"category\")\nsns.rugplot([q1, q2, q3], c=\"firebrick\", lw=6, height=0.1);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nIn a box plot, the lower extent of the box lies at Q1, while the upper extent of the box lies at Q3. The horizontal line in the middle of the box corresponds to Q2 (equivalently, the median).\n\nsns.boxplot(data=wb, y='Gross domestic product: % growth : 2016');\n\n\n\n\n\n\n\n\nThe whiskers of a box-plot are the two points that lie at the [\\(1^{st}\\) Quartile \\(-\\) (\\(1.5\\times\\) IQR)], and the [\\(3^{rd}\\) Quartile \\(+\\) (\\(1.5\\times\\) IQR)]. They are the lower and upper ranges of “normal” data (the points excluding outliers).\nThe different forms of information contained in a box plot can be summarised as follows:\n\n\n\nA violin plot displays quartile information, albeit a bit more subtly through smoothed density curves. Look closely at the center vertical bar of the violin plot below; the three quartiles and “whiskers” are still present!\n\nsns.violinplot(data=wb, y='Gross domestic product: % growth : 2016');\n\n\n\n\n\n\n\n\n\n\n7.6.2 Side-by-Side Box and Violin Plots\nPlotting side-by-side box or violin plots allows us to compare distributions across different categories. In other words, they enable us to plot both a qualitative variable and a quantitative continuous variable in one visualization.\nWith seaborn, we can easily create side-by-side plots by specifying both an x and y column.\n\nsns.boxplot(data=wb, x=\"Continent\", y='Gross domestic product: % growth : 2016');\n\n\n\n\n\n\n\n\n\n\n7.6.3 Histograms\nYou are likely familiar with histograms from Data 8. A histogram collects continuous data into bins, then plots this binned data. Each bin reflects the density of datapoints with values that lie between the left and right ends of the bin; in other words, the area of each bin is proportional to the percentage of datapoints it contains.\n\n7.6.3.1 Plotting Histograms\nBelow, we plot a histogram using matplotlib and seaborn. 
Which graph do you prefer?\n\n# The `edgecolor` argument controls the color of the bin edges\ngni = wb[\"Gross national income per capita, Atlas method: $: 2016\"]\nplt.hist(gni, density=True, edgecolor=\"white\")\n\n# Add labels\nplt.xlabel(\"Gross national income per capita\")\nplt.ylabel(\"Density\")\nplt.title(\"Distribution of gross national income per capita\");\n\n\n\n\n\n\n\n\n\nsns.histplot(data=wb, x=\"Gross national income per capita, Atlas method: $: 2016\", stat=\"density\")\nplt.title(\"Distribution of gross national income per capita\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n\n7.6.3.2 Overlaid Histograms\nWe can overlay histograms (or density curves) to compare distributions across qualitative categories.\nThe hue parameter of sns.histplot specifies the column that should be used to determine the color of each category. hue can be used in many seaborn plotting functions.\nNotice that the resulting plot includes a legend describing which color corresponds to each hemisphere – a legend should always be included if color is used to encode information in a visualization!\n\n# Create a new variable to store the hemisphere in which each country is located\nnorth = [\"Asia\", \"Europe\", \"N. America\"]\nsouth = [\"Africa\", \"Oceania\", \"S. America\"]\nwb.loc[wb[\"Continent\"].isin(north), \"Hemisphere\"] = \"Northern\"\nwb.loc[wb[\"Continent\"].isin(south), \"Hemisphere\"] = \"Southern\"\n\n\nsns.histplot(data=wb, x=\"Gross national income per capita, Atlas method: $: 2016\", hue=\"Hemisphere\", stat=\"density\")\nplt.title(\"Distribution of gross national income per capita\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nAgain, each bin of a histogram is scaled such that its area is proportional to the percentage of all datapoints that it contains.\n\ndensities, bins, _ = plt.hist(gni, density=True, edgecolor=\"white\", bins=5)\nplt.xlabel(\"Gross national income per capita\")\nplt.ylabel(\"Density\")\n\nprint(f\"First bin has width {bins[1]-bins[0]} and height {densities[0]}\")\nprint(f\"This corresponds to {bins[1]-bins[0]} * {densities[0]} = {(bins[1]-bins[0])*densities[0]*100}% of the data\")\n\nFirst bin has width 16410.0 and height 4.7741589911386953e-05\nThis corresponds to 16410.0 * 4.7741589911386953e-05 = 78.343949044586% of the data\n\n\n\n\n\n\n\n\n\n\n\n7.6.3.3 Evaluating Histograms\nHistograms allow us to assess a distribution by their shape. There are a few properties of histograms we can analyze:\n\nSkewness and Tails\n\nSkewed left vs skewed right\nLeft tail vs right tail\n\nOutliers\n\nUsing percentiles\n\nModes\n\nMost commonly occuring data\n\n\n\n7.6.3.3.1 Skewness and Tails\nThe skew of a histogram describes the direction in which its “tail” extends. - A distribution with a long right tail is skewed right (such as Gross national income per capita). 
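One quick numerical check of this skew is to compare the mean and the median. A sketch, reusing the gni Series defined earlier:

# For a right-skewed variable, we expect the mean to exceed the median
# (both pandas methods skip NaN values by default)
print(f"Mean: {gni.mean():.1f}")
print(f"Median: {gni.median():.1f}")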
In a right-skewed distribution, the few large outliers “pull” the mean to the right of the median.\n\nsns.histplot(data = wb, x = 'Gross national income per capita, Atlas method: $: 2016', stat = 'density');\nplt.title('Distribution with a long right tail')\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\nText(0.5, 1.0, 'Distribution with a long right tail')\n\n\n\n\n\n\n\n\n\n\nA distribution with a long left tail is skewed left (such as Access to an improved water source). In a left-skewed distribution, the few small outliers “pull” the mean to the left of the median.\n\nIn the case where a distribution has equal-sized right and left tails, it is symmetric. The mean is approximately equal to the median. Think of mean as the balancing point of the distribution.\n\nsns.histplot(data = wb, x = 'Access to an improved water source: % of population: 2015', stat = 'density');\nplt.title('Distribution with a long left tail')\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\nText(0.5, 1.0, 'Distribution with a long left tail')\n\n\n\n\n\n\n\n\n\n\n\n7.6.3.3.2 Outliers\nLoosely speaking, an outlier is defined as a data point that lies an abnormally large distance away from other values. Let’s make this more concrete. As you may have observed in the box plot infographic earlier, we define outliers to be the data points that fall beyond the whiskers. Specifically, values that are less than the [\\(1^{st}\\) Quartile \\(-\\) (\\(1.5\\times\\) IQR)], or greater than [\\(3^{rd}\\) Quartile \\(+\\) (\\(1.5\\times\\) IQR).]\n\n\n7.6.3.3.3 Modes\nIn Data 100, we describe a “mode” of a histogram as a peak in the distribution. Often, however, it is difficult to determine what counts as its own “peak.” For example, the number of peaks in the distribution of HIV rates across different countries varies depending on the number of histogram bins we plot.\nIf we set the number of bins to 5, the distribution appears unimodal.\n\n# Rename the very long column name for convenience\nwb = wb.rename(columns={'Antiretroviral therapy coverage: % of people living with HIV: 2015':\"HIV rate\"})\n# With 5 bins, it seems that there is only one peak\nsns.histplot(data=wb, x=\"HIV rate\", stat=\"density\", bins=5)\nplt.title(\"5 histogram bins\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n# With 10 bins, there seem to be two peaks\n\nsns.histplot(data=wb, x=\"HIV rate\", stat=\"density\", bins=10)\nplt.title(\"10 histogram bins\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. 
Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n# And with 20 bins, it becomes hard to say what counts as a \"peak\"!\n\nsns.histplot(data=wb, x =\"HIV rate\", stat=\"density\", bins=20)\nplt.title(\"20 histogram bins\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nIn part, it is these ambiguities that motivate us to consider using Kernel Density Estimation (KDE), which we will explore more in the next lecture.", - "crumbs": [ - "7  Visualization I" - ] - }, - { - "objectID": "visualization_2/visualization_2.html", - "href": "visualization_2/visualization_2.html", - "title": "8  Visualization II", - "section": "", - "text": "8.1 Kernel Density Estimation\nOften, we want to identify general trends across a distribution, rather than focus on detail. Smoothing a distribution helps generalize the structure of the data and eliminate noise.", - "crumbs": [ - "8  Visualization II" - ] - }, - { - "objectID": "visualization_2/visualization_2.html#kernel-density-estimation", - "href": "visualization_2/visualization_2.html#kernel-density-estimation", - "title": "8  Visualization II", - "section": "", - "text": "8.1.1 KDE Theory\nA kernel density estimate (KDE) is a smooth, continuous function that approximates a curve. It allows us to represent general trends in a distribution without focusing on the details, which is useful for analyzing the broad structure of a dataset.\nMore formally, a KDE attempts to approximate the underlying probability distribution from which our dataset was drawn. You may have encountered the idea of a probability distribution in your other classes; if not, we’ll discuss it at length in the next lecture. For now, you can think of a probability distribution as a description of how likely it is for us to sample a particular value in our dataset.\nA KDE curve estimates the probability density function of a random variable. 
Consider the example below, where we have used sns.displot to plot both a histogram (containing the data points we actually collected) and a KDE curve (representing the approximated probability distribution from which this data was drawn) using data from the World Bank dataset (wb).\n\n\nCode\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nwb = pd.read_csv(\"data/world_bank.csv\", index_col=0)\nwb = wb.rename(columns={'Antiretroviral therapy coverage: % of people living with HIV: 2015':\"HIV rate\",\n 'Gross national income per capita, Atlas method: $: 2016':'gni'})\nwb.head()\n\n\n\n\n\n\n\n\n\nContinent\nCountry\nPrimary completion rate: Male: % of relevant age group: 2015\nPrimary completion rate: Female: % of relevant age group: 2015\nLower secondary completion rate: Male: % of relevant age group: 2015\nLower secondary completion rate: Female: % of relevant age group: 2015\nYouth literacy rate: Male: % of ages 15-24: 2005-14\nYouth literacy rate: Female: % of ages 15-24: 2005-14\nAdult literacy rate: Male: % ages 15 and older: 2005-14\nAdult literacy rate: Female: % ages 15 and older: 2005-14\n...\nAccess to improved sanitation facilities: % of population: 1990\nAccess to improved sanitation facilities: % of population: 2015\nChild immunization rate: Measles: % of children ages 12-23 months: 2015\nChild immunization rate: DTP3: % of children ages 12-23 months: 2015\nChildren with acute respiratory infection taken to health provider: % of children under age 5 with ARI: 2009-2016\nChildren with diarrhea who received oral rehydration and continuous feeding: % of children under age 5 with diarrhea: 2009-2016\nChildren sleeping under treated bed nets: % of children under age 5: 2009-2016\nChildren with fever receiving antimalarial drugs: % of children under age 5 with fever: 2009-2016\nTuberculosis: Treatment success rate: % of new cases: 2014\nTuberculosis: Cases detection rate: % of new estimated cases: 2015\n\n\n\n\n0\nAfrica\nAlgeria\n106.0\n105.0\n68.0\n85.0\n96.0\n92.0\n83.0\n68.0\n...\n80.0\n88.0\n95.0\n95.0\n66.0\n42.0\nNaN\nNaN\n88.0\n80.0\n\n\n1\nAfrica\nAngola\nNaN\nNaN\nNaN\nNaN\n79.0\n67.0\n82.0\n60.0\n...\n22.0\n52.0\n55.0\n64.0\nNaN\nNaN\n25.9\n28.3\n34.0\n64.0\n\n\n2\nAfrica\nBenin\n83.0\n73.0\n50.0\n37.0\n55.0\n31.0\n41.0\n18.0\n...\n7.0\n20.0\n75.0\n79.0\n23.0\n33.0\n72.7\n25.9\n89.0\n61.0\n\n\n3\nAfrica\nBotswana\n98.0\n101.0\n86.0\n87.0\n96.0\n99.0\n87.0\n89.0\n...\n39.0\n63.0\n97.0\n95.0\nNaN\nNaN\nNaN\nNaN\n77.0\n62.0\n\n\n5\nAfrica\nBurundi\n58.0\n66.0\n35.0\n30.0\n90.0\n88.0\n89.0\n85.0\n...\n42.0\n48.0\n93.0\n94.0\n55.0\n43.0\n53.8\n25.4\n91.0\n51.0\n\n\n\n\n5 rows × 47 columns\n\n\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.displot(data = wb, x = 'HIV rate', \\\n kde = True, stat = \"density\")\n\nplt.title(\"Distribution of HIV rates\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nNotice that the smooth KDE curve is higher when the histogram bins are taller. You can think of the height of the KDE curve as representing how “probable” it is that we randomly sample a datapoint with the corresponding value. 
This intuitively makes sense – if we have already collected more datapoints with a particular value (resulting in a tall histogram bin), it is more likely that, if we randomly sample another datapoint, we will sample one with a similar value (resulting in a high KDE curve).\nThe area under a probability density function should always integrate to 1, representing the fact that the total probability of a distribution should always sum to 100%. Hence, a KDE curve will always have an area under the curve of 1.\n\n\n8.1.2 Constructing a KDE\nWe perform kernel density estimation using three steps.\n\nPlace a kernel at each datapoint.\nNormalize the kernels to have a total area of 1 (across all kernels).\nSum the normalized kernels.\n\nWe’ll explain what a “kernel” is momentarily.\nTo make things simpler, let’s construct a KDE for a small, artificially generated dataset of 5 datapoints: \\([2.2, 2.8, 3.7, 5.3, 5.7]\\). In the plot below, each vertical bar represents one data point.\n\n\nCode\ndata = [2.2, 2.8, 3.7, 5.3, 5.7]\n\nsns.rugplot(data, height=0.3)\n\nplt.xlabel(\"Data\")\nplt.ylabel(\"Density\")\nplt.xlim(-3, 10)\nplt.ylim(0, 0.5);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nOur goal is to create the following KDE curve, which was generated automatically by sns.kdeplot.\n\n\nCode\nsns.kdeplot(data)\n\nplt.xlabel(\"Data\")\nplt.xlim(-3, 10)\nplt.ylim(0, 0.5);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n8.1.2.1 Step 1: Place a Kernel at Each Data Point\nTo begin generating a density curve, we need to choose a kernel and bandwidth value (\\(\\alpha\\)). What are these exactly?\nA kernel is a density curve. It is the mathematical function that attempts to capture the randomness of each data point in our sampled data. To explain what this means, consider just one of the datapoints in our dataset: \\(2.2\\). We obtained this datapoint by randomly sampling some information out in the real world (you can imagine \\(2.2\\) as representing a single measurement taken in an experiment, for example). If we were to sample a new datapoint, we may obtain a slightly different value. It could be higher than \\(2.2\\); it could also be lower than \\(2.2\\). We make the assumption that any future sampled datapoints will likely be similar in value to the data we’ve already drawn. This means that our kernel – our description of the probability of randomly sampling any new value – will be greatest at the datapoint we’ve already drawn but still have non-zero probability above and below it. The area under any kernel should integrate to 1, representing the total probability of drawing a new datapoint.\nA bandwidth value, usually denoted by \\(\\alpha\\), represents the width of the kernel. A large value of \\(\\alpha\\) will result in a wide, short kernel function, while a small value with result in a narrow, tall kernel.\nBelow, we place a Gaussian kernel, plotted in orange, over the datapoint \\(2.2\\). 
A Gaussian kernel is simply the normal distribution, which you may have called a bell curve in Data 8.\n\n\nCode\ndef gaussian_kernel(x, z, a):\n # We'll discuss where this mathematical formulation came from later\n return (1/np.sqrt(2*np.pi*a**2)) * np.exp((-(x - z)**2 / (2 * a**2)))\n\n# Plot our datapoint\nsns.rugplot([2.2], height=0.3)\n\n# Plot the kernel\nx = np.linspace(-3, 10, 1000)\nplt.plot(x, gaussian_kernel(x, 2.2, 1))\n\nplt.xlabel(\"Data\")\nplt.ylabel(\"Density\")\nplt.xlim(-3, 10)\nplt.ylim(0, 0.5);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nTo begin creating our KDE, we place a kernel on each datapoint in our dataset. For our dataset of 5 points, we will have 5 kernels.\n\n\nCode\n# You will work with the functions below in Lab 4\ndef create_kde(kernel, pts, a):\n # Takes in a kernel, set of points, and alpha\n # Returns the KDE as a function\n def f(x):\n output = 0\n for pt in pts:\n output += kernel(x, pt, a)\n return output / len(pts) # Normalization factor\n return f\n\ndef plot_kde(kernel, pts, a):\n # Calls create_kde and plots the corresponding KDE\n f = create_kde(kernel, pts, a)\n x = np.linspace(min(pts) - 5, max(pts) + 5, 1000)\n y = [f(xi) for xi in x]\n plt.plot(x, y);\n \ndef plot_separate_kernels(kernel, pts, a, norm=False):\n # Plots individual kernels, which are then summed to create the KDE\n x = np.linspace(min(pts) - 5, max(pts) + 5, 1000)\n for pt in pts:\n y = kernel(x, pt, a)\n if norm:\n y /= len(pts)\n plt.plot(x, y)\n \n plt.show();\n \nplt.xlim(-3, 10)\nplt.ylim(0, 0.5)\nplt.xlabel(\"Data\")\nplt.ylabel(\"Density\")\n\nplot_separate_kernels(gaussian_kernel, data, a = 1)\n\n\n\n\n\n\n\n\n\n\n\n8.1.2.2 Step 2: Normalize Kernels to Have a Total Area of 1\nAbove, we said that each kernel has an area of 1. Earlier, we also said that our goal is to construct a KDE curve using these kernels with a total area of 1. If we were to directly sum the kernels as they are, we would produce a KDE curve with an integrated area of (5 kernels) \\(\\times\\) (area of 1 each) = 5. To avoid this, we will normalize each of our kernels. This involves multiplying each kernel by \\(\\frac{1}{\\#\\:\\text{datapoints}}\\).\nIn the cell below, we multiply each of our 5 kernels by \\(\\frac{1}{5}\\) to apply normalization.\n\n\nCode\nplt.xlim(-3, 10)\nplt.ylim(0, 0.5)\nplt.xlabel(\"Data\")\nplt.ylabel(\"Density\")\n\n# The `norm` argument specifies whether or not to normalize the kernels\nplot_separate_kernels(gaussian_kernel, data, a = 1, norm = True)\n\n\n\n\n\n\n\n\n\n\n\n8.1.2.3 Step 3: Sum the Normalized Kernels\nOur KDE curve is the sum of the normalized kernels. Notice that the final curve is identical to the plot generated by sns.kdeplot we saw earlier!\n\n\nCode\nplt.xlim(-3, 10)\nplt.ylim(0, 0.5)\nplt.xlabel(\"Data\")\nplt.ylabel(\"Density\")\n\nplot_kde(gaussian_kernel, data, a = 1)\n\n\n\n\n\n\n\n\n\n\n\n\n8.1.3 Kernel Functions and Bandwidths\n\n\n\nA general “KDE formula” function is given above.\n\n\\(K_{\\alpha}(x, x_i)\\) is the kernel centered on the observation i.\n\nEach kernel individually has area 1.\nx represents any number on the number line. 
It is the input to our function.\n\n\\(n\\) is the number of observed datapoints that we have.\n\nWe multiply by \\(\\frac{1}{n}\\) so that the total area of the KDE is still 1.\n\nEach \\(x_i \\in \\{x_1, x_2, \\dots, x_n\\}\\) represents an observed datapoint.\n\nThese are what we use to create our KDE by summing multiple shifted kernels centered at these points.\n\n\n\n\\(\\alpha\\) (alpha) is the bandwidth or smoothing parameter.\n\nA kernel (for our purposes) is a valid density function. This means it:\n\nMust be non-negative for all inputs.\nMust integrate to 1.\n\n\n8.1.3.1 Gaussian Kernel\nThe most common kernel is the Gaussian kernel. The Gaussian kernel is equivalent to the Gaussian probability density function (the Normal distribution), centered at the observed value with a standard deviation of (this is known as the bandwidth parameter).\n\\[K_a(x, x_i) = \\frac{1}{\\sqrt{2\\pi\\alpha^{2}}}e^{-\\frac{(x-x_i)^{2}}{2\\alpha^{2}}}\\]\nIn this formula:\n\n\\(x\\) (no subscript) represents any value along the x-axis of our plot\n\\(x_i\\) represents the \\(i\\) -th datapoint in our dataset. It is one of the values that we have actually collected in our data sampling process. In our example earlier, \\(x_i=2.2\\). Those of you who have taken a probability class may recognize \\(x_i\\) as the mean of the normal distribution.\nEach kernel is centered on our observed values, so its distribution mean is \\(x_i\\).\n\\(\\alpha\\) is the bandwidth parameter, representing the width of our kernel. More formally, \\(\\alpha\\) is the standard deviation of the Gaussian curve.\n\nA large value of \\(\\alpha\\) will produce a kernel that is wider and shorter – this leads to a smoother KDE when the kernels are summed together.\nA small value of \\(\\alpha\\) will produce a narrower, taller kernel, and, with it, a noisier KDE.\n\n\nThe details of this (admittedly intimidating) formula are less important than understanding its role in kernel density estimation – this equation gives us the shape of each kernel.\n\n\n\n\n\n\n\nGaussian Kernel, \\(\\alpha\\) = 0.1\nGaussian Kernel, \\(\\alpha\\) = 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nGaussian Kernel, \\(\\alpha\\) = 2\nGaussian Kernel, \\(\\alpha\\) = 10\n\n\n\n\n\n\n\n\n\n\n\n8.1.3.2 Boxcar Kernel\nAnother example of a kernel is the Boxcar kernel. The boxcar kernel assigns a uniform density to points within a “window” of the observation, and a density of 0 elsewhere. 
The equation below is a boxcar kernel with the center at \\(x_i\\) and the bandwidth of \\(\\alpha\\).\n\\[K_a(x, x_i) = \\begin{cases}\n \\frac{1}{\\alpha}, & |x - x_i| \\le \\frac{\\alpha}{2}\\\\\n 0, & \\text{else }\n \\end{cases}\\]\nThe boxcar kernel is seldom used in practice – we include it here to demonstrate that a kernel function can take whatever form you would like, provided it integrates to 1 and does not output negative values.\n\n\nCode\ndef boxcar_kernel(alpha, x, z):\n return (((x-z)>=-alpha/2)&((x-z)<=alpha/2))/alpha\n\nxs = np.linspace(-5, 5, 200)\nalpha=1\nkde_curve = [boxcar_kernel(alpha, x, 0) for x in xs]\nplt.plot(xs, kde_curve);\n\n\n\n\n\nThe Boxcar kernel centered at 0 with bandwidth \\(\\alpha\\) = 1.\n\n\n\n\nThe diagram on the right is how the density curve for our 5 point dataset would have looked had we used the Boxcar kernel with bandwidth \\(\\alpha\\) = 1.\n\n\n\n\n\n\n\nKDE\nBoxcar", - "crumbs": [ - "8  Visualization II" - ] - }, - { - "objectID": "visualization_2/visualization_2.html#diving-deeper-into-displot", - "href": "visualization_2/visualization_2.html#diving-deeper-into-displot", - "title": "8  Visualization II", - "section": "8.2 Diving Deeper into displot", - "text": "8.2 Diving Deeper into displot\nAs we saw earlier, we can use seaborn’s displot function to plot various distributions. In particular, displot allows you to specify the kind of plot and is a wrapper for histplot, kdeplot, and ecdfplot.\nBelow, we can see a couple of examples of how sns.displot can be used to plot various distributions.\nFirst, we can plot a histogram by setting kind to \"hist\". Note that here we’ve specified stat = density to normalize the histogram such that the area under the histogram is equal to 1.\n\nsns.displot(data=wb, \n x=\"gni\", \n kind=\"hist\", \n stat=\"density\") # default: stat=count and density integrates to 1\nplt.title(\"Distribution of gross national income per capita\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nNow, what if we want to generate a KDE plot? We can set kind = to \"kde\"!\n\nsns.displot(data=wb, \n x=\"gni\", \n kind='kde')\nplt.title(\"Distribution of gross national income per capita\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nAnd finally, if we want to generate an Empirical Cumulative Distribution Function (ECDF), we can specify kind = \"ecdf\".\n\nsns.displot(data=wb, \n x=\"gni\", \n kind='ecdf')\nplt.title(\"Cumulative Distribution of gross national income per capita\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. 
Convert inf values to NaN before operating instead.", - "crumbs": [ - "8  Visualization II" - ] - }, - { - "objectID": "visualization_2/visualization_2.html#relationships-between-quantitative-variables", - "href": "visualization_2/visualization_2.html#relationships-between-quantitative-variables", - "title": "8  Visualization II", - "section": "8.3 Relationships Between Quantitative Variables", - "text": "8.3 Relationships Between Quantitative Variables\nUp until now, we’ve discussed how to visualize single-variable distributions. Going beyond this, we want to understand the relationship between pairs of numerical variables.\n\n8.3.0.1 Scatter Plots\nScatter plots are one of the most useful tools in representing the relationship between pairs of quantitative variables. They are particularly important in gauging the strength, or correlation, of the relationship between variables. Knowledge of these relationships can then motivate decisions in our modeling process.\nIn matplotlib, we use the function plt.scatter to generate a scatter plot. Notice that, unlike our examples of plotting single-variable distributions, now we specify sequences of values to be plotted along the x-axis and the y-axis.\n\nplt.scatter(wb[\"per capita: % growth: 2016\"], \\\n wb['Adult literacy rate: Female: % ages 15 and older: 2005-14'])\n\nplt.xlabel(\"% growth per capita\")\nplt.ylabel(\"Female adult literacy rate\")\nplt.title(\"Female adult literacy against % growth\");\n\n\n\n\n\n\n\n\nIn seaborn, we call the function sns.scatterplot. We use the x and y parameters to indicate the values to be plotted along the x and y axes, respectively. By using the hue parameter, we can specify a third variable to be used for coloring each scatter point.\n\nsns.scatterplot(data = wb, x = \"per capita: % growth: 2016\", \\\n y = \"Adult literacy rate: Female: % ages 15 and older: 2005-14\", \n hue = \"Continent\")\n\nplt.title(\"Female adult literacy against % growth\");\n\n\n\n\n\n\n\n\n\n8.3.0.1.1 Overplotting\nAlthough the plots above communicate the general relationship between the two plotted variables, they both suffer a major limitation – overplotting. Overplotting occurs when scatter points with similar values are stacked on top of one another, making it difficult to see the number of scatter points actually plotted in the visualization. Notice how in the upper righthand region of the plots, we cannot easily tell just how many points have been plotted. This makes our visualizations difficult to interpret.\nWe have a few methods to help reduce overplotting:\n\nDecreasing the size of the scatter point markers can improve readability. We do this by setting a new value to the size parameter, s, of plt.scatter or sns.scatterplot.\nJittering is the process of adding a small amount of random noise to all x and y values to slightly shift the position of each datapoint. By randomly shifting all the data by some small distance, we can discern individual points more clearly without modifying the major trends of the original dataset.\n\nIn the cell below, we first jitter the data using np.random.uniform, then re-plot it with smaller markers. 
The resulting plot is much easier to interpret.\n\n# Setting a seed ensures that we produce the same plot each time\n# This means that the course notes will not change each time you access them\nnp.random.seed(150)\n\n# This call to np.random.uniform generates random numbers between -1 and 1\n# We add these random numbers to the original x data to jitter it slightly\nx_noise = np.random.uniform(-1, 1, len(wb))\njittered_x = wb[\"per capita: % growth: 2016\"] + x_noise\n\n# Repeat for y data\ny_noise = np.random.uniform(-5, 5, len(wb))\njittered_y = wb[\"Adult literacy rate: Female: % ages 15 and older: 2005-14\"] + y_noise\n\n# Setting the size parameter `s` changes the size of each point\nplt.scatter(jittered_x, jittered_y, s=15)\n\nplt.xlabel(\"% growth per capita (jittered)\")\nplt.ylabel(\"Female adult literacy rate (jittered)\")\nplt.title(\"Female adult literacy against % growth\");\n\n\n\n\n\n\n\n\n\n\n\n8.3.0.2 lmplot and jointplot\nseaborn also includes several built-in functions for creating more sophisticated scatter plots. Two of the most commonly used examples are sns.lmplot and sns.jointplot.\nsns.lmplot plots both a scatter plot and a linear regression line, all in one function call. We’ll discuss linear regression in a few lectures.\n\nsns.lmplot(data = wb, x = \"per capita: % growth: 2016\", \\\n y = \"Adult literacy rate: Female: % ages 15 and older: 2005-14\")\n\nplt.title(\"Female adult literacy against % growth\");\n\n\n\n\n\n\n\n\nsns.jointplot creates a visualization with three components: a scatter plot, a histogram of the distribution of x values, and a histogram of the distribution of y values.\n\nsns.jointplot(data = wb, x = \"per capita: % growth: 2016\", \\\n y = \"Adult literacy rate: Female: % ages 15 and older: 2005-14\")\n\n# plt.suptitle allows us to shift the title up so it does not overlap with the histogram\nplt.suptitle(\"Female adult literacy against % growth\")\nplt.subplots_adjust(top=0.9);\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n\n8.3.0.3 Hex plots\nFor datasets with a very large number of datapoints, jittering is unlikely to fully resolve the issue of overplotting. In these cases, we can attempt to visualize our data by its density, rather than displaying each individual datapoint.\nHex plots can be thought of as two-dimensional histograms that show the joint distribution between two variables. This is particularly useful when working with very dense data. In a hex plot, the x-y plane is binned into hexagons. 
Hexagons that are darker in color indicate a greater density of data – that is, there are more data points that lie in the region enclosed by the hexagon.\nWe can generate a hex plot using sns.jointplot modified with the kind parameter.\n\nsns.jointplot(data = wb, x = \"per capita: % growth: 2016\", \\\n y = \"Adult literacy rate: Female: % ages 15 and older: 2005-14\", \\\n kind = \"hex\")\n\n# plt.suptitle allows us to shift the title up so it does not overlap with the histogram\nplt.suptitle(\"Female adult literacy against % growth\")\nplt.subplots_adjust(top=0.9);\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n\n8.3.0.4 Contour Plots\nContour plots are an alternative way of plotting the joint distribution of two variables. You can think of them as the 2-dimensional versions of KDE plots. A contour plot can be interpreted in a similar way to a topographic map. Each contour line represents an area that has the same density of datapoints throughout the region. Contours marked with darker colors contain more datapoints (a higher density) in that region.\nsns.kdeplot will generate a contour plot if we specify both x and y data.\n\nsns.kdeplot(data = wb, x = \"per capita: % growth: 2016\", \\\n y = \"Adult literacy rate: Female: % ages 15 and older: 2005-14\", \\\n fill = True)\n\nplt.title(\"Female adult literacy against % growth\");\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.", - "crumbs": [ - "8  Visualization II" - ] - }, - { - "objectID": "visualization_2/visualization_2.html#transformations", - "href": "visualization_2/visualization_2.html#transformations", - "title": "8  Visualization II", - "section": "8.4 Transformations", - "text": "8.4 Transformations\nWe have now covered visualizations in great depth, looking into various forms of visualizations, plotting libraries, and high-level theory.\nMuch of this was done to uncover insights in data, which will prove necessary when we begin building models of data later in the course. A strong graphical correlation between two variables hints at an underlying relationship that we may want to study in greater detail. However, relying on visual relationships alone is limiting - not all plots show association. The presence of outliers and other statistical anomalies makes it hard to interpret data.\nTransformations are the process of manipulating data to find significant relationships between variables. 
These are often found by applying mathematical functions to variables that “transform” their range of possible values and highlight some previously hidden associations between data.\nTo see why we may want to transform data, consider the following plot of adult literacy rates against gross national income.\n\n\nCode\n# Some data cleaning to help with the next example\ndf = pd.DataFrame(index=wb.index)\ndf['lit'] = wb['Adult literacy rate: Female: % ages 15 and older: 2005-14'] \\\n + wb[\"Adult literacy rate: Male: % ages 15 and older: 2005-14\"]\ndf['inc'] = wb['gni']\ndf.dropna(inplace=True)\n\nplt.scatter(df[\"inc\"], df[\"lit\"])\nplt.xlabel(\"Gross national income per capita\")\nplt.ylabel(\"Adult literacy rate\")\nplt.title(\"Adult literacy rate against GNI per capita\");\n\n\n\n\n\n\n\n\n\nThis plot is difficult to interpret for two reasons:\n\nThe data shown in the visualization appears almost “smushed” – it is heavily concentrated in the upper lefthand region of the plot. Even if we jittered the dataset, we likely would not be able to fully assess all datapoints in that area.\nIt is hard to generalize a clear relationship between the two plotted variables. While adult literacy rate appears to share some positive relationship with gross national income, we are not able to describe the specifics of this trend in much detail.\n\nA transformation would allow us to visualize this data more clearly, which, in turn, would enable us to describe the underlying relationship between our variables of interest.\nWe will most commonly apply a transformation to linearize a relationship between variables. If we find a transformation to make a scatter plot of two variables linear, we can “backtrack” to find the exact relationship between the variables. This helps us in two major ways. Firstly, linear relationships are particularly simple to interpret – we have an intuitive sense of what the slope and intercept of a linear trend represent, and how they can help us understand the relationship between two variables. Secondly, linear relationships are the backbone of linear models. We will begin exploring linear modeling in great detail next week. As we’ll soon see, linear models become much more effective when we are working with linearized data.\nIn the remainder of this note, we will discuss how to linearize a dataset to produce the result below. Notice that the resulting plot displays a rough linear relationship between the values plotted on the x and y axes.\n\n\n8.4.1 Linearization and Applying Transformations\nTo linearize a relationship, begin by asking yourself: what makes the data non-linear? It is helpful to repeat this question for each variable in your visualization.\nLet’s start by considering the gross national income variable in our plot above. Looking at the y values in the scatter plot, we can see that many large y values are all clumped together, compressing the vertical axis. The scale of the horizontal axis is also being distorted by the few large outlying x values on the right.\n\nIf we decreased the size of these outliers relative to the bulk of the data, we could reduce the distortion of the horizontal axis. How can we do this? We need a transformation that will:\n\nDecrease the magnitude of large x values by a significant amount.\nNot drastically change the magnitude of small x values.\n\nOne function that produces this result is the log transformation. When we take the logarithm of a large number, the original number will decrease in magnitude dramatically. 
Conversely, when we take the logarithm of a small number, the original number does not change its value by as significant of an amount (to illustrate this, consider the difference between \\(\\log{(100)} = 4.61\\) and \\(\\log{(10)} = 2.3\\)).\nIn Data 100 (and most upper-division STEM classes), \\(\\log\\) is used to refer to the natural logarithm with base \\(e\\).\n\n# np.log takes the logarithm of an array or Series\nplt.scatter(np.log(df[\"inc\"]), df[\"lit\"])\n\nplt.xlabel(\"Log(gross national income per capita)\")\nplt.ylabel(\"Adult literacy rate\")\nplt.title(\"Adult literacy rate against Log(GNI per capita)\");\n\n\n\n\n\n\n\n\nAfter taking the logarithm of our x values, our plot appears much more balanced in its horizontal scale. We no longer have many datapoints clumped on one end and a few outliers out at extreme values.\nLet’s repeat this reasoning for the y values. Considering only the vertical axis of the plot, notice how there are many datapoints concentrated at large y values. Only a few datapoints lie at smaller values of y.\nIf we were to “spread out” these large values of y more, we would no longer see the dense concentration in one region of the y-axis. We need a transformation that will:\n\nIncrease the magnitude of large values of y so these datapoints are distributed more broadly on the vertical scale,\nNot substantially alter the scaling of small values of y (we do not want to drastically modify the lower end of the y axis, which is already distributed evenly on the vertical scale).\n\nIn this case, it is helpful to apply a power transformation – that is, raise our y values to a power. Let’s try raising our adult literacy rate values to the power of 4. Large values raised to the power of 4 will increase in magnitude proportionally much more than small values raised to the power of 4 (consider the difference between \\(2^4 = 16\\) and \\(200^4 = 1600000000\\)).\n\n# Apply a log transformation to the x values and a power transformation to the y values\nplt.scatter(np.log(df[\"inc\"]), df[\"lit\"]**4)\n\nplt.xlabel(\"Log(gross national income per capita)\")\nplt.ylabel(\"Adult literacy rate (4th power)\")\nplt.suptitle(\"Adult literacy rate (4th power) against Log(GNI per capita)\")\nplt.subplots_adjust(top=0.9);\n\n\n\n\n\n\n\n\nOur scatter plot is looking a lot better! Now, we are plotting the log of our original x values on the horizontal axis, and the 4th power of our original y values on the vertical axis. We start to see an approximate linear relationship between our transformed variables.\nWhat can we take away from this? We now know that the log of gross national income and adult literacy to the power of 4 are roughly linearly related. If we denote the original, untransformed gross national income values as \\(x\\) and the original adult literacy rate values as \\(y\\), we can use the standard form of a linear fit to express this relationship:\n\\[y^4 = m(\\log{x}) + b\\]\nWhere \\(m\\) represents the slope of the linear fit, while \\(b\\) represents the intercept.\nThe cell below computes \\(m\\) and \\(b\\) for our transformed data. We’ll discuss how this code was generated in a future lecture.\n\n\nCode\n# The code below fits a linear regression model. 
We'll discuss it at length in a future lecture\nfrom sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression()\nmodel.fit(np.log(df[[\"inc\"]]), df[\"lit\"]**4)\nm, b = model.coef_[0], model.intercept_\n\nprint(f\"The slope, m, of the transformed data is: {m}\")\nprint(f\"The intercept, b, of the transformed data is: {b}\")\n\ndf = df.sort_values(\"inc\")\nplt.scatter(np.log(df[\"inc\"]), df[\"lit\"]**4, label=\"Transformed data\")\nplt.plot(np.log(df[\"inc\"]), m*np.log(df[\"inc\"])+b, c=\"red\", label=\"Linear regression\")\nplt.xlabel(\"Log(gross national income per capita)\")\nplt.ylabel(\"Adult literacy rate (4th power)\")\nplt.legend();\n\n\nThe slope, m, of the transformed data is: 336400693.43172693\nThe intercept, b, of the transformed data is: -1802204836.0479977\n\n\n\n\n\n\n\n\n\nWhat if we want to understand the underlying relationship between our original variables, before they were transformed? We can simply rearrange our linear expression above!\nRecall our linear relationship between the transformed variables \\(\\log{x}\\) and \\(y^4\\).\n\\[y^4 = m(\\log{x}) + b\\]\nBy rearranging the equation, we find a relationship between the untransformed variables \\(x\\) and \\(y\\).\n\\[y = [m(\\log{x}) + b]^{(1/4)}\\]\nWhen we plug in the values for \\(m\\) and \\(b\\) computed above, something interesting happens.\n\n\nCode\n# Now, plug the values for m and b into the relationship between the untransformed x and y\nplt.scatter(df[\"inc\"], df[\"lit\"], label=\"Untransformed data\")\nplt.plot(df[\"inc\"], (m*np.log(df[\"inc\"])+b)**(1/4), c=\"red\", label=\"Modeled relationship\")\nplt.xlabel(\"Gross national income per capita\")\nplt.ylabel(\"Adult literacy rate\")\nplt.legend();\n\n\n\n\n\n\n\n\n\nWe have found a relationship between our original variables – gross national income and adult literacy rate!\nTransformations are powerful tools for understanding our data in greater detail. To summarize what we just achieved:\n\nWe identified appropriate transformations to linearize the original data.\nWe used our knowledge of linear curves to compute the slope and intercept of the transformed data.\nWe used this slope and intercept information to derive a relationship in the untransformed data.\n\nLinearization will be an important tool as we begin our work on linear modeling next week.\n\n8.4.1.1 Tukey-Mosteller Bulge Diagram\nThe Tukey-Mosteller Bulge Diagram is a good guide when determining possible transformations to achieve linearity. It is a visual summary of the reasoning we just worked through above.\n\nHow does it work? Each curved “bulge” represents a possible shape of non-linear data. To use the diagram, find which of the four bulges resembles your dataset the most closely. Then, look at the axes of the quadrant for this bulge. The horizontal axis will list possible transformations that could be applied to your x data for linearization. Similarly, the vertical axis will list possible transformations that could be applied to your y data. Note that each axis lists two possible transformations. While either of these transformations has the potential to linearize your dataset, note that this is an iterative process. It’s important to try out these transformations and look at the results to see whether you’ve actually achieved linearity. 
If not, you’ll need to continue testing other possible transformations.\nGenerally:\n\n\\(\\sqrt{}\\) and \\(\\log{}\\) will reduce the magnitude of large values.\nPowers (\\(^2\\) and \\(^3\\)) will increase the spread in magnitude of large values.\n\n\n\nImportant: You should still understand the logic we worked through to determine how best to transform the data. The bulge diagram is just a summary of this same reasoning. You will be expected to be able to explain why a given transformation is or is not appropriate for linearization.\n\n\n\n8.4.2 Additional Remarks\nVisualization requires a lot of thought!\n\nThere are many tools for visualizing distributions.\n\nDistribution of a single variable:\n\nRugplot\nHistogram\nDensity plot\nBox plot\nViolin plot\n\nJoint distribution of two quantitative variables:\n\nScatter plot\nHex plot\nContour plot\n\n\n\nThis class primarily uses seaborn and matplotlib, but pandas also has basic built-in plotting methods. Many other visualization libraries exist, and plotly is one of them.\n\nplotly makes it very easy to create interactive plots.\nplotly will occasionally appear in lecture code, labs, and assignments!\n\nNext, we’ll go deeper into the theory behind visualization.", "crumbs": [ "8  Visualization II" ] }, { "objectID": "visualization_2/visualization_2.html#visualization-theory", "href": "visualization_2/visualization_2.html#visualization-theory", "title": "8  Visualization II", "section": "8.5 Visualization Theory", "text": "8.5 Visualization Theory\nThis section marks a pivot to the second major topic of this lecture - visualization theory. We’ll discuss the abstract nature of visualizations and analyze how they convey information.\nRemember, we had two goals for visualizing data. This section is particularly important in:\n\nHelping us understand the data and results,\nCommunicating our results and conclusions with others.\n\n\n8.5.1 Information Channels\nVisualizations are able to convey information through various encodings. In the remainder of this lecture, we’ll look at the use of color, scale, and depth, to name a few.\n\n8.5.1.1 Encodings in Rugplots\nOne detail that we may have overlooked in our earlier discussion of rugplots is the importance of encodings. Rugplots are effective visuals because they utilize line thickness to encode frequency. Consider the following diagram:\n\n\n\n8.5.1.2 Multi-Dimensional Encodings\nEncodings are also useful for representing multi-dimensional data. Notice how the following visual highlights four distinct “dimensions” of data:\n\nX-axis\nY-axis\nArea\nColor\n\n\nThe human visual perception system is only capable of visualizing data in three-dimensional space, but as you’ve seen, we can encode many more channels of information.\n\n\n\n8.5.2 Harnessing the Axes\n\n8.5.2.1 Consider the Scale of the Data\nWe should be careful not to misrepresent relationships in our data by manipulating the scale or axes. The visualization below improperly portrays two seemingly independent relationships on the same plot. The authors have clearly changed the scale of the y-axis to mislead their audience.\n\nNotice how the downward-trending line segment contains values in the millions, while the upward-trending segment only contains values near three hundred thousand. These lines should not be intersecting.\nWhen there is a large difference in the magnitude of the data, it’s advised to analyze percentages instead of counts. 
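As a quick sketch of this idea (with entirely made-up counts and assuming pandas is imported as pd with a matplotlib backend available), rescaling each series to a percentage of its own starting value puts two very differently sized quantities on a comparable scale before plotting:

# Two hypothetical count series with very different magnitudes
a = pd.Series({2006: 2_000_000, 2010: 1_900_000, 2013: 1_800_000})
b = pd.Series({2006: 290_000, 2010: 320_000, 2013: 330_000})

# Express each series as a percentage of its own 2006 value, then plot the trends together
pd.DataFrame({
    "Series A (% of 2006 level)": 100 * a / a[2006],
    "Series B (% of 2006 level)": 100 * b / b[2006],
}).plot(marker="o");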
The following diagrams correctly display the trends in cancer screening and abortion rates.\n\n\n\n\n\n\n\n\n\n\n\n8.5.2.2 Reveal the Data\nGreat visualizations not only consider the scale of the data but also utilize the axes in a way that best conveys information. For example, data scientists commonly set certain axes limits to highlight parts of the visualization they are most interested in.\n\n\n\n\n\n\n\n\n\nThe visualization on the right captures the trend in coronavirus cases during March of 2020. From only looking at the visualization on the left, a viewer may incorrectly believe that coronavirus began to skyrocket on March 4th, 2020. However, the second illustration tells a different story - cases rose closer to March 21th, 2020.\n\n\n\n8.5.3 Harnessing Color\nColor is another important feature in visualizations that does more than what meets the eye.\nWe already explored using color to encode a categorical variable in our scatter plot. Let’s now discuss the uses of color in novel visualizations like colormaps and heatmaps.\n5-8% of the world is red-green color blind, so we have to be very particular about our color scheme. We want to make these as accessible as possible. Choosing a set of colors that work together is evidently a challenging task!\n\n8.5.3.1 Colormaps\nColormaps are mappings from pixel data to color values, and they’re often used to highlight distinct parts of an image. Let’s investigate a few properties of colormaps.\n\n\nJet Colormap \n\n\n\nViridis Colormap \n\n\nThe jet colormap is infamous for being misleading. While it seems more vibrant than viridis, the aggressive colors poorly encode numerical data. To understand why, let’s analyze the following images.\n\n\n\n\n\n\n\n\n\nThe diagram on the left compares how a variety of colormaps represent pixel data that transitions from a high to low intensity. These include the jet colormap (row a) and grayscale (row b). Notice how the grayscale images do the best job in smoothly transitioning between pixel data. The jet colormap is the worst at this - the four images in row (a) look like a conglomeration of individual colors.\nThe difference is also evident in the images labeled (a) and (b) on the left side. The grayscale image is better at preserving finer detail in the vertical line strokes. Additionally, grayscale is preferred in X-ray scans for being more neutral. The intensity of the dark red color in the jet colormap is frightening and indicates something is wrong.\nWhy is the jet colormap so much worse? The answer lies in how its color composition is perceived to the human eye.\n\n\nJet Colormap Perception \n\n\n\nViridis Colormap Perception \n\n\nThe jet colormap is largely misleading because it is not perceptually uniform. Perceptually uniform colormaps have the property that if the pixel data goes from 0.1 to 0.2, the perceptual change is the same as when the data goes from 0.8 to 0.9.\nNotice how the said uniformity is present within the linear trend displayed in the viridis colormap. On the other hand, the jet colormap is largely non-linear - this is precisely why it’s considered a worse colormap.\n\n\n\n8.5.4 Harnessing Markings\nIn our earlier discussion of multi-dimensional encodings, we analyzed a scatter plot with four pseudo-dimensions: the two axes, area, and color. Were these appropriate to use? The following diagram analyzes how well the human eye can distinguish between these “markings”.\n\nThere are a few key takeaways from this diagram\n\nLengths are easy to discern. 
Don’t use plots with jiggled baselines - keep everything axis-aligned.\nAvoid pie charts! Angle judgments are inaccurate.\nAreas and volumes are hard to distinguish (area charts, word clouds, etc.).\n\n\n\n8.5.5 Harnessing Conditioning\nConditioning is the process of comparing data that belong to separate groups. We’ve seen this before in overlayed distributions, side-by-side box plots, and scatter plots with categorical encodings. Here, we’ll introduce terminology that formalizes these examples.\nConsider an example where we want to analyze income earnings for males and females with varying levels of education. There are multiple ways to compare this data.\n\n\n\n\n\n\n\n\n\nThe barplot is an example of juxtaposition: placing multiple plots side by side, with the same scale. The scatter plot is an example of superposition: placing multiple density curves and scatter plots on top of each other.\nWhich is better depends on the problem at hand. Here, superposition makes the precise wage difference very clear from a quick glance. However, many sophisticated plots convey information that favors the use of juxtaposition. Below is one example.\n\n\n\n8.5.6 Harnessing Context\nThe last component of a great visualization is perhaps the most critical - the use of context. Adding informative titles, axis labels, and descriptive captions are all best practices that we’ve heard repeatedly in Data 8.\nA publication-ready plot (and every Data 100 plot) needs:\n\nInformative title (takeaway, not description),\nAxis labels,\nReference lines, markers, etc,\nLegends, if appropriate,\nCaptions that describe data,\n\nCaptions should:\n\nBe comprehensive and self-contained,\nDescribe what has been graphed,\nDraw attention to important features,\nDescribe conclusions drawn from graphs.", - "crumbs": [ - "8  Visualization II" - ] - }, - { - "objectID": "sampling/sampling.html", - "href": "sampling/sampling.html", - "title": "9  Sampling", - "section": "", - "text": "9.1 Censuses and Surveys\nIn general: a census is “a complete count or survey of a population, typically recording various details of individuals.” An example is the U.S. Decennial Census which was held in April 2020. It counts every person living in all 50 states, DC, and US territories, not just citizens. Participation is required by law (it is mandated by the U.S. Constitution). Important uses include the allocation of Federal funds, congressional representation, and drawing congressional and state legislative districts. The census is composed of a survey mailed to different housing addresses in the United States.\nA survey is a set of questions. An example is workers sampling individuals and households. What is asked and how it is asked can affect how the respondent answers or even whether or not they answer in the first place.\nWhile censuses are great, it is often very difficult and expensive to survey everyone in a population. Imagine the amount of resources, money, time, and energy the U.S. spent on the 2020 Census. While this does give us more accurate information about the population, it’s often infeasible to execute. Thus, we usually survey a subset of the population instead.\nA sample is (usually) a subset of the population that is often used to make inferences about the population. If our sample is a good representation of our population, then we can use it to glean useful information at a lower cost. That being said, how the sample is drawn will affect the reliability of such inferences. 
Two common sources of error in sampling are chance error, where random samples can vary from what is expected in any direction, and bias, which is a systematic error in one direction. Biases can be the result of many things, for example, our sampling scheme or survey methods.\nLet’s define some useful vocabulary:\nWhile ideally, these three sets would be exactly the same, they usually aren’t in practice. For example, there may be individuals in your sampling frame (and hence, your sample) that are not in your population. And generally, sample sizes are much smaller than population sizes.", - "crumbs": [ - "9  Sampling" - ] - }, - { - "objectID": "sampling/sampling.html#censuses-and-surveys", - "href": "sampling/sampling.html#censuses-and-surveys", - "title": "9  Sampling", - "section": "", - "text": "Population: The group that you want to learn something about.\n\nIndividuals in a population are not always people. Other populations include bacteria in your gut (sampled using DNA sequencing), trees of a certain species, small businesses receiving a microloan, or published results in an academic journal or field.\n\nSampling Frame: The list from which the sample is drawn.\n\nFor example, if sampling people, then the sampling frame is the set of all people that could possibly end up in your sample.\n\nSample: Who you actually end up sampling. The sample is therefore a subset of your sampling frame.\n\n\n\n\n\nSampling_Frames", - "crumbs": [ - "9  Sampling" - ] - }, - { - "objectID": "sampling/sampling.html#bias-a-case-study", - "href": "sampling/sampling.html#bias-a-case-study", - "title": "9  Sampling", - "section": "9.2 Bias: A Case Study", - "text": "9.2 Bias: A Case Study\nThe following case study is adapted from Statistics by Freedman, Pisani, and Purves, W.W. Norton NY, 1978.\nIn 1936, President Franklin D. Roosevelt (Democratic) went up for re-election against Alf Landon (Republican). As is usual, polls were conducted in the months leading up to the election to try and predict the outcome. The Literary Digest was a magazine that had successfully predicted the outcome of 5 general elections coming into 1936. In their polling for the 1936 election, they sent out their survey to 10 million individuals whom they found from phone books, lists of magazine subscribers, and lists of country club members. Of the roughly 2.4 million people who filled out the survey, only 43% reported they would vote for Roosevelt; thus, the Digest predicted that Landon would win.\nOn election day, Roosevelt won in a landslide, winning 61% of the popular vote of about 45 million voters. How could the Digest have been so wrong with their polling?\nIt turns out that the Literary Digest sample was not representative of the population. Their sampling frame of people found in phone books, lists of magazine subscribers, and lists of country club members were more affluent and tended to vote Republican. As such, their sampling frame was inherently skewed in Landon’s favor. The Literary Digest completely overlooked the lion’s share of voters who were still suffering through the Great Depression. Furthermore, they had a dismal response rate (about 24%); who knows how the other non-respondents would have polled? The Digest folded just 18 months after this disaster.\nAt the same time, George Gallup, a rising statistician, also made predictions about the 1936 elections. 
Despite having a smaller sample size of “only” 50,000 (this is still more than necessary; more when we cover the Central Limit Theorem), his estimate that 56% of voters would choose Roosevelt was much closer to the actual result (61%). Gallup also predicted the Digest’s prediction within 1% with a sample size of only 3000 people by anticipating the Digest’s affluent sampling frame and subsampling those individuals.\nSo what’s the moral of the story? Samples, while convenient, are subject to chance error and bias. Election polling, in particular, can involve many sources of bias. To name a few:\n\nSelection bias systematically excludes (or favors) particular groups.\n\nExample: the Literary Digest poll excludes people not in phone books.\nHow to avoid: Examine the sampling frame and the method of sampling.\n\nResponse bias occurs because people don’t always respond truthfully. Survey designers pay special detail to the nature and wording of questions to avoid this type of bias.\n\nExample: Illegal immigrants might not answer truthfully when asked citizenship questions on the census survey.\nHow to avoid: Examine the nature of questions and the method of surveying. Randomized response - flip a coin and answer yes if heads or answer truthfully if tails.\n\nNon-response bias occurs because people don’t always respond to survey requests, which can skew responses.\n\nExample: Only 2.4m out of 10m people responded to the Literary Digest’s poll.\nHow to avoid: Keep surveys short, and be persistent.\n\n\nRandomized Response\nSuppose you want to ask someone a sensitive question: “Have you ever cheated on an exam?” An individual may be embarrassed or afraid to answer truthfully and might lie or not answer the question. One solution is to leverage a randomized response:\nFirst, you can ask the individual to secretly flip a fair coin; you (the surveyor) don’t know the outcome of the coin flip.\nThen, you ask them to answer “Yes” if the coin landed heads and to answer truthfully if the coin landed tails.\nThe surveyor doesn’t know if the “Yes” means that the person cheated or if it means that the coin landed heads. The individual’s sensitive information remains secret. However, if the response is “No”, then the surveyor knows the individual didn’t cheat. We assume the individual is comfortable revealing this information.\nGenerally, we can assume that the coin lands heads 50% of the time, masking the remaining 50% of the “No” answers. We can therefore double the proportion of “No” answers to estimate the true fraction of “No” answers.\nElection Polls\nToday, the Gallup Poll is one of the leading polls for election results. The many sources of biases – who responds to polls? Do voters tell the truth? How can we predict turnout? – still remain, but the Gallup Poll uses several tactics to mitigate them. Within their sampling frame of “civilian, non-institutionalized population” of adults in telephone households in continental U.S., they use random digit dialing to include both listed/unlisted phone numbers and to avoid selection bias. Additionally, they use a within-household selection process to randomly select households with one or more adults. 
If no one answers, re-call multiple times to avoid non-response bias.", - "crumbs": [ - "9  Sampling" - ] - }, - { - "objectID": "sampling/sampling.html#probability-samples", - "href": "sampling/sampling.html#probability-samples", - "title": "9  Sampling", - "section": "9.3 Probability Samples", - "text": "9.3 Probability Samples\nWhen sampling, it is essential to focus on the quality of the sample rather than the quantity of the sample. A huge sample size does not fix a bad sampling method. Our main goal is to gather a sample that is representative of the population it came from. In this section, we’ll explore the different types of sampling and their pros and cons.\nA convenience sample is whatever you can get ahold of; this type of sampling is non-random. Note that haphazard sampling is not necessarily random sampling; there are many potential sources of bias.\nIn a probability sample, we provide the chance that any specified set of individuals will be in the sample (individuals in the population can have different chances of being selected; they don’t all have to be uniform), and we sample at random based off this known chance. For this reason, probability samples are also called random samples. The randomness provides a few benefits:\n\nBecause we know the source probabilities, we can measure the errors.\nSampling at random gives us a more representative sample of the population, which reduces bias. (Note: this is only the case when the probability distribution we’re sampling from is accurate. Random samples using “bad” or inaccurate distributions can produce biased estimates of population quantities.)\nProbability samples allow us to estimate the bias and chance error, which helps us quantify uncertainty (more in a future lecture).\n\nThe real world is usually more complicated, and we often don’t know the initial probabilities. For example, we do not generally know the probability that a given bacterium is in a microbiome sample or whether people will answer when Gallup calls landlines. That being said, still we try to model probability sampling to the best of our ability even when the sampling or measurement process is not fully under our control.\nA few common random sampling schemes:\n\nA uniform random sample with replacement is a sample drawn uniformly at random with replacement.\n\nRandom doesn’t always mean “uniformly at random,” but in this specific context, it does.\nSome individuals in the population might get picked more than once.\n\nA simple random sample (SRS) is a sample drawn uniformly at random without replacement.\n\nEvery individual (and subset of individuals) has the same chance of being selected from the sampling frame.\nEvery pair has the same chance as every other pair.\nEvery triple has the same chance as every other triple.\nAnd so on.\n\nA stratified random sample, where random sampling is performed on strata (specific groups), and the groups together compose a sample.\n\n\n9.3.1 Example Scheme 1: Probability Sample\nSuppose we have 3 TA’s (Arman, Boyu, Charlie): I decide to sample 2 of them as follows:\n\nI choose A with probability 1.0\nI choose either B or C, each with a probability of 0.5.\n\nWe can list all the possible outcomes and their respective probabilities in a table:\n\n\n\nOutcome\nProbability\n\n\n\n\n{A, B}\n0.5\n\n\n{A, C}\n0.5\n\n\n{B, C}\n0\n\n\n\nThis is a probability sample (though not a great one). Of the 3 people in my population, I know the chance of getting each subset. 
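A short simulation sketch (self-contained, with an arbitrarily chosen seed) agrees with this table: A always appears, {A, B} and {A, C} each occur about half the time, and {B, C} never does.

from collections import Counter
import numpy as np

rng = np.random.default_rng(100)  # seed chosen arbitrarily for reproducibility

# Simulate the scheme many times: always take A, then take B or C with probability 0.5 each
samples = [frozenset(("A", rng.choice(["B", "C"]))) for _ in range(10_000)]

counts = Counter(samples)
print({tuple(sorted(s)): n / 10_000 for s, n in counts.items()})
# Roughly {('A', 'B'): 0.5, ('A', 'C'): 0.5}; the pair ('B', 'C') never shows up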
Suppose I’m measuring the average distance TAs live from campus.\n\nThis scheme does not see the entire population!\nMy estimate using the single sample I take has some chance error depending on if I see AB or AC.\nThis scheme is biased towards A’s response.\n\n\n\n9.3.2 Example Scheme 2: Simple Random Sample\nConsider the following sampling scheme:\n\nA class roster has 1100 students listed alphabetically.\nPick one of the first 10 students on the list at random (e.g. Student 8).\nTo create your sample, take that student and every 10th student listed after that (e.g. Students 8, 18, 28, 38, etc.).\n\n\n\nIs this a probability sample?\n\nYes. For a sample [n, n + 10, n + 20, …, n + 1090], where 1 <= n <= 10, the probability of that sample is 1/10. Otherwise, the probability is 0.\nOnly 10 possible samples!\n\n\n\nDoes each student have the same probability of being selected?\n\nYes. Each student is chosen with a probability of 1/10.\n\n\n\nIs this a simple random sample?\n\nNo. The chance of selecting (8, 18) is 1/10; the chance of selecting (8, 9) is 0.\n\n\n\n9.3.3 Demo: Barbie v. Oppenheimer\nWe are trying to collect a sample from Berkeley residents to predict the which one of Barbie and Oppenheimer would perform better on their opening day, July 21st.\nFirst, let’s grab a dataset that has every single resident in Berkeley (this is a fake dataset) and which movie they actually watched on July 21st.\nLet’s load in the movie.csv table. We can assume that:\n\nis_male is a boolean that indicates if a resident identifies as male.\nThere are only two movies they can watch on July 21st: Barbie and Oppenheimer.\nEvery resident watches a movie (either Barbie or Oppenheimer) on July 21st.\n\n\n\nCode\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nsns.set_theme(style='darkgrid', font_scale = 1.5,\n rc={'figure.figsize':(7,5)})\n\nrng = np.random.default_rng()\n\n\n\nmovie = pd.read_csv(\"data/movie.csv\")\n\n# create a 1/0 int that indicates Barbie vote\nmovie['barbie'] = (movie['movie'] == 'Barbie').astype(int)\nmovie.head()\n\n\n\n\n\n\n\n\nage\nis_male\nmovie\nbarbie\n\n\n\n\n0\n35\nFalse\nBarbie\n1\n\n\n1\n42\nTrue\nOppenheimer\n0\n\n\n2\n55\nFalse\nBarbie\n1\n\n\n3\n77\nTrue\nOppenheimer\n0\n\n\n4\n31\nFalse\nBarbie\n1\n\n\n\n\n\n\n\nWhat fraction of Berkeley residents chose Barbie?\n\nactual_barbie = np.mean(movie[\"barbie\"])\nactual_barbie\n\n0.5302792307692308\n\n\nThis is the actual outcome of the competition. Based on this result, Barbie would win. How did our sample of retirees do?\n\n9.3.3.1 Convenience Sample: Retirees\nLet’s take a convenience sample of people who have retired (>= 65 years old). What proportion of them went to see Barbie instead of Oppenheimer?\n\nconvenience_sample = movie[movie['age'] >= 65] # take a convenience sample of retirees\nnp.mean(convenience_sample[\"barbie\"]) # what proportion of them saw Barbie? \n\n0.3744755089093924\n\n\nBased on this result, we would have predicted that Oppenheimer would win! What happened? Is it possible that our sample is too small or noisy?\n\n# what's the size of our sample? \nlen(convenience_sample)\n\n359396\n\n\n\n# what proportion of our data is in the convenience sample? 
\nlen(convenience_sample)/len(movie)\n\n0.27645846153846154\n\n\nSeems like our sample is rather large (roughly 360,000 people), so the error is likely not due to solely to chance.\n\n\n9.3.3.2 Check for Bias\nLet us aggregate all choices by age and visualize the fraction of Barbie views, split by gender.\n\nvotes_by_barbie = movie.groupby([\"age\",\"is_male\"]).agg(\"mean\", numeric_only=True).reset_index()\nvotes_by_barbie.head()\n\n\n\n\n\n\n\n\nage\nis_male\nbarbie\n\n\n\n\n0\n18\nFalse\n0.819594\n\n\n1\n18\nTrue\n0.667001\n\n\n2\n19\nFalse\n0.812214\n\n\n3\n19\nTrue\n0.661252\n\n\n4\n20\nFalse\n0.805281\n\n\n\n\n\n\n\n\n\nCode\n# A common matplotlib/seaborn pattern: create the figure and axes object, pass ax\n# to seaborn for drawing into, and later fine-tune the figure via ax.\nfig, ax = plt.subplots();\n\nred_blue = [\"#bf1518\", \"#397eb7\"]\nwith sns.color_palette(red_blue):\n sns.pointplot(data=votes_by_barbie, x = \"age\", y = \"barbie\", hue = \"is_male\", ax=ax)\n\nnew_ticks = [i.get_text() for i in ax.get_xticklabels()]\nax.set_xticks(range(0, len(new_ticks), 10), new_ticks[::10])\nax.set_title(\"Preferences by Demographics\");\n\n\n\n\n\n\n\n\n\n\nWe see that retirees (in Berkeley) tend to watch Oppenheimer.\nWe also see that residents who identify as non-male tend to prefer Barbie.\n\n\n\n9.3.3.3 Simple Random Sample\nSuppose we took a simple random sample (SRS) of the same size as our retiree sample:\n\nn = len(convenience_sample)\nrandom_sample = movie.sample(n, replace = False) ## By default, replace = False\nnp.mean(random_sample[\"barbie\"])\n\n0.529927990294828\n\n\nThis is very close to the actual vote of 0.5302792307692308!\nIt turns out that we can get similar results with a much smaller sample size, say, 800:\n\nn = 800\nrandom_sample = movie.sample(n, replace = False)\n\n# Compute the sample average and the resulting relative error\nsample_barbie = np.mean(random_sample[\"barbie\"])\nerr = abs(sample_barbie-actual_barbie)/actual_barbie\n\n# We can print output with Markdown formatting too...\nfrom IPython.display import Markdown\nMarkdown(f\"**Actual** = {actual_barbie:.4f}, **Sample** = {sample_barbie:.4f}, \"\n f\"**Err** = {100*err:.2f}%.\")\n\nActual = 0.5303, Sample = 0.5175, Err = 2.41%.\n\n\nWe’ll learn how to choose this number when we (re)learn the Central Limit Theorem later in the semester.\n\n\n9.3.3.4 Quantifying Chance Error\nIn our SRS of size 800, what would be our chance error?\nLet’s simulate 1000 versions of taking the 800-sized SRS from before:\n\nnrep = 1000 # number of simulations\nn = 800 # size of our sample\npoll_result = []\nfor i in range(0, nrep):\n random_sample = movie.sample(n, replace = False)\n poll_result.append(np.mean(random_sample[\"barbie\"]))\n\n\n\nCode\nfig, ax = plt.subplots()\nsns.histplot(poll_result, stat='density', ax=ax)\nax.axvline(actual_barbie, color=\"orange\", lw=4);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nWhat fraction of these simulated samples would have predicted Barbie?\n\npoll_result = pd.Series(poll_result)\nnp.sum(poll_result > 0.5)/1000\n\n0.955\n\n\nYou can see the curve looks roughly Gaussian/normal. 
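One simple way to attach a number to this chance error is to look at the spread of the simulated estimates. This is a sketch that reuses the poll_result values computed in the simulation above:

# The standard deviation of the simulated sample proportions measures the typical chance error
print(f"Mean of the simulated estimates: {np.mean(poll_result):.4f}")
print(f"Typical chance error (SD of the estimates): {np.std(poll_result):.4f}")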
Using KDE:\n\n\nCode\nsns.histplot(poll_result, stat='density', kde=True);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.", - "crumbs": [ - "9  Sampling" - ] - }, - { - "objectID": "sampling/sampling.html#summary", - "href": "sampling/sampling.html#summary", - "title": "9  Sampling", - "section": "9.4 Summary", - "text": "9.4 Summary\nUnderstanding the sampling process is what lets us go from describing the data to understanding the world. Without knowing / assuming something about how the data were collected, there is no connection between the sample and the population. Ultimately, the dataset doesn’t tell us about the world behind the data.", - "crumbs": [ - "9  Sampling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html", - "href": "intro_to_modeling/intro_to_modeling.html", - "title": "10  Introduction to Modeling", - "section": "", - "text": "10.1 What is a Model?\nA model is an idealized representation of a system. A system is a set of principles or procedures according to which something functions. We live in a world full of systems: the procedure of turning on a light happens according to a specific set of rules dictating the flow of electricity. The truth behind how any event occurs is usually complex, and many times the specifics are unknown. The workings of the world can be viewed as its own giant procedure. Models seek to simplify the world and distill them into workable pieces.\nExample: We model the fall of an object on Earth as subject to a constant acceleration of \\(9.81 m/s^2\\) due to gravity.", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#what-is-a-model", - "href": "intro_to_modeling/intro_to_modeling.html#what-is-a-model", - "title": "10  Introduction to Modeling", - "section": "", - "text": "While this describes the behavior of our system, it is merely an approximation.\nIt doesn’t account for the effects of air resistance, local variations in gravity, etc.\nIn practice, it’s accurate enough to be useful!\n\n\n10.1.1 Reasons for Building Models\nWhy do we want to build models? As far as data scientists and statisticians are concerned, there are three reasons, and each implies a different focus on modeling.\n\nTo explain complex phenomena occurring in the world we live in. Examples of this might be:\n\nHow are the parents’ average height related to their children’s average height?\nHow does an object’s velocity and acceleration impact how far it travels? (Physics: \\(d = d_0 + vt + \\frac{1}{2}at^2\\))\n\nIn these cases, we care about creating models that are simple and interpretable, allowing us to understand what the relationships between our variables are.\nTo make accurate predictions about unseen data. Some examples include:\n\nCan we predict if an email is spam or not?\nCan we generate a one-sentence summary of this 10-page long article?\n\nWhen making predictions, we care more about making extremely accurate predictions, at the cost of having an uninterpretable model. These are sometimes called black-box models and are common in fields like deep learning.\nTo measure the causal effects of one event on some other event. 
For example,\n\nDoes smoking cause lung cancer?\nDoes a job training program cause increases in employment and wages?\n\nThis is a much harder question because most statistical tools are designed to infer association, not causation. We will not focus on this task in Data 100, but you can take other advanced classes on causal inference (e.g., Stat 156, Data 102) if you are intrigued!\n\nMost of the time, we aim to strike a balance between building interpretable models and building accurate models.\n\n\n10.1.2 Common Types of Models\nIn general, models can be split into two categories:\n\nDeterministic physical (mechanistic) models: Laws that govern how the world works.\n\nKepler’s Third Law of Planetary Motion (1619): The ratio of the square of an object’s orbital period with the cube of the semi-major axis of its orbit is the same for all objects orbiting the same primary.\n\n\\(T^2 \\propto R^3\\)\n\nNewton’s Laws: motion and gravitation (1687): Newton’s second law of motion models the relationship between the mass of an object and the force required to accelerate it.\n\n\\(F = ma\\)\n\\(F_g = G \\frac{m_1 m_2}{r^2}\\) \n\n\nProbabilistic models: Models that attempt to understand how random processes evolve. These are more general and can be used to describe many phenomena in the real world. These models commonly make simplifying assumptions about the nature of the world.\n\nPoisson Process models: Used to model random events that happen with some probability at any point in time and are strictly increasing in count, such as the arrival of customers at a store.\n\n\nNote: These specific models are not in the scope of Data 100 and exist to serve as motivation.", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#simple-linear-regression", - "href": "intro_to_modeling/intro_to_modeling.html#simple-linear-regression", - "title": "10  Introduction to Modeling", - "section": "10.2 Simple Linear Regression", - "text": "10.2 Simple Linear Regression\nThe regression line is the unique straight line that minimizes the mean squared error of estimation among all straight lines. 
As with any straight line, it can be defined by a slope and a y-intercept:\n\n\\(\\text{slope} = r \\cdot \\frac{\\text{Standard Deviation of } y}{\\text{Standard Deviation of }x}\\)\n\\(y\\text{-intercept} = \\text{average of }y - \\text{slope}\\cdot\\text{average of }x\\)\n\\(\\text{regression estimate} = y\\text{-intercept} + \\text{slope}\\cdot\\text{}x\\)\n\\(\\text{residual} =\\text{observed }y - \\text{regression estimate}\\)\n\n\n\nCode\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# Set random seed for consistency \nnp.random.seed(43)\nplt.style.use('default') \n\n#Generate random noise for plotting\nx = np.linspace(-3, 3, 100)\ny = x * 0.5 - 1 + np.random.randn(100) * 0.3\n\n#plot regression line\nsns.regplot(x=x,y=y);\n\n\n\n\n\n\n\n\n\n\n10.2.1 Notations and Definitions\nFor a pair of variables \\(x\\) and \\(y\\) representing our data \\(\\mathcal{D} = \\{(x_1, y_1), (x_2, y_2), \\dots, (x_n, y_n)\\}\\), we denote their means/averages as \\(\\bar x\\) and \\(\\bar y\\) and standard deviations as \\(\\sigma_x\\) and \\(\\sigma_y\\).\n\n10.2.1.1 Standard Units\nA variable is represented in standard units if the following are true:\n\n0 in standard units is equal to the mean (\\(\\bar{x}\\)) in the original variable’s units.\nAn increase of 1 standard unit is an increase of 1 standard deviation (\\(\\sigma_x\\)) in the original variable’s units.\n\nTo convert a variable \\(x_i\\) into standard units, we subtract its mean from it and divide it by its standard deviation. For example, \\(x_i\\) in standard units is \\(\\frac{x_i - \\bar x}{\\sigma_x}\\).\n\n\n10.2.1.2 Correlation\nThe correlation (\\(r\\)) is the average of the product of \\(x\\) and \\(y\\), both measured in standard units.\n\\[r = \\frac{1}{n} \\sum_{i=1}^n (\\frac{x_i - \\bar{x}}{\\sigma_x})(\\frac{y_i - \\bar{y}}{\\sigma_y})\\]\n\nCorrelation measures the strength of a linear association between two variables.\nCorrelations range between -1 and 1: \\(|r| \\leq 1\\), with \\(r=1\\) indicating perfect linear association, and \\(r=-1\\) indicating perfect negative association. The closer \\(r\\) is to \\(0\\), the weaker the linear association is.\nCorrelation says nothing about causation and non-linear association. Correlation does not imply causation. When \\(r = 0\\), the two variables are uncorrelated. 
However, they could still be related through some non-linear relationship.\n\n\n\nCode\ndef plot_and_get_corr(ax, x, y, title):\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.scatter(x, y, alpha = 0.73)\n r = np.corrcoef(x, y)[0, 1]\n ax.set_title(title + \" (corr: {})\".format(r.round(2)))\n return r\n\nfig, axs = plt.subplots(2, 2, figsize = (10, 10))\n\n# Just noise\nx1, y1 = np.random.randn(2, 100)\ncorr1 = plot_and_get_corr(axs[0, 0], x1, y1, title = \"noise\")\n\n# Strong linear\nx2 = np.linspace(-3, 3, 100)\ny2 = x2 * 0.5 - 1 + np.random.randn(100) * 0.3\ncorr2 = plot_and_get_corr(axs[0, 1], x2, y2, title = \"strong linear\")\n\n# Unequal spread\nx3 = np.linspace(-3, 3, 100)\ny3 = - x3/3 + np.random.randn(100)*(x3)/2.5\ncorr3 = plot_and_get_corr(axs[1, 0], x3, y3, title = \"strong linear\")\nextent = axs[1, 0].get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n\n# Strong non-linear\nx4 = np.linspace(-3, 3, 100)\ny4 = 2*np.sin(x3 - 1.5) + np.random.randn(100) * 0.3\ncorr4 = plot_and_get_corr(axs[1, 1], x4, y4, title = \"strong non-linear\")\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n10.2.2 Alternate Form\nWhen the variables \\(y\\) and \\(x\\) are measured in standard units, the regression line for predicting \\(y\\) based on \\(x\\) has slope \\(r\\) and passes through the origin.\n\\[\\hat{y}_{su} = r \\cdot x_{su}\\]\n\n\nIn the original units, this becomes\n\n\\[\\frac{\\hat{y} - \\bar{y}}{\\sigma_y} = r \\cdot \\frac{x - \\bar{x}}{\\sigma_x}\\]\n\n\n\n10.2.3 Derivation\nStarting from the top, we have our claimed form of the regression line, and we want to show that it is equivalent to the optimal linear regression line: \\(\\hat{y} = \\hat{a} + \\hat{b}x\\).\nRecall:\n\n\\(\\hat{b} = r \\cdot \\frac{\\text{Standard Deviation of }y}{\\text{Standard Deviation of }x}\\)\n\\(\\hat{a} = \\text{average of }y - \\text{slope}\\cdot\\text{average of }x\\)\n\n\n\n\n\n\n\nProof:\n\\[\\frac{\\hat{y} - \\bar{y}}{\\sigma_y} = r \\cdot \\frac{x - \\bar{x}}{\\sigma_x}\\]\nMultiply by \\(\\sigma_y\\), and add \\(\\bar{y}\\) on both sides.\n\\[\\hat{y} = \\sigma_y \\cdot r \\cdot \\frac{x - \\bar{x}}{\\sigma_x} + \\bar{y}\\]\nDistribute coefficient \\(\\sigma_{y}\\cdot r\\) to the \\(\\frac{x - \\bar{x}}{\\sigma_x}\\) term\n\\[\\hat{y} = (\\frac{r\\sigma_y}{\\sigma_x} ) \\cdot x + (\\bar{y} - (\\frac{r\\sigma_y}{\\sigma_x} ) \\bar{x})\\]\nWe now see that we have a line that matches our claim:\n\nslope: \\(r\\cdot\\frac{\\text{SD of y}}{\\text{SD of x}} = r\\cdot\\frac{\\sigma_y}{\\sigma_x}\\)\nintercept: \\(\\bar{y} - \\text{slope}\\cdot \\bar{x}\\)\n\nNote that the error for the i-th datapoint is: \\(e_i = y_i - \\hat{y_i}\\)", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#the-modeling-process", - "href": "intro_to_modeling/intro_to_modeling.html#the-modeling-process", - "title": "10  Introduction to Modeling", - "section": "10.3 The Modeling Process", - "text": "10.3 The Modeling Process\nAt a high level, a model is a way of representing a system. In Data 100, we’ll treat a model as some mathematical rule we use to describe the relationship between variables.\nWhat variables are we modeling? Typically, we use a subset of the variables in our sample of collected data to model another variable in this data. 
To put this more formally, say we have the following dataset \\(\\mathcal{D}\\):\n\\[\\mathcal{D} = \\{(x_1, y_1), (x_2, y_2), ..., (x_n, y_n)\\}\\]\nEach pair of values \\((x_i, y_i)\\) represents a datapoint. In a modeling setting, we call these observations. \\(y_i\\) is the dependent variable we are trying to model, also called an output or response. \\(x_i\\) is the independent variable inputted into the model to make predictions, also known as a feature.\nOur goal in modeling is to use the observed data \\(\\mathcal{D}\\) to predict the output variable \\(y_i\\). We denote each prediction as \\(\\hat{y}_i\\) (read: “y hat sub i”).\nHow do we generate these predictions? Some examples of models we’ll encounter in the next few lectures are given below:\n\\[\\hat{y}_i = \\theta\\] \\[\\hat{y}_i = \\theta_0 + \\theta_1 x_i\\]\nThe examples above are known as parametric models. They relate the collected data, \\(x_i\\), to the prediction we make, \\(\\hat{y}_i\\). A few parameters (\\(\\theta\\), \\(\\theta_0\\), \\(\\theta_1\\)) are used to describe the relationship between \\(x_i\\) and \\(\\hat{y}_i\\).\nNotice that we don’t immediately know the values of these parameters. While the features, \\(x_i\\), are taken from our observed data, we need to decide what values to give \\(\\theta\\), \\(\\theta_0\\), and \\(\\theta_1\\) ourselves. This is the heart of parametric modeling: what parameter values should we choose so our model makes the best possible predictions?\nTo choose our model parameters, we’ll work through the modeling process.\n\nChoose a model: how should we represent the world?\nChoose a loss function: how do we quantify prediction error?\nFit the model: how do we choose the best parameters of our model given our data?\nEvaluate model performance: how do we evaluate whether this process gave rise to a good model?", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#choosing-a-model", - "href": "intro_to_modeling/intro_to_modeling.html#choosing-a-model", - "title": "10  Introduction to Modeling", - "section": "10.4 Choosing a Model", - "text": "10.4 Choosing a Model\nOur first step is choosing a model: defining the mathematical rule that describes the relationship between the features, \\(x_i\\), and predictions \\(\\hat{y}_i\\).\nIn Data 8, you learned about the Simple Linear Regression (SLR) model. You learned that the model takes the form: \\[\\hat{y}_i = a + bx_i\\]\nIn Data 100, we’ll use slightly different notation: we will replace \\(a\\) with \\(\\theta_0\\) and \\(b\\) with \\(\\theta_1\\). This will allow us to use the same notation when we explore more complex models later on in the course.\n\\[\\hat{y}_i = \\theta_0 + \\theta_1 x_i\\]\nThe parameters of the SLR model are \\(\\theta_0\\), also called the intercept term, and \\(\\theta_1\\), also called the slope term. To create an effective model, we want to choose values for \\(\\theta_0\\) and \\(\\theta_1\\) that most accurately predict the output variable. 
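To get a feel for what that choice means, here is a small sketch (with made-up data and hand-picked, unfitted parameter values) showing how different choices of \\(\\theta_0\\) and \\(\\theta_1\\) lead to predictions that track the observed responses more or less closely:\n\n\nCode\nimport numpy as np\n\n# Made-up observations\nx = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\ny = np.array([2.1, 3.9, 6.2, 8.1, 9.8])\n\n# Two hand-picked candidate parameter choices (theta_0, theta_1)\nfor theta_0, theta_1 in [(0.0, 1.0), (0.0, 2.0)]:\n    y_hat = theta_0 + theta_1 * x  # SLR predictions for this choice\n    avg_gap = np.mean(np.abs(y - y_hat))  # informal measure of how far off the predictions are\n    print(f'theta_0 = {theta_0}, theta_1 = {theta_1}, average gap = {avg_gap:.2f}')\n\n\nThe second choice tracks these observations far more closely, which raises the question of how to pick the single best pair of values. 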
The “best” fitting model parameters are given the special names: \\(\\hat{\\theta}_0\\) and \\(\\hat{\\theta}_1\\); they are the specific parameter values that allow our model to generate the best possible predictions.\nIn Data 8, you learned that the best SLR model parameters are: \\[\\hat{\\theta}_0 = \\bar{y} - \\hat{\\theta}_1\\bar{x} \\qquad \\qquad \\hat{\\theta}_1 = r \\frac{\\sigma_y}{\\sigma_x}\\]\nA quick reminder on notation:\n\n\\(\\bar{y}\\) and \\(\\bar{x}\\) indicate the mean value of \\(y\\) and \\(x\\), respectively\n\\(\\sigma_y\\) and \\(\\sigma_x\\) indicate the standard deviations of \\(y\\) and \\(x\\)\n\\(r\\) is the correlation coefficient, defined as the average of the product of \\(x\\) and \\(y\\) measured in standard units: \\(\\frac{1}{n} \\sum_{i=1}^n (\\frac{x_i-\\bar{x}}{\\sigma_x})(\\frac{y_i-\\bar{y}}{\\sigma_y})\\)\n\nIn Data 100, we want to understand how to derive these best model coefficients. To do so, we’ll introduce the concept of a loss function.", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#choosing-a-loss-function", - "href": "intro_to_modeling/intro_to_modeling.html#choosing-a-loss-function", - "title": "10  Introduction to Modeling", - "section": "10.5 Choosing a Loss Function", - "text": "10.5 Choosing a Loss Function\nWe’ve talked about the idea of creating the “best” possible predictions. This begs the question: how do we decide how “good” or “bad” our model’s predictions are?\nA loss function characterizes the cost, error, or fit resulting from a particular choice of model or model parameters. This function, \\(L(y, \\hat{y})\\), quantifies how “bad” or “far off” a single prediction by our model is from a true, observed value in our collected data.\nThe choice of loss function for a particular model will affect the accuracy and computational cost of estimation, and it’ll also depend on the estimation task at hand. For example,\n\nAre outputs quantitative or qualitative?\nDo outliers matter?\nAre all errors equally costly? (e.g., a false negative on a cancer test is arguably more dangerous than a false positive)\n\nRegardless of the specific function used, a loss function should follow two basic principles:\n\nIf the prediction \\(\\hat{y}_i\\) is close to the actual value \\(y_i\\), loss should be low.\nIf the prediction \\(\\hat{y}_i\\) is far from the actual value \\(y_i\\), loss should be high.\n\nTwo common choices of loss function are squared loss and absolute loss.\nSquared loss, also known as L2 loss, computes loss as the square of the difference between the observed \\(y_i\\) and predicted \\(\\hat{y}_i\\): \\[L(y_i, \\hat{y}_i) = (y_i - \\hat{y}_i)^2\\]\nAbsolute loss, also known as L1 loss, computes loss as the absolute difference between the observed \\(y_i\\) and predicted \\(\\hat{y}_i\\): \\[L(y_i, \\hat{y}_i) = |y_i - \\hat{y}_i|\\]\nL1 and L2 loss give us a tool for quantifying our model’s performance on a single data point. This is a good start, but ideally, we want to understand how our model performs across our entire dataset. A natural way to do this is to compute the average loss across all data points in the dataset. This is known as the cost function, \\(\\hat{R}(\\theta)\\): \\[\\hat{R}(\\theta) = \\frac{1}{n} \\sum^n_{i=1} L(y_i, \\hat{y}_i)\\]\nThe cost function has many names in the statistics literature. 
You may also encounter the terms:\n\nEmpirical risk (this is why we give the cost function the name \\(R\\))\nError function\nAverage loss\n\nWe can substitute our L1 and L2 loss into the cost function definition. The Mean Squared Error (MSE) is the average squared loss across a dataset: \\[\\text{MSE} = \\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)^2\\]\nThe Mean Absolute Error (MAE) is the average absolute loss across a dataset: \\[\\text{MAE}= \\frac{1}{n} \\sum_{i=1}^n |y_i - \\hat{y}_i|\\]", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "intro_to_modeling/intro_to_modeling.html#fitting-the-model", - "href": "intro_to_modeling/intro_to_modeling.html#fitting-the-model", - "title": "10  Introduction to Modeling", - "section": "10.6 Fitting the Model", - "text": "10.6 Fitting the Model\nNow that we’ve established the concept of a loss function, we can return to our original goal of choosing model parameters. Specifically, we want to choose the best set of model parameters that will minimize the model’s cost on our dataset. This process is called fitting the model.\nWe know from calculus that a function is minimized when (1) its first derivative is equal to zero and (2) its second derivative is positive. We often call the function being minimized the objective function (our objective is to find its minimum).\nTo find the optimal model parameter, we:\n\nTake the derivative of the cost function with respect to that parameter\nSet the derivative equal to 0\nSolve for the parameter\n\nWe repeat this process for each parameter present in the model. For now, we’ll disregard the second derivative condition.\nTo help us make sense of this process, let’s put it into action by deriving the optimal model parameters for simple linear regression using the mean squared error as our cost function. Remember: although the notation may look tricky, all we are doing is following the three steps above!\nStep 1: take the derivative of the cost function with respect to each model parameter. We substitute the SLR model, \\(\\hat{y}_i = \\theta_0+\\theta_1 x_i\\), into the definition of MSE above and differentiate with respect to \\(\\theta_0\\) and \\(\\theta_1\\). 
\\[\\text{MSE} = \\frac{1}{n} \\sum_{i=1}^{n} (y_i - \\hat{y}_i)^2 = \\frac{1}{n} \\sum_{i=1}^{n} (y_i - \\theta_0 - \\theta_1 x_i)^2\\]\n\\[\\frac{\\partial}{\\partial \\theta_0} \\text{MSE} = \\frac{-2}{n} \\sum_{i=1}^{n} (y_i - \\theta_0 - \\theta_1 x_i)\\]\n\\[\\frac{\\partial}{\\partial \\theta_1} \\text{MSE} = \\frac{-2}{n} \\sum_{i=1}^{n} (y_i - \\theta_0 - \\theta_1 x_i)x_i\\]\nLet’s walk through these derivations in more depth, starting with the derivative of MSE with respect to \\(\\theta_0\\).\nGiven our MSE above, we know that: \\[\\frac{\\partial}{\\partial \\theta_0} \\text{MSE} = \\frac{\\partial}{\\partial \\theta_0} \\frac{1}{n} \\sum_{i=1}^{n} {(y_i - \\theta_0 - \\theta_1 x_i)}^{2}\\]\nNoting that the derivative of a sum is equivalent to the sum of the derivatives, this then becomes: \\[ = \\frac{1}{n} \\sum_{i=1}^{n} \\frac{\\partial}{\\partial \\theta_0} {(y_i - \\theta_0 - \\theta_1 x_i)}^{2}\\]\nWe can then apply the chain rule.\n\\[ = \\frac{1}{n} \\sum_{i=1}^{n} 2 \\cdot {(y_i - \\theta_0 - \\theta_1 x_i)} \\cdot (-1)\\]\nFinally, we can simplify the constants, leaving us with our answer.\n\\[\\frac{\\partial}{\\partial \\theta_0} \\text{MSE} = \\frac{-2}{n} \\sum_{i=1}^{n}{(y_i - \\theta_0 - \\theta_1 x_i)}\\]\nFollowing the same procedure, we can take the derivative of MSE with respect to \\(\\theta_1\\).\n\\[\\frac{\\partial}{\\partial \\theta_1} \\text{MSE} = \\frac{\\partial}{\\partial \\theta_1} \\frac{1}{n} \\sum_{i=1}^{n} {(y_i - \\theta_0 - \\theta_1 x_i)}^{2}\\]\n\\[ = \\frac{1}{n} \\sum_{i=1}^{n} \\frac{\\partial}{\\partial \\theta_1} {(y_i - \\theta_0 - \\theta_1 x_i)}^{2}\\]\n\\[ = \\frac{1}{n} \\sum_{i=1}^{n} 2 \\cdot {(y_i - \\theta_0 - \\theta_1 x_i)} \\cdot (-x_i)\\]\n\\[= \\frac{-2}{n} \\sum_{i=1}^{n} {(y_i - \\theta_0 - \\theta_1 x_i)}x_i\\]\nStep 2: set the derivatives equal to 0. After simplifying terms, this produces two estimating equations. The best set of model parameters \\((\\hat{\\theta}_0, \\hat{\\theta}_1)\\) must satisfy these two optimality conditions. \\[0 = \\frac{-2}{n} \\sum_{i=1}^{n} (y_i - \\hat{\\theta}_0 - \\hat{\\theta}_1 x_i) \\Longleftrightarrow \\frac{1}{n}\\sum_{i=1}^{n} (y_i - \\hat{y}_i) = 0\\] \\[0 = \\frac{-2}{n} \\sum_{i=1}^{n} (y_i - \\hat{\\theta}_0 - \\hat{\\theta}_1 x_i)x_i \\Longleftrightarrow \\frac{1}{n}\\sum_{i=1}^{n} (y_i - \\hat{y}_i)x_i = 0\\]\nStep 3: solve the estimating equations to compute estimates for \\(\\hat{\\theta}_0\\) and \\(\\hat{\\theta}_1\\).\nTaking the first equation gives the estimate of \\(\\hat{\\theta}_0\\): \\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{\\theta}_0 - \\hat{\\theta}_1 x_i) = 0 \\]\n\\[\\left(\\frac{1}{n} \\sum_{i=1}^n y_i \\right) - \\hat{\\theta}_0 - \\hat{\\theta}_1\\left(\\frac{1}{n} \\sum_{i=1}^n x_i \\right) = 0\\]\n\\[ \\hat{\\theta}_0 = \\bar{y} - \\hat{\\theta}_1 \\bar{x}\\]\nWith a bit more maneuvering, the second equation gives the estimate of \\(\\hat{\\theta}_1\\). 
Start by multiplying the first estimating equation by \\(\\bar{x}\\), then subtracting the result from the second estimating equation.\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)x_i - \\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)\\bar{x} = 0 \\]\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)(x_i - \\bar{x}) = 0 \\]\nNext, plug in \\(\\hat{y}_i = \\hat{\\theta}_0 + \\hat{\\theta}_1 x_i = \\bar{y} + \\hat{\\theta}_1(x_i - \\bar{x})\\):\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\bar{y} - \\hat{\\theta}_1(x - \\bar{x}))(x_i - \\bar{x}) = 0 \\]\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\bar{y})(x_i - \\bar{x}) = \\hat{\\theta}_1 \\times \\frac{1}{n} \\sum_{i=1}^n (x_i - \\bar{x})^2\n\\]\nBy using the definition of correlation \\(\\left(r = \\frac{1}{n} \\sum_{i=1}^n (\\frac{x_i-\\bar{x}}{\\sigma_x})(\\frac{y_i-\\bar{y}}{\\sigma_y}) \\right)\\) and standard deviation \\(\\left(\\sigma_x = \\sqrt{\\frac{1}{n} \\sum_{i=1}^n (x_i - \\bar{x})^2} \\right)\\), we can conclude: \\[r \\sigma_x \\sigma_y = \\hat{\\theta}_1 \\times \\sigma_x^2\\] \\[\\hat{\\theta}_1 = r \\frac{\\sigma_y}{\\sigma_x}\\]\nJust as was given in Data 8!\nRemember, this derivation found the optimal model parameters for SLR when using the MSE cost function. If we had used a different model or different loss function, we likely would have found different values for the best model parameters. However, regardless of the model and loss used, we can always follow these three steps to fit the model.", - "crumbs": [ - "10  Introduction to Modeling" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html", - "href": "constant_model_loss_transformations/loss_transformations.html", - "title": "11  Constant Model, Loss, and Transformations", - "section": "", - "text": "11.0.1 Prediction vs. Estimation\nThe terms prediction and estimation are often used somewhat interchangeably, but there is a subtle difference between them. Estimation is the task of using data to calculate model parameters. Prediction is the task of using a model to predict outputs for unseen data. In our simple linear regression model,\n\\[\\hat{y} = \\hat{\\theta_0} + \\hat{\\theta_1}\\]\nwe estimate the parameters by minimizing average loss; then, we predict using these estimations. Least Squares Estimation is when we choose the parameters that minimize MSE.", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#step-4-evaluating-the-slr-model", - "href": "constant_model_loss_transformations/loss_transformations.html#step-4-evaluating-the-slr-model", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.1 Step 4: Evaluating the SLR Model", - "text": "11.1 Step 4: Evaluating the SLR Model\nNow that we’ve explored the mathematics behind (1) choosing a model, (2) choosing a loss function, and (3) fitting the model, we’re left with one final question – how “good” are the predictions made by this “best” fitted model? To determine this, we can:\n\nVisualize data and compute statistics:\n\nPlot the original data.\nCompute each column’s mean and standard deviation. If the mean and standard deviation of our predictions are close to those of the original observed \\(y_i\\)’s, we might be inclined to say that our model has done well.\n(If we’re fitting a linear model) Compute the correlation \\(r\\). 
A large magnitude for the correlation coefficient between the feature and response variables could also indicate that our model has done well.\n\nPerformance metrics:\n\nWe can take the Root Mean Squared Error (RMSE).\n\nIt’s the square root of the mean squared error (MSE), which is the average loss that we’ve been minimizing to determine optimal model parameters.\nRMSE is in the same units as \\(y\\).\nA lower RMSE indicates more “accurate” predictions, as we have a lower “average loss” across the data.\n\n\n\\[\\text{RMSE} = \\sqrt{\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)^2}\\]\nVisualization:\n\nLook at the residual plot of \\(e_i = y_i - \\hat{y_i}\\) to visualize the difference between actual and predicted values. The good residual plot should not show any pattern between input/features \\(x_i\\) and residual values \\(e_i\\).\n\n\nTo illustrate this process, let’s take a look at Anscombe’s quartet.\n\n11.1.1 Four Mysterious Datasets (Anscombe’s quartet)\nLet’s take a look at four different datasets.\n\n\nCode\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nimport itertools\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n\nCode\n# Big font helper\ndef adjust_fontsize(size=None):\n SMALL_SIZE = 8\n MEDIUM_SIZE = 10\n BIGGER_SIZE = 12\n if size != None:\n SMALL_SIZE = MEDIUM_SIZE = BIGGER_SIZE = size\n\n plt.rc(\"font\", size=SMALL_SIZE) # controls default text sizes\n plt.rc(\"axes\", titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc(\"axes\", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc(\"xtick\", labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc(\"ytick\", labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc(\"legend\", fontsize=SMALL_SIZE) # legend fontsize\n plt.rc(\"figure\", titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n\n# Helper functions\ndef standard_units(x):\n return (x - np.mean(x)) / np.std(x)\n\n\ndef correlation(x, y):\n return np.mean(standard_units(x) * standard_units(y))\n\n\ndef slope(x, y):\n return correlation(x, y) * np.std(y) / np.std(x)\n\n\ndef intercept(x, y):\n return np.mean(y) - slope(x, y) * np.mean(x)\n\n\ndef fit_least_squares(x, y):\n theta_0 = intercept(x, y)\n theta_1 = slope(x, y)\n return theta_0, theta_1\n\n\ndef predict(x, theta_0, theta_1):\n return theta_0 + theta_1 * x\n\n\ndef compute_mse(y, yhat):\n return np.mean((y - yhat) ** 2)\n\n\nplt.style.use(\"default\") # Revert style to default mpl\n\n\n\n\nCode\nplt.style.use(\"default\") # Revert style to default mpl\nNO_VIZ, RESID, RESID_SCATTER = range(3)\n\n\ndef least_squares_evaluation(x, y, visualize=NO_VIZ):\n # statistics\n print(f\"x_mean : {np.mean(x):.2f}, y_mean : {np.mean(y):.2f}\")\n print(f\"x_stdev: {np.std(x):.2f}, y_stdev: {np.std(y):.2f}\")\n print(f\"r = Correlation(x, y): {correlation(x, y):.3f}\")\n\n # Performance metrics\n ahat, bhat = fit_least_squares(x, y)\n yhat = predict(x, ahat, bhat)\n print(f\"\\theta_0: {ahat:.2f}, \\theta_1: {bhat:.2f}\")\n print(f\"RMSE: {np.sqrt(compute_mse(y, yhat)):.3f}\")\n\n # visualization\n fig, ax_resid = None, None\n if visualize == RESID_SCATTER:\n fig, axs = plt.subplots(1, 2, figsize=(8, 3))\n axs[0].scatter(x, y)\n axs[0].plot(x, yhat)\n axs[0].set_title(\"LS fit\")\n ax_resid = axs[1]\n elif visualize == RESID:\n fig = plt.figure(figsize=(4, 3))\n ax_resid = plt.gca()\n\n if ax_resid is not None:\n ax_resid.scatter(x, y - yhat, color=\"red\")\n ax_resid.plot([4, 14], [0, 0], color=\"black\")\n 
ax_resid.set_title(\"Residuals\")\n\n return fig\n\n\n\n\nCode\n# Load in four different datasets: I, II, III, IV\nx = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]\ny1 = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]\ny2 = [9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]\ny3 = [7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]\nx4 = [8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8]\ny4 = [6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89]\n\nanscombe = {\n \"I\": pd.DataFrame(list(zip(x, y1)), columns=[\"x\", \"y\"]),\n \"II\": pd.DataFrame(list(zip(x, y2)), columns=[\"x\", \"y\"]),\n \"III\": pd.DataFrame(list(zip(x, y3)), columns=[\"x\", \"y\"]),\n \"IV\": pd.DataFrame(list(zip(x4, y4)), columns=[\"x\", \"y\"]),\n}\n\n# Plot the scatter plot and line of best fit\nfig, axs = plt.subplots(2, 2, figsize=(10, 10))\n\nfor i, dataset in enumerate([\"I\", \"II\", \"III\", \"IV\"]):\n ans = anscombe[dataset]\n x, y = ans[\"x\"], ans[\"y\"]\n ahat, bhat = fit_least_squares(x, y)\n yhat = predict(x, ahat, bhat)\n axs[i // 2, i % 2].scatter(x, y, alpha=0.6, color=\"red\") # plot the x, y points\n axs[i // 2, i % 2].plot(x, yhat) # plot the line of best fit\n axs[i // 2, i % 2].set_xlabel(f\"$x_{i+1}$\")\n axs[i // 2, i % 2].set_ylabel(f\"$y_{i+1}$\")\n axs[i // 2, i % 2].set_title(f\"Dataset {dataset}\")\n\nplt.show()\n\n\n\n\n\n\n\n\n\nWhile these four sets of datapoints look very different, they actually all have identical means \\(\\bar x\\), \\(\\bar y\\), standard deviations \\(\\sigma_x\\), \\(\\sigma_y\\), correlation \\(r\\), and RMSE! If we only look at these statistics, we would probably be inclined to say that these datasets are similar.\n\n\nCode\nfor dataset in [\"I\", \"II\", \"III\", \"IV\"]:\n print(f\">>> Dataset {dataset}:\")\n ans = anscombe[dataset]\n fig = least_squares_evaluation(ans[\"x\"], ans[\"y\"], visualize=NO_VIZ)\n print()\n print()\n\n\n>>> Dataset I:\nx_mean : 9.00, y_mean : 7.50\nx_stdev: 3.16, y_stdev: 1.94\nr = Correlation(x, y): 0.816\n heta_0: 3.00, heta_1: 0.50\nRMSE: 1.119\n\n\n>>> Dataset II:\nx_mean : 9.00, y_mean : 7.50\nx_stdev: 3.16, y_stdev: 1.94\nr = Correlation(x, y): 0.816\n heta_0: 3.00, heta_1: 0.50\nRMSE: 1.119\n\n\n>>> Dataset III:\nx_mean : 9.00, y_mean : 7.50\nx_stdev: 3.16, y_stdev: 1.94\nr = Correlation(x, y): 0.816\n heta_0: 3.00, heta_1: 0.50\nRMSE: 1.118\n\n\n>>> Dataset IV:\nx_mean : 9.00, y_mean : 7.50\nx_stdev: 3.16, y_stdev: 1.94\nr = Correlation(x, y): 0.817\n heta_0: 3.00, heta_1: 0.50\nRMSE: 1.118\n\n\n\n\nWe may also wish to visualize the model’s residuals, defined as the difference between the observed and predicted \\(y_i\\) value (\\(e_i = y_i - \\hat{y}_i\\)). This gives a high-level view of how “off” each prediction is from the true observed value. Recall that you explored this concept in Data 8: a good regression fit should display no clear pattern in its plot of residuals. The residual plots for Anscombe’s quartet are displayed below. Note how only the first plot shows no clear pattern to the magnitude of residuals. 
This is an indication that SLR is not the best choice of model for the remaining three sets of points.\n\n\n\nCode\n# Residual visualization\nfig, axs = plt.subplots(2, 2, figsize=(10, 10))\n\nfor i, dataset in enumerate([\"I\", \"II\", \"III\", \"IV\"]):\n ans = anscombe[dataset]\n x, y = ans[\"x\"], ans[\"y\"]\n ahat, bhat = fit_least_squares(x, y)\n yhat = predict(x, ahat, bhat)\n axs[i // 2, i % 2].scatter(\n x, y - yhat, alpha=0.6, color=\"red\"\n ) # plot the x, y points\n axs[i // 2, i % 2].plot(\n x, np.zeros_like(x), color=\"black\"\n ) # plot the residual line\n axs[i // 2, i % 2].set_xlabel(f\"$x_{i+1}$\")\n axs[i // 2, i % 2].set_ylabel(f\"$e_{i+1}$\")\n axs[i // 2, i % 2].set_title(f\"Dataset {dataset} Residuals\")\n\nplt.show()", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#constant-model-mse", - "href": "constant_model_loss_transformations/loss_transformations.html#constant-model-mse", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.2 Constant Model + MSE", - "text": "11.2 Constant Model + MSE\nNow, we’ll shift from the SLR model to the constant model, also known as a summary statistic. The constant model is slightly different from the simple linear regression model we’ve explored previously. Rather than generating predictions from an inputted feature variable, the constant model always predicts the same constant number. This ignores any relationships between variables. For example, let’s say we want to predict the number of drinks a boba shop sells in a day. Boba tea sales likely depend on the time of year, the weather, how the customers feel, whether school is in session, etc., but the constant model ignores these factors in favor of a simpler model. In other words, the constant model employs a simplifying assumption.\nIt is also a parametric, statistical model:\n\\[\\hat{y} = \\theta_0\\]\n\\(\\theta_0\\) is the parameter of the constant model, just as \\(\\theta_0\\) and \\(\\theta_1\\) were the parameters in SLR. Since our parameter \\(\\theta_0\\) is 1-dimensional (\\(\\theta_0 \\in \\mathbb{R}\\)), we now have no input to our model and will always predict \\(\\hat{y} = \\theta_0\\).\n\n11.2.1 Deriving the optimal \\(\\theta_0\\)\nOur task now is to determine what value of \\(\\theta_0\\) best represents the optimal model – in other words, what number should we guess each time to have the lowest possible average loss on our data?\nLike before, we’ll use Mean Squared Error (MSE). 
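As a quick sketch of what this looks like in code (on a small, made-up sample of observed values), the constant model ignores any features and repeats a single guess, and the MSE scores how far that guess falls from the data on average:\n\n\nCode\nimport numpy as np\n\ny = np.array([20, 21, 22, 29, 33])  # made-up observed responses\n\ndef constant_model_mse(theta_0, y):\n    y_hat = np.full(len(y), theta_0)  # the constant model predicts theta_0 for every observation\n    return np.mean((y - y_hat) ** 2)  # average squared loss\n\nfor guess in [20, 25, 30]:\n    print(f'theta_0 = {guess}: MSE = {constant_model_mse(guess, y)}')\n\n\nOf these three guesses, the middle one produces the smallest average squared error; the derivation that follows pins down exactly which value of \\(\\theta_0\\) is optimal. 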
Recall that the MSE is average squared loss (L2 loss) over the data \\(D = \\{y_1, y_2, ..., y_n\\}\\).\n\\[\\hat{R}(\\theta) = \\frac{1}{n}\\sum^{n}_{i=1} (y_i - \\hat{y_i})^2 \\]\nOur modeling process now looks like this:\n\nChoose a model: constant model\nChoose a loss function: L2 loss\nFit the model\nEvaluate model performance\n\nGiven the constant model \\(\\hat{y} = \\theta_0\\), we can rewrite the MSE equation as\n\\[\\hat{R}(\\theta) = \\frac{1}{n}\\sum^{n}_{i=1} (y_i - \\theta_0)^2 \\]\nWe can fit the model by finding the optimal \\(\\hat{\\theta_0}\\) that minimizes the MSE using a calculus approach.\n\nDifferentiate with respect to \\(\\theta_0\\):\n\n\\[\n\\begin{align}\n\\frac{d}{d\\theta_0}\\text{R}(\\theta) & = \\frac{d}{d\\theta_0}(\\frac{1}{n}\\sum^{n}_{i=1} (y_i - \\theta_0)^2)\n\\\\ &= \\frac{1}{n}\\sum^{n}_{i=1} \\frac{d}{d\\theta_0} (y_i - \\theta_0)^2 \\quad \\quad \\text{a derivative of sums is a sum of derivatives}\n\\\\ &= \\frac{1}{n}\\sum^{n}_{i=1} 2 (y_i - \\theta_0) (-1) \\quad \\quad \\text{chain rule}\n\\\\ &= {\\frac{-2}{n}}\\sum^{n}_{i=1} (y_i - \\theta_0) \\quad \\quad \\text{simply constants}\n\\end{align}\n\\]\n\nSet the derivative equation equal to 0:\n\\[\n0 = {\\frac{-2}{n}}\\sum^{n}_{i=1} (y_i - \\hat{\\theta_0})\n\\]\nSolve for \\(\\hat{\\theta_0}\\)\n\n\\[\n\\begin{align}\n0 &= {\\frac{-2}{n}}\\sum^{n}_{i=1} (y_i - \\hat{\\theta_0})\n\\\\ &= \\sum^{n}_{i=1} (y_i - \\hat{\\theta_0}) \\quad \\quad \\text{divide both sides by} \\frac{-2}{n}\n\\\\ &= \\left(\\sum^{n}_{i=1} y_i\\right) - \\left(\\sum^{n}_{i=1} \\theta_0\\right) \\quad \\quad \\text{separate sums}\n\\\\ &= \\left(\\sum^{n}_{i=1} y_i\\right) - (n \\cdot \\hat{\\theta_0}) \\quad \\quad \\text{c + c + … + c = nc}\n\\\\ n \\cdot \\hat{\\theta_0} &= \\sum^{n}_{i=1} y_i\n\\\\ \\hat{\\theta_0} &= \\frac{1}{n} \\sum^{n}_{i=1} y_i\n\\\\ \\hat{\\theta_0} &= \\bar{y}\n\\end{align}\n\\]\nLet’s take a moment to interpret this result. \\(\\hat{\\theta_0} = \\bar{y}\\) is the optimal parameter for constant model + MSE. It holds true regardless of what data sample you have, and it provides some formal reasoning as to why the mean is such a common summary statistic.\nOur optimal model parameter is the value of the parameter that minimizes the cost function. This minimum value of the cost function can be expressed:\n\\[R(\\hat{\\theta_0}) = \\min_{\\theta_0} R(\\theta_0)\\]\nTo restate the above in plain English: we are looking at the value of the cost function when it takes the best parameter as input. This optimal model parameter, \\(\\hat{\\theta_0}\\), is the value of \\(\\theta_0\\) that minimizes the cost \\(R\\).\nFor modeling purposes, we care less about the minimum value of cost, \\(R(\\hat{\\theta_0})\\), and more about the value of \\(\\theta\\) that results in this lowest average loss. In other words, we concern ourselves with finding the best parameter value such that:\n\\[\\hat{\\theta} = \\underset{\\theta}{\\operatorname{\\arg\\min}}\\:R(\\theta)\\]\nThat is, we want to find the argument \\(\\theta\\) that minimizes the cost function.\n\n\n11.2.2 Comparing Two Different Models, Both Fit with MSE\nNow that we’ve explored the constant model with an L2 loss, we can compare it to the SLR model that we learned last lecture. Consider the dataset below, which contains information about the ages and lengths of dugongs. 
Supposed we wanted to predict dugong ages:\n\n\n\n\n\n\n\n\n\nConstant Model\nSimple Linear Regression\n\n\n\n\nmodel\n\\(\\hat{y} = \\theta_0\\)\n\\(\\hat{y} = \\theta_0 + \\theta_1 x\\)\n\n\ndata\nsample of ages \\(D = \\{y_1, y_2, ..., y_n\\}\\)\nsample of ages \\(D = \\{(x_1, y_1), (x_2, y_2), ..., (x_n, y_n)\\}\\)\n\n\ndimensions\n\\(\\hat{\\theta_0}\\) is 1-D\n\\(\\hat{\\theta} = [\\hat{\\theta_0}, \\hat{\\theta_1}]\\) is 2-D\n\n\nloss surface\n2-D \n3-D \n\n\nloss model\n\\(\\hat{R}(\\theta) = \\frac{1}{n}\\sum^{n}_{i=1} (y_i - \\theta_0)^2\\)\n\\(\\hat{R}(\\theta_0, \\theta_1) = \\frac{1}{n}\\sum^{n}_{i=1} (y_i - (\\theta_0 + \\theta_1 x))^2\\)\n\n\nRMSE\n7.72\n4.31\n\n\npredictions visualized\nrug plot \nscatter plot \n\n\n\n(Notice how the points for our SLR scatter plot are visually not a great linear fit. We’ll come back to this).\nThe code for generating the graphs and models is included below, but we won’t go over it in too much depth.\n\n\nCode\ndugongs = pd.read_csv(\"data/dugongs.csv\")\ndata_constant = dugongs[\"Age\"]\ndata_linear = dugongs[[\"Length\", \"Age\"]]\n\n\n\n\nCode\n# Constant Model + MSE\nplt.style.use('default') # Revert style to default mpl\nadjust_fontsize(size=16)\n%matplotlib inline\n\ndef mse_constant(theta, data):\n return np.mean(np.array([(y_obs - theta) ** 2 for y_obs in data]), axis=0)\n\nthetas = np.linspace(-20, 42, 1000)\nl2_loss_thetas = mse_constant(thetas, data_constant)\n\n# Plotting the loss surface\nplt.plot(thetas, l2_loss_thetas)\nplt.xlabel(r'$\\theta_0$')\nplt.ylabel(r'MSE')\n\n# Optimal point\nthetahat = np.mean(data_constant)\nplt.scatter([thetahat], [mse_constant(thetahat, data_constant)], s=50, label = r\"$\\hat{\\theta}_0$\")\nplt.legend();\n# plt.show()\n\n\n\n\n\n\n\n\n\n\n\nCode\n# SLR + MSE\ndef mse_linear(theta_0, theta_1, data_linear):\n data_x, data_y = data_linear.iloc[:, 0], data_linear.iloc[:, 1]\n return np.mean(\n np.array([(y - (theta_0 + theta_1 * x)) ** 2 for x, y in zip(data_x, data_y)]),\n axis=0,\n )\n\n\n# plotting the loss surface\ntheta_0_values = np.linspace(-80, 20, 80)\ntheta_1_values = np.linspace(-10, 30, 80)\nmse_values = np.array(\n [[mse_linear(x, y, data_linear) for x in theta_0_values] for y in theta_1_values]\n)\n\n# Optimal point\ndata_x, data_y = data_linear.iloc[:, 0], data_linear.iloc[:, 1]\ntheta_1_hat = np.corrcoef(data_x, data_y)[0, 1] * np.std(data_y) / np.std(data_x)\ntheta_0_hat = np.mean(data_y) - theta_1_hat * np.mean(data_x)\n\n# Create the 3D plot\nfig = plt.figure(figsize=(7, 5))\nax = fig.add_subplot(111, projection=\"3d\")\n\nX, Y = np.meshgrid(theta_0_values, theta_1_values)\nsurf = ax.plot_surface(\n X, Y, mse_values, cmap=\"viridis\", alpha=0.6\n) # Use alpha to make it slightly transparent\n\n# Scatter point using matplotlib\nsc = ax.scatter(\n [theta_0_hat],\n [theta_1_hat],\n [mse_linear(theta_0_hat, theta_1_hat, data_linear)],\n marker=\"o\",\n color=\"red\",\n s=100,\n label=\"theta hat\",\n)\n\n# Create a colorbar\ncbar = fig.colorbar(surf, ax=ax, shrink=0.5, aspect=10)\ncbar.set_label(\"Cost Value\")\n\nax.set_title(\"MSE for different $\\\\theta_0, \\\\theta_1$\")\nax.set_xlabel(\"$\\\\theta_0$\")\nax.set_ylabel(\"$\\\\theta_1$\")\nax.set_zlabel(\"MSE\")\n\n# plt.show()\n\n\nText(0.5, 0, 'MSE')\n\n\n\n\n\n\n\n\n\n\n\nCode\n# Predictions\nyobs = data_linear[\"Age\"] # The true observations y\nxs = data_linear[\"Length\"] # Needed for linear predictions\nn = len(yobs) # Predictions\n\nyhats_constant = [thetahat for i in range(n)] # Not used, but food for 
thought\nyhats_linear = [theta_0_hat + theta_1_hat * x for x in xs]\n\n\n\n\nCode\n# Constant Model Rug Plot\n# In case we're in a weird style state\nsns.set_theme()\nadjust_fontsize(size=16)\n%matplotlib inline\n\nfig = plt.figure(figsize=(8, 1.5))\nsns.rugplot(yobs, height=0.25, lw=2) ;\nplt.axvline(thetahat, color='red', lw=4, label=r\"$\\hat{\\theta}_0$\");\nplt.legend()\nplt.yticks([]);\n# plt.show()\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\n\n\nCode\n# SLR model scatter plot \n# In case we're in a weird style state\nsns.set_theme()\nadjust_fontsize(size=16)\n%matplotlib inline\n\nsns.scatterplot(x=xs, y=yobs)\nplt.plot(xs, yhats_linear, color='red', lw=4);\n# plt.savefig('dugong_line.png', bbox_inches = 'tight');\n# plt.show()\n\n\n\n\n\n\n\n\n\nInterpreting the RMSE (Root Mean Squared Error):\n\nBecause the constant error is HIGHER than the linear error,\nThe constant model is WORSE than the linear model (at least for this metric).", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#constant-model-mae", - "href": "constant_model_loss_transformations/loss_transformations.html#constant-model-mae", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.3 Constant Model + MAE", - "text": "11.3 Constant Model + MAE\nWe see now that changing the model used for prediction leads to a wildly different result for the optimal model parameter. What happens if we instead change the loss function used in model evaluation?\nThis time, we will consider the constant model with L1 (absolute loss) as the loss function. This means that the average loss will be expressed as the Mean Absolute Error (MAE).\n\nChoose a model: constant model\nChoose a loss function: L1 loss\nFit the model\nEvaluate model performance\n\n\n11.3.1 Deriving the optimal \\(\\theta_0\\)\nRecall that the MAE is average absolute loss (L1 loss) over the data \\(D = \\{y_1, y_2, ..., y_n\\}\\).\n\\[\\hat{R}(\\theta_0) = \\frac{1}{n}\\sum^{n}_{i=1} |y_i - \\hat{y_i}| \\]\nGiven the constant model \\(\\hat{y} = \\theta_0\\), we can write the MAE as:\n\\[\\hat{R}(\\theta_0) = \\frac{1}{n}\\sum^{n}_{i=1} |y_i - \\theta_0| \\]\nTo fit the model, we find the optimal parameter value \\(\\hat{\\theta_0}\\) that minimizes the MAE by differentiating using a calculus approach:\n\nDifferentiate with respect to \\(\\hat{\\theta_0}\\):\n\n\\[\n\\begin{align}\n\\hat{R}(\\theta_0) &= \\frac{1}{n}\\sum^{n}_{i=1} |y_i - \\theta_0| \\\\\n\\frac{d}{d\\theta_0} R(\\theta_0) &= \\frac{d}{d\\theta_0} \\left(\\frac{1}{n} \\sum^{n}_{i=1} |y_i - \\theta_0| \\right) \\\\\n&= \\frac{1}{n} \\sum^{n}_{i=1} \\frac{d}{d\\theta_0} |y_i - \\theta_0|\n\\end{align}\n\\]\n\nHere, we seem to have run into a problem: the derivative of an absolute value is undefined when the argument is 0 (i.e. when \\(y_i = \\theta_0\\)). For now, we’ll ignore this issue. It turns out that disregarding this case doesn’t influence our final result.\nTo perform the derivative, consider two cases. When \\(\\theta_0\\) is less than or equal to \\(y_i\\), the term \\(y_i - \\theta_0\\) will be positive and the absolute value has no impact. When \\(\\theta_0\\) is greater than \\(y_i\\), the term \\(y_i - \\theta_0\\) will be negative. 
Applying the absolute value will convert this to a positive value, which we can express by saying \\(-(y_i - \\theta_0) = \\theta_0 - y_i\\).\n\n\\[|y_i - \\theta_0| = \\begin{cases} y_i - \\theta_0 \\quad \\text{ if } \\theta_0 \\le y_i \\\\ \\theta_0 - y_i \\quad \\text{if }\\theta_0 > y_i \\end{cases}\\]\n\nTaking derivatives:\n\n\\[\\frac{d}{d\\theta_0} |y_i - \\theta_0| = \\begin{cases} \\frac{d}{d\\theta_0} (y_i - \\theta_0) = -1 \\quad \\text{if }\\theta_0 < y_i \\\\ \\frac{d}{d\\theta_0} (\\theta_0 - y_i) = 1 \\quad \\text{if }\\theta_0 > y_i \\end{cases}\\]\n\nThis means that we obtain a different value for the derivative for data points where \\(\\theta_0 < y_i\\) and where \\(\\theta_0 > y_i\\). We can summarize this by saying:\n\n\\[\n\\frac{d}{d\\theta_0} R(\\theta_0) = \\frac{1}{n} \\sum^{n}_{i=1} \\frac{d}{d\\theta_0} |y_i - \\theta_0| \\\\\n= \\frac{1}{n} \\left[\\sum_{\\theta_0 < y_i} (-1) + \\sum_{\\theta_0 > y_i} (+1) \\right]\n\\]\n\nIn other words, we take the sum of values for \\(i = 1, 2, ..., n\\):\n\n\\(-1\\) if our observation \\(y_i\\) is greater than our prediction \\(\\hat{\\theta_0}\\)\n\\(+1\\) if our observation \\(y_i\\) is smaller than our prediction \\(\\hat{\\theta_0}\\)\n\n\n\nSet the derivative equation equal to 0: \\[ 0 = \\frac{1}{n}\\sum_{\\hat{\\theta_0} < y_i} (-1) + \\frac{1}{n}\\sum_{\\hat{\\theta_0} > y_i} (+1) \\]\nSolve for \\(\\hat{\\theta_0}\\): \\[ 0 = -\\frac{1}{n}\\sum_{\\hat{\\theta_0} < y_i} (1) + \\frac{1}{n}\\sum_{\\hat{\\theta_0} > y_i} (1)\\]\n\n\\[\\sum_{\\hat{\\theta_0} < y_i} (1) = \\sum_{\\hat{\\theta_0} > y_i} (1) \\]\nThus, the constant model parameter \\(\\theta = \\hat{\\theta_0}\\) that minimizes MAE must satisfy:\n\\[ \\sum_{\\hat{\\theta_0} < y_i} (1) = \\sum_{\\hat{\\theta_0} > y_i} (1) \\]\nIn other words, the number of observations greater than \\(\\theta_0\\) must be equal to the number of observations less than \\(\\theta_0\\); there must be an equal number of points on the left and right sides of the equation. 
This is the definition of median, so our optimal value is \\[ \\hat{\\theta_0} = median(y) \\]", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#summary-loss-optimization-calculus-and-critical-points", - "href": "constant_model_loss_transformations/loss_transformations.html#summary-loss-optimization-calculus-and-critical-points", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.4 Summary: Loss Optimization, Calculus, and Critical Points", - "text": "11.4 Summary: Loss Optimization, Calculus, and Critical Points\nFirst, define the objective function as average loss.\n\nPlug in L1 or L2 loss.\nPlug in the model so that the resulting expression is a function of \\(\\theta\\).\n\nThen, find the minimum of the objective function:\n\nDifferentiate with respect to \\(\\theta\\).\nSet equal to 0.\nSolve for \\(\\hat{\\theta}\\).\n(If we have multiple parameters) repeat steps 1-3 with partial derivatives.\n\nRecall critical points from calculus: \\(R(\\hat{\\theta})\\) could be a minimum, maximum, or saddle point!\n\nWe should technically also perform the second derivative test, i.e., show \\(R''(\\hat{\\theta}) > 0\\).\nMSE has a property—convexity—that guarantees that \\(R(\\hat{\\theta})\\) is a global minimum.\nThe proof of convexity for MAE is beyond this course.", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#comparing-loss-functions", - "href": "constant_model_loss_transformations/loss_transformations.html#comparing-loss-functions", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.5 Comparing Loss Functions", - "text": "11.5 Comparing Loss Functions\nWe’ve now tried our hand at fitting a model under both MSE and MAE cost functions. How do the two results compare?\nLet’s consider a dataset where each entry represents the number of drinks sold at a bubble tea store each day. We’ll fit a constant model to predict the number of drinks that will be sold tomorrow.\n\ndrinks = np.array([20, 21, 22, 29, 33])\ndrinks\n\narray([20, 21, 22, 29, 33])\n\n\nFrom our derivations above, we know that the optimal model parameter under MSE cost is the mean of the dataset. Under MAE cost, the optimal parameter is the median of the dataset.\n\nnp.mean(drinks), np.median(drinks)\n\n(25.0, 22.0)\n\n\nIf we plot each empirical risk function across several possible values of \\(\\theta\\), we find that each \\(\\hat{\\theta}\\) does indeed correspond to the lowest value of error:\n\nNotice that the MSE above is a smooth function – it is differentiable at all points, making it easy to minimize using numerical methods. The MAE, in contrast, is not differentiable at each of its “kinks.” We’ll explore how the smoothness of the cost function can impact our ability to apply numerical optimization in a few weeks.\nHow do outliers affect each cost function? Imagine we replace the largest value in the dataset with 1000. The mean of the data increases substantially, while the median is nearly unaffected.\n\ndrinks_with_outlier = np.append(drinks, 1033)\ndisplay(drinks_with_outlier)\nnp.mean(drinks_with_outlier), np.median(drinks_with_outlier)\n\narray([ 20, 21, 22, 29, 33, 1033])\n\n\n(193.0, 25.5)\n\n\n\nThis means that under the MSE, the optimal model parameter \\(\\hat{\\theta}\\) is strongly affected by the presence of outliers. 
Under the MAE, the optimal parameter is not as influenced by outlying data. We can generalize this by saying that the MSE is sensitive to outliers, while the MAE is robust to outliers.\nLet’s try another experiment. This time, we’ll add an additional, non-outlying datapoint to the data.\n\ndrinks_with_additional_observation = np.append(drinks, 35)\ndrinks_with_additional_observation\n\narray([20, 21, 22, 29, 33, 35])\n\n\nWhen we again visualize the cost functions, we find that the MAE now plots a horizontal line between 22 and 29. This means that there are infinitely many optimal values for the model parameter: any value \\(\\hat{\\theta} \\in [22, 29]\\) will minimize the MAE. In contrast, the MSE still has a single best value for \\(\\hat{\\theta}\\). In other words, the MSE has a unique solution for \\(\\hat{\\theta}\\); the MAE is not guaranteed to have a single unique solution.\n \nTo summarize our example,\n\n\n\n\n\n\n\n\n\nMSE (Mean Squared Loss)\nMAE (Mean Absolute Loss)\n\n\n\n\nLoss Function\n\\(\\hat{R}(\\theta) = \\frac{1}{n}\\sum^{n}_{i=1} (y_i - \\theta_0)^2\\)\n\\(\\hat{R}(\\theta) = \\frac{1}{n}\\sum^{n}_{i=1} |y_i - \\theta_0|\\)\n\n\nOptimal \\(\\hat{\\theta_0}\\)\n\\(\\hat{\\theta_0} = mean(y) = \\bar{y}\\)\n\\(\\hat{\\theta_0} = median(y)\\)\n\n\nLoss Surface\n\n\n\n\nShape\nSmooth - easy to minimize using numerical methods (in a few weeks)\nPiecewise - at each of the “kinks,” it’s not differentiable. Harder to minimize.\n\n\nOutliers\nSensitive to outliers (since they change mean substantially). Sensitivity also depends on the dataset size.\nMore robust to outliers.\n\n\n\\(\\hat{\\theta_0}\\) Uniqueness\nUnique \\(\\hat{\\theta_0}\\)\nInfinitely many \\(\\hat{\\theta_0}\\)s", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#transformations-to-fit-linear-models", - "href": "constant_model_loss_transformations/loss_transformations.html#transformations-to-fit-linear-models", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.6 Transformations to fit Linear Models", - "text": "11.6 Transformations to fit Linear Models\nAt this point, we have an effective method of fitting models to predict linear relationships. Given a feature variable and target, we can apply our four-step process to find the optimal model parameters.\nA key word above is linear. When we computed parameter estimates earlier, we assumed that \\(x_i\\) and \\(y_i\\) shared a roughly linear relationship. Data in the real world isn’t always so straightforward, but we can transform the data to try and obtain linearity.\nThe Tukey-Mosteller Bulge Diagram is a useful tool for summarizing what transformations can linearize the relationship between two variables. To determine what transformations might be appropriate, trace the shape of the “bulge” made by your data. Find the quadrant of the diagram that matches this bulge. The transformations shown on the vertical and horizontal axes of this quadrant can help improve the fit between the variables.\n\nNote that:\n\nThere are multiple solutions. Some will fit better than others.\nsqrt and log make a value “smaller.”\nRaising to a power makes a value “bigger.”\nEach of these transformations equates to increasing or decreasing the scale of an axis.\n\nOther goals in addition to linearity are possible, for example, making data appear more symmetric. 
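One rough way to compare candidate transformations from the diagram is to apply each one and check which gives the strongest linear association. Below is a small sketch with made-up, non-linear data (the transformations tried here are illustrative, not exhaustive):\n\n\nCode\nimport numpy as np\n\nnp.random.seed(42)\n\n# Made-up data with a roughly exponential (non-linear) trend\nx = np.linspace(1, 10, 50)\ny = np.exp(0.4 * x) * np.random.uniform(0.9, 1.1, 50)\n\n# Compare the strength of the linear association before and after transforming y\nfor label, y_t in [('raw y', y), ('log(y)', np.log(y)), ('sqrt(y)', np.sqrt(y))]:\n    r = np.corrcoef(x, y_t)[0, 1]\n    print(f'{label}: r = {r:.3f}')\n\n\nWhichever transformed version has a correlation closest to \\(\\pm 1\\) is the more promising candidate for a linear fit. 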
Linearity allows us to fit lines to the transformed data.\nLet’s revisit our dugongs example. The lengths and ages are plotted below:\n\n\nCode\n# `corrcoef` computes the correlation coefficient between two variables\n# `std` finds the standard deviation\nx = dugongs[\"Length\"]\ny = dugongs[\"Age\"]\nr = np.corrcoef(x, y)[0, 1]\ntheta_1 = r * np.std(y) / np.std(x)\ntheta_0 = np.mean(y) - theta_1 * np.mean(x)\n\nfig, ax = plt.subplots(1, 2, dpi=200, figsize=(8, 3))\nax[0].scatter(x, y)\nax[0].set_xlabel(\"Length\")\nax[0].set_ylabel(\"Age\")\n\nax[1].scatter(x, y)\nax[1].plot(x, theta_0 + theta_1 * x, \"tab:red\")\nax[1].set_xlabel(\"Length\")\nax[1].set_ylabel(\"Age\")\n\n\nText(0, 0.5, 'Age')\n\n\n\n\n\n\n\n\n\nLooking at the plot on the left, we see that there is a slight curvature to the data points. Plotting the SLR curve on the right results in a poor fit.\nFor SLR to perform well, we’d like there to be a rough linear trend relating \"Age\" and \"Length\". What is making the raw data deviate from a linear relationship? Notice that the data points with \"Length\" greater than 2.6 have disproportionately high values of \"Age\" relative to the rest of the data. If we could manipulate these data points to have lower \"Age\" values, we’d “shift” these points downwards and reduce the curvature in the data. Applying a logarithmic transformation to \\(y_i\\) (that is, taking \\(\\log(\\) \"Age\" \\()\\) ) would achieve just that.\nAn important word on \\(\\log\\): in Data 100 (and most upper-division STEM courses), \\(\\log\\) denotes the natural logarithm with base \\(e\\). The base-10 logarithm, where relevant, is indicated by \\(\\log_{10}\\).\n\n\nCode\nz = np.log(y)\n\nr = np.corrcoef(x, z)[0, 1]\ntheta_1 = r * np.std(z) / np.std(x)\ntheta_0 = np.mean(z) - theta_1 * np.mean(x)\n\nfig, ax = plt.subplots(1, 2, dpi=200, figsize=(8, 3))\nax[0].scatter(x, z)\nax[0].set_xlabel(\"Length\")\nax[0].set_ylabel(r\"$\\log{(Age)}$\")\n\nax[1].scatter(x, z)\nax[1].plot(x, theta_0 + theta_1 * x, \"tab:red\")\nax[1].set_xlabel(\"Length\")\nax[1].set_ylabel(r\"$\\log{(Age)}$\")\n\nplt.subplots_adjust(wspace=0.3)\n\n\n\n\n\n\n\n\n\nOur SLR fit looks a lot better! We now have a new target variable: the SLR model is now trying to predict the log of \"Age\", rather than the untransformed \"Age\". In other words, we are applying the transformation \\(z_i = \\log{(y_i)}\\). Notice that the resulting model is still linear in the parameters \\(\\theta = [\\theta_0, \\theta_1]\\). The SLR model becomes:\n\\[\\hat{\\log{y}} = \\theta_0 + \\theta_1 x\\] \\[\\hat{z} = \\theta_0 + \\theta_1 x\\]\nIt turns out that this linearized relationship can help us understand the underlying relationship between \\(x\\) and \\(y\\). If we rearrange the relationship above, we find:\n\\[\\log{(y)} = \\theta_0 + \\theta_1 x\\] \\[y = e^{\\theta_0 + \\theta_1 x}\\] \\[y = (e^{\\theta_0})e^{\\theta_1 x}\\] \\[y_i = C e^{k x}\\]\nFor some constants \\(C\\) and \\(k\\).\n\\(y\\) is an exponential function of \\(x\\). Applying an exponential fit to the untransformed variables corroborates this finding.\n\n\nCode\nplt.figure(dpi=120, figsize=(4, 3))\n\nplt.scatter(x, y)\nplt.plot(x, np.exp(theta_0) * np.exp(theta_1 * x), \"tab:red\")\nplt.xlabel(\"Length\")\nplt.ylabel(\"Age\")\n\n\nText(0, 0.5, 'Age')\n\n\n\n\n\n\n\n\n\nYou may wonder: why did we choose to apply a log transformation specifically? 
Why not some other function to linearize the data?\nPractically, many other mathematical operations that modify the relative scales of \"Age\" and \"Length\" could have worked here.", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#multiple-linear-regression", - "href": "constant_model_loss_transformations/loss_transformations.html#multiple-linear-regression", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.7 Multiple Linear Regression", - "text": "11.7 Multiple Linear Regression\nMultiple linear regression is an extension of simple linear regression that adds additional features to the model. The multiple linear regression model takes the form:\n\\[\\hat{y} = \\theta_0\\:+\\:\\theta_1x_{1}\\:+\\:\\theta_2 x_{2}\\:+\\:...\\:+\\:\\theta_p x_{p}\\]\nOur predicted value of \\(y\\), \\(\\hat{y}\\), is a linear combination of the single observations (features), \\(x_i\\), and the parameters, \\(\\theta_i\\).\nWe’ll dive deeper into Multiple Linear Regression in the next lecture.", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "constant_model_loss_transformations/loss_transformations.html#bonus-calculating-constant-model-mse-using-an-algebraic-trick", - "href": "constant_model_loss_transformations/loss_transformations.html#bonus-calculating-constant-model-mse-using-an-algebraic-trick", - "title": "11  Constant Model, Loss, and Transformations", - "section": "11.8 Bonus: Calculating Constant Model MSE Using an Algebraic Trick", - "text": "11.8 Bonus: Calculating Constant Model MSE Using an Algebraic Trick\nEarlier, we calculated the constant model MSE using calculus. It turns out that there is a much more elegant way of performing this same minimization algebraically, without using calculus at all.\nIn this calculation, we use the fact that the sum of deviations from the mean is \\(0\\) or that \\(\\sum_{i=1}^{n} (y_i - \\bar{y}) = 0\\).\nLet’s quickly walk through the proof for this: \\[\n\\begin{align}\n\\sum_{i=1}^{n} (y_i - \\bar{y}) &= \\sum_{i=1}^{n} y_i - \\sum_{i=1}^{n} \\bar{y} \\\\\n&= \\sum_{i=1}^{n} y_i - n\\bar{y} \\\\\n&= \\sum_{i=1}^{n} y_i - n\\frac{1}{n}\\sum_{i=1}^{n}y_i \\\\\n&= \\sum_{i=1}^{n} y_i - \\sum_{i=1}^{n}y_i \\\\\n& = 0\n\\end{align}\n\\]\nIn our calculations, we’ll also be using the definition of the variance as a sample. 
As a refresher:\n\\[\\sigma_y^2 = \\frac{1}{n}\\sum_{i=1}^{n} (y_i - \\bar{y})^2\\]\nGetting into our calculation for MSE minimization:\n\\[\n\\begin{align}\nR(\\theta) &= {\\frac{1}{n}}\\sum^{n}_{i=1} (y_i - \\theta)^2\n\\\\ &= \\frac{1}{n}\\sum^{n}_{i=1} [(y_i - \\bar{y}) + (\\bar{y} - \\theta)]^2\\quad \\quad \\text{using the trick that a-b can be written as (a-c) + (c-b) } \\\\\n&\\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\space \\space \\text{where a, b, and c are any numbers}\n\\\\ &= \\frac{1}{n}\\sum^{n}_{i=1} [(y_i - \\bar{y})^2 + 2(y_i - \\bar{y})(\\bar{y} - \\theta) + (\\bar{y} - \\theta)^2]\n\\\\ &= \\frac{1}{n}[\\sum^{n}_{i=1}(y_i - \\bar{y})^2 + 2(\\bar{y} - \\theta)\\sum^{n}_{i=1}(y_i - \\bar{y}) + n(\\bar{y} - \\theta)^2] \\quad \\quad \\text{distribute sum to individual terms}\n\\\\ &= \\frac{1}{n}\\sum^{n}_{i=1}(y_i - \\bar{y})^2 + \\frac{2}{n}(\\bar{y} - \\theta)\\cdot0 + (\\bar{y} - \\theta)^2 \\quad \\quad \\text{sum of deviations from mean is 0}\n\\\\ &= \\sigma_y^2 + (\\bar{y} - \\theta)^2\n\\end{align}\n\\]\nSince variance can’t be negative, we know that our first term, \\(\\sigma_y^2\\), is greater than or equal to \\(0\\). Also note that the first term doesn’t involve \\(\\theta\\) at all, meaning changing our model won’t change this value. For the purposes of determining \\(\\hat{\\theta}\\), we can then essentially ignore this term.\nLooking at the second term, \\((\\bar{y} - \\theta)^2\\), since it is squared, we know it must be greater than or equal to \\(0\\). As this term does involve \\(\\theta\\), picking the value of \\(\\theta\\) that minimizes this term will allow us to minimize our average loss. For the second term to equal \\(0\\), \\(\\theta = \\bar{y}\\), or in other words, \\(\\hat{\\theta} = \\bar{y} = mean(y)\\).\n\n11.8.0.0.1 Note\nIn the derivation above, we decompose the expected loss, \\(R(\\theta)\\), into two key components: the variance of the data, \\(\\sigma_y^2\\), and the square of the bias, \\((\\bar{y} - \\theta)^2\\). This decomposition is insightful for understanding the behavior of estimators in statistical models.\n\nVariance, \\(\\sigma_y^2\\): This term represents the spread of the data points around their mean, \\(\\bar{y}\\), and is a measure of the data’s inherent variability. Importantly, it does not depend on the choice of \\(\\theta\\), meaning it’s a fixed property of the data. Variance serves as an indicator of the data’s dispersion and is crucial in understanding the dataset’s structure, but it remains constant regardless of how we adjust our model parameter \\(\\theta\\).\nBias Squared, \\((\\bar{y} - \\theta)^2\\): This term captures the bias of the estimator, defined as the square of the difference between the mean of the data points, \\(\\bar{y}\\), and the parameter \\(\\theta\\). The bias quantifies the systematic error introduced when estimating \\(\\theta\\). Minimizing this term is essential for improving the accuracy of the estimator. When \\(\\theta = \\bar{y}\\), the bias is \\(0\\), indicating that the estimator is unbiased for the parameter it estimates. 
This highlights a critical principle in statistical estimation: choosing \\(\\theta\\) to be the sample mean, \\(\\bar{y}\\), minimizes the average loss, rendering the estimator both efficient and unbiased for the population mean.", - "crumbs": [ - "11  Constant Model, Loss, and Transformations" - ] - }, - { - "objectID": "ols/ols.html", - "href": "ols/ols.html", - "title": "12  Ordinary Least Squares", - "section": "", - "text": "12.1 OLS Problem Formulation", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "ols/ols.html#ols-problem-formulation", - "href": "ols/ols.html#ols-problem-formulation", - "title": "12  Ordinary Least Squares", - "section": "", - "text": "12.1.1 Multiple Linear Regression\nMultiple linear regression is an extension of simple linear regression that adds additional features to the model. The multiple linear regression model takes the form:\n\\[\\hat{y} = \\theta_0\\:+\\:\\theta_1x_{1}\\:+\\:\\theta_2 x_{2}\\:+\\:...\\:+\\:\\theta_p x_{p}\\]\nOur predicted value of \\(y\\), \\(\\hat{y}\\), is a linear combination of the single observations (features), \\(x_i\\), and the parameters, \\(\\theta_i\\).\nWe can explore this idea further by looking at a dataset containing aggregate per-player data from the 2018-19 NBA season, downloaded from Kaggle.\n\n\nCode\nimport pandas as pd\nnba = pd.read_csv('data/nba18-19.csv', index_col=0)\nnba.index.name = None # Drops name of index (players are ordered by rank)\n\n\n\n\nCode\nnba.head(5)\n\n\n\n\n\n\n\n\n\nPlayer\nPos\nAge\nTm\nG\nGS\nMP\nFG\nFGA\nFG%\n...\nFT%\nORB\nDRB\nTRB\nAST\nSTL\nBLK\nTOV\nPF\nPTS\n\n\n\n\n1\nÁlex Abrines\\abrinal01\nSG\n25\nOKC\n31\n2\n19.0\n1.8\n5.1\n0.357\n...\n0.923\n0.2\n1.4\n1.5\n0.6\n0.5\n0.2\n0.5\n1.7\n5.3\n\n\n2\nQuincy Acy\\acyqu01\nPF\n28\nPHO\n10\n0\n12.3\n0.4\n1.8\n0.222\n...\n0.700\n0.3\n2.2\n2.5\n0.8\n0.1\n0.4\n0.4\n2.4\n1.7\n\n\n3\nJaylen Adams\\adamsja01\nPG\n22\nATL\n34\n1\n12.6\n1.1\n3.2\n0.345\n...\n0.778\n0.3\n1.4\n1.8\n1.9\n0.4\n0.1\n0.8\n1.3\n3.2\n\n\n4\nSteven Adams\\adamsst01\nC\n25\nOKC\n80\n80\n33.4\n6.0\n10.1\n0.595\n...\n0.500\n4.9\n4.6\n9.5\n1.6\n1.5\n1.0\n1.7\n2.6\n13.9\n\n\n5\nBam Adebayo\\adebaba01\nC\n21\nMIA\n82\n28\n23.3\n3.4\n5.9\n0.576\n...\n0.735\n2.0\n5.3\n7.3\n2.2\n0.9\n0.8\n1.5\n2.5\n8.9\n\n\n\n\n5 rows × 29 columns\n\n\n\nLet’s say we are interested in predicting the number of points (PTS) an athlete will score in a basketball game this season.\nSuppose we want to fit a linear model by using some characteristics, or features of a player. Specifically, we’ll focus on field goals, assists, and 3-point attempts.\n\nFG, the average number of (2-point) field goals per game\nAST, the average number of assists per game\n3PA, the average number of 3-point field goals attempted per game\n\n\n\nCode\nnba[['FG', 'AST', '3PA', 'PTS']].head()\n\n\n\n\n\n\n\n\n\nFG\nAST\n3PA\nPTS\n\n\n\n\n1\n1.8\n0.6\n4.1\n5.3\n\n\n2\n0.4\n0.8\n1.5\n1.7\n\n\n3\n1.1\n1.9\n2.2\n3.2\n\n\n4\n6.0\n1.6\n0.0\n13.9\n\n\n5\n3.4\n2.2\n0.2\n8.9\n\n\n\n\n\n\n\nBecause we are now dealing with many parameter values, we’ve collected them all into a parameter vector with dimensions \\((p+1) \\times 1\\) to keep things tidy. Remember that \\(p\\) represents the number of features we have (in this case, 3).\n\\[\\theta = \\begin{bmatrix}\n \\theta_{0} \\\\\n \\theta_{1} \\\\\n \\vdots \\\\\n \\theta_{p}\n \\end{bmatrix}\\]\nWe are working with two vectors here: a row vector representing the observed data, and a column vector containing the model parameters. 
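As a concrete sketch, the first player’s FG, AST, and 3PA values from the table above can be placed in an observation vector (with a leading 1, explained below) next to a parameter vector; the parameter values here are made up rather than fitted:\n\n\nCode\nimport numpy as np\n\n# One observation: a leading 1, then FG, AST, and 3PA for the first player in the table above\nx_row = np.array([1.0, 1.8, 0.6, 4.1])\n\n# Parameter vector [theta_0, theta_1, theta_2, theta_3] -- made-up values, not fitted\ntheta = np.array([0.5, 2.0, 0.3, 0.8])\n\n# The prediction for this player: multiply corresponding entries and sum them up\ny_hat = x_row @ theta\ny_hat\n\n\n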
The multiple linear regression model is equivalent to the dot (scalar) product of the observation vector and parameter vector.\n\\[[1,\\:x_{1},\\:x_{2},\\:x_{3},\\:...,\\:x_{p}] \\theta = [1,\\:x_{1},\\:x_{2},\\:x_{3},\\:...,\\:x_{p}] \\begin{bmatrix}\n \\theta_{0} \\\\\n \\theta_{1} \\\\\n \\vdots \\\\\n \\theta_{p}\n \\end{bmatrix} = \\theta_0\\:+\\:\\theta_1x_{1}\\:+\\:\\theta_2 x_{2}\\:+\\:...\\:+\\:\\theta_p x_{p}\\]\nNotice that we have inserted 1 as the first value in the observation vector. When the dot product is computed, this 1 will be multiplied with \\(\\theta_0\\) to give the intercept of the regression model. We call this 1 entry the intercept or bias term.\nGiven that we have three features here, we can express this model as: \\[\\hat{y} = \\theta_0\\:+\\:\\theta_1x_{1}\\:+\\:\\theta_2 x_{2}\\:+\\:\\theta_3 x_{3}\\]\nOur features are represented by \\(x_1\\) (FG), \\(x_2\\) (AST), and \\(x_3\\) (3PA) with each having correpsonding parameters, \\(\\theta_1\\), \\(\\theta_2\\), and \\(\\theta_3\\).\nIn statistics, this model + loss is called Ordinary Least Squares (OLS). The solution to OLS is the minimizing loss for parameters \\(\\hat{\\theta}\\), also called the least squares estimate.\n\n\n12.1.2 Linear Algebra Approach\n\n\n\n\n\n\nLinear Algebra Review: Vector Dot Product\n\n\n\n\n\nThe dot product (or inner product) is a vector operation that:\n\nCan only be carried out on two vectors of the same length\nSums up the products of the corresponding entries of the two vectors\nReturns a single number\n\nFor example, let \\[\n\\begin{align}\n\\vec{u} = \\begin{bmatrix}1 \\\\ 2 \\\\ 3\\end{bmatrix}, \\vec{v} = \\begin{bmatrix}1 \\\\ 1 \\\\ 1\\end{bmatrix}\n\\end{align}\n\\]\nThe dot product between \\(\\vec{u}\\) and \\(\\vec{v}\\) is \\[\n\\begin{align}\n\\vec{u} \\cdot \\vec{v} &= \\vec{u}^T \\vec{v} = \\vec{v}^T \\vec{u} \\\\\n &= 1 \\cdot 1 + 2 \\cdot 1 + 3 \\cdot 1 \\\\\n &= 6\n\\end{align}\n\\]\nWhile not in scope, note that we can also interpret the dot product geometrically:\n\nIt is the product of three things: the magnitude of both vectors, and the cosine of the angles between them: \\[\\vec{u} \\cdot \\vec{v} = ||\\vec{u}|| \\cdot ||\\vec{v}|| \\cdot {cos \\theta}\\]\n\n\n\n\nWe now know how to generate a single prediction from multiple observed features. Data scientists usually work at scale – that is, they want to build models that can produce many predictions, all at once. The vector notation we introduced above gives us a hint on how we can expedite multiple linear regression. We want to use the tools of linear algebra.\nLet’s think about how we can apply what we did above. To accommodate for the fact that we’re considering several feature variables, we’ll adjust our notation slightly. Each observation can now be thought of as a row vector with an entry for each of \\(p\\) features.\n\n\n\n\n\n\n\n\n\nTo make a prediction from the first observation in the data, we take the dot product of the parameter vector and first observation vector. To make a prediction from the second observation, we would repeat this process to find the dot product of the parameter vector and the second observation vector. If we wanted to find the model predictions for each observation in the dataset, we’d repeat this process for all \\(n\\) observations in the data.\n\\[\\hat{y}_1 = \\theta_0 + \\theta_1 x_{11} + \\theta_2 x_{12} + ... + \\theta_p x_{1p} = [1,\\:x_{11},\\:x_{12},\\:x_{13},\\:...,\\:x_{1p}] \\theta\\] \\[\\hat{y}_2 = \\theta_0 + \\theta_1 x_{21} + \\theta_2 x_{22} + ... 
+ \\theta_p x_{2p} = [1,\\:x_{21},\\:x_{22},\\:x_{23},\\:...,\\:x_{2p}] \\theta\\] \\[\\vdots\\] \\[\\hat{y}_n = \\theta_0 + \\theta_1 x_{n1} + \\theta_2 x_{n2} + ... + \\theta_p x_{np} = [1,\\:x_{n1},\\:x_{n2},\\:x_{n3},\\:...,\\:x_{np}] \\theta\\]\nOur observed data is represented by \\(n\\) row vectors, each with dimension \\((p+1)\\). We can collect them all into a single matrix, which we call \\(\\mathbb{X}\\).\n\n\n\n\n\n\n\n\n\nThe matrix \\(\\mathbb{X}\\) is known as the design matrix. It contains all observed data for each of our \\(p\\) features, where each row corresponds to one observation, and each column corresponds to a feature. It often (but not always) contains an additional column of all ones to represent the intercept or bias column.\nTo review what is happening in the design matrix: each row represents a single observation. For example, a student in Data 100. Each column represents a feature. For example, the ages of students in Data 100. This convention allows us to easily transfer our previous work in DataFrames over to this new linear algebra perspective.\n\n\n\n\n\n\n\n\n\nThe multiple linear regression model can then be restated in terms of matrices: \\[\n\\Large\n\\mathbb{\\hat{Y}} = \\mathbb{X} \\theta\n\\]\nHere, \\(\\mathbb{\\hat{Y}}\\) is the prediction vector with \\(n\\) elements (\\(\\mathbb{\\hat{Y}} \\in \\mathbb{R}^{n}\\)); it contains the prediction made by the model for each of the \\(n\\) input observations. \\(\\mathbb{X}\\) is the design matrix with dimensions \\(\\mathbb{X} \\in \\mathbb{R}^{n \\times (p + 1)}\\), and \\(\\theta\\) is the parameter vector with dimensions \\(\\theta \\in \\mathbb{R}^{(p + 1)}\\). Note that our true output \\(\\mathbb{Y}\\) is also a vector with \\(n\\) elements (\\(\\mathbb{Y} \\in \\mathbb{R}^{n}\\)).\n\n\n\n\n\n\nLinear Algebra Review: Linearity\n\n\n\nAn expression is linear in \\(\\theta\\) (a set of parameters) if it is a linear combination of the elements of the set. Checking if an expression can separate into a matrix product of two terms – a vector of \\(\\theta\\) s, and a matrix/vector not involving \\(\\theta\\) – is a good indicator of linearity.\nFor example, consider the vector \\(\\theta = [\\theta_0, \\theta_1, \\theta_2]\\)\n\n\\(\\hat{y} = \\theta_0 + 2\\theta_1 + 3\\theta_2\\) is linear in theta, and we can separate it into a matrix product of two terms:\n\n\\[\\hat{y} = \\begin{bmatrix} 1 \\space 2 \\space 3 \\end{bmatrix} \\begin{bmatrix} \\theta_0 \\\\ \\theta_1 \\\\ \\theta_2 \\end{bmatrix}\\]\n\n\\(\\hat{y} = \\theta_0\\theta_1 + 2\\theta_1^2 + 3log(\\theta_2)\\) is not linear in theta, as the \\(\\theta_1\\) term is squared, and the \\(\\theta_2\\) term is logged. We cannot separate it into a matrix product of two terms.\n\n\n\n\n\n12.1.3 Mean Squared Error\nWe now have a new approach to understanding models in terms of vectors and matrices. To accompany this new convention, we should update our understanding of risk functions and model fitting.\nRecall our definition of MSE: \\[R(\\theta) = \\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)^2\\]\nAt its heart, the MSE is a measure of distance – it gives an indication of how “far away” the predictions are from the true values, on average.\n\n\n\n\n\n\nLinear Algebra: L2 Norm\n\n\n\nWhen working with vectors, this idea of “distance” or the vector’s size/length is represented by the norm. 
More precisely, the distance between two vectors \(\vec{a}\) and \(\vec{b}\) can be expressed as: \[||\vec{a} - \vec{b}||_2 = \sqrt{(a_1 - b_1)^2 + (a_2 - b_2)^2 + \ldots + (a_n - b_n)^2} = \sqrt{\sum_{i=1}^n (a_i - b_i)^2}\]\nThe double bars are mathematical notation for the norm. The subscript 2 indicates that we are computing the L2, or Euclidean, norm.\nThe two norms we need to know for Data 100 are the L1 and L2 norms (sound familiar?). In this note, we’ll focus on the L2 norm. We’ll dive into the L1 norm in future lectures.\nFor the n-dimensional vector \[\vec{x} = \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \end{bmatrix}\] its L2 vector norm is\n\[||\vec{x}||_2 = \sqrt{(x_1)^2 + (x_2)^2 + \ldots + (x_n)^2} = \sqrt{\sum_{i=1}^n (x_i)^2}\]\nThe L2 vector norm is a generalization of the Pythagorean theorem to \(n\) dimensions. Thus, it can be used as a measure of the length of a vector or even as a measure of the distance between two vectors.\n\n\nWe can express the MSE as a squared L2 norm if we rewrite it in terms of the prediction vector, \(\hat{\mathbb{Y}}\), and true target vector, \(\mathbb{Y}\):\n\[R(\theta) = \frac{1}{n} \sum_{i=1}^n (y_i - \hat{y}_i)^2 = \frac{1}{n} (||\mathbb{Y} - \hat{\mathbb{Y}}||_2)^2\]\nHere, the superscript 2 outside of the parentheses means that we are squaring the norm. If we plug in our linear model \(\hat{\mathbb{Y}} = \mathbb{X} \theta\), we find the MSE cost function in vector notation:\n\[R(\theta) = \frac{1}{n} (||\mathbb{Y} - \mathbb{X} \theta||_2)^2\]\nUnder the linear algebra perspective, our new task is to fit the optimal parameter vector \(\theta\) such that the cost function is minimized. Equivalently, we wish to minimize the norm \[||\mathbb{Y} - \mathbb{X} \theta||_2 = ||\mathbb{Y} - \hat{\mathbb{Y}}||_2.\]\nWe can restate this goal in two ways:\n\nMinimize the distance between the vector of true values, \(\mathbb{Y}\), and the vector of predicted values, \(\mathbb{\hat{Y}}\)\nMinimize the length of the residual vector, defined as: \[e = \mathbb{Y} - \mathbb{\hat{Y}} = \begin{bmatrix}\n y_1 - \hat{y}_1 \\\n y_2 - \hat{y}_2 \\\n \vdots \\\n y_n - \hat{y}_n\n \end{bmatrix}\]\n\n\n12.1.4 A Note on Terminology for Multiple Linear Regression\nThere are several equivalent terms in the context of regression. The ones we use most often for this course are bolded.\n\n\(x\) can be called a\n\nFeature(s)\nCovariate(s)\nIndependent variable(s)\nExplanatory variable(s)\nPredictor(s)\nInput(s)\nRegressor(s)\n\n\(y\) can be called an\n\nOutput\nOutcome\nResponse\nDependent variable\n\n\(\hat{y}\) can be called a\n\nPrediction\nPredicted response\nEstimated value\n\n\(\theta\) can be called a\n\nWeight(s)\nParameter(s)\nCoefficient(s)\n\n\(\hat{\theta}\) can be called an\n\nEstimator(s)\nOptimal parameter(s)\n\nA datapoint \((x, y)\) is also called an observation.", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "ols/ols.html#geometric-derivation", - "href": "ols/ols.html#geometric-derivation", - "title": "12  Ordinary Least Squares", - "section": "12.2 Geometric Derivation", - "text": "12.2 Geometric Derivation\n\n\n\n\n\n\nLinear Algebra: Span\n\n\n\nRecall that the span or column space of a matrix \(\mathbb{X}\) (denoted \(span(\mathbb{X})\)) is the set of all possible linear combinations of the matrix’s columns. 
In other words, the span represents every point in space that could possibly be reached by adding and scaling some combination of the matrix columns. Additionally, if each column of \\(\\mathbb{X}\\) has length \\(n\\), \\(span(\\mathbb{X})\\) is a subspace of \\(\\mathbb{R}^{n}\\).\n\n\n\n\n\n\n\n\nLinear Algebra: Matrix-Vector Multiplication\n\n\n\nThere are 2 ways we can think about matrix-vector multiplication\n\nSo far, we’ve thought of our model as horizontally stacked predictions per datapoint\n\n\n\n\n\n\n\n\n\nHowever, it is helpful sometimes to think of matrix-vector multiplication as performed by columns. We can also think of \\(\\mathbb{Y}\\) as a linear combination of feature vectors, scaled by parameters.\n\n\n\n\n\n\n\n\n\n\n\n\nUp until now, we’ve mostly thought of our model as a scalar product between horizontally stacked observations and the parameter vector. We can also think of \\(\\hat{\\mathbb{Y}}\\) as a linear combination of feature vectors, scaled by the parameters. We use the notation \\(\\mathbb{X}_{:, i}\\) to denote the \\(i\\)th column of the design matrix. You can think of this as following the same convention as used when calling .iloc and .loc. “:” means that we are taking all entries in the \\(i\\)th column.\n\n\n\n\n\n\n\n\n\n\\[\n\\hat{\\mathbb{Y}} =\n\\theta_0 \\begin{bmatrix}\n 1 \\\\\n 1 \\\\\n \\vdots \\\\\n 1\n \\end{bmatrix} + \\theta_1 \\begin{bmatrix}\n x_{11} \\\\\n x_{21} \\\\\n \\vdots \\\\\n x_{n1}\n \\end{bmatrix} + \\ldots + \\theta_p \\begin{bmatrix}\n x_{1p} \\\\\n x_{2p} \\\\\n \\vdots \\\\\n x_{np}\n \\end{bmatrix}\n = \\theta_0 \\mathbb{X}_{:,\\:1} + \\theta_1 \\mathbb{X}_{:,\\:2} + \\ldots + \\theta_p \\mathbb{X}_{:,\\:p+1}\\]\nThis new approach is useful because it allows us to take advantage of the properties of linear combinations.\nBecause the prediction vector, \\(\\hat{\\mathbb{Y}} = \\mathbb{X} \\theta\\), is a linear combination of the columns of \\(\\mathbb{X}\\), we know that the predictions are contained in the span of \\(\\mathbb{X}\\). That is, we know that \\(\\mathbb{\\hat{Y}} \\in \\text{Span}(\\mathbb{X})\\).\nThe diagram below is a simplified view of \\(\\text{Span}(\\mathbb{X})\\), assuming that each column of \\(\\mathbb{X}\\) has length \\(n\\). Notice that the columns of \\(\\mathbb{X}\\) define a subspace of \\(\\mathbb{R}^n\\), where each point in the subspace can be reached by a linear combination of \\(\\mathbb{X}\\)’s columns. The prediction vector \\(\\mathbb{\\hat{Y}}\\) lies somewhere in this subspace.\n\n\n\n\n\n\n\n\n\nExamining this diagram, we find a problem. The vector of true values, \\(\\mathbb{Y}\\), could theoretically lie anywhere in \\(\\mathbb{R}^n\\) space – its exact location depends on the data we collect out in the real world. However, our multiple linear regression model can only make predictions in the subspace of \\(\\mathbb{R}^n\\) spanned by \\(\\mathbb{X}\\). Remember the model fitting goal we established in the previous section: we want to generate predictions such that the distance between the vector of true values, \\(\\mathbb{Y}\\), and the vector of predicted values, \\(\\mathbb{\\hat{Y}}\\), is minimized. 
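As a quick numerical illustration of the column-wise view described above, here is a minimal sketch (the small design matrix and parameter vector are made up for illustration, not taken from the lecture's dataset):

```python
import numpy as np

# A tiny, made-up design matrix: a bias column plus two features (n = 4, p = 2)
X = np.array([[1.0, 2.0, 5.0],
              [1.0, 3.0, 1.0],
              [1.0, 0.0, 4.0],
              [1.0, 1.0, 2.0]])
theta = np.array([0.5, 2.0, -1.0])

# Row view: each prediction is the dot product of one observation row with theta
Y_hat_rows = X @ theta

# Column view: Y_hat is a linear combination of the columns of X, scaled by theta
Y_hat_cols = theta[0] * X[:, 0] + theta[1] * X[:, 1] + theta[2] * X[:, 2]

print(np.allclose(Y_hat_rows, Y_hat_cols))  # True: both views give the same predictions
```

Either way, the prediction vector is confined to \(\text{Span}(\mathbb{X})\), while \(\mathbb{Y}\) generally is not, and that gap is exactly the distance we are trying to minimize.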
This means that we want \\(\\mathbb{\\hat{Y}}\\) to be the vector in \\(\\text{Span}(\\mathbb{X})\\) that is closest to \\(\\mathbb{Y}\\).\nAnother way of rephrasing this goal is to say that we wish to minimize the length of the residual vector \\(e\\), as measured by its \\(L_2\\) norm.\n\n\n\n\n\n\n\n\n\nThe vector in \\(\\text{Span}(\\mathbb{X})\\) that is closest to \\(\\mathbb{Y}\\) is always the orthogonal projection of \\(\\mathbb{Y}\\) onto \\(\\text{Span}(\\mathbb{X}).\\) Thus, we should choose the parameter vector \\(\\theta\\) that makes the residual vector orthogonal to any vector in \\(\\text{Span}(\\mathbb{X})\\). You can visualize this as the vector created by dropping a perpendicular line from \\(\\mathbb{Y}\\) onto the span of \\(\\mathbb{X}\\).\n\n\n\n\n\n\nLinear Algebra: Orthogonality\n\n\n\nRecall that two vectors \\(\\vec{a}\\) and \\(\\vec{b}\\) are orthogonal if their dot product is zero: \\(\\vec{a}^{T}\\vec{b} = 0\\).\nA vector \\(v\\) is orthogonal to the span of a matrix \\(M\\) if and only if \\(v\\) is orthogonal to each column in \\(M\\). Put together, a vector \\(v\\) is orthogonal to \\(\\text{Span}(M)\\) if:\n\\[M^Tv = \\vec{0}\\]\nNote that \\(\\vec{0}\\) represents the zero vector, a \\(d\\)-length vector full of 0s.\n\n\nRemember our goal is to find \\(\\hat{\\theta}\\) such that we minimize the objective function \\(R(\\theta)\\). Equivalently, this is the \\(\\hat{\\theta}\\) such that the residual vector \\(e = \\mathbb{Y} - \\mathbb{X} \\hat{\\theta}\\) is orthogonal to \\(\\text{Span}(\\mathbb{X})\\).\nLooking at the definition of orthogonality of \\(\\mathbb{Y} - \\mathbb{X}\\hat{\\theta}\\) to \\(span(\\mathbb{X})\\), we can write: \\[\\mathbb{X}^T (\\mathbb{Y} - \\mathbb{X}\\hat{\\theta}) = \\vec{0}\\]\nLet’s then rearrange the terms: \\[\\mathbb{X}^T \\mathbb{Y} - \\mathbb{X}^T \\mathbb{X} \\hat{\\theta} = \\vec{0}\\]\nAnd finally, we end up with the normal equation: \\[\\mathbb{X}^T \\mathbb{X} \\hat{\\theta} = \\mathbb{X}^T \\mathbb{Y}\\]\nAny vector \\(\\theta\\) that minimizes MSE on a dataset must satisfy this equation.\nIf \\(\\mathbb{X}^T \\mathbb{X}\\) is invertible, we can conclude: \\[\\hat{\\theta} = (\\mathbb{X}^T \\mathbb{X})^{-1} \\mathbb{X}^T \\mathbb{Y}\\]\nThis is called the least squares estimate of \\(\\theta\\): it is the value of \\(\\theta\\) that minimizes the squared loss.\nNote that the least squares estimate was derived under the assumption that \\(\\mathbb{X}^T \\mathbb{X}\\) is invertible. This condition holds true when \\(\\mathbb{X}^T \\mathbb{X}\\) is full column rank, which, in turn, happens when \\(\\mathbb{X}\\) is full column rank. The proof for why \\(\\mathbb{X}\\) needs to be full column rank is optional and in the Bonus section at the end.", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "ols/ols.html#evaluating-model-performance", - "href": "ols/ols.html#evaluating-model-performance", - "title": "12  Ordinary Least Squares", - "section": "12.3 Evaluating Model Performance", - "text": "12.3 Evaluating Model Performance\nOur geometric view of multiple linear regression has taken us far! We have identified the optimal set of parameter values to minimize MSE in a model of multiple features. Now, we want to understand how well our fitted model performs.\n\n12.3.1 RMSE\nOne measure of model performance is the Root Mean Squared Error, or RMSE. The RMSE is simply the square root of MSE. 
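In code, this is a one-liner (a quick sketch assuming NumPy arrays y and y_hat of true and predicted values):

```python
import numpy as np

# Hypothetical true and predicted values (same units as y)
y = np.array([10.0, 12.0, 9.0, 15.0])
y_hat = np.array([11.0, 11.5, 9.5, 14.0])

rmse = np.sqrt(np.mean((y - y_hat) ** 2))  # square root of the MSE
print(rmse)
```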
Taking the square root converts the value back into the original, non-squared units of \\(y_i\\), which is useful for understanding the model’s performance. A low RMSE indicates more “accurate” predictions – that there is a lower average loss across the dataset.\n\\[\\text{RMSE} = \\sqrt{\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)^2}\\]\n\n\n12.3.2 Residual Plots\nWhen working with SLR, we generated plots of the residuals against a single feature to understand the behavior of residuals. When working with several features in multiple linear regression, it no longer makes sense to consider a single feature in our residual plots. Instead, multiple linear regression is evaluated by making plots of the residuals against the predicted values. As was the case with SLR, a multiple linear model performs well if its residual plot shows no patterns.\n\n\n\n\n\n\n\n\n\n\n\n12.3.3 Multiple \\(R^2\\)\nFor SLR, we used the correlation coefficient to capture the association between the target variable and a single feature variable. In a multiple linear model setting, we will need a performance metric that can account for multiple features at once. Multiple \\(R^2\\), also called the coefficient of determination, is the proportion of variance of our fitted values (predictions) \\(\\hat{y}_i\\) to our true values \\(y_i\\). It ranges from 0 to 1 and is effectively the proportion of variance in the observations that the model explains.\n\\[R^2 = \\frac{\\text{variance of } \\hat{y}_i}{\\text{variance of } y_i} = \\frac{\\sigma^2_{\\hat{y}}}{\\sigma^2_y}\\]\nNote that for OLS with an intercept term, for example \\(\\hat{y} = \\theta_0 + \\theta_1x_1 + \\theta_2x_2 + \\cdots + \\theta_px_p\\), \\(R^2\\) is equal to the square of the correlation between \\(y\\) and \\(\\hat{y}\\). On the other hand for SLR, \\(R^2\\) is equal to \\(r^2\\), the correlation between \\(x\\) and \\(y\\). The proof of these last two properties is out of scope for this course.\nAdditionally, as we add more features, our fitted values tend to become closer and closer to our actual values. Thus, \\(R^2\\) increases.\nAdding more features doesn’t always mean our model is better though! 
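As a quick numerical check of the two characterizations of \(R^2\) above (a variance ratio that, for OLS with an intercept, equals the squared correlation between \(y\) and \(\hat{y}\)), here is a sketch on synthetic data; the features, coefficients, and noise level below are all made up for illustration:

```python
import numpy as np

rng = np.random.default_rng(42)

# Synthetic data: an intercept column plus two features, with a little noise
n = 100
X = np.column_stack([np.ones(n), rng.normal(size=n), rng.normal(size=n)])
y = X @ np.array([1.0, 2.0, -0.5]) + rng.normal(scale=0.3, size=n)

# Fit OLS (the ones column provides the intercept)
theta_hat, *_ = np.linalg.lstsq(X, y, rcond=None)
y_hat = X @ theta_hat

r2_from_variances = np.var(y_hat) / np.var(y)
r2_from_correlation = np.corrcoef(y, y_hat)[0, 1] ** 2
print(np.isclose(r2_from_variances, r2_from_correlation))  # True for OLS with an intercept
```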
We’ll see why later in the course.", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "ols/ols.html#ols-properties", - "href": "ols/ols.html#ols-properties", - "title": "12  Ordinary Least Squares", - "section": "12.4 OLS Properties", - "text": "12.4 OLS Properties\n\nWhen using the optimal parameter vector, our residuals \\(e = \\mathbb{Y} - \\hat{\\mathbb{Y}}\\) are orthogonal to \\(span(\\mathbb{X})\\).\n\n\\[\\mathbb{X}^Te = 0 \\]\n\n\n\n\n\n\nProof:\n\nThe optimal parameter vector, \\(\\hat{\\theta}\\), solves the normal equations \\(\\implies \\hat{\\theta} = (\\mathbb{X}^T\\mathbb{X})^{-1}\\mathbb{X}^T\\mathbb{Y}\\)\n\n\\[\\mathbb{X}^Te = \\mathbb{X}^T (\\mathbb{Y} - \\mathbb{\\hat{Y}}) \\]\n\\[\\mathbb{X}^T (\\mathbb{Y} - \\mathbb{X}\\hat{\\theta}) = \\mathbb{X}^T\\mathbb{Y} - \\mathbb{X}^T\\mathbb{X}\\hat{\\theta}\\]\n\nAny matrix multiplied with its own inverse is the identity matrix \\(\\mathbb{I}\\)\n\n\\[\\mathbb{X}^T\\mathbb{Y} - (\\mathbb{X}^T\\mathbb{X})(\\mathbb{X}^T\\mathbb{X})^{-1}\\mathbb{X}^T\\mathbb{Y} = \\mathbb{X}^T\\mathbb{Y} - \\mathbb{X}^T\\mathbb{Y} = 0\\]\n\n\n\n\nFor all linear models with an intercept term, the sum of residuals is zero.\n\n\\[\\sum_i^n e_i = 0\\]\n\n\n\n\n\n\nProof:\n\nFor all linear models with an intercept term, the average of the predicted \\(y\\) values is equal to the average of the true \\(y\\) values. \\[\\bar{y} = \\bar{\\hat{y}}\\]\nRewriting the sum of residuals as two separate sums, \\[\\sum_i^n e_i = \\sum_i^n y_i - \\sum_i^n\\hat{y}_i\\]\nEach respective sum is a multiple of the average of the sum. \\[\\sum_i^n e_i = n\\bar{y} - n\\bar{y} = n(\\bar{y} - \\bar{y}) = 0\\]\n\n\n\n\nTo summarize:\n\n\n\n\n\n\n\n\n\n\nModel\nEstimate\nUnique?\n\n\n\n\nConstant Model + MSE\n\\(\\hat{y} = \\theta_0\\)\n\\(\\hat{\\theta}_0 = mean(y) = \\bar{y}\\)\nYes. Any set of values has a unique mean.\n\n\nConstant Model + MAE\n\\(\\hat{y} = \\theta_0\\)\n\\(\\hat{\\theta}_0 = median(y)\\)\nYes, if odd. No, if even. Return the average of the middle 2 values.\n\n\nSimple Linear Regression + MSE\n\\(\\hat{y} = \\theta_0 + \\theta_1x\\)\n\\(\\hat{\\theta}_0 = \\bar{y} - \\hat{\\theta}_1\\bar{x}\\) \\(\\hat{\\theta}_1 = r\\frac{\\sigma_y}{\\sigma_x}\\)\nYes. 
Any set of non-constant* values has a unique mean, SD, and correlation coefficient.\n\n\nOLS (Linear Model + MSE)\n\\(\\mathbb{\\hat{Y}} = \\mathbb{X}\\mathbb{\\theta}\\)\n\\(\\hat{\\theta} = (\\mathbb{X}^T\\mathbb{X})^{-1}\\mathbb{X}^T\\mathbb{Y}\\)\nYes, if \\(\\mathbb{X}\\) is full column rank (all columns are linearly independent, # of datapoints >>> # of features).", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "ols/ols.html#bonus-uniqueness-of-the-solution", - "href": "ols/ols.html#bonus-uniqueness-of-the-solution", - "title": "12  Ordinary Least Squares", - "section": "12.5 Bonus: Uniqueness of the Solution", - "text": "12.5 Bonus: Uniqueness of the Solution\nThe Least Squares estimate \\(\\hat{\\theta}\\) is unique if and only if \\(\\mathbb{X}\\) is full column rank.\n\n\n\n\n\n\nProof:\n\nWe know the solution to the normal equation \\(\\mathbb{X}^T\\mathbb{X}\\hat{\\theta} = \\mathbb{X}^T\\mathbb{Y}\\) is the least square estimate that minimizes the squared loss.\n\\(\\hat{\\theta}\\) has a unique solution \\(\\iff\\) the square matrix \\(\\mathbb{X}^T\\mathbb{X}\\) is invertible \\(\\iff\\) \\(\\mathbb{X}^T\\mathbb{X}\\) is full rank.\n\nThe column rank of a square matrix is the max number of linearly independent columns it contains.\nAn \\(n\\) x \\(n\\) square matrix is deemed full column rank when all of its columns are linearly independent. That is, its rank would be equal to \\(n\\).\n\\(\\mathbb{X}^T\\mathbb{X}\\) has shape \\((p + 1) \\times (p + 1)\\), and therefore has max rank \\(p + 1\\).\n\n\\(rank(\\mathbb{X}^T\\mathbb{X})\\) = \\(rank(\\mathbb{X})\\) (proof out of scope).\nTherefore, \\(\\mathbb{X}^T\\mathbb{X}\\) has rank \\(p + 1\\) \\(\\iff\\) \\(\\mathbb{X}\\) has rank \\(p + 1\\) \\(\\iff \\mathbb{X}\\) is full column rank.\n\n\n\n\nTherefore, if \\(\\mathbb{X}\\) is not full column rank, we will not have unique estimates. 
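As a small numerical illustration of this condition, here is a sketch with a made-up width/height/perimeter design matrix (echoing the example discussed just below), showing how a linearly dependent column reduces the rank:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 6

width = rng.uniform(1, 10, size=n)
height = rng.uniform(1, 10, size=n)
perimeter = 2 * width + 2 * height  # a linear combination of the other two features

X_independent = np.column_stack([np.ones(n), width, height])
X_dependent = np.column_stack([np.ones(n), width, height, perimeter])

print(np.linalg.matrix_rank(X_independent))  # 3: full column rank, theta_hat is unique
print(np.linalg.matrix_rank(X_dependent))    # still 3 despite 4 columns: X^T X is singular,
                                             # so the normal equation has no unique solution
```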
This can happen for two major reasons.\n\nIf our design matrix \\(\\mathbb{X}\\) is “wide”:\n\nIf n < p, then we have way more features (columns) than observations (rows).\nThen \\(rank(\\mathbb{X})\\) = min(n, p+1) < p+1, so \\(\\hat{\\theta}\\) is not unique.\nTypically we have n >> p so this is less of an issue.\n\nIf our design matrix \\(\\mathbb{X}\\) has features that are linear combinations of other features:\n\nBy definition, rank of \\(\\mathbb{X}\\) is number of linearly independent columns in \\(\\mathbb{X}\\).\nExample: If “Width”, “Height”, and “Perimeter” are all columns,\n\nPerimeter = 2 * Width + 2 * Height \\(\\rightarrow\\) \\(\\mathbb{X}\\) is not full rank.\n\nImportant with one-hot encoding (to discuss later).", - "crumbs": [ - "12  Ordinary Least Squares" - ] - }, - { - "objectID": "gradient_descent/gradient_descent.html", - "href": "gradient_descent/gradient_descent.html", - "title": "13  sklearn and Gradient Descent", - "section": "", - "text": "13.1 sklearn", - "crumbs": [ - "13  sklearn and Gradient Descent" - ] - }, - { - "objectID": "gradient_descent/gradient_descent.html#sklearn", - "href": "gradient_descent/gradient_descent.html#sklearn", - "title": "13  sklearn and Gradient Descent", - "section": "", - "text": "13.1.1 Implementing Derived Formulas in Code\nThroughout this lecture, we’ll refer to the penguins dataset.\n\n\nCode\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\n\npenguins = sns.load_dataset(\"penguins\")\npenguins = penguins[penguins[\"species\"] == \"Adelie\"].dropna()\npenguins.head()\n\n\n\n\n\n\n\n\n\nspecies\nisland\nbill_length_mm\nbill_depth_mm\nflipper_length_mm\nbody_mass_g\nsex\n\n\n\n\n0\nAdelie\nTorgersen\n39.1\n18.7\n181.0\n3750.0\nMale\n\n\n1\nAdelie\nTorgersen\n39.5\n17.4\n186.0\n3800.0\nFemale\n\n\n2\nAdelie\nTorgersen\n40.3\n18.0\n195.0\n3250.0\nFemale\n\n\n4\nAdelie\nTorgersen\n36.7\n19.3\n193.0\n3450.0\nFemale\n\n\n5\nAdelie\nTorgersen\n39.3\n20.6\n190.0\n3650.0\nMale\n\n\n\n\n\n\n\nOur goal will be to predict the value of the \"bill_depth_mm\" for a particular penguin given its \"flipper_length_mm\" and \"body_mass_g\". We’ll also add a bias column of all ones to represent the intercept term of our models.\n\n# Add a bias column of all ones to `penguins`\npenguins[\"bias\"] = np.ones(len(penguins), dtype=int) \n\n# Define the design matrix, X...\n# Note that we use .to_numpy() to convert our DataFrame into a NumPy array so it is in Matrix form\nX = penguins[[\"bias\", \"flipper_length_mm\", \"body_mass_g\"]].to_numpy()\n\n# ...as well as the target variable, Y\n# Again, we use .to_numpy() to convert our DataFrame into a NumPy array so it is in Matrix form\nY = penguins[[\"bill_depth_mm\"]].to_numpy()\n\nIn the lecture on ordinary least squares, we expressed multiple linear regression using matrix notation.\n\\[\\hat{\\mathbb{Y}} = \\mathbb{X}\\theta\\]\nWe used a geometric approach to derive the following expression for the optimal model parameters:\n\\[\\hat{\\theta} = (\\mathbb{X}^T \\mathbb{X})^{-1}\\mathbb{X}^T \\mathbb{Y}\\]\nThat’s a whole lot of matrix manipulation. 
How do we implement it in python?\nThere are three operations we need to perform here: multiplying matrices, taking transposes, and finding inverses.\n\nTo perform matrix multiplication, use the @ operator\nTo take a transpose, call the .T attribute of an NumPy array or DataFrame\nTo compute an inverse, use NumPy’s in-built method np.linalg.inv\n\nPutting this all together, we can compute the OLS estimate for the optimal model parameters, stored in the array theta_hat.\n\ntheta_hat = np.linalg.inv(X.T @ X) @ X.T @ Y\ntheta_hat\n\narray([[1.10029953e+01],\n [9.82848689e-03],\n [1.47749591e-03]])\n\n\nTo make predictions using our optimized parameter values, we matrix-multiply the design matrix with the parameter vector:\n\\[\\hat{\\mathbb{Y}} = \\mathbb{X}\\theta\\]\n\nY_hat = X @ theta_hat\npd.DataFrame(Y_hat).head()\n\n\n\n\n\n\n\n\n0\n\n\n\n\n0\n18.322561\n\n\n1\n18.445578\n\n\n2\n17.721412\n\n\n3\n17.997254\n\n\n4\n18.263268\n\n\n\n\n\n\n\n\n\n13.1.2 The sklearn Workflow\nWe’ve already saved a lot of time (and avoided tedious calculations) by translating our derived formulas into code. However, we still had to go through the process of writing out the linear algebra ourselves.\nTo make life even easier, we can turn to the sklearn python library. sklearn is a robust library of machine learning tools used extensively in research and industry. It is the standard for simple machine learning tasks and gives us a wide variety of in-built modeling frameworks and methods, so we’ll keep returning to sklearn techniques as we progress through Data 100.\nRegardless of the specific type of model being implemented, sklearn follows a standard set of steps for creating a model:\n\nImport the LinearRegression model from sklearn\nfrom sklearn.linear_model import LinearRegression\nCreate a model object. This generates a new instance of the model class. You can think of it as making a new “copy” of a standard “template” for a model. In code, this looks like:\nmy_model = LinearRegression()\nFit the model to the X design matrix and Y target vector. This calculates the optimal model parameters “behind the scenes” without us explicitly working through the calculations ourselves. The fitted parameters are then stored within the model for use in future predictions:\nmy_model.fit(X, Y)\nUse the fitted model to make predictions on the X input data using .predict.\nmy_model.predict(X)\n\nTo extract the fitted parameters, we can use:\nmy_model.coef_\n\nmy_model.intercept_\nLet’s put this into action with our multiple regression task!\n1. Initialize an instance of the model class\nsklearn stores “templates” of useful models for machine learning. We begin the modeling process by making a “copy” of one of these templates for our own use. Model initialization looks like ModelClass(), where ModelClass is the type of model we wish to create.\nFor now, let’s create a linear regression model using LinearRegression.\nmy_model is now an instance of the LinearRegression class. You can think of it as the “idea” of a linear regression model. We haven’t trained it yet, so it doesn’t know any model parameters and cannot be used to make predictions. In fact, we haven’t even told it what data to use for modeling! It simply waits for further instructions.\n\nmy_model = LinearRegression()\n\n2. Train the model using .fit\nBefore the model can make predictions, we will need to fit it to our training data. When we fit the model, sklearn will run gradient descent behind the scenes to determine the optimal model parameters. 
It will then save these model parameters to our model instance for future use.\nAll sklearn model classes include a .fit method, which is used to fit the model. It takes in two inputs: the design matrix, X, and the target variable, Y.\nLet’s start by fitting a model with just one feature: the flipper length. We create a design matrix X by pulling out the \"flipper_length_mm\" column from the DataFrame.\n\n# .fit expects a 2D data design matrix, so we use double brackets to extract a DataFrame\nX = penguins[[\"flipper_length_mm\"]]\nY = penguins[\"bill_depth_mm\"]\n\nmy_model.fit(X, Y)\n\nLinearRegression()In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.LinearRegressionLinearRegression()\n\n\nNotice that we use double brackets to extract this column. Why double brackets instead of just single brackets? The .fit method, by default, expects to receive 2-dimensional data – some kind of data that includes both rows and columns. Writing penguins[\"flipper_length_mm\"] would return a 1D Series, causing sklearn to error. We avoid this by writing penguins[[\"flipper_length_mm\"]] to produce a 2D DataFrame.\nAnd in just three lines of code, our model has run gradient descent to determine the optimal model parameters! Our single-feature model takes the form:\n\\[\\text{bill depth} = \\theta_0 + \\theta_1 \\text{flipper length}\\]\nNote that LinearRegression will automatically include an intercept term.\nThe fitted model parameters are stored as attributes of the model instance. my_model.intercept_ will return the value of \\(\\hat{\\theta}_0\\) as a scalar. my_model.coef_ will return all values \\(\\hat{\\theta}_1,\n\\hat{\\theta}_1, ...\\) in an array. Because our model only contains one feature, we see just the value of \\(\\hat{\\theta}_1\\) in the cell below.\n\n# The intercept term, theta_0\nmy_model.intercept_\n\n7.297305899612299\n\n\n\n# All parameters theta_1, ..., theta_p\nmy_model.coef_\n\narray([0.05812622])\n\n\n3. Use the fitted model to make predictions\nNow that the model has been trained, we can use it to make predictions! To do so, we use the .predict method. .predict takes in one argument: the design matrix that should be used to generate predictions. To understand how the model performs on the training set, we would pass in the training data. Alternatively, to make predictions on unseen data, we would pass in a new dataset that wasn’t used to train the model.\nBelow, we call .predict to generate model predictions on the original training data. 
As before, we use double brackets to ensure that we extract 2-dimensional data.\n\nY_hat_one_feature = my_model.predict(penguins[[\"flipper_length_mm\"]])\n\nprint(f\"The RMSE of the model is {np.sqrt(np.mean((Y-Y_hat_one_feature)**2))}\")\n\nThe RMSE of the model is 1.154936309923901\n\n\nWhat if we wanted a model with two features?\n\\[\\text{bill depth} = \\theta_0 + \\theta_1 \\text{flipper length} + \\theta_2 \\text{body mass}\\]\nWe repeat this three-step process by intializing a new model object, then calling .fit and .predict as before.\n\n# Step 1: initialize LinearRegression model\ntwo_feature_model = LinearRegression()\n\n# Step 2: fit the model\nX_two_features = penguins[[\"flipper_length_mm\", \"body_mass_g\"]]\nY = penguins[\"bill_depth_mm\"]\n\ntwo_feature_model.fit(X_two_features, Y)\n\n# Step 3: make predictions\nY_hat_two_features = two_feature_model.predict(X_two_features)\n\nprint(f\"The RMSE of the model is {np.sqrt(np.mean((Y-Y_hat_two_features)**2))}\")\n\nThe RMSE of the model is 0.9881331104079045\n\n\nWe can also see that we obtain the same predictions using sklearn as we did when applying the ordinary least squares formula before!\n\n\nCode\npd.DataFrame({\"Y_hat from OLS\":np.squeeze(Y_hat), \"Y_hat from sklearn\":Y_hat_two_features}).head()\n\n\n\n\n\n\n\n\n\nY_hat from OLS\nY_hat from sklearn\n\n\n\n\n0\n18.322561\n18.322561\n\n\n1\n18.445578\n18.445578\n\n\n2\n17.721412\n17.721412\n\n\n3\n17.997254\n17.997254\n\n\n4\n18.263268\n18.263268", - "crumbs": [ - "13  sklearn and Gradient Descent" - ] - }, - { - "objectID": "gradient_descent/gradient_descent.html#gradient-descent", - "href": "gradient_descent/gradient_descent.html#gradient-descent", - "title": "13  sklearn and Gradient Descent", - "section": "13.2 Gradient Descent", - "text": "13.2 Gradient Descent\nAt this point, we’ve grown quite familiar with the process of choosing a model and a corresponding loss function and optimizing parameters by choosing the values of \\(\\theta\\) that minimize the loss function. So far, we’ve optimized \\(\\theta\\) by\n\nUsing calculus to take the derivative of the loss function with respect to \\(\\theta\\), setting it equal to 0, and solving for \\(\\theta\\).\nUsing the geometric argument of orthogonality to derive the OLS solution \\(\\hat{\\theta} = (\\mathbb{X}^T \\mathbb{X})^{-1}\\mathbb{X}^T \\mathbb{Y}\\).\n\nOne thing to note, however, is that the techniques we used above can only be applied if we make some big assumptions. For the calculus approach, we assumed that the loss function was differentiable at all points and that we could algebraically solve for the zero points of the derivative; for the geometric approach, OLS only applies when using a linear model with MSE loss. What happens when we have more complex models with different, more complex loss functions? The techniques we’ve learned so far will not work, so we need a new optimization technique: gradient descent.\n\nBIG IDEA: use an iterative algorithm to numerically compute the minimum of the loss.\n\n\n13.2.1 Minimizing an Arbitrary 1D Function\nLet’s consider an arbitrary function. Our goal is to find the value of \\(x\\) that minimizes this function.\n\ndef arbitrary(x):\n return (x**4 - 15*x**3 + 80*x**2 - 180*x + 144)/10\n\n\n\n13.2.1.1 The Naive Approach: Guess and Check\nAbove, we saw that the minimum is somewhere around 5.3. Let’s see if we can figure out how to find the exact minimum algorithmically from scratch. 
One very slow (and terrible) way would be manual guess-and-check.\n\narbitrary(6)\n\n0.0\n\n\nA somewhat better (but still slow) approach is to use brute force to try out a bunch of x values and return the one that yields the lowest loss.\n\ndef simple_minimize(f, xs):\n # Takes in a function f and a set of values xs. \n # Calculates the value of the function f at all values x in xs\n # Takes the minimum value of f(x) and returns the corresponding value x \n y = [f(x) for x in xs] \n return xs[np.argmin(y)]\n\nguesses = [5.3, 5.31, 5.32, 5.33, 5.34, 5.35]\nsimple_minimize(arbitrary, guesses)\n\n5.33\n\n\nThis process is essentially the same as before where we made a graphical plot, it’s just that we’re only looking at 20 selected points.\n\n\nCode\nxs = np.linspace(1, 7, 200)\nsparse_xs = np.linspace(1, 7, 5)\n\nys = arbitrary(xs)\nsparse_ys = arbitrary(sparse_xs)\n\nfig = px.line(x = xs, y = arbitrary(xs))\nfig.add_scatter(x = sparse_xs, y = arbitrary(sparse_xs), mode = \"markers\")\nfig.update_layout(showlegend= False)\nfig.update_layout(autosize=False, width=800, height=600)\nfig.show()\n\n\n \n\n\nThis basic approach suffers from three major flaws:\n\nIf the minimum is outside our range of guesses, the answer will be completely wrong.\nEven if our range of guesses is correct, if the guesses are too coarse, our answer will be inaccurate.\nIt is very computationally inefficient, considering potentially vast numbers of guesses that are useless.\n\n\n\n13.2.1.2 Scipy.optimize.minimize\nOne way to minimize this mathematical function is to use the scipy.optimize.minimize function. It takes a function and a starting guess and tries to find the minimum.\n\nfrom scipy.optimize import minimize\n\n# takes a function f and a starting point x0 and returns a readout \n# with the optimal input value of x which minimizes f\nminimize(arbitrary, x0 = 3.5)\n\n message: Optimization terminated successfully.\n success: True\n status: 0\n fun: -0.13827491292966557\n x: [ 2.393e+00]\n nit: 3\n jac: [ 6.486e-06]\n hess_inv: [[ 7.385e-01]]\n nfev: 20\n njev: 10\n\n\nscipy.optimize.minimize is great. It may also seem a bit magical. How could you write a function that can find the minimum of any mathematical function? There are a number of ways to do this, which we’ll explore in today’s lecture, eventually arriving at the important idea of gradient descent, which is the principle that scipy.optimize.minimize uses.\nIt turns out that under the hood, the fit method for LinearRegression models uses gradient descent. Gradient descent is also how much of machine learning works, including even advanced neural network models.\nIn Data 100, the gradient descent process will usually be invisible to us, hidden beneath an abstraction layer. However, to be good data scientists, it’s important that we know the underlying principles that optimization functions harness to find optimal parameters.\n\n\n13.2.1.3 Digging into Gradient Descent\nLooking at the function across this domain, it is clear that the function’s minimum value occurs around \\(\\theta = 5.3\\). Let’s pretend for a moment that we couldn’t see the full view of the cost function. How would we guess the value of \\(\\theta\\) that minimizes the function?\nIt turns out that the first derivative of the function can give us a clue. In the plots below, the line indicates the value of the derivative of each value of \\(\\theta\\). The derivative is negative where it is red and positive where it is green.\nSay we make a guess for the minimizing value of \\(\\theta\\). 
Remember that we read plots from left to right, and assume that our starting \\(\\theta\\) value is to the left of the optimal \\(\\hat{\\theta}\\). If the guess “undershoots” the true minimizing value – our guess for \\(\\theta\\) is lower than the value of the \\(\\hat{\\theta}\\) that minimizes the function – the derivative will be negative. This means that if we increase \\(\\theta\\) (move further to the right), then we can decrease our loss function further. If this guess “overshoots” the true minimizing value, the derivative will be positive, implying the converse.\n\n\n\n\n\n\n\n\n\nWe can use this pattern to help formulate our next guess for the optimal \\(\\hat{\\theta}\\). Consider the case where we’ve undershot \\(\\theta\\) by guessing too low of a value. We’ll want our next guess to be greater in value than our previous guess – that is, we want to shift our guess to the right. You can think of this as following the slope “downhill” to the function’s minimum value.\n\n\n\n\n\n\n\n\n\nIf we’ve overshot \\(\\hat{\\theta}\\) by guessing too high of a value, we’ll want our next guess to be lower in value – we want to shift our guess for \\(\\hat{\\theta}\\) to the left.\n\n\n\n\n\n\n\n\n\nIn other words, the derivative of the function at each point tells us the direction of our next guess.\n\nA negative slope means we want to step to the right, or move in the positive direction.\nA positive slope means we want to step to the left, or move in the negative direction.\n\n\n\n13.2.1.4 Algorithm Attempt 1\nArmed with this knowledge, let’s try to see if we can use the derivative to optimize the function.\nWe start by making some guess for the minimizing value of \\(x\\). Then, we look at the derivative of the function at this value of \\(x\\), and step downhill in the opposite direction. We can express our new rule as a recurrence relation:\n\\[x^{(t+1)} = x^{(t)} - \\frac{d}{dx} f(x^{(t)})\\]\nTranslating this statement into English: we obtain our next guess for the minimizing value of \\(x\\) at timestep \\(t+1\\) (\\(x^{(t+1)}\\)) by taking our last guess (\\(x^{(t)}\\)) and subtracting the derivative of the function at that point (\\(\\frac{d}{dx} f(x^{(t)})\\)).\nA few steps are shown below, where the old step is shown as a transparent point, and the next step taken is the green-filled dot.\n\n\n\n\n\n\n\n\n\nLooking pretty good! We do have a problem though – once we arrive close to the minimum value of the function, our guesses “bounce” back and forth past the minimum without ever reaching it.\n\n\n\n\n\n\n\n\n\nIn other words, each step we take when updating our guess moves us too far. We can address this by decreasing the size of each step.\n\n\n13.2.1.5 Algorithm Attempt 2\nLet’s update our algorithm to use a learning rate (also sometimes called the step size), which controls how far we move with each update. We represent the learning rate with \\(\\alpha\\).\n\\[x^{(t+1)} = x^{(t)} - \\alpha \\frac{d}{dx} f(x^{(t)})\\]\nA small \\(\\alpha\\) means that we will take small steps; a large \\(\\alpha\\) means we will take large steps. When do we stop updating? We stop updating either after a fixed number of updates or after a subsequent update doesn’t change much.\nUpdating our function to use \\(\\alpha=0.3\\), our algorithm successfully converges (settles on a solution and stops updating significantly, or at all) on the minimum value.\n\n\n\n\n\n\n\n\n\n\n\n\n13.2.2 Convexity\nIn our analysis above, we focused our attention on the global minimum of the loss function. 
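Here is a minimal sketch of the update rule above in code (the function and its hand-computed derivative are redefined so the snippet is self-contained; the learning rate \(\alpha = 0.3\) matches the lecture). Notice that the point we converge to depends on where we start:

```python
def arbitrary(x):
    return (x**4 - 15*x**3 + 80*x**2 - 180*x + 144) / 10

def d_arbitrary(x):
    # Derivative of arbitrary, computed by hand
    return (4*x**3 - 45*x**2 + 160*x - 180) / 10

def gradient_descent_1d(x, alpha=0.3, n_steps=100):
    # Repeatedly apply the update rule: x <- x - alpha * f'(x)
    for _ in range(n_steps):
        x = x - alpha * d_arbitrary(x)
    return x

print(gradient_descent_1d(6.0))  # converges near 5.33, the global minimum
print(gradient_descent_1d(1.0))  # converges near 2.39, a local (not global) minimum
```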
You may be wondering: what about the local minimum that’s just to the left?\nIf we had chosen a different starting guess for \\(\\theta\\), or a different value for the learning rate \\(\\alpha\\), our algorithm may have gotten “stuck” and converged on the local minimum, rather than on the true optimum value of loss.\n\n\n\n\n\n\n\n\n\nIf the loss function is convex, gradient descent is guaranteed to converge and find the global minimum of the objective function. Formally, a function \\(f\\) is convex if: \\[tf(a) + (1-t)f(b) \\geq f(ta + (1-t)b)\\] for all \\(a, b\\) in the domain of \\(f\\) and \\(t \\in [0, 1]\\).\nTo put this into words: if you drew a line between any two points on the curve, all values on the curve must be on or below the line. Importantly, any local minimum of a convex function is also its global minimum so we avoid the situation where the algorithm converges on some critical point that is not the minimum of the function.\n\n\n\n\n\n\n\n\n\nIn summary, non-convex loss functions can cause problems with optimization. This means that our choice of loss function is a key factor in our modeling process. It turns out that MSE is convex, which is a major reason why it is such a popular choice of loss function. Gradient descent is only guaranteed to converge (given enough iterations and an appropriate step size) for convex functions.\n\n\n13.2.3 Gradient Descent in 1 Dimension\n\nTerminology clarification: In past lectures, we have used “loss” to refer to the error incurred on a single datapoint. In applications, we usually care more about the average error across all datapoints. Going forward, we will take the “model’s loss” to mean the model’s average error across the dataset. This is sometimes also known as the empirical risk, cost function, or objective function. \\[L(\\theta) = R(\\theta) = \\frac{1}{n} \\sum_{i=1}^{n} L(y, \\hat{y})\\]\n\nIn our discussion above, we worked with some arbitrary function \\(f\\). As data scientists, we will almost always work with gradient descent in the context of optimizing models – specifically, we want to apply gradient descent to find the minimum of a loss function. In a modeling context, our goal is to minimize a loss function by choosing the minimizing model parameters.\nRecall our modeling workflow from the past few lectures:\n\nDefine a model with some parameters \\(\\theta_i\\)\nChoose a loss function\nSelect the values of \\(\\theta_i\\) that minimize the loss function on the data\n\nGradient descent is a powerful technique for completing this last task. By applying the gradient descent algorithm, we can select values for our parameters \\(\\theta_i\\) that will lead to the model having minimal loss on the training data.\nWhen using gradient descent in a modeling context, we:\n\nMake guesses for the minimizing \\(\\theta_i\\)\nCompute the derivative of the loss function \\(L\\)\n\nWe can “translate” our gradient descent rule from before by replacing \\(x\\) with \\(\\theta\\) and \\(f\\) with \\(L\\):\n\\[\\theta^{(t+1)} = \\theta^{(t)} - \\alpha \\frac{d}{d\\theta} L(\\theta^{(t)})\\]\n\n13.2.3.1 Gradient Descent on the tips Dataset\nTo see this in action, let’s consider a case where we have a linear model with no offset. We want to predict the tip (y) given the price of a meal (x). 
To do this, we\n\nChoose a model: \\(\\hat{y} = \\theta_1 x\\),\nChoose a loss function: \\(L(\\theta) = MSE(\\theta) = \\frac{1}{n} \\sum_{i=1}^n (y_i - \\theta_1x_i)^2\\).\n\nLet’s apply our gradient_descent function from before to optimize our model on the tips dataset. We will try to select the best parameter \\(\\theta_i\\) to predict the tip \\(y\\) from the total_bill \\(x\\).\n\ndf = sns.load_dataset(\"tips\")\ndf.head()\n\n\n\n\n\n\n\n\ntotal_bill\ntip\nsex\nsmoker\nday\ntime\nsize\n\n\n\n\n0\n16.99\n1.01\nFemale\nNo\nSun\nDinner\n2\n\n\n1\n10.34\n1.66\nMale\nNo\nSun\nDinner\n3\n\n\n2\n21.01\n3.50\nMale\nNo\nSun\nDinner\n3\n\n\n3\n23.68\n3.31\nMale\nNo\nSun\nDinner\n2\n\n\n4\n24.59\n3.61\nFemale\nNo\nSun\nDinner\n4\n\n\n\n\n\n\n\nWe can visualize the value of the MSE on our dataset for different possible choices of \\(\\theta_1\\). To optimize our model, we want to select the value of \\(\\theta_1\\) that leads to the lowest MSE.\n\n\nCode\nimport plotly.graph_objects as go\n\ndef derivative_arbitrary(x):\n return (4*x**3 - 45*x**2 + 160*x - 180)/10\n\nfig = go.Figure()\nroots = np.array([2.3927, 3.5309, 5.3263])\n\nfig.add_trace(go.Scatter(x = xs, y = arbitrary(xs), \n mode = \"lines\", name = \"f\"))\nfig.add_trace(go.Scatter(x = xs, y = derivative_arbitrary(xs), \n mode = \"lines\", name = \"df\", line = {\"dash\": \"dash\"}))\nfig.add_trace(go.Scatter(x = np.array(roots), y = 0*roots, \n mode = \"markers\", name = \"df = zero\", marker_size = 12))\nfig.update_layout(font_size = 20, yaxis_range=[-1, 3])\nfig.update_layout(autosize=False, width=800, height=600)\nfig.show()\n\n\n \n\n\nTo apply gradient descent, we need to compute the derivative of the loss function with respect to our parameter \\(\\theta_1\\).\n\nGiven our loss function, \\[L(\\theta) = MSE(\\theta) = \\frac{1}{n} \\sum_{i=1}^n (y_i - \\theta_1x_i)^2\\]\nWe take the derivative with respect to \\(\\theta_1\\) \\[\\frac{\\partial}{\\partial \\theta_{1}} L(\\theta_1^{(t)}) = \\frac{-2}{n} \\sum_{i=1}^n (y_i - \\theta_1^{(t)} x_i) x_i\\]\nWhich results in the gradient descent update rule \\[\\theta_1^{(t+1)} = \\theta_1^{(t)} - \\alpha \\frac{d}{d\\theta}L(\\theta_1^{(t)})\\]\n\nfor some learning rate \\(\\alpha\\).\nImplementing this in code, we can visualize the MSE loss on the tips data. MSE is convex, so there is one global minimum.\n\n\nCode\ndef gradient_descent(df, initial_guess, alpha, n):\n \"\"\"Performs n steps of gradient descent on df using learning rate alpha starting\n from initial_guess. 
Returns a numpy array of all guesses over time.\"\"\"\n guesses = [initial_guess]\n current_guess = initial_guess\n while len(guesses) < n:\n current_guess = current_guess - alpha * df(current_guess)\n guesses.append(current_guess)\n \n return np.array(guesses)\n\ndef mse_single_arg(theta_1):\n \"\"\"Returns the MSE on our data for the given theta1\"\"\"\n x = df[\"total_bill\"]\n y_obs = df[\"tip\"]\n y_hat = theta_1 * x\n return np.mean((y_hat - y_obs) ** 2)\n\ndef mse_loss_derivative_single_arg(theta_1):\n \"\"\"Returns the derivative of the MSE on our data for the given theta1\"\"\"\n x = df[\"total_bill\"]\n y_obs = df[\"tip\"]\n y_hat = theta_1 * x\n \n return np.mean(2 * (y_hat - y_obs) * x)\n\nloss_df = pd.DataFrame({\"theta_1\":np.linspace(-1.5, 1), \"MSE\":[mse_single_arg(theta_1) for theta_1 in np.linspace(-1.5, 1)]})\n\ntrajectory = gradient_descent(mse_loss_derivative_single_arg, -0.5, 0.0001, 100)\n\nplt.plot(loss_df[\"theta_1\"], loss_df[\"MSE\"])\nplt.scatter(trajectory, [mse_single_arg(guess) for guess in trajectory], c=\"white\", edgecolor=\"firebrick\")\nplt.scatter(trajectory[-1], mse_single_arg(trajectory[-1]), c=\"firebrick\")\nplt.xlabel(r\"$\\theta_1$\")\nplt.ylabel(r\"$L(\\theta_1)$\");\n\nprint(f\"Final guess for theta_1: {trajectory[-1]}\")\n\n\nFinal guess for theta_1: 0.14369554654231262\n\n\n\n\n\n\n\n\n\n\n\n\n13.2.4 Gradient Descent on Multi-Dimensional Models\nThe function we worked with above was one-dimensional – we were only minimizing the function with respect to a single parameter, \\(\\theta\\). However, models usually have a cost function with multiple parameters that need to be optimized. For example, simple linear regression has 2 parameters: \\[\\hat{y} + \\theta_0 + \\theta_1x\\] and multiple linear regression has \\(p+1\\) parameters: \\[\\mathbb{Y} = \\theta_0 + \\theta_1 \\Bbb{X}_{:,1} + \\theta_2 \\Bbb{X}_{:,2} + \\cdots + \\theta_p \\Bbb{X}_{:,p}\\]\nWe’ll need to expand gradient descent so we can update our guesses for all model parameters all in one go.\nWith multiple parameters to optimize, we consider a loss surface, or the model’s loss for a particular combination of possible parameter values.\n\n\nCode\nimport plotly.graph_objects as go\n\n\ndef mse_loss(theta, X, y_obs):\n y_hat = X @ theta\n return np.mean((y_hat - y_obs) ** 2) \n\ntips_with_bias = df.copy()\ntips_with_bias[\"bias\"] = 1\ntips_with_bias = tips_with_bias[[\"bias\", \"total_bill\"]]\n\nuvalues = np.linspace(0, 2, 10)\nvvalues = np.linspace(-0.1, 0.35, 10)\n(u,v) = np.meshgrid(uvalues, vvalues)\nthetas = np.vstack((u.flatten(),v.flatten()))\n\ndef mse_loss_single_arg(theta):\n return mse_loss(theta, tips_with_bias, df[\"tip\"])\n\nMSE = np.array([mse_loss_single_arg(t) for t in thetas.T])\n\nloss_surface = go.Surface(x=u, y=v, z=np.reshape(MSE, u.shape))\n\nind = np.argmin(MSE)\noptimal_point = go.Scatter3d(name = \"Optimal Point\",\n x = [thetas.T[ind,0]], y = [thetas.T[ind,1]], \n z = [MSE[ind]],\n marker=dict(size=10, color=\"red\"))\n\nfig = go.Figure(data=[loss_surface, optimal_point])\nfig.update_layout(scene = dict(\n xaxis_title = \"theta0\",\n yaxis_title = \"theta1\",\n zaxis_title = \"MSE\"), autosize=False, width=800, height=600)\n\nfig.show()\n\n\n \n\n\nWe can also visualize a bird’s-eye view of the loss surface from above using a contour plot:\n\n\nCode\ncontour = go.Contour(x=u[0], y=v[:, 0], z=np.reshape(MSE, u.shape))\nfig = go.Figure(contour)\nfig.update_layout(\n xaxis_title = \"theta0\",\n yaxis_title = \"theta1\", autosize=False, width=800, 
height=600)\n\nfig.show()\n\n \n\n\n\n13.2.4.1 The Gradient Vector\nAs before, the derivative of the loss function tells us the best way towards the minimum value.\nOn a 2D (or higher) surface, the direction of steepest descent is described by a vector: the negative of the gradient.\n\n\n\n\n\n\n\n\n\n\nMath Aside: Partial Derivatives\n\n\n\nFor an equation with multiple variables, we take a partial derivative by differentiating with respect to just one variable at a time. The partial derivative is denoted with a \(\partial\). Intuitively, we want to see how the function changes if we only vary one variable while holding other variables constant.\nUsing \(f(x, y) = 3x^2 + y\) as an example,\n\ntaking the partial derivative with respect to x and treating y as a constant gives us \(\frac{\partial f}{\partial x} = 6x\)\ntaking the partial derivative with respect to y and treating x as a constant gives us \(\frac{\partial f}{\partial y} = 1\)\n\n\n\nFor the vector of parameter values \(\vec{\theta} = \begin{bmatrix}\n \theta_{0} \\\n \theta_{1} \\\n \end{bmatrix}\), we take the partial derivative of loss with respect to each parameter: \(\frac{\partial L}{\partial \theta_0}\) and \(\frac{\partial L}{\partial \theta_1}\).\n\nFor example, consider the 2D function: \[f(\theta_0, \theta_1) = 8 \theta_0^2 + 3\theta_0\theta_1\] For a function of 2 variables \(f(\theta_0, \theta_1)\), we define the gradient \[\n\begin{align}\n\frac{\partial f}{\partial \theta_{0}} &= 16\theta_0 + 3\theta_1 \\\n\frac{\partial f}{\partial \theta_{1}} &= 3\theta_0 \\\n\nabla_{\vec{\theta}} f(\vec{\theta}) &= \begin{bmatrix} 16\theta_0 + 3\theta_1 \\ 3\theta_0 \\ \end{bmatrix}\n\end{align}\n\]\n\nThe gradient vector of a generic function of \(p+1\) variables is therefore \[\nabla_{\vec{\theta}} L = \begin{bmatrix} \frac{\partial L}{\partial \theta_0} \\ \frac{\partial L}{\partial \theta_1} \\ \vdots \end{bmatrix}\] where \(-\nabla_{\vec{\theta}} L\), the negative gradient, always points in the downhill direction of the surface. 
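We can sanity-check a gradient like the one above numerically by nudging one parameter at a time by a small amount \(h\) and seeing how the function's output changes (a minimal sketch; the choice of \(h\) is arbitrary):

```python
import numpy as np

def f(theta):
    theta0, theta1 = theta
    return 8 * theta0**2 + 3 * theta0 * theta1

def analytic_gradient(theta):
    theta0, theta1 = theta
    return np.array([16 * theta0 + 3 * theta1, 3 * theta0])

def numerical_gradient(f, theta, h=1e-6):
    # Nudge one parameter at a time, holding the others fixed
    grad = np.zeros_like(theta)
    for i in range(len(theta)):
        nudged = theta.copy()
        nudged[i] += h
        grad[i] = (f(nudged) - f(theta)) / h
    return grad

theta = np.array([1.0, 2.0])
print(analytic_gradient(theta))      # [22.  3.]
print(numerical_gradient(f, theta))  # approximately [22.  3.]
```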
We can interpret each gradient as: “If I nudge the \\(i\\)th model weight, what happens to loss?”\nWe can use this to update our 1D gradient rule for models with multiple parameters.\n\nRecall our 1D update rule: \\[\\theta^{(t+1)} = \\theta^{(t)} - \\alpha \\frac{d}{d\\theta}L(\\theta^{(t)})\\]\nFor models with multiple parameters, we work in terms of vectors: \\[\\begin{bmatrix}\n \\theta_{0}^{(t+1)} \\\\\n \\theta_{1}^{(t+1)} \\\\\n \\vdots\n \\end{bmatrix} = \\begin{bmatrix}\n \\theta_{0}^{(t)} \\\\\n \\theta_{1}^{(t)} \\\\\n \\vdots\n \\end{bmatrix} - \\alpha \\begin{bmatrix}\n \\frac{\\partial L}{\\partial \\theta_{0}} \\\\\n \\frac{\\partial L}{\\partial \\theta_{1}} \\\\\n \\vdots \\\\\n \\end{bmatrix}\\]\nWritten in a more compact form, \\[\\vec{\\theta}^{(t+1)} = \\vec{\\theta}^{(t)} - \\alpha \\nabla_{\\vec{\\theta}} L(\\theta^{(t)}) \\]\n\n\\(\\theta\\) is a vector with our model weights\n\\(L\\) is the loss function\n\\(\\alpha\\) is the learning rate (ours is constant, but other techniques use an \\(\\alpha\\) that decreases over time)\n\\(\\vec{\\theta}^{(t)}\\) is the current value of \\(\\theta\\)\n\\(\\vec{\\theta}^{(t+1)}\\) is the next value of \\(\\theta\\)\n\\(\\nabla_{\\vec{\\theta}} L(\\theta^{(t)})\\) is the gradient of the loss function evaluated at the current \\(\\vec{\\theta}^{(t)}\\)\n\n\n\n\n\n13.2.5 Batch Gradient Descent and Stochastic Gradient Descent\nFormally, the algorithm we derived above is called batch gradient descent. For each iteration of the algorithm, the derivative of loss is computed across the entire batch of all \\(n\\) datapoints. While this update rule works well in theory, it is not practical in most circumstances. For large datasets (with perhaps billions of datapoints), finding the gradient across all the data is incredibly computationally taxing; gradient descent will converge slowly because each individual update is slow.\nStochastic (mini-batch) gradient descent tries to address this issue. In stochastic descent, only a sample of the full dataset is used at each update. We estimate the true gradient of the loss surface using just that sample of data. The batch size is the number of data points used in each sample. The sampling strategy is generally without replacement (data is shuffled and batch size examples are selected one at a time.)\nEach complete “pass” through the data is known as a training epoch. After shuffling the data, in a single training epoch of stochastic gradient descent, we\n\nCompute the gradient on the first x% of the data. Update the parameter guesses.\nCompute the gradient on the next x% of the data. Update the parameter guesses.\n\\(\\dots\\)\nCompute the gradient on the last x% of the data. Update the parameter guesses.\n\nEvery data point appears once in a single training epoch. We then perform several training epochs until we’re satisfied.\nBatch gradient descent is a deterministic technique – because the entire dataset is used at each update iteration, the algorithm will always advance towards the minimum of the loss surface. In contrast, stochastic gradient descent involve an element of randomness. Since only a subset of the full data is used to update the guess for \\(\\vec{\\theta}\\) at each iteration, there’s a chance the algorithm will not progress towards the true minimum of loss with each update. Over the longer term, these stochastic techniques should still converge towards the optimal solution.\nThe diagrams below represent a “bird’s eye view” of a loss surface from above. 
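Before turning to those diagrams, here is a rough sketch in code of the epoch structure just described (synthetic data, a single-feature model with MSE loss, and arbitrary choices of learning rate and batch size):

```python
import numpy as np

rng = np.random.default_rng(0)

# Synthetic data for a simple no-intercept model: y = theta * x + noise
n = 200
x = rng.uniform(0, 10, size=n)
y = 2.5 * x + rng.normal(scale=1.0, size=n)

theta, alpha, batch_size = 0.0, 0.001, 20

for epoch in range(50):
    order = rng.permutation(n)  # shuffle once per epoch
    for start in range(0, n, batch_size):
        batch = order[start:start + batch_size]
        x_b, y_b = x[batch], y[batch]
        grad = np.mean(-2 * (y_b - theta * x_b) * x_b)  # MSE gradient on this mini-batch
        theta = theta - alpha * grad

print(theta)  # ends up close to the true slope of 2.5
```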
Notice that batch gradient descent takes a direct path towards the optimal \\(\\hat{\\theta}\\). Stochastic gradient descent, in contrast, “hops around” on its path to the minimum point on the loss surface. This reflects the randomness of the sampling process at each update step.\n\n\n\n\n\n\n\n\n\nTo summarize the tradeoffs of batch size:\n\n\n\n\n\n\n\n\n-\nSmaller Batch Size\nLarger Batch Size\n\n\n\n\nPros\nMore frequent gradient updates\nLeverage hardware acceleration to improve overall system performance and higher quality gradient updates\n\n\nCons\nMore variability in the gradient estimates\nLess frequent gradient updates\n\n\n\nThe typical solution is to set batch size to ensure sufficient hardware utilization.", - "crumbs": [ - "13  sklearn and Gradient Descent" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html", - "href": "feature_engineering/feature_engineering.html", - "title": "14  Feature Engineering", - "section": "", - "text": "14.1 Gradient Descent Cont.\nBefore we dive into feature engineering, let’s quickly review gradient descent, which we covered in the last lecture. Recall that gradient descent is a powerful technique for choosing the model parameters that minimize the loss function.", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#gradient-descent-cont.", - "href": "feature_engineering/feature_engineering.html#gradient-descent-cont.", - "title": "14  Feature Engineering", - "section": "", - "text": "14.1.1 Gradient Descent Review\nAs we learned earlier, we set the derivative of the loss function to zero and solve to determine the optimal parameters \\(\\theta\\) that minimize loss. For a loss surface in 2D (or higher), the best way to minimize loss is to “walk” down the loss surface until we reach our optimal parameters \\(\\vec{\\theta}\\). The gradient vector tells us which direction to “walk” in.\nFor example, the vector of parameter values \\(\\vec{\\theta} = \\begin{bmatrix}\n \\theta_{0} \\\\\n \\theta_{1} \\\\\n \\end{bmatrix}\\) gives us a two parameter model (d = 2). To calculate our gradient vector, we can take the partial derivative of loss with respect to each parameter: \\(\\frac{\\partial L}{\\partial \\theta_0}\\) and \\(\\frac{\\partial L}{\\partial \\theta_1}\\).\nIts gradient vector would then be the 2D vector: \\[\\nabla_{\\vec{\\theta}} L = \\begin{bmatrix} \\frac{\\partial L}{\\partial \\theta_0} \\\\ \\frac{\\partial L}{\\partial \\theta_1} \\end{bmatrix}\\]\nNote that \\(-\\nabla_{\\vec{\\theta}} L\\) always points in the downhill direction of the surface.\nRecall that we also discussed the gradient descent update rule, where we nudge \\(\\theta\\) in a negative gradient direction until \\(\\theta\\) converges.\nAs a refresher, the rule is as follows: \\[\\vec{\\theta}^{(t+1)} = \\vec{\\theta}^{(t)} - \\alpha \\nabla_{\\vec{\\theta}} L(\\vec{\\theta}^{(t)}) \\]\n\n\\(\\theta\\) is a vector with our model weights\n\\(L\\) is the loss function\n\\(\\alpha\\) is the learning rate\n\\(\\vec{\\theta}^{(t)}\\) is the current value of \\(\\theta\\)\n\\(\\vec{\\theta}^{(t+1)}\\) is the next value of \\(\\theta\\)\n\\(\\nabla_{\\vec{\\theta}} L(\\vec{\\theta}^{(t)})\\) is the gradient of the loss function evaluated at the current \\(\\theta\\): \\[\\frac{1}{n}\\sum_{i=1}^{n}\\nabla_{\\vec{\\theta}} l(y_i, f_{\\vec{\\theta}^{(t)}}(X_i))\\]\n\nLet’s now walk through an example of calculating and updating the gradient vector. 
Say our model and loss are: \\[\\begin{align}\nf_{\\vec{\\theta}}(\\vec{x}) &= \\vec{x}^T\\vec{\\theta} = \\theta_0x_0 + \\theta_1x_1\n\\\\l(y, \\hat{y}) &= (y - \\hat{y})^2\n\\end{align}\n\\]\nPlugging in \\(f_{\\vec{\\theta}}(\\vec{x})\\) for \\(\\hat{y}\\), our loss function becomes \\(l(\\vec{\\theta}, \\vec{x}, y_i) = (y_i - \\theta_0x_0 - \\theta_1x_1)^2\\).\nTo calculate our gradient vector, we can start by computing the partial derivative of the loss function with respect to \\(\\theta_0\\): \\[\\frac{\\partial}{\\partial \\theta_{0}} l(\\vec{\\theta}, \\vec{x}, y_i) = 2(y_i - \\theta_0x_0 - \\theta_1x_1)(-x_0)\\]\nLet’s now do the same but with respect to \\(\\theta_1\\): \\[\\frac{\\partial}{\\partial \\theta_{1}} l(\\vec{\\theta}, \\vec{x}, y_i) = 2(y_i - \\theta_0x_0 - \\theta_1x_1)(-x_1)\\]\nPutting this together, our gradient vector is: \\[\\nabla_{\\theta} l(\\vec{\\theta}, \\vec{x}, y_i) = \\begin{bmatrix} -2(y_i - \\theta_0x_0 - \\theta_1x_1)(x_0) \\\\ -2(y_i - \\theta_0x_0 - \\theta_1x_1)(x_1) \\end{bmatrix}\\]\nRemember that we need to keep updating \\(\\theta\\) until the algorithm converges to a solution and stops updating significantly (or at all). In practice, we either run a fixed number of updates or stop once the subsequent updates become quite small (we are no longer changing \\(\\theta\\) by much).\n\n\n14.1.2 Stochastic (Mini-batch) Gradient Descent\nLet’s now dive deeper into gradient and stochastic gradient descent. In the previous lecture, we discussed how finding the gradient across all the data is extremely computationally taxing and takes a lot of resources to calculate.\nWe know that the solution to the normal equation is \\(\\hat{\\theta} = (\\mathbb{X}^T\\mathbb{X})^{-1}\\mathbb{X}^T\\mathbb{Y}\\). Let’s break this down and determine the computational complexity for this solution.\n\n\nLet \\(n\\) be the number of samples (rows) and \\(d\\) be the number of features (columns).\n\nComputing \\((\\mathbb{X}^{\\top}\\mathbb{X})\\) takes \\(O(nd^2)\\) time, and its inverse takes another \\(O(d^3)\\) time to calculate; overall, \\((\\mathbb{X}^{\\top}\\mathbb{X})^{-1}\\) takes \\(O(nd^2) + O(d^3)\\) time.\n\\(\\mathbb{X}^{\\top}\\mathbb{Y}\\) takes \\(O(nd)\\) time.\nMultiplying \\((\\mathbb{X}^{\\top}\\mathbb{X})^{-1}\\) and \\(\\mathbb{X}^{\\top}\\mathbb{Y}\\) takes \\(O(d^2)\\) time.\n\nIn total, calculating the solution to the normal equation takes \\(O(nd^2) + O(d^3) + O(nd) + O(d^2)\\) time. We can see that \\(O(nd^2) + O(d^3)\\) dominates the complexity — this can be problematic for high-dimensional models and very large datasets.\nOn the other hand, a single gradient descent step takes only \\(O(nd)\\) time.\n\n\nSuppose we run \\(T\\) iterations. The final complexity would then be \\(O(Tnd)\\). Typically, \\(n\\) is much larger than \\(T\\) and \\(d\\). How can we reduce the cost of this algorithm using a technique from Data 100? Do we really need to use \\(n\\) data points? We don’t! Instead, we can use stochastic gradient descent.\nWe know that our true gradient of \\(\\nabla_{\\vec{\\theta}} L (\\vec{\\theta^{(t)}}) = \\frac{1}{n}\\sum_{i=1}^{n}\\nabla_{\\vec{\\theta}} l(y_i, f_{\\vec{\\theta}^{(t)}}(X_i))\\) has a time complexity of \\(O(nd)\\). Instead of using all \\(n\\) samples to calculate the true gradient of the loss surface, let’s use a sample of our data to approximate. Say we sample \\(b\\) records (\\(s_1, \\cdots, s_b\\)) from our \\(n\\) datapoints. 
Our new (stochastic) gradient descent function will be \\(\\nabla_{\\vec{\\theta}} L (\\vec{\\theta^{(t)}}) = \\frac{1}{b}\\sum_{i=1}^{b}\\nabla_{\\vec{\\theta}} l(y_{s_i}, f_{\\vec{\\theta}^{(t)}}(X_{s_i}))\\) and will now have a time complexity of \\(O(bd)\\), which is much faster!\nStochastic gradient descent helps us approximate the gradient while also reducing the time complexity and computational cost. The time complexity scales with the number of datapoints selected in the sample. To sample data, there are two approaches we can use:\n\nShuffle the data and select samples one at a time.\nTake a simple random sample for each gradient computation.\n\nBut how do we decide our mini-batch size (\\(b\\)), or the number of datapoints in our sample? The original stochastic gradient descent algorithm uses \\(b=1\\) so that only one sample is used to approximate the gradient at a time. Although we don’t use such a small mini-batch size often, \\(b\\) typically is small. When choosing \\(b\\), there are several factors to consider: a larger batch size results in a better gradient estimate, parallelism, and other systems factors. On the other hand, a smaller batch size will be faster and have more frequent updates. It is up to data scientists to balance the tradeoff between batch size and time complexity.\nSummarizing our two gradient descent techniques:\n\n(Batch) Gradient Descent: Gradient descent computes the true descent and always descends towards the true minimum of the loss. While accurate, it can often be computationally expensive.\n\n\n\n\n\n(Minibatch) Stochastic gradient descent: Stochastic gradient descent approximates the true gradient descent. It may not descend towards the true minimum with each update, but it’s often less computationally expensive than batch gradient descent.", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#feature-engineering", - "href": "feature_engineering/feature_engineering.html#feature-engineering", - "title": "14  Feature Engineering", - "section": "14.2 Feature Engineering", - "text": "14.2 Feature Engineering\nAt this point in the course, we’ve equipped ourselves with some powerful techniques to build and optimize models. We’ve explored how to develop models of multiple variables, as well as how to transform variables to help linearize a dataset and fit these models to maximize their performance.\nAll of this was done with one major caveat: the regression models we’ve worked with so far are all linear in the input variables. We’ve assumed that our predictions should be some combination of linear variables. While this works well in some cases, the real world isn’t always so straightforward. 
We’ll learn an important method to address this issue – feature engineering – and consider some new problems that can arise when we do so.\nFeature engineering is the process of transforming raw features into more informative features that can be used in modeling or EDA tasks and improve model performance.\nFeature engineering allows you to:\n\nCapture domain knowledge\nExpress non-linear relationships using linear models\nUse non-numeric (qualitative) features in models", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#feature-functions", - "href": "feature_engineering/feature_engineering.html#feature-functions", - "title": "14  Feature Engineering", - "section": "14.3 Feature Functions", - "text": "14.3 Feature Functions\nA feature function describes the transformations we apply to raw features in a dataset to create a design matrix of transformed features. We typically denote the feature function as \\(\\Phi\\) (the Greek letter “phi” that we use to represent the true function). When we apply the feature function to our original dataset \\(\\mathbb{X}\\), the result, \\(\\Phi(\\mathbb{X})\\), is a transformed design matrix ready to be used in modeling.\nFor example, we might design a feature function that computes the square of an existing feature and adds it to the design matrix. In this case, our existing matrix \\([x]\\) is transformed to \\([x, x^2]\\). Its dimension increases from 1 to 2. Often, the dimension of the featurized dataset increases as seen here.\n\n\n\nThe new features introduced by the feature function can then be used in modeling. Often, we use the symbol \\(\\phi_i\\) to represent transformed features after feature engineering.\n\\[\n\\begin{align}\n\\hat{y} &= \\theta_0 + \\theta_1 x + \\theta_2 x^2 \\\\\n\\hat{y} &= \\theta_0 + \\theta_1 \\phi_1 + \\theta_2 \\phi_2\n\\end{align}\n\\]\nIn matrix notation, the symbol \\(\\Phi\\) is sometimes used to denote the design matrix after feature engineering has been performed. Note that in the usage below, \\(\\Phi\\) is now a feature-engineered matrix, rather than a function.\n\\[\\hat{\\mathbb{Y}} = \\Phi \\theta\\]\nMore formally, we describe a feature function as transforming the original \\(\\mathbb{R}^{n \\times p}\\) dataset \\(\\mathbb{X}\\) to a featurized \\(\\mathbb{R}^{n \\times p'}\\) dataset \\(\\mathbb{\\Phi}\\), where \\(p'\\) is typically greater than \\(p\\).\n\\[\\mathbb{X} \\in \\mathbb{R}^{n \\times p} \\longrightarrow \\Phi \\in \\mathbb{R}^{n \\times p'}\\]", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#one-hot-encoding", - "href": "feature_engineering/feature_engineering.html#one-hot-encoding", - "title": "14  Feature Engineering", - "section": "14.4 One Hot Encoding", - "text": "14.4 One Hot Encoding\nFeature engineering opens up a whole new set of possibilities for designing better-performing models. As you will see in lab and homework, feature engineering is one of the most important parts of the entire modeling process.\nA particularly powerful use of feature engineering is to allow us to perform regression on non-numeric features. One hot encoding is a feature engineering technique that generates numeric features from categorical data, allowing us to use our usual methods to fit a regression model on the data.\nTo illustrate how this works, we’ll refer back to the tips dataset from previous lectures. 
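Before turning to that example, here is a small sketch of a feature function of the kind described above, one that maps a one-column design matrix [x] to [x, x^2]. The column names and values are made up for illustration; this is not code from the lecture.

```python
import pandas as pd

def phi(X: pd.DataFrame) -> pd.DataFrame:
    """Feature function: augment the design matrix with a squared term."""
    transformed = X.copy()
    transformed["x^2"] = transformed["x"] ** 2  # dimension grows from p = 1 to p' = 2
    return transformed

# Hypothetical single-feature design matrix
X_raw = pd.DataFrame({"x": [1.0, 2.0, 3.0]})
X_featurized = phi(X_raw)  # columns: x, x^2
```

Applying phi to the raw design matrix produces the transformed features, which can then be used exactly like any other columns when fitting a model.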
Consider the \"day\" column of the dataset:\n\n\nCode\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport sklearn.linear_model as lm\ntips = sns.load_dataset(\"tips\")\ntips.head()\n\n\n\n\n\n\n\n\n\ntotal_bill\ntip\nsex\nsmoker\nday\ntime\nsize\n\n\n\n\n0\n16.99\n1.01\nFemale\nNo\nSun\nDinner\n2\n\n\n1\n10.34\n1.66\nMale\nNo\nSun\nDinner\n3\n\n\n2\n21.01\n3.50\nMale\nNo\nSun\nDinner\n3\n\n\n3\n23.68\n3.31\nMale\nNo\nSun\nDinner\n2\n\n\n4\n24.59\n3.61\nFemale\nNo\nSun\nDinner\n4\n\n\n\n\n\n\n\nAt first glance, it doesn’t seem possible to fit a regression model to this data – we can’t directly perform any mathematical operations on the entry “Sun”.\nTo resolve this, we instead create a new table with a feature for each unique value in the original \"day\" column. We then iterate through the \"day\" column. For each entry in \"day\" we fill the corresponding feature in the new table with 1. All other features are set to 0.\n\n\n\n\nIn short, each category of a categorical variable gets its own feature\n\n\nValue = 1 if a row belongs to the category\n\n\nValue = 0 otherwise\n\n\nThe OneHotEncoder class of sklearn (documentation) offers a quick way to perform this one-hot encoding. You will explore its use in detail in the lab. For now, recognize that we follow a very similar workflow to when we were working with the LinearRegression class: we initialize a OneHotEncoder object, fit it to our data, and finally use .transform to apply the fitted encoder.\n\nfrom sklearn.preprocessing import OneHotEncoder\n\n# Initialize a OneHotEncoder object\nohe = OneHotEncoder()\n\n# Fit the encoder\nohe.fit(tips[[\"day\"]])\n\n# Use the encoder to transform the raw \"day\" feature\nencoded_day = ohe.transform(tips[[\"day\"]]).toarray()\nencoded_day_df = pd.DataFrame(encoded_day, columns=ohe.get_feature_names_out())\n\nencoded_day_df.head()\n\n\n\n\n\n\n\n\nday_Fri\nday_Sat\nday_Sun\nday_Thur\n\n\n\n\n0\n0.0\n0.0\n1.0\n0.0\n\n\n1\n0.0\n0.0\n1.0\n0.0\n\n\n2\n0.0\n0.0\n1.0\n0.0\n\n\n3\n0.0\n0.0\n1.0\n0.0\n\n\n4\n0.0\n0.0\n1.0\n0.0\n\n\n\n\n\n\n\nThe one-hot encoded features can then be used in the design matrix to train a model:\n\n\n\n\\[\\hat{y} = \\theta_1 (\\text{total}\\_\\text{bill}) + \\theta_2 (\\text{size}) + \\theta_3 (\\text{day}\\_\\text{Fri}) + \\theta_4 (\\text{day}\\_\\text{Sat}) + \\theta_5 (\\text{day}\\_\\text{Sun}) + \\theta_6 (\\text{day}\\_\\text{Thur})\\]\nOr in shorthand:\n\\[\\hat{y} = \\theta_{1}\\phi_{1} + \\theta_{2}\\phi_{2} + \\theta_{3}\\phi_{3} + \\theta_{4}\\phi_{4} + \\theta_{5}\\phi_{5} + \\theta_{6}\\phi_{6}\\]\nNow, the day feature (or rather, the four new boolean features that represent day) can be used to fit a model.\nUsing sklearn to fit the new model, we can determine the model coefficients, allowing us to understand how each feature impacts the predicted tip.\n\nfrom sklearn.linear_model import LinearRegression\ndata_w_ohe = tips[[\"total_bill\", \"size\", \"day\"]].join(encoded_day_df).drop(columns = \"day\")\nohe_model = lm.LinearRegression(fit_intercept=False) #Tell sklearn to not add an additional bias column. 
Why?\nohe_model.fit(data_w_ohe, tips[\"tip\"])\n\npd.DataFrame({\"Feature\":data_w_ohe.columns, \"Model Coefficient\":ohe_model.coef_})\n\n\n\n\n\n\n\n\nFeature\nModel Coefficient\n\n\n\n\n0\ntotal_bill\n0.092994\n\n\n1\nsize\n0.187132\n\n\n2\nday_Fri\n0.745787\n\n\n3\nday_Sat\n0.621129\n\n\n4\nday_Sun\n0.732289\n\n\n5\nday_Thur\n0.668294\n\n\n\n\n\n\n\nFor example, when looking at the coefficient for day_Fri, we can now understand the impact of it being Friday on the predicted tip — if it is a Friday, the predicted tip increases by approximately $0.75.\nWhen one-hot encoding, keep in mind that any set of one-hot encoded columns will always sum to a column of all ones, representing the bias column. More formally, the bias column is a linear combination of the OHE columns.\n\n\n\nWe must be careful not to include this bias column in our design matrix. Otherwise, there will be linear dependence in the model, meaning \\(\\mathbb{X}^{\\top}\\mathbb{X}\\) would no longer be invertible, and our OLS estimate \\(\\hat{\\theta} = (\\mathbb{X}^{\\top}\\mathbb{X})^{-1}\\mathbb{X}^{\\top}\\mathbb{Y}\\) fails.\nTo resolve this issue, we simply omit one of the one-hot encoded columns or do not include an intercept term. The adjusted design matrices are shown below.\n\n\n\nEither approach works — we still retain the same information as the omitted column being a linear combination of the remaining columns.", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#polynomial-features", - "href": "feature_engineering/feature_engineering.html#polynomial-features", - "title": "14  Feature Engineering", - "section": "14.5 Polynomial Features", - "text": "14.5 Polynomial Features\nWe have encountered a few cases now where models with linear features have performed poorly on datasets that show clear non-linear curvature.\nAs an example, consider the vehicles dataset, which contains information about cars. Suppose we want to use the hp (horsepower) of a car to predict its \"mpg\" (gas mileage in miles per gallon). If we visualize the relationship between these two variables, we see a non-linear curvature. Fitting a linear model to these variables results in a high (poor) value of RMSE.\n\\[\\hat{y} = \\theta_0 + \\theta_1 (\\text{hp})\\]\n\n\nCode\npd.options.mode.chained_assignment = None \nvehicles = sns.load_dataset(\"mpg\").dropna().rename(columns = {\"horsepower\": \"hp\"}).sort_values(\"hp\")\n\nX = vehicles[[\"hp\"]]\nY = vehicles[\"mpg\"]\n\nhp_model = lm.LinearRegression()\nhp_model.fit(X, Y)\nhp_model_predictions = hp_model.predict(X)\n\nimport matplotlib.pyplot as plt\n\nsns.scatterplot(data=vehicles, x=\"hp\", y=\"mpg\")\nplt.plot(vehicles[\"hp\"], hp_model_predictions, c=\"tab:red\");\n\nprint(f\"MSE of model with (hp) feature: {np.mean((Y-hp_model_predictions)**2)}\")\n\n\nMSE of model with (hp) feature: 23.943662938603104\n\n\n\n\n\n\n\n\n\nAs we can see from the plot, the data follows a curved line rather than a straight one. To capture this non-linearity, we can incorporate non-linear features. Let’s introduce a polynomial term, \\(\\text{hp}^2\\), into our regression model. The model now takes the form:\n\\[\\hat{y} = \\theta_0 + \\theta_1 (\\text{hp}) + \\theta_2 (\\text{hp}^2)\\] \\[\\hat{y} = \\theta_0 + \\theta_1 \\phi_1 + \\theta_2 \\phi_2\\]\nHow can we fit a model with non-linear features? We can use the exact same techniques as before: ordinary least squares, gradient descent, or sklearn. 
This is because our new model is still a linear model. Although it contains non-linear features, it is linear with respect to the model parameters. All of our previous work on fitting models was done under the assumption that we were working with linear models. Because our new model is still linear, we can apply our existing methods to determine the optimal parameters.\n\n# Add a hp^2 feature to the design matrix\nX = vehicles[[\"hp\"]]\nX[\"hp^2\"] = vehicles[\"hp\"]**2\n\n# Use sklearn to fit the model\nhp2_model = lm.LinearRegression()\nhp2_model.fit(X, Y)\nhp2_model_predictions = hp2_model.predict(X)\n\nsns.scatterplot(data=vehicles, x=\"hp\", y=\"mpg\")\nplt.plot(vehicles[\"hp\"], hp2_model_predictions, c=\"tab:red\");\n\nprint(f\"MSE of model with (hp^2) feature: {np.mean((Y-hp2_model_predictions)**2)}\")\n\nMSE of model with (hp^2) feature: 18.98476890761722\n\n\n\n\n\n\n\n\n\nLooking a lot better! By incorporating a squared feature, we are able to capture the curvature of the dataset. Our model is now a parabola centered on our data. Notice that our new model’s error has decreased relative to the original model with linear features.", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "feature_engineering/feature_engineering.html#complexity-and-overfitting", - "href": "feature_engineering/feature_engineering.html#complexity-and-overfitting", - "title": "14  Feature Engineering", - "section": "14.6 Complexity and Overfitting", - "text": "14.6 Complexity and Overfitting\nWe’ve seen now that feature engineering allows us to build all sorts of features to improve the performance of the model. In particular, we saw that designing a more complex feature (squaring hp in the vehicles data previously) substantially improved the model’s ability to capture non-linear relationships. To take full advantage of this, we might be inclined to design increasingly complex features. Consider the following three models, each of different order (the maximum exponent power of each model):\n\nModel with order 2: \\(\\hat{y} = \\theta_0 + \\theta_1 (\\text{hp}) + \\theta_2 (\\text{hp}^2)\\)\nModel with order 3: \\(\\hat{y} = \\theta_0 + \\theta_1 (\\text{hp}) + \\theta_2 (\\text{hp}^2) + \\theta_3 (\\text{hp}^3)\\)\nModel with order 4: \\(\\hat{y} = \\theta_0 + \\theta_1 (\\text{hp}) + \\theta_2 (\\text{hp}^2) + \\theta_3 (\\text{hp}^3) + \\theta_4 (\\text{hp}^4)\\)\n\n\n\n\nAs we can see in the plots above, MSE continues to decrease with each additional polynomial term. To visualize it further, let’s plot models as the complexity increases from 0 to 7:\n\n\n\nWhen we use our model to make predictions on the same data that was used to fit the model, we find that the MSE decreases with each additional polynomial term (as our model gets more complex). The training error is the model’s error when generating predictions from the same data that was used for training purposes. We can conclude that the training error goes down as the complexity of the model increases.\n\n\n\nThis seems like good news – when working on the training data, we can improve model performance by designing increasingly complex models.\n\n\n\n\n\n\nMath Fact: Polynomial Degrees\n\n\n\nGiven \\(N\\) overlapping data points, we can always find a polynomial of degree \\(N-1\\) that goes through all those points.\nFor example, there always exists a degree-4 polynomial curve that can perfectly model a dataset of 5 datapoints:\n\n\n\n\n\nHowever, high model complexity comes with its own set of issues. 
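Before exploring those issues, here is one way the decreasing-training-error pattern described above could be reproduced. This is a sketch rather than the lecture's own code; it assumes the vehicles data loaded earlier and uses sklearn's PolynomialFeatures to build the higher-order terms.

```python
import seaborn as sns
import sklearn.linear_model as lm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error

vehicles = sns.load_dataset("mpg").dropna().rename(columns={"horsepower": "hp"})
X, Y = vehicles[["hp"]], vehicles["mpg"]

for degree in range(1, 5):
    # Build [hp, hp^2, ..., hp^degree] and fit an ordinary least squares model to it
    poly = PolynomialFeatures(degree=degree, include_bias=False)
    Phi = poly.fit_transform(X)
    model = lm.LinearRegression().fit(Phi, Y)
    train_mse = mean_squared_error(Y, model.predict(Phi))
    print(f"Degree {degree}: training MSE = {train_mse:.2f}")
```

Because each higher-degree design matrix contains the lower-degree features as a subset, the training MSE can only stay the same or shrink as the degree grows, which is exactly the pattern that motivates the discussion of overfitting that follows.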
When building the vehicles models above, we trained the models on the entire dataset and then evaluated their performance on this same dataset. In reality, we are likely to instead train the model on a sample from the population, then use it to make predictions on data it didn’t encounter during training.\nLet’s walk through a more realistic example. Say we are given a training dataset of just 6 datapoints and want to train a model to then make predictions on a different set of points. We may be tempted to make a highly complex model (e.g., degree 5), especially given that it makes perfect predictions on the training data, as is clear on the left. However, as shown in the graph on the right, this model would perform horribly on the rest of the population!\n\n\nThis phenomenon is called overfitting. The model effectively just memorized the training data it encountered when it was fitted, leaving it unable to generalize well to data it didn’t encounter during training. This is a problem: we want models that are generalizable to “unseen” data.\nAdditionally, since complex models are sensitive to the specific dataset used to train them, they have high variance. A model with high variance tends to vary more dramatically when trained on different datasets. Going back to our example above, we can see our degree-5 model varies erratically when we fit it to different samples of 6 points from vehicles.\n\n\nWe now face a dilemma: we know that we can decrease training error by increasing model complexity, but models that are too complex start to overfit and can’t be reapplied to new datasets due to high variance.\n\n\nWe can see that there is a clear trade-off that comes from the complexity of our model. As model complexity increases, the model’s error on the training data decreases. At the same time, the model’s variance tends to increase.\nThe takeaway here: we need to strike a balance in the complexity of our models; we want models that are generalizable to “unseen” data. A model that is too simple won’t be able to capture the key relationships between our variables of interest; a model that is too complex runs the risk of overfitting.\nThis begs the question: how do we control the complexity of a model? Stay tuned for Lecture 17 on Cross-Validation and Regularization!",\n    "crumbs": [\n      "14  Feature Engineering"\n    ]\n  },\n  {\n    "objectID": "feature_engineering/feature_engineering.html#bonus-stochastic-gradient-descent-in-pytorch",\n    "href": "feature_engineering/feature_engineering.html#bonus-stochastic-gradient-descent-in-pytorch",\n    "title": "14  Feature Engineering",\n    "section": "14.7 [Bonus] Stochastic Gradient Descent in PyTorch",\n    "text": "14.7 [Bonus] Stochastic Gradient Descent in PyTorch\nWhile this material is out of scope for Data 100, it is useful if you plan to enter a career in data science!\nIn practice, you will use software packages such as PyTorch when computing gradients and implementing gradient descent. 
You’ll often follow three main steps:\n\nSample a batch of the data.\nCompute the loss and the gradient.\nUpdate your gradient until you reach an appropriate estimate of the true gradient.\n\n\n\n\nIf you want to learn more, this Intro to PyTorch tutorial is a great resource to get started!", - "crumbs": [ - "14  Feature Engineering" - ] - }, - { - "objectID": "case_study_HCE/case_study_HCE.html", - "href": "case_study_HCE/case_study_HCE.html", - "title": "15  Case Study in Human Contexts and Ethics", - "section": "", - "text": "15.1 The Problem\nWhat prompted the formation of the CCAO and led to the development of this model? In 2017, an investigative report by the Chicago Tribune uncovered a major scandal in the property assessment system managed by the CCAO under the watch of former County Assessor Joseph Berrios. Working with experts from the University of Chicago, the Chicago Tribune journalists found that the CCAO’s model for estimating house value perpetuated a highly regressive tax system that disproportionately burdened African-American and Latinx homeowners in Cook County. How did the journalists demonstrate this disparity?\nThe image above shows two standard metrics to estimate the fairness of assessments: the coefficient of dispersion and price-related differential. How they’re calculated is out of scope for this class, but you can assume that these metrics have been rigorously tested by experts in the field and are a good indication of fairness. As we see above, calculating these metrics for the Cook County prices revealed that the pricing created by the CCAO did not fall in acceptable ranges. While this on its own is not the entire story, it was a good indicator that something fishy was going on.\nThis prompted journalists to investigate if the CCAO’s model itself was producing fair tax rates. When accounting for the homeowner’s income, they found that the model actually produced a regressive tax rate (see figure above). A tax rate is regressive if the percentage tax rate is higher for individuals with lower net income; it is progressive if the percentage tax rate is higher for individuals with higher net income.\nDigging further, journalists found that the model was not only regressive and unfair to lower-income individuals, but it was also unfair to non-white homeowners (see figure above). The likelihood of a property being under- or over-assessed was highly dependent on the owner’s race, and that did not sit well with many homeowners.", - "crumbs": [ - "15  Case Study in Human Contexts and Ethics" - ] - }, - { - "objectID": "case_study_HCE/case_study_HCE.html#the-problem", - "href": "case_study_HCE/case_study_HCE.html#the-problem", - "title": "15  Case Study in Human Contexts and Ethics", - "section": "", - "text": "15.1.1 Spotlight: Appeals\nWhat was the cause of such a major issue? It might be easy to simply blame “biased” algorithms, but the main issue was not a faulty model. Instead, it was largely due to the appeals system which enabled the wealthy and privileged to more easily and successfully challenge their assessments. Once given the CCAO model’s initial assessment of their home’s value, homeowners could choose to appeal to a board of elected officials to try and change the listed value of their home and, consequently, how much they are taxed. In theory, this sounds like a very fair system: a human being oversees the final pricing of houses rather than a computer algorithm. 
In reality, this ended up exacerbating the problem.\n\n“Appeals are a good thing,” Thomas Jaconetty, deputy assessor for valuation and appeals, said in an interview. “The goal here is fairness. We made the numbers. We can change them.”\n\n\n \n\n\nWe can borrow lessons from Critical Race Theory —— on the surface, everyone has the legal right to try and appeal the value of their home. However, not everyone has an equal ability to do so. Those who have the money to hire tax lawyers to appeal for them have a drastically higher chance of trying and succeeding in their appeal (see above figure). Many homeowners who appealed were generally under-assessed compared to homeowners who did not (see figure below). Clearly, the model is part of a deeper institutional pattern rife with potential corruption.\n\n \n\n\nIn fact, Chicago boasts a large and thriving tax attorney industry dedicated precisely to appealing property assessments, reflected in the growing number of appeals in Cook County in the 21st century. Given wealthier, whiter neighborhoods typically have greater access to lawyers, they often appealed more and won reductions far more often than their less wealthy neighbors. In other words, those with higher incomes pay less in property tax, tax lawyers can grow their business due to their role in appeals, and politicians are socially connected to the aforementioned tax lawyers and wealthy homeowners. All these stakeholders have reasons to advertise the appeals system as an integral part of a fair system; after all, it serves to benefit them. Here lies the value in asking questions: a system that seems fair on the surface may, in reality, be unfair upon taking a closer look.\n\n\n15.1.2 Human Impacts\n\n \n\n\nWhat happened as a result of this corrupt system? As the Chicago Tribune reported, many African American and Latino homeowners purchased homes only to find their houses were later appraised at levels far higher than what they paid. As a result, homeowners were now responsible for paying significantly more in taxes every year than initially budgeted, putting them at risk of not being able to afford their homes and losing them.\nThe impact of the housing model extends beyond the realm of home ownership and taxation —— the issues of justice go much deeper. This model perpetrated much older patterns of racially discriminatory practices in Chicago and across the United States. Unfortunately, it is no accident that this happened in Chicago, one of the most segregated cities in the United States (source). These factors are central to informing us, as data scientists, about what is at stake.\n\n\n15.1.3 Spotlight: Intersection of Real Estate and Race\nBefore we dive into how the CCAO used data science to “solve” this problem, let’s briefly go through the history of discriminatory housing practices in the United States to give more context on the gravity and urgency of this situation.\nHousing and real estate, among other factors, have been one of the most significant and enduring drivers of structural racism and racial inequality in the United States since the Civil War. It is one of the main areas where inequalities are created and reproduced. In the early 20th century, Jim Crow laws were explicit in forbidding people of color from utilizing the same facilities —— such as buses, bathrooms, and pools —— as white individuals. 
This set of practices by government actors in combination with overlapping practices driven by the private real estate industry further served to make neighborhoods increasingly segregated.\n\n\n\n\nAlthough advancements in civil rights have been made, the spirit of the laws is alive in many parts of the US. In the 1920s and 1930s, it was illegal for governments to actively segregate neighborhoods according to race, but other methods were available for achieving the same ends. One of the most notorious practices was redlining: the federal housing agencies’ process of distinguishing neighborhoods in a city in terms of relative risk. The goal was to increase access to homeownership for low-income Americans. In practice, however, it allowed real estate professionals to legally perpetuate segregation. The federal housing agencies deemed predominantly African American neighborhoods as high risk and colored them in red —— hence the name redlining —— making it nearly impossible for African Americans to own a home.\nThe origins of the data that made these maps possible lay in a kind of “racial data revolution” in the private real estate industry beginning in the 1920s. Segregation was established and reinforced in part through the work of real estate agents who were also very concerned with establishing reliable methods for predicting the value of a home. The effects of these practices continue to resonate today.\n\n\n\nSource: Colin Koopman, How We Became Our Data (2019) p. 137", - "crumbs": [ - "15  Case Study in Human Contexts and Ethics" - ] - }, - { - "objectID": "case_study_HCE/case_study_HCE.html#the-response-cook-county-open-data-initiative", - "href": "case_study_HCE/case_study_HCE.html#the-response-cook-county-open-data-initiative", - "title": "15  Case Study in Human Contexts and Ethics", - "section": "15.2 The Response: Cook County Open Data Initiative", - "text": "15.2 The Response: Cook County Open Data Initiative\nThe response to this problem started in politics. A new assessor, Fritz Kaegi, was elected and created a new mandate with two goals:\n\nDistributional equity in property taxation, meaning that properties of the same value are treated alike during assessments.\nCreating a new Office of Data Science.\n\nHe wanted to not only create a more accurate algorithmic model but also to design a new system to address the problems with the CCAO.\n\n\n\n\nLet’s frame this problem through the lens of the data science lifecycle.\n\n\n\n\n15.2.1 1. Question/Problem Formulation\n\n\n\n\n\n\nDriving Questions\n\n\n\n\nWhat do we want to know?\nWhat problems are we trying to solve?\nWhat are the hypotheses we want to test?\nWhat are our metrics for success?\n\n\n\nThe old system was unfair because it was systemically inaccurate; it made one kind of error for one group, and another kind of error for another. Its goal was to “create a robust pipeline that accurately assesses property values at scale and is fair”, and in turn, they defined fairness as accuracy: “the ability of our pipeline to accurately assess all residential property values, accounting for disparities in geography, information, etc.” Thus, the plan —— make the system more fair —— was already framed in terms of a task appropriate to a data scientist: make the assessments more accurate (or more precisely, minimize errors in a particular way).\nThe idea here is that if the model is more accurate it will also (perhaps necessarily) become more fair, which is a big assumption. 
There are, in a sense, two different problems —— make accurate assessments, and make a fair system. Treating these two problems as one makes it a more straightforward issue that can be solved technically (with a good model) but does raise the question of if fairness and accuracy are one and the same.\nFor now, let’s just talk about the technical part of this —— accuracy. For you, the data scientist, this part might feel more comfortable. We can determine some metrics of success and frame a social problem as a data science problem.\n\n\n\n\n\n\nDefinitions: Fairness and Transparency\n\n\n\nThe definitions, as given by the Cook County Assessor’s Office, are given below: \n\nFairness: The ability of our pipeline to accurately assess property values, accounting for disparities in geography, information, etc. \nTransparency: The ability of the data science department to share and explain pipeline results and decisions to both internal and external stakeholders \n\n\n\nThe new Office of Data Science started by framing the problem and redefining their goals. They determined that they needed to:\n\nAccurately, uniformly, and impartially assess the value of a home and accurately predict the sale price of a home within the next year by:\n\nFollowing international standards (e.g., coefficient of dispersion)\nPredicting the value of all homes with as little total error as possible\n\nCreate a robust pipeline that accurately assesses property values at scale and is fair to all people by:\n\nDisrupting the circuit of corruption (Board of Review appeals process)\nEliminating regressivity\nEngendering trust in the system among all stakeholders\n\n\nThe goals defined above lead us to ask the question: what does it actually mean to accurately assess property values, and what role does “scale” play?\n\nWhat is an assessment of a home’s value?\nWhat makes one assessment more accurate than another?\nWhat makes one batch of assessments more accurate than another batch?\n\nEach of the above questions leads to a slew of more questions. Considering just the first question, one answer could be that an assessment is an estimate of the value of a home. This leads to more inquiries: what is the value of a home? What determines it? How do we know? For this class, we take it to be the house’s market value, or how much it would sell for.\nUnfortunately, if you are the county assessor, it becomes hard to determine property values with this definition. After all, you can’t make everyone sell their house every year. And as many properties haven’t been sold in decades, every year that passes makes that previous sale less reliable as an indicator.\nSo how would one generate reliable estimates? You’re probably thinking, well, with data about homes and their sale prices you can probably predict the value of a property reliably. Even if you’re not a data scientist, you might know there are websites like Zillow and RedFin that estimate what properties would sell for and constantly update them. They don’t know the value, but they estimate them. How do you think they do this? Let’s start with the data —— which is the next step in the lifecycle.\n\n\n15.2.2 2. Data Acquisition and Cleaning\n\n\n\n\n\n\nDriving Questions\n\n\n\n\nWhat data do we have, and what data do we need?\nHow will we sample more data?\nIs our data representative of the population we want to study?\n\n\n\nTo generate estimates, the data scientists used two datasets. The first contained all recorded sales data from 2013 to 2019. 
The second contained property characteristics, including a property identification number and physical characteristics (e.g., age, bedroom, baths, square feet, neighborhood, site desirability, etc.).\n\n\n\n\nAs they examined the datasets, they asked the questions:\n\nHow was this data collected?\nWhen was this data collected?\nWho collected this data?\nFor what purposes was the data collected?\nHow and why were particular categories created?\n\nWith so much data available, data scientists worked to see how all the different data points correlated with each other and with the sales prices. By discovering patterns in datasets containing known sale prices and characteristics of similar and nearby properties, training a model on this data, and applying it to all the properties without sales data, it was now possible to create a linear model that could predict the sale price (“fair market value”) of unsold properties.\nSome other key questions data scientists asked about the data were:\n\nAre any attributes of a house differentially reported? How might these attributes be differentially reported?\nHow are “improvements” like renovations tracked and updated?\nWhich data is missing, and for which neighborhoods or populations is it missing?\nWhat other data sources or attributes might be valuable?\n\nAttributes can have different likelihoods of appearing in the data. For example, housing data in the floodplain geographic region of Chicago were less represented than other regions.\nFeatures can also be reported at different rates. Improvements in homes, which tend to increase property value, were unlikely to be reported by the homeowners.\nAdditionally, they found that there was simply more missing data in lower-income neighborhoods.\n\n\n15.2.3 3. Exploratory Data Analysis\n\n\n\n\n\n\nDriving Questions\n\n\n\n\nHow is our data organized, and what does it contain?\nDo we already have relevant data?\nWhat are the biases, anomalies, or other issues with the data?\nHow do we transform the data to enable effective analysis?\n\n\n\nBefore the modeling step, they investigated a multitude of crucial questions:\n\nWhich attributes are most predictive of sales price?\nIs the data uniformly distributed?\nDo all neighborhoods have recent data? Do all neighborhoods have the same granularity?\n\nDo some neighborhoods have missing or outdated data?\n\nThey found that certain features, such as bedroom number, were much more useful in determining house value for certain neighborhoods than for others. This informed them that different models should be used depending on the neighborhood.\nThey also noticed that low-income neighborhoods had disproportionately spottier data. This informed them that they needed to develop new data collection practices - including finding new sources of data.\n\n\n15.2.4 4. Prediction and Inference\n\n\n\n\n\n\nDriving Questions\n\n\n\n\nWhat does the data say about the world?\nDoes it answer our questions or accurately solve the problem?\nHow robust are our conclusions, and can we trust the predictions?\n\n\n\nRather than using a singular model to predict sale prices (“fair market value”) of unsold properties, the CCAO predicts sale prices using machine learning models that discover patterns in data sets containing known sale prices and characteristics of similar and nearby properties. 
It uses different model weights for each neighborhood.\nCompared to traditional mass appraisal, the CCAO’s new approach is more granular and more sensitive to neighborhood variations.\nBut how do we know if an assessment is accurate? We can see how our model performs when predicting the sales prices of properties it wasn’t trained on! We can then evaluate how “close” our estimate was to the actual sales price, using Root Mean Square Error (RMSE). However, is RMSE a good proxy for fairness in this context?\nBroad metrics of error like RMSE can be limiting when evaluating the “fairness” of a property appraisal system. RMSE does not tell us anything about the distribution of errors, whether the errors are positive or negative, and the relative size of the errors. It does not tell us anything about the regressivity of the model, instead just giving a rough measure of our model’s overall error.\nEven with a low RMSE, we can’t guarantee a fair model. The error we see (no matter how small) may be a result of our model overvaluing less expensive homes and undervaluing more expensive homes.\nRegarding accuracy, it’s important to ask what makes a batch of assessments better or more accurate than another batch of assessments. The value of a home that a model predicts is relational. It’s a product of the interaction of social and technical elements so property assessment involves social trust.\nWhy should any particular individual believe that the model is accurate for their property? Why should any individual trust the model?\nTo foster public trust, the CCAO focuses on “transparency”, putting data, models, and the pipeline onto GitLab. By doing so, they can better equate the production of “accurate assessments” with “fairness”.\nThere’s a lot more to be said here on the relationship between accuracy, fairness, and metrics we tend to use when evaluating our models. Given the nuanced nature of the argument, it is recommended you view the corresponding lecture as the course notes are not as comprehensive for this portion of the lecture.\n\n\n15.2.5 5. Results and Conclusions\n\n\n\n\n\n\nDriving Questions\n\n\n\n\nHow successful is the system for each goal?\n\nAccuracy/uniformity of the model\nFairness and transparency that eliminates regressivity and engenders trust\n\nHow do you know?\n\n\n\nUnfortunately, it may be naive to hope that a more accurate and transparent algorithm will translate into more fair outcomes in practice. Even if our model is perfectly optimized according to the standards of fairness we’ve set, there is no guarantee that people will actually pay their expected share of taxes as determined by the model. While it is a good step in the right direction, maintaining a level of social trust is key to ensuring people pay their fair share.\nDespite all their best efforts, the CCAO is still struggling to create fair assessments and engender trust.\nStories like the one show that total taxes for residential properties went up overall (because commercial taxes went down). But looking at the distribution, we can see that the biggest increases occurred in wealthy neighborhoods, and the biggest decreases occurred in poorer, predominantly Black neighborhoods. So maybe there was some success after all?\nHowever, it’ll ultimately be hard to overcome the propensity of the board of review to reduce the tax burden of the rich, preventing the CCAO from creating a truly fair system. This is in part because there are many cases where the model makes big, frustrating mistakes. 
In some cases like this one, it is due to spotty data.", - "crumbs": [ - "15  Case Study in Human Contexts and Ethics" - ] - }, - { - "objectID": "case_study_HCE/case_study_HCE.html#summary-questions-to-consider", - "href": "case_study_HCE/case_study_HCE.html#summary-questions-to-consider", - "title": "15  Case Study in Human Contexts and Ethics", - "section": "15.3 Summary: Questions to Consider", - "text": "15.3 Summary: Questions to Consider\n\nQuestion/Problem Formulation\n\nWho is responsible for framing the problem?\nWho are the stakeholders? How are they involved in the problem framing?\nWhat do you bring to the table? How does your positionality affect your understanding of the problem?\nWhat are the narratives that you’re tapping into?\n\nData Acquisition and Cleaning\n\nWhere does the data come from?\nWho collected it? For what purpose?\nWhat kinds of collecting and recording systems and techniques were used?\nHow has this data been used in the past?\nWhat restrictions are there on access to the data, and what enables you to have access?\n\nExploratory Data Analysis & Visualization\n\nWhat kind of personal or group identities have become salient in this data?\nWhich variables became salient, and what kinds of relationships do we see between them?\nDo any of the relationships made visible lend themselves to arguments that might be potentially harmful to a particular community?\n\nPrediction and Inference\n\nWhat does the prediction or inference do in the world?\nAre the results useful for the intended purposes?\nAre there benchmarks to compare the results?\nHow are your predictions and inferences dependent upon the larger system in which your model works?", - "crumbs": [ - "15  Case Study in Human Contexts and Ethics" - ] - }, - { - "objectID": "case_study_HCE/case_study_HCE.html#key-takeaways", - "href": "case_study_HCE/case_study_HCE.html#key-takeaways", - "title": "15  Case Study in Human Contexts and Ethics", - "section": "15.4 Key Takeaways", - "text": "15.4 Key Takeaways\n\nAccuracy is a necessary, but not sufficient, condition of a fair system.\nFairness and transparency are context-dependent and sociotechnical concepts.\nLearn to work with contexts, and consider how your data analysis will reshape them.\nKeep in mind the power, and limits, of data analysis.", - "crumbs": [ - "15  Case Study in Human Contexts and Ethics" - ] - }, - { - "objectID": "cv_regularization/cv_reg.html", - "href": "cv_regularization/cv_reg.html", - "title": "16  Cross Validation and Regularization", - "section": "", - "text": "16.1 Cross-validation", - "crumbs": [ - "16  Cross Validation and Regularization" - ] - }, - { - "objectID": "cv_regularization/cv_reg.html#cross-validation", - "href": "cv_regularization/cv_reg.html#cross-validation", - "title": "16  Cross Validation and Regularization", - "section": "", - "text": "16.1.1 Training, Test, and Validation Sets\n\n\n\n\nFrom the last lecture, we learned that increasing model complexity decreased our model’s training error but increased its variance. This makes intuitive sense: adding more features causes our model to fit more closely to data it encountered during training, but it generalizes worse to new data that hasn’t been seen before. For this reason, a low training error is not always representative of our model’s underlying performance – we need to also assess how well it performs on unseen data to ensure that it is not overfitting.\nTruly, the only way to know when our model overfits is by evaluating it on unseen data. 
Unfortunately, that means we need to wait for more data. This may be very expensive and time-consuming.\nHow should we proceed? In this section, we will build up a viable solution to this problem.\n\n16.1.1.1 Test Sets\nThe simplest approach to avoid overfitting is to keep some of our data “secret” from ourselves. We can set aside a random portion of our full dataset to use only for testing purposes. The datapoints in this test set will not be used to fit the model. Instead, we will:\n\nUse the remaining portion of our dataset – now called the training set – to run ordinary least squares, gradient descent, or some other technique to train our model,\nTake the fitted model and use it to make predictions on datapoints in the test set. The model’s performance on the test set (expressed as the MSE, RMSE, etc.) is now indicative of how well it can make predictions on unseen data\n\nImportantly, the optimal model parameters were found by only considering the data in the training set. After the model has been fitted to the training data, we do not change any parameters before making predictions on the test set. Importantly, we only ever make predictions on the test set once after all model design has been completely finalized. We treat the test set performance as the final test of how well a model does. To reiterate, the test set is only ever touched once: to compute the performance of the model after all fine-tuning has been completed.\nThe process of sub-dividing our dataset into training and test sets is known as a train-test split. Typically, between 10% and 20% of the data is allocated to the test set.\n\n\n\n\nIn sklearn, the train_test_split function (documentation) of the model_selection module allows us to automatically generate train-test splits.\nWe will work with the vehicles dataset from previous lectures. As before, we will attempt to predict the mpg of a vehicle from transformations of its hp. 
In the cell below, we allocate 20% of the full dataset to testing, and the remaining 80% to training.\n\n\nCode\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Load the dataset and construct the design matrix\nvehicles = sns.load_dataset(\"mpg\").rename(columns={\"horsepower\":\"hp\"}).dropna()\nX = vehicles[[\"hp\"]]\nX[\"hp^2\"] = vehicles[\"hp\"]**2\nX[\"hp^3\"] = vehicles[\"hp\"]**3\nX[\"hp^4\"] = vehicles[\"hp\"]**4\n\nY = vehicles[\"mpg\"]\n\n\n\nfrom sklearn.model_selection import train_test_split\n\n# `test_size` specifies the proportion of the full dataset that should be allocated to testing\n# `random_state` makes our results reproducible for educational purposes\nX_train, X_test, Y_train, Y_test = train_test_split(\n X, \n Y, \n test_size=0.2, \n random_state=220\n )\n\nprint(f\"Size of full dataset: {X.shape[0]} points\")\nprint(f\"Size of training set: {X_train.shape[0]} points\")\nprint(f\"Size of test set: {X_test.shape[0]} points\")\n\nSize of full dataset: 392 points\nSize of training set: 313 points\nSize of test set: 79 points\n\n\nAfter performing our train-test split, we fit a model to the training set and assess its performance on the test set.\n\nimport sklearn.linear_model as lm\nfrom sklearn.metrics import mean_squared_error\n\nmodel = lm.LinearRegression()\n\n# Fit to the training set\nmodel.fit(X_train, Y_train)\n\n# Calculate errors\ntrain_error = mean_squared_error(Y_train, model.predict(X_train))\ntest_error = mean_squared_error(Y_test, model.predict(X_test))\n\nprint(f\"Training error: {train_error}\")\nprint(f\"Test error: {test_error}\")\n\nTraining error: 17.85851684101209\nTest error: 23.192405630290637\n\n\n\n\n16.1.1.2 Validation Sets\nNow, what if we were dissatisfied with our test set performance? With our current framework, we’d be stuck. As outlined previously, assessing model performance on the test set is the final stage of the model design process; we can’t go back and adjust our model based on the new discovery that it is overfitting. If we did, then we would be factoring in information from the test set to design our model. The test error would no longer be a true representation of the model’s performance on unseen data!\nOur solution is to introduce a validation set. A validation set is a random portion of the training set that is set aside for assessing model performance while the model is still being developed. The process for using a validation set is:\n\nPerform a train-test split.\nSet the test set aside; we will not touch it until the very end of the model design process.\nSet aside a portion of the training set to be used for validation.\nFit the model parameters to the datapoints contained in the remaining portion of the training set.\nAssess the model’s performance on the validation set. Adjust the model as needed, re-fit it to the remaining portion of the training set, then re-evaluate it on the validation set. Repeat as necessary until you are satisfied.\nAfter all model development is complete, assess the model’s performance on the test set. This is the final test of how well the model performs on unseen data. No further modifications should be made to the model.\n\nThe process of creating a validation set is called a validation split.\n\n\n\n\nNote that the validation error behaves quite differently from the training error explored previously. 
As the model becomes more complex, it makes better predictions on the training data; the variance of the model typically increases as model complexity increases. Validation error, on the other hand, decreases then increases as we increase model complexity. This reflects the transition from under- to overfitting: at low model complexity, the model underfits because it is not complex enough to capture the main trends in the data; at high model complexity, the model overfits because it “memorizes” the training data too closely.\nWe can update our understanding of the relationships between error, complexity, and model variance:\n\n\n\n\nOur goal is to train a model with complexity near the orange dotted line – this is where our model minimizes the validation error. Note that this relationship is a simplification of the real-world, but it’s a good enough approximation for the purposes of Data 100.\n\n\n\n16.1.2 K-Fold Cross-Validation\nIntroducing a validation set gave us an “extra” chance to assess model performance on another set of unseen data. We are able to finetune the model design based on its performance on this one set of validation data.\nBut what if, by random chance, our validation set just happened to contain many outliers? It is possible that the validation datapoints we set aside do not actually represent other unseen data that the model might encounter. Ideally, we would like to validate our model’s performance on several different unseen datasets. This would give us greater confidence in our understanding of how the model behaves on new data.\nLet’s think back to our validation framework. Earlier, we set aside \\(x\\)% of our training data (say, 20%) to use for validation.\n\n\n\nIn the example above, we set aside the first 20% of training datapoints for the validation set. This was an arbitrary choice. We could have set aside any 20% portion of the training data for validation. In fact, there are 5 non-overlapping “chunks” of training points that we could have designated as the validation set.\n\n\n\nThe common term for one of these chunks is a fold. In the example above, we had 5 folds, each containing 20% of the training data. This gives us a new perspective: we really have 5 validation sets “hidden” in our training set.\nIn cross-validation, we perform validation splits for each fold in the training set. For a dataset with \\(K\\) folds, we:\n\nPick one fold to be the validation fold\nFit the model to training data from every fold other than the validation fold\nCompute the model’s error on the validation fold and record it\nRepeat for all \\(K\\) folds\n\nThe cross-validation error is then the average error across all \\(K\\) validation folds. In the example below, the cross-validation error is the mean of validation errors #1 to #5.\n\n\n\n\n\n16.1.3 Model Selection Workflow\nAt this stage, we have refined our model selection workflow. We begin by performing a train-test split to set aside a test set for the final evaluation of model performance. Then, we alternate between adjusting our design matrix and computing the cross-validation error to finetune the model’s design. In the example below, we illustrate the use of 4-fold cross-validation to help inform model design.\n\n\n\n\n\n16.1.4 Hyperparameters\nAn important use of cross-validation is for hyperparameter selection. A hyperparameter is some value in a model that is chosen before the model is fit to any data. 
Because its value is chosen before the model is fit, a hyperparameter is distinct from the model parameters, \\(\\theta_i\\), which are learned during training. We cannot use our usual techniques – calculus, ordinary least squares, or gradient descent – to choose its value. Instead, we must decide it ourselves.\nSome examples of hyperparameters in Data 100 are:\n\nThe degree of our polynomial model (recall that we selected the degree before creating our design matrix and calling .fit)\nThe learning rate, \\(\\alpha\\), in gradient descent\nThe regularization penalty, \\(\\lambda\\) (to be introduced later this lecture)\n\nTo select a hyperparameter value via cross-validation, we first list out several “guesses” for what the best hyperparameter may be. For each guess, we then run cross-validation to compute the cross-validation error incurred by the model when using that choice of hyperparameter value. We then select the value of the hyperparameter that resulted in the lowest cross-validation error.\nFor example, we may wish to use cross-validation to decide what value we should use for \\(\\alpha\\), which controls the step size of each gradient descent update. To do so, we list out some possible guesses for the best \\(\\alpha\\), like 0.1, 1, and 10. For each possible value, we perform cross-validation to see what error the model has when we use that value of \\(\\alpha\\) to train it.",\n    "crumbs": [\n      "16  Cross Validation and Regularization"\n    ]\n  },\n  {\n    "objectID": "cv_regularization/cv_reg.html#regularization",\n    "href": "cv_regularization/cv_reg.html#regularization",\n    "title": "16  Cross Validation and Regularization",\n    "section": "16.2 Regularization",\n    "text": "16.2 Regularization\nWe’ve now addressed the first of our two goals for today: creating a framework to assess model performance on unseen data. Now, we’ll discuss our second objective: developing a technique to adjust model complexity. This will allow us to directly tackle the issues of under- and overfitting.\nEarlier, we adjusted the complexity of our polynomial model by tuning a hyperparameter – the degree of the polynomial. We tested out several different polynomial degrees, computed the validation error for each, and selected the value that minimized the validation error. Tweaking the “complexity” was simple; it was only a matter of adjusting the polynomial degree.\nIn most machine learning problems, complexity is defined differently from what we have seen so far. Today, we’ll explore two different definitions of complexity: the squared and absolute magnitude of \\(\\theta_i\\) coefficients.\n\n16.2.1 Constraining Model Parameters\nThink back to our work using gradient descent to descend down a loss surface. You may find it helpful to refer back to the Gradient Descent note to refresh your memory. Our aim was to find the combination of model parameters that gives the smallest possible loss. We visualized this using a contour map by plotting possible parameter values on the horizontal and vertical axes, which allows us to take a bird’s eye view above the loss surface. Notice that the contour map has \\(p=2\\) parameters for ease of visualization. 
We want to find the model parameters corresponding to the lowest point on the loss surface.\n\n\n\nLet’s review our current modeling framework.\n\\[\\hat{\\mathbb{Y}} = \\theta_0 + \\theta_1 \\phi_1 + \\theta_2 \\phi_2 + \\ldots + \\theta_p \\phi_p\\]\nRecall that we represent our features with \\(\\phi_i\\) to reflect the fact that we have performed feature engineering.\nPreviously, we restricted model complexity by limiting the total number of features present in the model. We only included a limited number of polynomial features at a time; all other polynomials were excluded from the model.\nWhat if, instead of fully removing particular features, we kept all features and used each one only a “little bit”? If we put a limit on how much each feature can contribute to the predictions, we can still control the model’s complexity without the need to manually determine how many features should be removed.\nWhat do we mean by a “little bit”? Consider the case where some parameter \\(\\theta_i\\) is close to or equal to 0. Then, feature \\(\\phi_i\\) barely impacts the prediction – the feature is weighted by such a small value that its presence doesn’t significantly change the value of \\(\\hat{\\mathbb{Y}}\\). If we restrict how large each parameter \\(\\theta_i\\) can be, we restrict how much feature \\(\\phi_i\\) contributes to the model. This has the effect of reducing model complexity.\nIn regularization, we restrict model complexity by putting a limit on the magnitudes of the model parameters \\(\\theta_i\\).\nWhat do these limits look like? Suppose we specify that the sum of all absolute parameter values can be no greater than some number \\(Q\\). In other words:\n\\[\\sum_{i=1}^p |\\theta_i| \\leq Q\\]\nwhere \\(p\\) is the total number of parameters in the model. You can think of this as us giving our model a “budget” for how it distributes the magnitudes of each parameter. If the model assigns a large value to some \\(\\theta_i\\), it may have to assign a small value to some other \\(\\theta_j\\). This has the effect of increasing feature \\(\\phi_i\\)’s influence on the predictions while decreasing the influence of feature \\(\\phi_j\\). The model will need to be strategic about how the parameter weights are distributed – ideally, more “important” features will receive greater weighting.\nNotice that the intercept term, \\(\\theta_0\\), is excluded from this constraint. We typically do not regularize the intercept term.\nNow, let’s think back to gradient descent and visualize the loss surface as a contour map. As a refresher, a loss surface means that each point represents the model’s loss for a particular combination of \\(\\theta_1\\), \\(\\theta_2\\). Let’s say our goal is to find the combination of parameters that gives us the lowest loss.\n\n\n\n With no constraint, the optimal \\(\\hat{\\theta}\\) is in the center. We denote this as \\(\\hat{\\theta}_\\text{No Reg}\\).\nApplying this constraint limits what combinations of model parameters are valid. We can now only consider parameter combinations with a total absolute sum less than or equal to our number \\(Q\\). For our 2D example, the constraint \\(\\sum_{i=1}^p |\\theta_i| \\leq Q\\) can be rewritten as \\(|\\theta_0| + |\\theta_1| \\leq Q\\). 
This means that we can only assign our regularized parameter vector \\(\\hat{\\theta}_{\\text{Reg}}\\) to positions in the green diamond below.\n\n\n\n We can no longer select the parameter vector that truly minimizes the loss surface, \\(\\hat{\\theta}_{\\text{No Reg}}\\), because this combination of parameters does not lie within our allowed region. Instead, we select whatever allowable combination brings us closest to the true minimum loss, which is depicted by the red point below.\n\n\n\n Notice that, under regularization, our optimized \\(\\theta_1\\) and \\(\\theta_2\\) values are much smaller than they were without regularization (indeed, \\(\\theta_1\\) has decreased to 0). The model has decreased in complexity because we have limited how much our features contribute to the model. In fact, by setting its parameter to 0, we have effectively removed the influence of feature \\(\\phi_1\\) from the model altogether.\nIf we change the value of \\(Q\\), we change the region of allowed parameter combinations. The model will still choose the combination of parameters that produces the lowest loss – the closest point in the constrained region to the true minimizer, \\(\\hat{\\theta}_{\\text{No Reg}}\\).\nWhen \\(Q\\) is small, we severely restrict the size of our parameters. \\(\\theta_i\\)s are small in value, and features \\(\\phi_i\\) only contribute a little to the model. The allowed region of model parameters contracts, and the model becomes much simpler:\n\n\n\n\nWhen \\(Q\\) is large, we do not restrict our parameter sizes by much. \\(\\theta_i\\)s are large in value, and features \\(\\phi_i\\) contribute more to the model. The allowed region of model parameters expands, and the model becomes more complex:\n\n\n\n\nConsider the extreme case of when \\(Q\\) is extremely large. In this situation, our restriction has essentially no effect, and the allowed region includes the OLS solution!\n\n\n\n\nNow what if \\(Q\\) was extremely small? Most parameters are then set to (essentially) 0.\n\nIf the model has no intercept term: \\(\\hat{\\mathbb{Y}} = (0)\\phi_1 + (0)\\phi_2 + \\ldots = 0\\).\nIf the model has an intercept term: \\(\\hat{\\mathbb{Y}} = (0)\\phi_1 + (0)\\phi_2 + \\ldots = \\theta_0\\). Remember that the intercept term is excluded from the constraint - this is so we avoid the situation where we always predict 0.\n\nLet’s summarize what we have seen.\n\n\n\n\n\n16.2.2 L1 (LASSO) Regularization\nHow do we actually apply our constraint \\(\\sum_{i=1}^p |\\theta_i| \\leq Q\\)? We will do so by modifying the objective function that we seek to minimize when fitting a model.\nRecall our ordinary least squares objective function: our goal was to find parameters that minimize the model’s mean squared error:\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - \\hat{y}_i)^2 = \\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 \\phi_{i, 1} + \\theta_2 \\phi_{i, 2} + \\ldots + \\theta_p \\phi_{i, p}))^2\\]\nTo apply our constraint, we need to rephrase our minimization goal as:\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 \\phi_{i, 1} + \\theta_2 \\phi_{i, 2} + \\ldots + \\theta_p \\phi_{i, p}))^2\\:\\text{such that} \\sum_{i=1}^p |\\theta_i| \\leq Q\\]\nUnfortunately, we can’t directly use this formulation as our objective function – it’s not easy to mathematically optimize over a constraint. Instead, we will apply the magic of the Lagrangian Duality. The details of this are out of scope (take EECS 127 if you’re interested in learning more), but the end result is very useful. 
It turns out that minimizing the following augmented objective function is equivalent to our minimization goal above.\n\\[\\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 \\phi_{i, 1} + \\theta_2 \\phi_{i, 2} + \\ldots + \\theta_p \\phi_{i, p}))^2 + \\lambda \\sum_{i=1}^p \\vert \\theta_i \\vert\\] \\[ = \\frac{1}{n}||\\mathbb{Y} - \\mathbb{X}\\theta||_2^2 + \\lambda \\sum_{i=1}^p |\\theta_i|\\] \\[ = \\frac{1}{n}||\\mathbb{Y} - \\mathbb{X}\\theta||_2^2 + \\lambda || \\theta ||_1\\]\nThe last two expressions include the MSE expressed using vector notation, and the last expression writes \\(\\sum_{i=1}^p |\\theta_i|\\) as it’s L1 norm equivalent form, \\(|| \\theta ||_1\\).\nNotice that we’ve replaced the constraint with a second term in our objective function. We’re now minimizing a function with an additional regularization term that penalizes large coefficients. In order to minimize this new objective function, we’ll end up balancing two components:\n\nKeeping the model’s error on the training data low, represented by the term \\(\\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 x_{i, 1} + \\theta_2 x_{i, 2} + \\ldots + \\theta_p x_{i, p}))^2\\)\nKeeping the magnitudes of model parameters low, represented by the term \\(\\lambda \\sum_{i=1}^p |\\theta_i|\\)\n\nThe \\(\\lambda\\) factor controls the degree of regularization. Roughly speaking, \\(\\lambda\\) is related to our \\(Q\\) constraint from before by the rule \\(\\lambda \\approx \\frac{1}{Q}\\). To understand why, let’s consider two extreme examples. Recall that our goal is to minimize the cost function: \\(\\frac{1}{n}||\\mathbb{Y} - \\mathbb{X}\\theta||_2^2 + \\lambda || \\theta ||_1\\).\n\nAssume \\(\\lambda \\rightarrow \\infty\\). Then, \\(\\lambda || \\theta ||_1\\) dominates the cost function. In order to neutralize the \\(\\infty\\) and minimize this term, we set \\(\\theta_j = 0\\) for all \\(j \\ge 1\\). This is a very constrained model that is mathematically equivalent to the constant model \nAssume \\(\\lambda \\rightarrow 0\\). Then, \\(\\lambda || \\theta ||_1=0\\). Minimizing the cost function is equivalent to minimizing \\(\\frac{1}{n} || Y - X\\theta ||_2^2\\), our usual MSE loss function. The act of minimizing MSE loss is just our familiar OLS, and the optimal solution is the global minimum \\(\\hat{\\theta} = \\hat\\theta_{No Reg.}\\). \n\nWe call \\(\\lambda\\) the regularization penalty hyperparameter; it needs to be determined prior to training the model, so we must find the best value via cross-validation.\nThe process of finding the optimal \\(\\hat{\\theta}\\) to minimize our new objective function is called L1 regularization. It is also sometimes known by the acronym “LASSO”, which stands for “Least Absolute Shrinkage and Selection Operator.”\nUnlike ordinary least squares, which can be solved via the closed-form solution \\(\\hat{\\theta}_{OLS} = (\\mathbb{X}^{\\top}\\mathbb{X})^{-1}\\mathbb{X}^{\\top}\\mathbb{Y}\\), there is no closed-form solution for the optimal parameter vector under L1 regularization. Instead, we use the Lasso model class of sklearn.\n\nimport sklearn.linear_model as lm\n\n# The alpha parameter represents our lambda term\nlasso_model = lm.Lasso(alpha=2)\nlasso_model.fit(X_train, Y_train)\n\nlasso_model.coef_\n\narray([-2.54932056e-01, -9.48597165e-04, 8.91976284e-06, -1.22872290e-08])\n\n\nNotice that all model coefficients are very small in magnitude. In fact, some of them are so small that they are essentially 0. 
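The penalty \(\lambda\) (alpha in sklearn) is itself a hyperparameter, so in practice we choose it by cross-validation, exactly as described earlier in this note. The sketch below is one way to do this, reusing the X_train and Y_train from above; the grid of candidate values is arbitrary.\n\nimport numpy as np\nimport sklearn.linear_model as lm\nfrom sklearn.model_selection import cross_val_score\n\nalphas = [0.01, 0.1, 1, 2, 10]   # candidate regularization penalties\ncv_errors = []\nfor alpha in alphas:\n    # 5-fold cross-validated MSE for a LASSO model with this penalty\n    scores = cross_val_score(lm.Lasso(alpha=alpha), X_train, Y_train,\n                             cv=5, scoring="neg_mean_squared_error")\n    cv_errors.append(-scores.mean())\nbest_alpha = alphas[int(np.argmin(cv_errors))]\n\n(sklearn also provides lm.LassoCV, which automates this kind of search.) Whichever \(\lambda\) we end up picking, the pattern in the fitted coefficients above points to a broader property of LASSO. 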
An important characteristic of L1 regularization is that many model parameters are set to 0. In other words, LASSO effectively selects only a subset of the features. The reason for this comes back to our loss surface and allowed “diamond” regions from earlier – we can often get closer to the lowest loss contour at a corner of the diamond than along an edge.\nWhen a model parameter is set to 0 or close to 0, its corresponding feature is essentially removed from the model. We say that L1 regularization performs feature selection because, by setting the parameters of unimportant features to 0, LASSO “selects” which features are more useful for modeling. L1 regularization indicates that the features with non-zero parameters are more informative for modeling than those with parameters set to zero.\n\n\n16.2.3 Scaling Features for Regularization\nThe regularization procedure we just performed had one subtle issue. To see what it is, let’s take a look at the design matrix for our lasso_model.\n\n\nCode\nX_train.head()\n\n\n\n\n\n\n\n\n\nhp\nhp^2\nhp^3\nhp^4\n\n\n\n\n259\n85.0\n7225.0\n614125.0\n52200625.0\n\n\n129\n67.0\n4489.0\n300763.0\n20151121.0\n\n\n207\n102.0\n10404.0\n1061208.0\n108243216.0\n\n\n302\n70.0\n4900.0\n343000.0\n24010000.0\n\n\n71\n97.0\n9409.0\n912673.0\n88529281.0\n\n\n\n\n\n\n\nOur features – hp, hp^2, hp^3, and hp^4 – are on drastically different numeric scales! The values contained in hp^4 are orders of magnitude larger than those contained in hp. This can be a problem because the value of hp^4 will naturally contribute more to each predicted \\(\\hat{y}\\) because it is so much greater than the values of the other features. For hp to have much of an impact at all on the prediction, it must be scaled by a large model parameter.\nBy inspecting the fitted parameters of our model, we see that this is the case – the parameter for hp is much larger in magnitude than the parameter for hp^4.\n\npd.DataFrame({\"Feature\":X_train.columns, \"Parameter\":lasso_model.coef_})\n\n\n\n\n\n\n\n\nFeature\nParameter\n\n\n\n\n0\nhp\n-2.549321e-01\n\n\n1\nhp^2\n-9.485972e-04\n\n\n2\nhp^3\n8.919763e-06\n\n\n3\nhp^4\n-1.228723e-08\n\n\n\n\n\n\n\nRecall that by applying regularization, we give our a model a “budget” for how it can allocate the values of model parameters. For hp to have much of an impact on each prediction, LASSO is forced to “spend” more of this budget on the parameter for hp.\nWe can avoid this issue by scaling the data before regularizing. This is a process where we convert all features to the same numeric scale. A common way to scale data is to perform standardization such that all features have mean 0 and standard deviation 1; essentially, we replace everything with its Z-score.\n\\[z_i = \\frac{x_i - \\mu}{\\sigma}\\]\n\n\n16.2.4 L2 (Ridge) Regularization\nIn all of our work above, we considered the constraint \\(\\sum_{i=1}^p |\\theta_i| \\leq Q\\) to limit the complexity of the model. What if we had applied a different constraint?\nIn L2 regularization, also known as ridge regression, we constrain the model such that the sum of the squared parameters must be less than some number \\(Q\\). This constraint takes the form:\n\\[\\sum_{i=1}^p \\theta_i^2 \\leq Q\\]\nAs before, we typically do not regularize the intercept term.\nIn our 2D example, the constraint becomes \\(\\theta_1^2 + \\theta_2^2 \\leq Q\\). Can you see how this is similar to the equation for a circle, \\(x^2 + y^2 = r^2\\)? 
The allowed region of parameters for a given value of \\(Q\\) is now shaped like a ball.\n\n\n\nIf we modify our objective function like before, we find that our new goal is to minimize the function: \\[\\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 \\phi_{i, 1} + \\theta_2 \\phi_{i, 2} + \\ldots + \\theta_p \\phi_{i, p}))^2\\:\\text{such that} \\sum_{i=1}^p \\theta_i^2 \\leq Q\\]\nNotice that all we have done is change the constraint on the model parameters. The first term in the expression, the MSE, has not changed.\nUsing Lagrangian Duality (again, out of scope for Data 100), we can re-express our objective function as: \\[\\frac{1}{n} \\sum_{i=1}^n (y_i - (\\theta_0 + \\theta_1 \\phi_{i, 1} + \\theta_2 \\phi_{i, 2} + \\ldots + \\theta_p \\phi_{i, p}))^2 + \\lambda \\sum_{i=1}^p \\theta_i^2\\] \\[= \\frac{1}{n}||\\mathbb{Y} - \\mathbb{X}\\theta||_2^2 + \\lambda \\sum_{i=1}^p \\theta_i^2\\] \\[= \\frac{1}{n}||\\mathbb{Y} - \\mathbb{X}\\theta||_2^2 + \\lambda || \\theta ||_2^2\\]\nThe last two expressions include the MSE expressed using vector notation, and the last expression writes \\(\\sum_{i=1}^p \\theta_i^2\\) as it’s L2 norm equivalent form, \\(|| \\theta ||_2^2\\).\nWhen applying L2 regularization, our goal is to minimize this updated objective function.\nUnlike L1 regularization, L2 regularization does have a closed-form solution for the best parameter vector when regularization is applied:\n\\[\\hat\\theta_{\\text{ridge}} = (\\mathbb{X}^{\\top}\\mathbb{X} + n\\lambda I)^{-1}\\mathbb{X}^{\\top}\\mathbb{Y}\\]\nThis solution exists even if \\(\\mathbb{X}\\) is not full column rank. This is a major reason why L2 regularization is often used – it can produce a solution even when there is colinearity in the features. We will discuss the concept of colinearity in a future lecture, but we will not derive this result in Data 100, as it involves a fair bit of matrix calculus.\nIn sklearn, we perform L2 regularization using the Ridge class. It runs gradient descent to minimize the L2 objective function. Notice that we scale the data before regularizing.\n\nridge_model = lm.Ridge(alpha=1) # alpha represents the hyperparameter lambda\nridge_model.fit(X_train, Y_train)\n\nridge_model.coef_\n\narray([ 5.89130560e-02, -6.42445916e-03, 4.44468157e-05, -8.83981945e-08])", - "crumbs": [ - "16  Cross Validation and Regularization" - ] - }, - { - "objectID": "cv_regularization/cv_reg.html#regression-summary", - "href": "cv_regularization/cv_reg.html#regression-summary", - "title": "16  Cross Validation and Regularization", - "section": "16.3 Regression Summary", - "text": "16.3 Regression Summary\nOur regression models are summarized below. 
Note the objective function is what the gradient descent optimizer minimizes.\n\n\n\n\n\n\n\n\n\n\n\nType\nModel\nLoss\nRegularization\nObjective Function\nSolution\n\n\n\n\nOLS\n\\(\\hat{\\mathbb{Y}} = \\mathbb{X}\\theta\\)\nMSE\nNone\n\\(\\frac{1}{n} \\|\\mathbb{Y}-\\mathbb{X} \\theta\\|^2_2\\)\n\\(\\hat{\\theta}_{OLS} = (\\mathbb{X}^{\\top}\\mathbb{X})^{-1}\\mathbb{X}^{\\top}\\mathbb{Y}\\) if \\(\\mathbb{X}\\) is full column rank\n\n\nRidge\n\\(\\hat{\\mathbb{Y}} = \\mathbb{X} \\theta\\)\nMSE\nL2\n\\(\\frac{1}{n} \\|\\mathbb{Y}-\\mathbb{X}\\theta\\|^2_2 + \\lambda \\sum_{i=1}^p \\theta_i^2\\)\n\\(\\hat{\\theta}_{ridge} = (\\mathbb{X}^{\\top}\\mathbb{X} + n \\lambda I)^{-1}\\mathbb{X}^{\\top}\\mathbb{Y}\\)\n\n\nLASSO\n\\(\\hat{\\mathbb{Y}} = \\mathbb{X} \\theta\\)\nMSE\nL1\n\\(\\frac{1}{n} \\|\\mathbb{Y}-\\mathbb{X}\\theta\\|^2_2 + \\lambda \\sum_{i=1}^p \\vert \\theta_i \\vert\\)\nNo closed form solution", - "crumbs": [ - "16  Cross Validation and Regularization" - ] - }, - { - "objectID": "probability_1/probability_1.html", - "href": "probability_1/probability_1.html", - "title": "17  Random Variables", - "section": "", - "text": "17.1 Random Variables and Distributions\nSuppose we generate a set of random data, like a random sample from some population. A random variable is a function from the outcome of a random event to a number.\nIt is random since our sample was drawn at random; it is variable because its exact value depends on how this random sample came out. As such, the domain or input of our random variable is all possible outcomes for some random event in a sample space, and its range or output is the real number line. We typically denote random variables with uppercase letters, such as \\(X\\) or \\(Y\\). In contrast, note that regular variables tend to be denoted using lowercase letters. Sometimes we also use uppercase letters to refer to matrices (such as your design matrix \\(\\mathbb{X}\\)), but we will do our best to be clear with the notation.\nTo motivate what this (rather abstract) definition means, let’s consider the following examples:", - "crumbs": [ - "17  Random Variables" - ] - }, - { - "objectID": "probability_1/probability_1.html#random-variables-and-distributions", - "href": "probability_1/probability_1.html#random-variables-and-distributions", - "title": "17  Random Variables", - "section": "", - "text": "17.1.1 Example: Tossing a Coin\nLet’s formally define a fair coin toss. A fair coin can land on heads (\\(H\\)) or tails (\\(T\\)), each with a probability of 0.5. With these possible outcomes, we can define a random variable \\(X\\) as: \\[X = \\begin{cases}\n 1, \\text{if the coin lands heads} \\\\\n 0, \\text{if the coin lands tails}\n \\end{cases}\\]\n\\(X\\) is a function with a domain, or input, of \\(\\{H, T\\}\\) and a range, or output, of \\(\\{1, 0\\}\\). In practice, while we don’t use the following function notation, you could write the above as \\[X = \\begin{cases} X(H) = 1 \\\\ X(T) = 0 \\end{cases}\\]\n\n\n17.1.2 Example: Sampling Data 100 Students\nSuppose we draw a random sample \\(s\\) of size 3 from all students enrolled in Data 100.\nWe can define \\(Y\\) as the number of data science students in our sample. Its domain is all possible samples of size 3, and its range is \\(\\{0, 1, 2, 3\\}\\).\n\n\n\nNote that we can use random variables in mathematical expressions to create new random variables.\nFor example, let’s say we sample 3 students at random from lecture and look at their midterm scores. 
Let \\(X_1\\), \\(X_2\\), and \\(X_3\\) represent each student’s midterm grade.\nWe can use these random variables to create a new random variable, \\(Y\\), which represents the average of the 3 scores: \\(Y = (X_1 + X_2 + X_3)/3\\).\nAs we’re creating this random variable, a few questions arise:\n\nWhat can we say about the distribution of \\(Y\\)?\nHow does it depend on the distribution of \\(X_1\\), \\(X_2\\), and \\(X_3\\)?\n\nBut, what exactly is a distribution? Let’s dive into this!\n\n\n17.1.3 Distributions\nTo define any random variable \\(X\\), we need to be able to specify 2 things:\n\nPossible values: the set of values the random variable can take on.\nProbabilities: the set of probabilities describing how the total probability of 100% is split over the possible values.\n\nIf \\(X\\) is discrete (has a finite number of possible values), the probability that a random variable \\(X\\) takes on the value \\(x\\) is given by \\(P(X=x)\\), and probabilities must sum to 1: \\(\\sum_{\\text{all } x} P(X=x) = 1\\),\nWe can often display this using a probability distribution table. In the coin toss example, the probability distribution table of \\(X\\) is given by.\n\n\n\n\\(x\\)\n\\(P(X=x)\\)\n\n\n\n\n0\n\\(\\frac{1}{2}\\)\n\n\n1\n\\(\\frac{1}{2}\\)\n\n\n\nThe distribution of a random variable \\(X\\) describes how the total probability of 100% is split across all the possible values of \\(X\\), and it fully defines a random variable. If you know the distribution of a random variable you can:\n\ncompute properties of the random variables and derived variables\nsimulate the random variables by randomly picking values of \\(X\\) according to its distribution using np.random.choice, df.sample, or scipy.stats.<dist>.rvs(...)\n\nThe distribution of a discrete random variable can also be represented using a histogram. If a variable is continuous, meaning it can take on infinitely many values, we can illustrate its distribution using a density curve.\n\n\n\nWe often don’t know the (true) distribution and instead compute an empirical distribution. If you flip a coin 3 times and get {H, H, T}, you may ask —— what is the probability that the coin will land heads? We can come up with an empirical estimate of \\(\\frac{2}{3}\\), though the true probability might be \\(\\frac{1}{2}\\).\nProbabilities are areas. For discrete random variables, the area of the red bars represents the probability that a discrete random variable \\(X\\) falls within those values. For continuous random variables, the area under the curve represents the probability that a discrete random variable \\(Y\\) falls within those values.\n\n\n\nIf we sum up the total area of the bars/under the density curve, we should get 100%, or 1.\nWe can show the distribution of \\(Y\\) in the following tables. The table on the left lists all possible samples of \\(s\\) and the number of times they can appear (\\(Y(s)\\)). We can use this to calculate the values for the table on the right, a probability distribution table.\n\n\n\nRather than fully write out a probability distribution or show a histogram, there are some common distributions that come up frequently when doing data science. These distributions are specified by some parameters, which are constants that specify the shape of the distribution. 
In terms of notation, the ‘~’ means “has the probability distribution of”.\nThese common distributions are listed below:\n\nBernoulli(\(p\)): If \(X\) ~ Bernoulli(\(p\)), then \(X\) takes on the value 1 with probability \(p\), and 0 with probability \(1 - p\). Bernoulli random variables are also termed “indicator” random variables.\nBinomial(\(n\), \(p\)): If \(X\) ~ Binomial(\(n\), \(p\)), then \(X\) counts the number of 1s in \(n\) independent Bernoulli(\(p\)) trials.\nCategorical(\(p_1, ..., p_k\)): \(X\) takes on value \(i\) with probability \(p_i\), where \(p_1 + ... + p_k = 1\). The uniform distribution on a finite set of values is the special case where each value has probability 1 / (number of possible values).\nUniform on the unit interval (0, 1): The density is flat at 1 on (0, 1) and 0 elsewhere. We won’t get into what density means as much here, but intuitively, this is saying that there’s an equally likely chance of getting any value on the interval (0, 1).\nNormal(\(\mu\), \(\sigma^2\)): The probability density is specified by \(\frac{1}{\sigma\sqrt{2\pi}}e^{-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2}\). This bell-shaped distribution comes up fairly often in data, in part due to the Central Limit Theorem you saw back in Data 8.",
    "crumbs": [
      "17  Random Variables"
    ]
  },
  {
    "objectID": "probability_1/probability_1.html#expectation-and-variance",
    "href": "probability_1/probability_1.html#expectation-and-variance",
    "title": "17  Random Variables",
    "section": "17.2 Expectation and Variance",
    "text": "17.2 Expectation and Variance\nThere are several ways to describe a random variable. The methods shown above (a table of all samples \(s, X(s)\), the distribution table \(P(X=x)\), and histograms) are all definitions that fully describe a random variable. Often, it is easier to describe a random variable using some numerical summary rather than fully defining its distribution. These numerical summaries are numbers that characterize some properties of the random variable. Because they give a “summary” of how the variable tends to behave, they are not random. Instead, think of them as a static number that describes a certain property of the random variable. In Data 100, we will focus our attention on the expectation and variance of a random variable.\n\n17.2.1 Expectation\nThe expectation of a random variable \(X\) is the weighted average of the values of \(X\), where the weights are the probabilities of each value occurring. There are two equivalent ways to compute the expectation:\n\nApply the weights one sample at a time: \[\mathbb{E}[X] = \sum_{\text{all possible } s} X(s) P(s)\]\nApply the weights one possible value at a time: \[\mathbb{E}[X] = \sum_{\text{all possible } x} x P(X=x)\]\n\nThe latter is more commonly used as we are usually just given the distribution, not all possible samples.\nWe want to emphasize that the expectation is a number, not a random variable. Expectation is a generalization of the average, and it has the same units as the random variable. 
It is also the center of gravity of the probability distribution histogram, meaning if we simulate the variable many times, it is the long-run average of the simulated values.\n\n17.2.1.1 Example 1: Coin Toss\nGoing back to our coin toss example, we define a random variable \\(X\\) as: \\[X = \\begin{cases}\n 1, \\text{if the coin lands heads} \\\\\n 0, \\text{if the coin lands tails}\n \\end{cases}\\]\nWe can calculate its expectation \\(\\mathbb{E}[X]\\) using the second method of applying the weights one possible value at a time: \\[\\begin{align}\n\\mathbb{E}[X] &= \\sum_{x} x P(X=x) \\\\\n&= 1 * 0.5 + 0 * 0.5 \\\\\n&= 0.5\n\\end{align}\\]\nNote that \\(\\mathbb{E}[X] = 0.5\\) is not a possible value of \\(X\\); it’s an average. The expectation of X does not need to be a possible value of X.\n\n\n17.2.1.2 Example 2\nConsider the random variable \\(X\\):\n\n\n\n\\(x\\)\n\\(P(X=x)\\)\n\n\n\n\n3\n0.1\n\n\n4\n0.2\n\n\n6\n0.4\n\n\n8\n0.3\n\n\n\nTo calculate it’s expectation, \\[\\begin{align}\n\\mathbb{E}[X] &= \\sum_{x} x P(X=x) \\\\\n&= 3 * 0.1 + 4 * 0.2 + 6 * 0.4 + 8 * 0.3 \\\\\n&= 0.3 + 0.8 + 2.4 + 2.4 \\\\\n&= 5.9\n\\end{align}\\]\nAgain, note that \\(\\mathbb{E}[X] = 5.9\\) is not a possible value of \\(X\\); it’s an average. The expectation of X does not need to be a possible value of X.\n\n\n\n17.2.2 Variance\nThe variance of a random variable is a measure of its chance error. It is defined as the expected squared deviation from the expectation of \\(X\\). Put more simply, variance asks: how far does \\(X\\) typically vary from its average value, just by chance? What is the spread of \\(X\\)’s distribution?\n\\[\\text{Var}(X) = \\mathbb{E}[(X-\\mathbb{E}[X])^2]\\]\nThe units of variance are the square of the units of \\(X\\). To get it back to the right scale, use the standard deviation of \\(X\\): \\[\\text{SD}(X) = \\sqrt{\\text{Var}(X)}\\]\nLike with expectation, variance and standard deviation are numbers, not random variables! Variance helps us describe the variability of a random variable. It is the expected squared error between the random variable and its expected value. As you will see shortly, we can use variance to help us quantify the chance error that arises when using a sample \\(X\\) to estimate the population mean.\nBy Chebyshev’s inequality, which you saw in Data 8, no matter what the shape of the distribution of \\(X\\) is, the vast majority of the probability lies in the interval “expectation plus or minus a few SDs.”\nIf we expand the square and use properties of expectation, we can re-express variance as the computational formula for variance.\n\\[\\text{Var}(X) = \\mathbb{E}[X^2] - (\\mathbb{E}[X])^2\\]\nThis form is often more convenient to use when computing the variance of a variable by hand, and it is also useful in Mean Squared Error calculations, as \\(\\mathbb{E}[X^2] = \\text{Var}(X)\\) if \\(X\\) is centered and \\(E(X)=0\\).\n\n\n\n\n\n\nProof\n\n\n\n\n\n\\[\\begin{align}\n \\text{Var}(X) &= \\mathbb{E}[(X-\\mathbb{E}[X])^2] \\\\\n &= \\mathbb{E}(X^2 - 2X\\mathbb{E}(X) + (\\mathbb{E}(X))^2) \\\\\n &= \\mathbb{E}(X^2) - 2 \\mathbb{E}(X)\\mathbb{E}(X) +( \\mathbb{E}(X))^2\\\\\n &= \\mathbb{E}[X^2] - (\\mathbb{E}[X])^2\n\\end{align}\\]\n\n\n\nHow do we compute \\(\\mathbb{E}[X^2]\\)? Any function of a random variable is also a random variable. That means that by squaring \\(X\\), we’ve created a new random variable. 
To compute \\(\\mathbb{E}[X^2]\\), we can simply apply our definition of expectation to the random variable \\(X^2\\).\n\\[\\mathbb{E}[X^2] = \\sum_{x} x^2 P(X = x)\\]\n\n\n17.2.3 Example: Die\nLet \\(X\\) be the outcome of a single fair die roll. \\(X\\) is a random variable defined as \\[X = \\begin{cases}\n \\frac{1}{6}, \\text{if } x \\in \\{1,2,3,4,5,6\\} \\\\\n 0, \\text{otherwise}\n \\end{cases}\\]\n\n\n\n\n\n\nWhat’s the expectation, \\(\\mathbb{E}[X]?\\)\n\n\n\n\n\n\\[ \\begin{align}\n \\mathbb{E}[X] &= 1\\big(\\frac{1}{6}\\big) + 2\\big(\\frac{1}{6}\\big) + 3\\big(\\frac{1}{6}\\big) + 4\\big(\\frac{1}{6}\\big) + 5\\big(\\frac{1}{6}\\big) + 6\\big(\\frac{1}{6}\\big) \\\\\n &= \\big(\\frac{1}{6}\\big)( 1 + 2 + 3 + 4 + 5 + 6) \\\\\n &= \\frac{7}{2}\n \\end{align}\\]\n\n\n\n\n\n\n\n\n\nWhat’s the variance, \\(\\text{Var}(X)?\\)\n\n\n\n\n\nUsing Approach 1 (definition): \\[\\begin{align}\n \\text{Var}(X) &= \\big(\\frac{1}{6}\\big)((1 - \\frac{7}{2})^2 + (2 - \\frac{7}{2})^2 + (3 - \\frac{7}{2})^2 + (4 - \\frac{7}{2})^2 + (5 - \\frac{7}{2})^2 + (6 - \\frac{7}{2})^2) \\\\\n &= \\frac{35}{12}\n \\end{align}\\]\nUsing Approach 2 (property): \\[\\mathbb{E}[X^2] = \\sum_{x} x^2 P(X = x) = \\frac{91}{6}\\] \\[\\text{Var}(X) = \\frac{91}{6} - (\\frac{7}{2})^2 = \\frac{35}{12}\\]\n\n\n\nWe can summarize our discussion so far in the following diagram:", - "crumbs": [ - "17  Random Variables" - ] - }, - { - "objectID": "probability_1/probability_1.html#sums-of-random-variables", - "href": "probability_1/probability_1.html#sums-of-random-variables", - "title": "17  Random Variables", - "section": "17.3 Sums of Random Variables", - "text": "17.3 Sums of Random Variables\nOften, we will work with multiple random variables at the same time. A function of a random variable is also a random variable. If you create multiple random variables based on your sample, then functions of those random variables are also random variables.\nFor example, if \\(X_1, X_2, ..., X_n\\) are random variables, then so are all of these:\n\n\\(X_n^2\\)\n\\(\\#\\{i : X_i > 10\\}\\)\n\\(\\text{max}(X_1, X_2, ..., X_n)\\)\n\\(\\frac{1}{n} \\sum_{i=1}^n (X_i - c)^2\\)\n\\(\\frac{1}{n} \\sum_{i=1}^n X_i\\)\n\nMany functions of random variables that we are interested in (e.g., counts, means) involve sums of random variables, so let’s dive deeper into the properties of sums of random variables.\n\n17.3.1 Properties of Expectation\nInstead of simulating full distributions, we often just compute expectation and variance directly. Recall the definition of expectation: \\[\\mathbb{E}[X] = \\sum_{x} x P(X=x)\\]\nFrom it, we can derive some useful properties:\n\nLinearity of expectation. 
The expectation of the linear transformation \(aX+b\), where \(a\) and \(b\) are constants, is:\n\[\mathbb{E}[aX+b] = a\mathbb{E}[X] + b\]\n\n\n\n\n\nProof\n\n\n\n\n\n\[\begin{align}\n \mathbb{E}[aX+b] &= \sum_{x} (ax + b) P(X=x) \\\n &= \sum_{x} (ax P(X=x) + bP(X=x)) \\\n &= a\sum_{x}x P(X=x) + b\sum_{x}P(X=x)\\\n &= a\mathbb{E}(X) + b * 1\n \end{align}\]\n\n\n\n\nExpectation is also linear in sums of random variables.\n\[\mathbb{E}[X+Y] = \mathbb{E}[X] + \mathbb{E}[Y]\]\n\n\n\n\n\nProof\n\n\n\n\n\n\[\begin{align}\n \mathbb{E}[X+Y] &= \sum_{s} (X+Y)(s) P(s) \\\n &= \sum_{s} (X(s)P(s) + Y(s)P(s)) \\\n &= \sum_{s} X(s)P(s) + \sum_{s} Y(s)P(s)\\\n &= \mathbb{E}[X] + \mathbb{E}[Y]\n\end{align}\]\n\n\n\nIf \(g\) is a non-linear function, then in general, \[\mathbb{E}[g(X)] \neq g(\mathbb{E}[X])\] For example, if \(X\) is -1 or 1 with equal probability, then \(\mathbb{E}[X] = 0\), but \(\mathbb{E}[X^2] = 1 \neq 0\).\n\n\n17.3.2 Properties of Variance\nLet’s now get into the properties of variance. Recall the definition of variance: \[\text{Var}(X) = \mathbb{E}[(X-\mathbb{E}[X])^2]\]\nCombining it with the properties of expectation, we can derive some useful properties:\n\nUnlike expectation, variance is non-linear. The variance of the linear transformation \(aX+b\) is: \[\text{Var}(aX+b) = a^2 \text{Var}(X)\]\n\n\nSubsequently, \[\text{SD}(aX+b) = |a| \text{SD}(X)\]\nThe full proof of this fact can be found using the definition of variance. As general intuition, consider that \(aX+b\) scales the variable \(X\) by a factor of \(a\), then shifts the distribution of \(X\) by \(b\) units.\n\n\n\n\n\n\nProof\n\n\n\n\n\nWe know that \[\mathbb{E}[aX+b] = a\mathbb{E}[X] + b\]\nIn order to compute \(\text{Var}(aX+b)\), consider that a shift by \(b\) units does not affect spread, so \(\text{Var}(aX+b) = \text{Var}(aX)\).\nThen, \[\begin{align}\n \text{Var}(aX+b) &= \text{Var}(aX) \\\n &= E((aX)^2) - (E(aX))^2 \\\n &= E(a^2 X^2) - (aE(X))^2\\\n &= a^2 (E(X^2) - (E(X))^2) \\\n &= a^2 \text{Var}(X)\n\end{align}\]\n\n\n\n\nShifting the distribution by \(b\) does not impact the spread of the distribution. Thus, \(\text{Var}(aX+b) = \text{Var}(aX)\).\nScaling the distribution by \(a\) does impact the spread of the distribution.\n\n\n\n\n\nVariance of sums of random variables is affected by the (in)dependence of the random variables. \[\text{Var}(X + Y) = \text{Var}(X) + \text{Var}(Y) + 2\text{Cov}(X,Y)\] \[\text{Var}(X + Y) = \text{Var}(X) + \text{Var}(Y) \qquad \text{if } X, Y \text{ independent}\]\n\n\n\n\n\n\nProof\n\n\n\n\n\nThe variance of a sum is affected by the dependence between the two random variables that are being added. 
Let’s expand the definition of \(\text{Var}(X + Y)\) to see what’s going on.\nTo simplify the math, let \(\mu_x = \mathbb{E}[X]\) and \(\mu_y = \mathbb{E}[Y]\).\n\[ \begin{align}\n\text{Var}(X + Y) &= \mathbb{E}[(X+Y- \mathbb{E}(X+Y))^2] \\\n&= \mathbb{E}[((X - \mu_x) + (Y - \mu_y))^2] \\\n&= \mathbb{E}[(X - \mu_x)^2 + 2(X - \mu_x)(Y - \mu_y) + (Y - \mu_y)^2] \\\n&= \mathbb{E}[(X - \mu_x)^2] + \mathbb{E}[(Y - \mu_y)^2] + 2\mathbb{E}[(X - \mu_x)(Y - \mu_y)] \\\n&= \text{Var}(X) + \text{Var}(Y) + 2\mathbb{E}[(X - \mu_x)(Y - \mu_y)]\n\end{align}\]\n\n\n\n\n\n17.3.3 Covariance and Correlation\nWe define the covariance of two random variables as the expected product of deviations from expectation. Put more simply, covariance generalizes the idea of variance to a pair of random variables: the covariance of \(X\) with itself is exactly its variance.\n\[\text{Cov}(X, X) = \mathbb{E}[(X - \mathbb{E}[X])^2] = \text{Var}(X)\]\n\[\text{Cov}(X, Y) = \mathbb{E}[(X - \mathbb{E}[X])(Y - \mathbb{E}[Y])]\]\nWe can treat the covariance as a measure of association. Remember the definition of correlation given when we first established SLR?\n\[r(X, Y) = \mathbb{E}\left[\left(\frac{X-\mathbb{E}[X]}{\text{SD}(X)}\right)\left(\frac{Y-\mathbb{E}[Y]}{\text{SD}(Y)}\right)\right] = \frac{\text{Cov}(X, Y)}{\text{SD}(X)\text{SD}(Y)}\]\nIt turns out we’ve been quietly using covariance for some time now! If \(X\) and \(Y\) are independent, then \(\text{Cov}(X, Y) =0\) and \(r(X, Y) = 0\). Note, however, that the converse is not always true: \(X\) and \(Y\) could have \(\text{Cov}(X, Y) = r(X, Y) = 0\) but not be independent.\n\n\n17.3.4 Equal vs. Identically Distributed vs. i.i.d\nSuppose that we have two random variables \(X\) and \(Y\):\n\n\(X\) and \(Y\) are equal if \(X(s) = Y(s)\) for every sample \(s\). Regardless of the exact sample drawn, \(X\) is always equal to \(Y\).\n\(X\) and \(Y\) are identically distributed if the distribution of \(X\) is equal to the distribution of \(Y\). We say “\(X\) and \(Y\) are equal in distribution.” That is, \(X\) and \(Y\) take on the same set of possible values, and each of these possible values is taken with the same probability. On any specific sample \(s\), identically distributed variables do not necessarily share the same value. If \(X = Y\), then \(X\) and \(Y\) are identically distributed; however, the converse is not true (ex: \(Y = 7 - X\), where \(X\) is the roll of a die).\n\(X\) and \(Y\) are independent and identically distributed (i.i.d) if\n\nThe variables are identically distributed.\nKnowing the outcome of one variable does not influence our belief of the outcome of the other.\n\n\nNote that in Data 100, you’ll never be expected to prove that random variables are i.i.d.\nNow let’s walk through an example. Let \(X_1\) and \(X_2\) be the numbers on rolls of two fair dice. \(X_1\) and \(X_2\) are i.i.d, so \(X_1\) and \(X_2\) have the same distribution. However, the sums \(Y = X_1 + X_1 = 2X_1\) and \(Z=X_1+X_2\) have different distributions but the same expectation.\n\n\n\nHowever, \(Y = 2X_1\) has a larger variance.\n\n\n\n\n\n17.3.5 Example: Bernoulli Random Variable\nTo get some practice with the formulas discussed so far, let’s derive the expectation and variance for a Bernoulli(\(p\)) random variable. If \(X\) ~ Bernoulli(\(p\)),\n\(\mathbb{E}[X] = 1 \cdot p + 0 \cdot (1 - p) = p\)\nTo compute the variance, we will use the computational formula. 
We first find that: \\(\\mathbb{E}[X^2] = 1^2 \\cdot p + 0^2 \\cdot (1 - p) = p\\)\nFrom there, let’s calculate our variance: \\(\\text{Var}(X) = \\mathbb{E}[X^2] - \\mathbb{E}[X]^2 = p - p^2 = p(1-p)\\)\n\n\n17.3.6 Example: Binomial Random Variable\nLet \\(Y\\) ~ Binomial(\\(n\\), \\(p\\)). We can think of \\(Y\\) as being the sum of \\(n\\) i.i.d. Bernoulli(\\(p\\)) random variables. Mathematically, this translates to\n\\[Y = \\sum_{i=1}^n X_i\\]\nwhere \\(X_i\\) is the indicator of a success on trial \\(i\\).\nUsing linearity of expectation,\n\\[\\mathbb{E}[Y] = \\sum_{i=1}^n \\mathbb{E}[X_i] = np\\]\nFor the variance, since each \\(X_i\\) is independent of the other, \\(\\text{Cov}(X_i, X_j) = 0\\),\n\\[\\text{Var}(Y) = \\sum_{i=1}^n \\text{Var}[X_i] = np(1-p)\\]\n\n\n17.3.7 Summary\n\nLet \\(X\\) be a random variable with distribution \\(P(X=x)\\).\n\n\\(\\mathbb{E}[X] = \\sum_{x} x P(X=x)\\)\n\\(\\text{Var}(X) = \\mathbb{E}[(X-\\mathbb{E}[X])^2] = \\mathbb{E}[X^2] - (\\mathbb{E}[X])^2\\)\n\nLet \\(a\\) and \\(b\\) be scalar values.\n\n\\(\\mathbb{E}[aX+b] = aE[\\mathbb{X}] + b\\)\n\\(\\text{Var}(aX+b) = a^2 \\text{Var}(X)\\)\n\nLet \\(Y\\) be another random variable.\n\n\\(\\mathbb{E}[X+Y] = \\mathbb{E}[X] + \\mathbb{E}[Y]\\)\n\\(\\text{Var}(X + Y) = \\text{Var}(X) + \\text{Var}(Y) + 2\\text{Cov}(X,Y)\\)\n\n\nNote that \\(\\text{Cov}(X,Y)\\) would equal 0 if \\(X\\) and \\(Y\\) are independent.", - "crumbs": [ - "17  Random Variables" - ] - }, - { - "objectID": "probability_2/probability_2.html", - "href": "probability_2/probability_2.html", - "title": "18  Estimators, Bias, and Variance", - "section": "", - "text": "18.1 Common Random Variables\nThere are several cases of random variables that appear often and have useful properties. Below are the ones we will explore further in this course. The numbers in parentheses are the parameters of a random variable, which are constants. Parameters define a random variable’s shape (i.e., distribution) and its values. For this lecture, we’ll focus more heavily on the bolded random variables and their special properties, but you should familiarize yourself with all the ones listed below:", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "probability_2/probability_2.html#common-random-variables", - "href": "probability_2/probability_2.html#common-random-variables", - "title": "18  Estimators, Bias, and Variance", - "section": "", - "text": "Bernoulli(\\(p\\))\n\nTakes on value 1 with probability \\(p\\), and 0 with probability \\((1 - p)\\).\nAKA the “indicator” random variable.\nLet \\(X\\) be a Bernoulli(\\(p\\)) random variable.\n\n\\(\\mathbb{E}[X] = 1 * p + 0 * (1-p) = p\\)\n\n\\(\\mathbb{E}[X^2] = 1^2 * p + 0 * (1-p) = p\\)\n\n\\(\\text{Var}(X) = \\mathbb{E}[X^2] - (\\mathbb{E}[X])^2 = p - p^2 = p(1-p)\\)\n\n\nBinomial(\\(n\\), \\(p\\))\n\nNumber of 1s in \\(n\\) independent Bernoulli(\\(p\\)) trials.\nLet \\(Y\\) be a Binomial(\\(n\\), \\(p\\)) random variable.\n\nThe distribution of \\(Y\\) is given by the binomial formula, and we can write \\(Y = \\sum_{i=1}^n X_i\\) where:\n\n\\(X_i\\) s the indicator of success on trial i. \\(X_i = 1\\) if trial i is a success, else 0.\nAll \\(X_i\\) are i.i.d. 
and Bernoulli(\\(p\\)).\n\n\\(\\mathbb{E}[Y] = \\sum_{i=1}^n \\mathbb{E}[X_i] = np\\)\n\\(\\text{Var}(X) = \\sum_{i=1}^n \\text{Var}(X_i) = np(1-p)\\)\n\n\\(X_i\\)’s are independent, so \\(\\text{Cov}(X_i, X_j) = 0\\) for all i, j.\n\n\n\nUniform on a finite set of values\n\nThe probability of each value is \\(\\frac{1}{\\text{(number of possible values)}}\\).\nFor example, a standard/fair die.\n\nUniform on the unit interval (0, 1)\n\nDensity is flat at 1 on (0, 1) and 0 elsewhere.\n\nNormal(\\(\\mu, \\sigma^2\\)), a.k.a Gaussian\n\n\\(f(x) = \\frac{1}{\\sigma\\sqrt{2\\pi}} \\exp\\left( -\\frac{1}{2}\\left(\\frac{x-\\mu}{\\sigma}\\right)^{\\!2}\\,\\right)\\)\n\n\n\n18.1.1 Example\nSuppose you win cash based on the number of heads you get in a series of 20 coin flips. Let \\(X_i = 1\\) if the \\(i\\)-th coin is heads, \\(0\\) otherwise. Which payout strategy would you choose?\nA. \\(Y_A = 10 * X_1 + 10 * X_2\\)\nB. \\(Y_B = \\sum_{i=1}^{20} X_i\\)\nC. \\(Y_C = 20 * X_1\\)\n\n\n\n\n\n\nSolution\n\n\n\n\n\nLet \\(X_1, X_2, ... X_{20}\\) be 20 i.i.d Bernoulli(0.5) random variables. Since the \\(X_i\\)’s are independent, \\(\\text{Cov}(X_i, X_j) = 0\\) for all pairs \\(i, j\\). Additionally, Since \\(X_i\\) is Bernoulli(0.5), we know that \\(\\mathbb{E}[X] = p = 0.5\\) and \\(\\text{Var}(X) = p(1-p) = 0.25\\). We can calculate the following for each scenario:\n\n\n\n\n\n\n\n\n\n\nA. \\(Y_A = 10 * X_1 + 10 * X_2\\)\nB. \\(Y_B = \\sum_{i=1}^{20} X_i\\)\nC. \\(Y_C = 20 * X_1\\)\n\n\n\n\nExpectation\n\\(\\mathbb{E}[Y_A] = 10 (0.5) + 10(0.5) = 10\\)\n\\(\\mathbb{E}[Y_B] = 0.5 + ... + 0.5 = 10\\)\n\\(\\mathbb{E}[Y_C] = 20(0.5) = 10\\)\n\n\nVariance\n\\(\\text{Var}(Y_A) = 10^2 (0.25) + 10^2 (0.25) = 50\\)\n\\(\\text{Var}(Y_B) = 0.25 + ... + 0.25 = 5\\)\n\\(\\text{Var}(Y_C) = 20^2 (0.25) = 100\\)\n\n\nStandard Deviation\n\\(\\text{SD}(Y_A) \\approx 7.07\\)\n\\(\\text{SD}(Y_B) \\approx 2.24\\)\n\\(\\text{SD}(Y_C) = 10\\)\n\n\n\nAs we can see, all the scenarios have the same expected value but different variances. The higher the variance, the greater the risk and uncertainty, so the “right” strategy depends on your personal preference. Would you choose the “safest” option B, the most “risky” option C, or somewhere in the middle (option A)?", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "probability_2/probability_2.html#sample-statistics", - "href": "probability_2/probability_2.html#sample-statistics", - "title": "18  Estimators, Bias, and Variance", - "section": "18.2 Sample Statistics", - "text": "18.2 Sample Statistics\nToday, we’ve talked extensively about populations; if we know the distribution of a random variable, we can reliably compute expectation, variance, functions of the random variable, etc. Note that:\n\nThe distribution of a population describes how a random variable behaves across all individuals of interest.\nThe distribution of a sample describes how a random variable behaves in a specific sample from the population.\n\nIn Data Science, however, we often do not have access to the whole population, so we don’t know its distribution. As such, we need to collect a sample and use its distribution to estimate or infer properties of the population. In cases like these, we can take several samples of size \\(n\\) from the population (an easy way to do this is using df.sample(n, replace=True)), and compute the mean of each sample. 
When sampling, we make the (big) assumption that we sample uniformly at random with replacement from the population; each observation in our sample is a random variable drawn i.i.d from our population distribution. Remember that our sample mean is a random variable since it depends on our randomly drawn sample! On the other hand, our population mean is simply a number (a fixed value).\n\n18.2.1 Sample Mean\nConsider an i.i.d. sample \\(X_1, X_2, ..., X_n\\) drawn from a population with mean 𝜇 and SD 𝜎. We define the sample mean as \\[\\bar{X}_n = \\frac{1}{n} \\sum_{i=1}^n X_i\\]\nThe expectation of the sample mean is given by: \\[\\begin{align}\n \\mathbb{E}[\\bar{X}_n] &= \\frac{1}{n} \\sum_{i=1}^n \\mathbb{E}[X_i] \\\\\n &= \\frac{1}{n} (n \\mu) \\\\\n &= \\mu\n\\end{align}\\]\nThe variance is given by: \\[\\begin{align}\n \\text{Var}(\\bar{X}_n) &= \\frac{1}{n^2} \\text{Var}( \\sum_{i=1}^n X_i) \\\\\n &= \\frac{1}{n^2} \\left( \\sum_{i=1}^n \\text{Var}(X_i) \\right) \\\\\n &= \\frac{1}{n^2} (n \\sigma^2) = \\frac{\\sigma^2}{n}\n\\end{align}\\]\n\\(\\bar{X}_n\\) is approximately normally distributed by the Central Limit Theorem (CLT).\n\n\n18.2.2 Central Limit Theorem\nIn Data 8 and in the previous lecture, you encountered the Central Limit Theorem (CLT). This is a powerful theorem for estimating the distribution of a population with mean \\(\\mu\\) and standard deviation \\(\\sigma\\) from a collection of smaller samples. The CLT tells us that if an i.i.d sample of size \\(n\\) is large, then the probability distribution of the sample mean is roughly normal with mean \\(\\mu\\) and SD of \\(\\frac{\\sigma}{\\sqrt{n}}\\). More generally, any theorem that provides the rough distribution of a statistic and doesn’t need the distribution of the population is valuable to data scientists! This is because we rarely know a lot about the population.\n\n\n\nImportantly, the CLT assumes that each observation in our samples is drawn i.i.d from the distribution of the population. In addition, the CLT is accurate only when \\(n\\) is “large”, but what counts as a “large” sample size depends on the specific distribution. If a population is highly symmetric and unimodal, we could need as few as \\(n=20\\); if a population is very skewed, we need a larger \\(n\\). If in doubt, you can bootstrap the sample mean and see if the bootstrapped distribution is bell-shaped. Classes like Data 140 investigate this idea in great detail. \nFor a more in-depth demo, check out onlinestatbook.\n\n\n18.2.3 Using the Sample Mean to Estimate the Population Mean\nNow let’s say we want to use the sample mean to estimate the population mean, for example, the average height of Cal undergraduates. We can typically collect a single sample, which has just one average. However, what if we happened, by random chance, to draw a sample with a different mean or spread than that of the population? We might get a skewed view of how the population behaves (consider the extreme case where we happen to sample the exact same value \\(n\\) times!).\n\n\n\nFor example, notice the difference in variation between these two distributions that are different in sample size. The distribution with a bigger sample size (\\(n=800\\)) is tighter around the mean than the distribution with a smaller sample size (\\(n=200\\)). Try plugging in these values into the standard deviation equation for the sample mean to make sense of this!\nApplying the CLT allows us to make sense of all of this and resolve this issue. 
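A quick simulation (ours, with a made-up skewed population) shows the same effect directly: the empirical SD of many simulated sample means closely tracks \(\frac{\sigma}{\sqrt{n}}\) as \(n\) grows.\n\nimport numpy as np\n\nrng = np.random.default_rng(100)\npopulation = rng.exponential(scale=10, size=100_000)   # a skewed, made-up population\nsigma = population.std()\n\nfor n in [50, 200, 800]:\n    # empirical SD of the sample mean across 2,000 simulated samples of size n\n    means = [rng.choice(population, size=n, replace=True).mean() for _ in range(2_000)]\n    print(n, np.std(means), sigma / np.sqrt(n))         # the last two columns roughly agree\n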
By drawing many samples, we can consider how the sample distribution varies across multiple subsets of the data. This allows us to approximate the properties of the population without the need to survey every single member.\nGiven this potential variance, it is also important that we consider the average value and spread of all possible sample means, and what this means for how big \\(n\\) should be. For every sample size, the expected value of the sample mean is the population mean: \\[\\mathbb{E}[\\bar{X}_n] = \\mu\\] We call the sample mean an unbiased estimator of the population mean and will explore this idea more in the next lecture.\n\n\n\n\n\n\nData 8 Recap: Square Root Law\n\n\n\n\n\nThe square root law (Data 8) states that if you increase the sample size by a factor, the SD of the sample mean decreases by the square root of the factor. \\[\\text{SD}(\\bar{X_n}) = \\frac{\\sigma}{\\sqrt{n}}\\] The sample mean is more likely to be close to the population mean if we have a larger sample size.", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "probability_2/probability_2.html#prediction-and-inference", - "href": "probability_2/probability_2.html#prediction-and-inference", - "title": "18  Estimators, Bias, and Variance", - "section": "18.3 Prediction and Inference", - "text": "18.3 Prediction and Inference\nAt this point in the course, we’ve spent a great deal of time working with models. When we first introduced the idea of modeling a few weeks ago, we did so in the context of prediction: using models to make accurate predictions about unseen data. Another reason we might build models is to better understand complex phenomena in the world around us. Inference is the task of using a model to infer the true underlying relationships between the feature and response variables. For example, if we are working with a set of housing data, prediction might ask: given the attributes of a house, how much is it worth? Inference might ask: how much does having a local park impact the value of a house?\nA major goal of inference is to draw conclusions about the full population of data given only a random sample. To do this, we aim to estimate the value of a parameter, which is a numerical function of the population (for example, the population mean \\(\\mu\\)). We use a collected sample to construct a statistic, which is a numerical function of the random sample (for example, the sample mean \\(\\bar{X}_n\\)). It’s helpful to think “p” for “parameter” and “population,” and “s” for “sample” and “statistic.”\nSince the sample represents a random subset of the population, any statistic we generate will likely deviate from the true population parameter, and it could have been different. We say that the sample statistic is an estimator of the true population parameter. Notationally, the population parameter is typically called \\(\\theta\\), while its estimator is denoted by \\(\\hat{\\theta}\\).\nTo address our inference question, we aim to construct estimators that closely estimate the value of the population parameter. We evaluate how “good” an estimator is by answering three questions:\n\nHow close is our answer to the parameter? (Risk / MSE) \\[ MSE(\\hat{\\theta}) = E[(\\hat{\\theta} - \\theta)]^2\\]\nDo we get the right answer for the parameter, on average? (Bias) \\[\\text{Bias}(\\hat{\\theta}) = E[\\hat{\\theta} - \\theta] = E[\\hat{\\theta}] - \\theta\\]\nHow variable is the answer? 
(Variance) \\[Var(\\hat{\\theta}) = E[(\\theta - E[\\theta])^2] \\]\n\nThis relationship can be illustrated with an archery analogy. Imagine that the center of the target is the \\(\\theta\\) and each arrow corresponds to a separate parameter estimate \\(\\hat{\\theta}\\)\n\n\n\nIdeally, we want our estimator to have low bias and low variance, but how can we mathematically quantify that? See Section 18.4 for more detail.\n\n18.3.1 Prediction as Estimation\nNow that we’ve established the idea of an estimator, let’s see how we can apply this learning to the modeling process. To do so, we’ll take a moment to formalize our data collection and models in the language of random variables.\nSay we are working with an input variable, \\(x\\), and a response variable, \\(Y\\). We assume that \\(Y\\) and \\(x\\) are linked by some relationship \\(g\\); in other words, \\(Y = g(x)\\) where \\(g\\) represents some “universal truth” or “law of nature” that defines the underlying relationship between \\(x\\) and \\(Y\\). In the image below, \\(g\\) is denoted by the red line.\nAs data scientists, however, we have no way of directly “seeing” the underlying relationship \\(g\\). The best we can do is collect observed data out in the real world to try to understand this relationship. Unfortunately, the data collection process will always have some inherent error (think of the randomness you might encounter when taking measurements in a scientific experiment). We say that each observation comes with some random error or noise term, \\(\\epsilon\\) (read: “epsilon”). This error is assumed to be a random variable with expectation \\(\\mathbb{E}(\\epsilon)=0\\), variance \\(\\text{Var}(\\epsilon) = \\sigma^2\\), and be i.i.d. across each observation. The existence of this random noise means that our observations, \\(Y(x)\\), are random variables.\n\n\n\nWe can only observe our random sample of data, represented by the blue points above. From this sample, we want to estimate the true relationship \\(g\\). We do this by constructing the model \\(\\hat{Y}(x)\\) to estimate \\(g\\).\n\\[\\text{True relationship: } g(x)\\]\n\\[\\text{Observed relationship: }Y = g(x) + \\epsilon\\]\n\\[\\text{Prediction: }\\hat{Y}(x)\\]\n\n\n\nWhen building models, it is also important to note that our choice of features will also significantly impact our estimation. In the plot below, you can see how the different models (green and purple) can lead to different estimates.\n\n\n\n\n18.3.1.1 Estimating a Linear Relationship\nIf we assume that the true relationship \\(g\\) is linear, we can express the response as \\(Y = f_{\\theta}(x)\\), where our true relationship is modeled by \\[Y = g(x) + \\epsilon\\] \\[ f_{\\theta}(x) = Y = \\theta_0 + \\sum_{j=1}^p \\theta_j x_j + \\epsilon\\]\n\n\n\n\n\n\nWhich expressions are random?\n\n\n\n\n\nIn our two equations above, the true relationship \\(g(x) = \\theta_0 + \\sum_{j=1}^p \\theta_j x_j\\) is not random, but \\(\\epsilon\\) is random. Hence, \\(Y = f_{\\theta}(x)\\) is also random.\n\n\n\nThis true relationship has true, unobservable parameters \\(\\theta\\), and it has random noise \\(\\epsilon\\), so we can never observe the true relationship. 
Instead, the next best thing we can do is obtain a sample \\(\\Bbb{X}\\), \\(\\Bbb{Y}\\) of \\(n\\) observed relationships, \\((x, Y)\\) and use it to train a model and obtain an estimate of \\(\\hat{\\theta}\\) \\[\\hat{Y}(x) = f_{\\hat{\\theta}}(x) = \\hat{\\theta_0} + \\sum_{j=1}^p \\hat{\\theta_j} x_j\\]\n\n\n\n\n\n\nWhich expressions are random?\n\n\n\n\n\nIn our estimating equation above, our sample \\(\\Bbb{X}\\), \\(\\Bbb{Y}\\) are random (often due to human error). Hence, the estimates we calculate from our samples \\(\\hat{\\theta}\\) are also random, so our predictor \\(\\hat{Y}(x)\\) is also random.\n\n\n\nNow taking a look at our original equations, we can see that they both have differing sources of randomness. For our observed relationship, \\(Y = g(x) + \\epsilon\\), \\(\\epsilon\\) represents errors which occur during or after the observation or measurement process. For the estimation model, the data we have is a random sample collected from the population, which was constructed from decisions made before the measurement process.", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "probability_2/probability_2.html#sec-bias-variance-tradeoff", - "href": "probability_2/probability_2.html#sec-bias-variance-tradeoff", - "title": "18  Estimators, Bias, and Variance", - "section": "18.4 Bias-Variance Tradeoff", - "text": "18.4 Bias-Variance Tradeoff\nRecall the model and the data we generated from that model in the last section:\n\\[\\text{True relationship: } g(x)\\]\n\\[\\text{Observed relationship: }Y = g(x) + \\epsilon\\]\n\\[\\text{Prediction: }\\hat{Y}(x)\\]\nWith this reformulated modeling goal, we can now revisit the Bias-Variance Tradeoff from two lectures ago (shown below):\n\n\n\nIn today’s lecture, we’ll explore a more mathematical version of the graph you see above by introducing the terms model risk, observation variance, model bias, and model variance. Eventually, we’ll work our way up to an updated version of the Bias-Variance Tradeoff graph that you see below\n\n\n\n\n18.4.1 Model Risk\nModel risk is defined as the mean square prediction error of the random variable \\(\\hat{Y}\\). It is an expectation across all samples we could have possibly gotten when fitting the model, which we can denote as random variables \\(X_1, X_2, \\ldots, X_n, Y\\). Model risk considers the model’s performance on any sample that is theoretically possible, rather than the specific data that we have collected.\n\\[\\text{model risk }=E\\left[(Y-\\hat{Y(x)})^2\\right]\\]\nWhat is the origin of the error encoded by model risk? Note that there are two types of errors:\n\nChance errors: happen due to randomness alone\n\nSource 1 (Observation Variance): randomness in new observations \\(Y\\) due to random noise \\(\\epsilon\\)\nSource 2 (Model Variance): randomness in the sample we used to train the models, as samples \\(X_1, X_2, \\ldots, X_n, Y\\) are random\n\n(Model Bias): non-random error due to our model being different from the true underlying function \\(g\\)\n\nRecall the data-generating process we established earlier. There is a true underlying relationship \\(g\\), observed data (with random noise) \\(Y\\), and model \\(\\hat{Y}\\).\n\n\n\nTo better understand model risk, we’ll zoom in on a single data point in the plot above.\n\n\n\nRemember that \\(\\hat{Y}(x)\\) is a random variable – it is the prediction made for \\(x\\) after being fit on the specific sample used for training. 
If we had used a different sample for training, a different prediction might have been made for this value of \\(x\\). To capture this, the diagram above considers both the prediction \\(\\hat{Y}(x)\\) made for a particular random training sample, and the expected prediction across all possible training samples, \\(E[\\hat{Y}(x)]\\).\nWe can use this simplified diagram to break down the prediction error into smaller components. First, start by considering the error on a single prediction, \\(Y(x)-\\hat{Y}(x)\\).\n\n\n\nWe can identify three components of this error.\n\n\n\nThat is, the error can be written as:\n\\[Y(x)-\\hat{Y}(x) = \\epsilon + \\left(g(x)-E\\left[\\hat{Y}(x)\\right]\\right) + \\left(E\\left[\\hat{Y}(x)\\right] - \\hat{Y}(x)\\right)\\] \\[\\newline \\]\nThe model risk is the expected square of the expression above, \\(E\\left[(Y(x)-\\hat{Y}(x))^2\\right]\\). If we square both sides and then take the expectation, we will get the following decomposition of model risk:\n\\[E\\left[(Y(x)-\\hat{Y}(x))^2\\right] = E[\\epsilon^2] + \\left(g(x)-E\\left[\\hat{Y}(x)\\right]\\right)^2 + E\\left[\\left(E\\left[\\hat{Y}(x)\\right] - \\hat{Y}(x)\\right)^2\\right]\\]\nIt looks like we are missing some cross-product terms when squaring the right-hand side, but it turns out that all of those cross-product terms are zero. The detailed derivation is out of scope for this class, but a proof is included at the end of this note for your reference.\nThis expression may look complicated at first glance, but we’ve actually already defined each term earlier in this lecture! Let’s look at them term by term.\n\n18.4.1.1 Observation Variance\nThe first term in the above decomposition is \\(E[\\epsilon^2]\\). Remember \\(\\epsilon\\) is the random noise when observing \\(Y\\), with expectation \\(\\mathbb{E}(\\epsilon)=0\\) and variance \\(\\text{Var}(\\epsilon) = \\sigma^2\\). We can show that \\(E[\\epsilon^2]\\) is the variance of \\(\\epsilon\\): \\[\n\\begin{align*}\n\\text{Var}(\\epsilon) &= E[\\epsilon^2] + \\left(E[\\epsilon]\\right)^2\\\\\n&= E[\\epsilon^2] + 0^2\\\\\n&= \\sigma^2.\n\\end{align*}\n\\]\nThis term describes how variable the random error \\(\\epsilon\\) (and \\(Y\\)) is for each observation. This is called the observation variance. It exists due to the randomness in our observations \\(Y\\). It is a form of chance error we talked about in the Sampling lecture.\n\\[\\text{observation variance} = \\text{Var}(\\epsilon) = \\sigma^2.\\]\nThe observation variance results from measurement errors when observing data or missing information that acts like noise. To reduce this observation variance, we could try to get more precise measurements, but it is often beyond the control of data scientists. Because of this, the observation variance \\(\\sigma^2\\) is sometimes called “irreducible error.”\n\n\n18.4.1.2 Model Variance\nWe will then look at the last term: \\(E\\left[\\left(E\\left[\\hat{Y}(x)\\right] - \\hat{Y}(x)\\right)^2\\right]\\). If you recall the definition of variance from the last lecture, this is precisely \\(\\text{Var}(\\hat{Y}(x))\\). We call this the model variance.\nIt describes how much the prediction \\(\\hat{Y}(x)\\) tends to vary when we fit the model on different samples. Remember the sample we collect can come out very differently, thus the prediction \\(\\hat{Y}(x)\\) will also be different. The model variance describes this variability due to the randomness in our sampling process. 
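The decomposition above can also be checked numerically. The following sketch (a hypothetical setup, not part of the original notes) uses a quadratic truth g, fits a deliberately too-simple linear model on many simulated training sets, and compares the empirical model risk at a single point to the sum of the observation variance, the squared model bias (defined formally just below), and the model variance.

```python
import numpy as np

rng = np.random.default_rng(1)

def g(x):
    return 1 + 0.5 * x**2      # hypothetical true relationship (nonlinear)

sigma = 1.0      # noise SD, so observation variance = sigma**2
n = 40           # training sample size
x0 = 2.0         # the fixed point x at which we evaluate the model
trials = 20_000

preds = np.empty(trials)
for t in range(trials):
    x = rng.uniform(-3, 3, size=n)
    y = g(x) + rng.normal(0, sigma, size=n)
    slope, intercept = np.polyfit(x, y, deg=1)   # fit a (misspecified) linear model
    preds[t] = intercept + slope * x0            # prediction at x0 for this training sample

# Fresh, independent observations Y(x0) = g(x0) + eps to estimate the model risk
y_new = g(x0) + rng.normal(0, sigma, size=trials)
model_risk = np.mean((y_new - preds) ** 2)

obs_var   = sigma ** 2
bias      = preds.mean() - g(x0)          # model bias at x0
model_var = preds.var()                   # model variance at x0

print(f"model risk:                    {model_risk:.3f}")
print(f"obs var + bias^2 + model var:  {obs_var + bias**2 + model_var:.3f}")
```

With enough simulated training sets, the two printed numbers agree up to simulation error.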
Like observation variance, model variance is also a form of chance error, even though the sources of randomness are different.\n\\[\\text{model variance} = \\text{Var}(\\hat{Y}(x)) = E\\left[\\left(\\hat{Y}(x) - E\\left[\\hat{Y}(x)\\right]\\right)^2\\right]\\]\nThe main reason for large model variance is overfitting: we pay so much attention to the details in our sample that small differences in our random sample lead to large differences in the fitted model. To remedy this, we try to reduce model complexity (e.g., take out some features and limit the magnitude of estimated model coefficients) so that we do not fit our model to the noise.\n\n\n18.4.1.3 Model Bias\nFinally, the second term is \\(\\left(g(x)-E\\left[\\hat{Y}(x)\\right]\\right)^2\\). What is this? The term \\(E\\left[\\hat{Y}(x)\\right] - g(x)\\) is called the model bias.\nRemember that \\(g(x)\\) is the fixed underlying truth and \\(\\hat{Y}(x)\\) is our fitted model, which is random. Model bias therefore measures how far off \\(g(x)\\) and \\(\\hat{Y}(x)\\) are on average over all possible samples.\n\\[\\text{model bias} = E\\left[\\hat{Y}(x) - g(x)\\right] = E\\left[\\hat{Y}(x)\\right] - g(x)\\]\nThe model bias is not random; it’s an average measure for a specific individual \\(x\\). If bias is positive, our model tends to overestimate \\(g(x)\\); if it’s negative, our model tends to underestimate \\(g(x)\\). And if it’s 0, we can say that our model is unbiased.\n\n\n\n\n\n\nUnbiased Estimators\n\n\n\nAn unbiased model has a \\(\\text{model bias } = 0\\). In other words, our model predicts \\(g(x)\\) on average.\nSimilarly, we can define bias for estimators like the mean. The sample mean is an unbiased estimator of the population mean: by linearity of expectation, \\(\\mathbb{E}[\\bar{X}_n] = \\mu\\). Therefore, the \\(\\text{estimator bias } = \\mathbb{E}[\\bar{X}_n] - \\mu = 0\\).\n\n\nThere are two main reasons for large model bias:\n\nUnderfitting: our model is too simple for the data\nLack of domain knowledge: we don’t understand what features are useful for the response variable\n\nTo fix this, we increase model complexity (but we don’t want to overfit!) or consult domain experts to see which models make sense. You can start to see a tradeoff here: if we increase model complexity, we decrease the model bias, but we also risk increasing the model variance.\n\n\n\n18.4.2 The Decomposition\nTo summarize:\n\nThe model risk, \\(\\mathbb{E}\\left[(Y(x)-\\hat{Y}(x))^2\\right]\\), is the mean squared prediction error of the model. It is an expectation and is therefore a fixed number (for a given \\(x\\)).\nThe observation variance, \\(\\sigma^2\\), is the variance of the random noise in the observations. It describes how variable the random error \\(\\epsilon\\) is for each observation and cannot be addressed by modeling.\nThe model bias, \\(\\mathbb{E}\\left[\\hat{Y}(x)\\right]-g(x)\\), is how “off” \\(\\hat{Y}(x)\\) is as an estimator of the true underlying relationship \\(g(x)\\).\nThe model variance, \\(\\text{Var}(\\hat{Y}(x))\\), describes how much the prediction \\(\\hat{Y}(x)\\) tends to vary when we fit the model on different samples.\n\nThe above definitions enable us to simplify the decomposition of model risk from before as:\n\\[ E[(Y(x) - \\hat{Y}(x))^2] = \\sigma^2 + (E[\\hat{Y}(x)] - g(x))^2 + \\text{Var}(\\hat{Y}(x)) \\] \\[\\text{model risk } = \\text{observation variance} + (\\text{model bias})^2 + \\text{model variance}\\]\nThis is known as the bias-variance tradeoff. What does it mean? 
Remember that the model risk is a measure of the model’s performance. Our goal in building models is to keep model risk low; this means that we will want to ensure that each component of model risk is kept at a small value.\nObservation variance is an inherent, random part of the data collection process. We aren’t able to reduce the observation variance, so we’ll focus our attention on the model bias and model variance.\nIn the Feature Engineering lecture, we considered the issue of overfitting. We saw that the model’s error or bias tends to decrease as model complexity increases — if we design a highly complex model, it will tend to make predictions that are closer to the true relationship \\(g\\). At the same time, model variance tends to increase as model complexity increases; a complex model may overfit to the training data, meaning that small differences in the random samples used for training lead to large differences in the fitted model. We have a problem. To decrease model bias, we could increase the model’s complexity, which would lead to overfitting and an increase in model variance. Alternatively, we could decrease model variance by decreasing the model’s complexity at the cost of increased model bias due to underfitting.\n\n\n\nWe need to strike a balance. Our goal in model creation is to use a complexity level that is high enough to keep bias low, but not so high that model variance is large.", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "probability_2/probability_2.html#bonus-proof-of-bias-variance-decomposition", - "href": "probability_2/probability_2.html#bonus-proof-of-bias-variance-decomposition", - "title": "18  Estimators, Bias, and Variance", - "section": "18.5 [Bonus] Proof of Bias-Variance Decomposition", - "text": "18.5 [Bonus] Proof of Bias-Variance Decomposition\nThis section walks through the detailed derivation of the Bias-Variance Decomposition in the Bias-Variance Tradeoff section above, and this content is out of scope.\n\n\n\n\n\n\nClick to show\n\n\n\n\n\nWe want to prove that the model risk can be decomposed as\n\\[\n\\begin{align*}\nE\\left[(Y(x)-\\hat{Y}(x))^2\\right] &= E[\\epsilon^2] + \\left(g(x)-E\\left[\\hat{Y}(x)\\right]\\right)^2 + E\\left[\\left(E\\left[\\hat{Y}(x)\\right] - \\hat{Y}(x)\\right)^2\\right].\n\\end{align*}\n\\]\nTo prove this, we will first need the following lemma:\n\nIf \\(V\\) and \\(W\\) are independent random variables then \\(E[VW] = E[V]E[W]\\).\n\nWe will prove this in the discrete finite case. Trust that it’s true in greater generality.\nThe job is to calculate the weighted average of the values of \\(VW\\), where the weights are the probabilities of those values. 
Here goes.\n\\[\\begin{align*}\nE[VW] ~ &= ~ \\sum_v\\sum_w vwP(V=v \\text{ and } W=w) \\\\\n&= ~ \\sum_v\\sum_w vwP(V=v)P(W=w) ~~~~ \\text{by independence} \\\\\n&= ~ \\sum_v vP(V=v)\\sum_w wP(W=w) \\\\\n&= ~ E[V]E[W]\n\\end{align*}\\]\nNow we go into the actual proof:\n\n18.5.1 Goal\nDecompose the model risk into recognizable components.\n\n\n18.5.2 Step 1\n\\[\n\\begin{align*}\n\\text{model risk} ~ &= ~ E\\left[\\left(Y - \\hat{Y}(x)\\right)^2 \\right] \\\\\n&= ~ E\\left[\\left(g(x) + \\epsilon - \\hat{Y}(x)\\right)^2 \\right] \\\\\n&= ~ E\\left[\\left(\\epsilon + \\left(g(x)- \\hat{Y}(x)\\right)\\right)^2 \\right] \\\\\n&= ~ E\\left[\\epsilon^2\\right] + 2E\\left[\\epsilon \\left(g(x)- \\hat{Y}(x)\\right)\\right] + E\\left[\\left(g(x) - \\hat{Y}(x)\\right)^2\\right]\\\\\n\\end{align*}\n\\]\nOn the right hand side:\n\nThe first term is the observation variance \\(\\sigma^2\\).\nThe cross product term is 0 because \\(\\epsilon\\) is independent of \\(g(x) - \\hat{Y}(x)\\) and \\(E(\\epsilon) = 0\\)\nThe last term is the mean squared difference between our predicted value and the value of the true function at \\(x\\)\n\n\n\n18.5.3 Step 2\nAt this stage we have\n\\[\n\\text{model risk} ~ = ~ E\\left[\\epsilon^2\\right] + E\\left[\\left(g(x) - \\hat{Y}(x)\\right)^2\\right]\n\\]\nWe don’t yet have a good understanding of \\(g(x) - \\hat{Y}(x)\\). But we do understand the deviation \\(D_{\\hat{Y}(x)} = \\hat{Y}(x) - E\\left[\\hat{Y}(x)\\right]\\). We know that\n\n\\(E\\left[D_{\\hat{Y}(x)}\\right] ~ = ~ 0\\)\n\\(E\\left[D_{\\hat{Y}(x)}^2\\right] ~ = ~ \\text{model variance}\\)\n\nSo let’s add and subtract \\(E\\left[\\hat{Y}(x)\\right]\\) and see if that helps.\n\\[\ng(x) - \\hat{Y}(x) ~ = ~ \\left(g(x) - E\\left[\\hat{Y}(x)\\right] \\right) + \\left(E\\left[\\hat{Y}(x)\\right] - \\hat{Y}(x)\\right)\n\\]\nThe first term on the right hand side is the model bias at \\(x\\). The second term is \\(-D_{\\hat{Y}(x)}\\). So\n\\[\ng(x) - \\hat{Y}(x) ~ = ~ \\text{model bias} - D_{\\hat{Y}(x)}\n\\]\n\n\n18.5.4 Step 3\nRemember that the model bias at \\(x\\) is a constant, not a random variable. Think of it as your favorite number, say 10. 
Then \\[\n\\begin{align*}\nE\\left[ \\left(g(x) - \\hat{Y}(x)\\right)^2 \\right] ~ &= ~ \\text{model bias}^2 - 2(\\text{model bias})E\\left[D_{\\hat{Y}(x)}\\right] + E\\left[D_{\\hat{Y}(x)}^2\\right] \\\\\n&= ~ \\text{model bias}^2 - 0 + \\text{model variance} \\\\\n&= ~ \\text{model bias}^2 + \\text{model variance}\n\\end{align*}\n\\]\nAgain, the cross-product term is \\(0\\) because \\(E\\left[D_{\\hat{Y}(x)}\\right] ~ = ~ 0\\).\n\n\n18.5.5 Step 4: Bias-Variance Decomposition\nIn Step 2, we had:\n\\[\n\\text{model risk} ~ = ~ \\text{observation variance} + E\\left[\\left(g(x) - \\hat{Y}(x)\\right)^2\\right]\n\\]\nStep 3 showed:\n\\[\nE\\left[ \\left(g(x) - \\hat{Y}(x)\\right)^2 \\right] ~ = ~ \\text{model bias}^2 + \\text{model variance}\n\\]\nThus, we have proven the bias-variance decomposition:\n\\[\n\\text{model risk} = \\text{observation variance} + \\text{model bias}^2 + \\text{model variance}.\n\\]\nThat is,\n\\[\nE\\left[(Y(x)-\\hat{Y}(x))^2\\right] = \\sigma^2 + \\left(E\\left[\\hat{Y}(x)\\right] - g(x)\\right)^2 + E\\left[\\left(\\hat{Y}(x)-E\\left[\\hat{Y}(x)\\right]\\right)^2\\right]\n\\]", - "crumbs": [ - "18  Estimators, Bias, and Variance" - ] - }, - { - "objectID": "inference_causality/inference_causality.html", - "href": "inference_causality/inference_causality.html", - "title": "19  Causal Inference and Confounding", - "section": "", - "text": "19.1 Parameter Inference: Interpreting Regression Coefficients\nThere are two main reasons why we build models:\nRecall the framework we established in the last lecture. The relationship between datapoints is given by \\(Y = g(x) + \\epsilon\\), where \\(g(x)\\) is the true underlying relationship, and \\(\\epsilon\\) represents randomness. If we assume \\(g(x)\\) is linear, we can express this relationship in terms of the unknown, true model parameters \\(\\theta\\).\n\\[f_{\\theta}(x) = g(x) + \\epsilon = \\theta_0 + \\theta_1 x_1 + \\ldots + \\theta_p x_p + \\epsilon\\]\nOur model attempts to estimate each true population parameter \\(\\theta_i\\) using the sample estimates \\(\\hat{\\theta}_i\\) calculated from the design matrix \\(\\Bbb{X}\\) and response vector \\(\\Bbb{Y}\\).\n\\[f_{\\hat{\\theta}}(x) = \\hat{\\theta}_0 + \\hat{\\theta}_1 x_1 + \\ldots + \\hat{\\theta}_p x_p\\]\nLet’s pause for a moment. At this point, we’re very used to working with the idea of a model parameter. But what exactly does each coefficient \\(\\theta_i\\) actually mean? We can think of each \\(\\theta_i\\) as a slope of the linear model. If all other variables are held constant, a unit change in \\(x_i\\) will result in a \\(\\theta_i\\) change in \\(f_{\\theta}(x)\\). Broadly speaking, a large value of \\(\\theta_i\\) means that the feature \\(x_i\\) has a large effect on the response; conversely, a small value of \\(\\theta_i\\) means that \\(x_i\\) has little effect on the response. In the extreme case, if the true parameter \\(\\theta_i\\) is 0, then the feature \\(x_i\\) has no effect on \\(Y(x)\\).\nIf the true parameter \\(\\theta_i\\) for a particular feature is 0, this tells us something pretty significant about the world: there is no underlying relationship between \\(x_i\\) and \\(Y(x)\\)! But how can we test if a parameter is actually 0? As a baseline, we go through our usual process of drawing a sample, using this data to fit a model, and computing an estimate \\(\\hat{\\theta}_i\\). However, we also need to consider that if our random sample comes out differently, we may find a different result for \\(\\hat{\\theta}_i\\). 
To infer if the true parameter \\(\\theta_i\\) is 0, we want to draw our conclusion from the distribution of \\(\\hat{\\theta}_i\\) estimates we could have drawn across all other random samples. This is where hypothesis testing comes in handy!\nTo test if the true parameter \\(\\theta_i\\) is 0, we construct a hypothesis test where our null hypothesis states that the true parameter \\(\\theta_i\\) is 0, and the alternative hypothesis states that the true parameter \\(\\theta_i\\) is not 0. If our p-value is smaller than our cutoff value (usually p = 0.05), we reject the null hypothesis in favor of the alternative hypothesis.", - "crumbs": [ - "19  Causal Inference and Confounding" - ] - }, - { - "objectID": "inference_causality/inference_causality.html#parameter-inference-interpreting-regression-coefficients", - "href": "inference_causality/inference_causality.html#parameter-inference-interpreting-regression-coefficients", - "title": "19  Causal Inference and Confounding", - "section": "", - "text": "Prediction: using our model to make accurate predictions about unseen data\nInference: using our model to draw conclusions about the underlying relationship(s) between our features and response. We want to understand the complex phenomena occurring in the world we live in. While training is the process of fitting a model, inference is the process of making predictions.", - "crumbs": [ - "19  Causal Inference and Confounding" - ] - }, - { - "objectID": "inference_causality/inference_causality.html#review-bootstrap-resampling", - "href": "inference_causality/inference_causality.html#review-bootstrap-resampling", - "title": "19  Causal Inference and Confounding", - "section": "19.2 Review: Bootstrap Resampling", - "text": "19.2 Review: Bootstrap Resampling\nTo determine the properties (e.g., variance) of the sampling distribution of an estimator, we’d need access to the population. Ideally, we’d want to consider all possible samples in the population, compute an estimate for each sample, and study the distribution of those estimates.\n\n\n\nHowever, this can be quite expensive and time-consuming. Even more importantly, we don’t have access to the population —— we only have one random sample from the population. How can we consider all possible samples if we only have one?\nBootstrapping comes in handy here! With bootstrapping, we treat our random sample as a “population” and resample from it with replacement. Intuitively, a random sample resembles the population (if it is big enough), so a random resample also resembles a random sample of the population. When sampling, there are a couple things to keep in mind:\n\nWe need to sample the same way we constructed the original sample. Typically, this involves taking a simple random sample with replacement.\nNew samples must be the same size as the original sample. We need to accurately model the variability of our estimates.\n\n\n\n\n\n\n\nWhy must we resample with replacement?\n\n\n\n\n\nGiven an original sample of size \\(n\\), we want a resample that has the same size \\(n\\) as the original. Sampling without replacement will give us the original sample with shuffled rows. Hence, when we calculate summary statistics like the average, our sample without replacement will always have the same average as the original sample, defeating the purpose of a bootstrap.\n\n\n\n\n\n\nBootstrap resampling is a technique for estimating the sampling distribution of an estimator. 
To execute it, we can follow the pseudocode below:\ncollect a random sample of size n (called the bootstrap population)\n\ninitiate a list of estimates\n\nrepeat 10,000 times:\n resample with replacement from the bootstrap population\n apply estimator f to the resample\n store in list\n\nlist of estimates is the bootstrapped sampling distribution of f\nHow well does bootstrapping actually represent our population? The bootstrapped sampling distribution of an estimator does not exactly match the sampling distribution of that estimator, but it is often close. Similarly, the variance of the bootstrapped distribution is often close to the true variance of the estimator. The example below displays the results of different bootstraps from a known population using a sample size of \\(n=50\\).\n\n\n\nIn the real world, we don’t know the population distribution. The center of the bootstrapped distribution is the estimator applied to our original sample, so we have no way of understanding the estimator’s true expected value; the center and spread of our bootstrap are approximations. The quality of our bootstrapped distribution also depends on the quality of our original sample. If our original sample was not representative of the population (like Sample 5 in the image above), then the bootstrap is next to useless. In general, bootstrapping works better for large samples, when the population distribution is not heavily skewed (no outliers), and when the estimator is “low variance” (insensitive to extreme values).\n\nAlthough our bootstrapped sample distribution does not exactly match the sampling distribution of the population, we can see that it is relatively close. This demonstrates the benefit of bootstrapping —— without knowing the actual population distribution, we can still roughly approximate the true slope for the model by using only a single random sample of 20 cars.", - "crumbs": [ - "19  Causal Inference and Confounding" - ] - }, - { - "objectID": "inference_causality/inference_causality.html#collinearity", - "href": "inference_causality/inference_causality.html#collinearity", - "title": "19  Causal Inference and Confounding", - "section": "19.3 Collinearity", - "text": "19.3 Collinearity\n\n19.3.1 Hypothesis Testing Through Bootstrap: Snowy Plover Demo\nWe can conduct the hypothesis testing described earlier through bootstrapping (this equivalence can be proven through the duality argument, which is out of scope for this class). We use bootstrapping to compute approximate 95% confidence intervals for each \\(\\theta_i\\). If the interval doesn’t contain 0, we reject the null hypothesis at the p=5% level. Otherwise, the data is consistent with the null, as the true parameter could possibly be 0.\nTo show an example of this hypothesis testing process, we’ll work with the snowy plover dataset throughout this section. The data are about the eggs and newly hatched chicks of the Snowy Plover. The data were collected at the Point Reyes National Seashore by a former student at Berkeley. Here’s a parent bird and some eggs.\n\n\n\nNote that Egg Length and Egg Breadth (widest diameter) are measured in millimeters, and Egg Weight and Bird Weight are measured in grams. 
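Translated into Python, the bootstrap pseudocode above might look like the following generic sketch (not code from the notes; the estimator and the placeholder sample are assumptions for illustration).

```python
import numpy as np

rng = np.random.default_rng(100)

def bootstrap_distribution(bootstrap_population, estimator, num_resamples=10_000):
    """Approximate the sampling distribution of `estimator` by resampling
    with replacement from the observed sample (the 'bootstrap population')."""
    n = len(bootstrap_population)
    estimates = []
    for _ in range(num_resamples):
        resample = rng.choice(bootstrap_population, size=n, replace=True)
        estimates.append(estimator(resample))
    return np.array(estimates)

# Example usage with a placeholder sample and the mean as the estimator
observed_sample = rng.normal(loc=10, scale=3, size=50)
boot_means = bootstrap_distribution(observed_sample, np.mean)
print("Bootstrap SE of the mean:", boot_means.std())
```

With this machinery in mind, we return to the snowy plover measurements described above.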
For reference, a standard paper clip weighs about one gram.\n\n\nCode\nimport pandas as pd\neggs = pd.read_csv(\"data/snowy_plover.csv\")\neggs.head(5)\n\n\n\n\n\n\n\n\n\negg_weight\negg_length\negg_breadth\nbird_weight\n\n\n\n\n0\n7.4\n28.80\n21.84\n5.2\n\n\n1\n7.7\n29.04\n22.45\n5.4\n\n\n2\n7.9\n29.36\n22.48\n5.6\n\n\n3\n7.5\n30.10\n21.71\n5.3\n\n\n4\n8.3\n30.17\n22.75\n5.9\n\n\n\n\n\n\n\nOur goal will be to predict the weight of a newborn plover chick, which we assume follows the true relationship \\(Y = f_{\\theta}(x)\\) below.\n\\[\\text{bird\\_weight} = \\theta_0 + \\theta_1 \\text{egg\\_weight} + \\theta_2 \\text{egg\\_length} + \\theta_3 \\text{egg\\_breadth} + \\epsilon\\]\nNote that for each \\(i\\), the parameter \\(\\theta_i\\) is a fixed number, but it is unobservable. We can only estimate it. The random error \\(\\epsilon\\) is also unobservable, but it is assumed to have expectation 0 and be independent and identically distributed across eggs.\nSay we wish to determine if the egg_weight impacts the bird_weight of a chick – we want to infer if \\(\\theta_1\\) is equal to 0.\nFirst, we define our hypotheses:\n\nNull hypothesis: the true parameter \\(\\theta_1\\) is 0; any variation is due to random chance.\nAlternative hypothesis: the true parameter \\(\\theta_1\\) is not 0.\n\nNext, we use our data to fit a model \\(\\hat{Y} = f_{\\hat{\\theta}}(x)\\) that approximates the relationship above. This gives us the observed value of \\(\\hat{\\theta}_1\\) from our data.\n\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\n\nX = eggs[[\"egg_weight\", \"egg_length\", \"egg_breadth\"]]\nY = eggs[\"bird_weight\"]\n\nmodel = LinearRegression()\nmodel.fit(X, Y)\n\n# This gives an array containing the fitted model parameter estimates\nthetas = model.coef_\n\n# Put the parameter estimates in a nice table for viewing\ndisplay(pd.DataFrame(\n [model.intercept_] + list(model.coef_),\n columns=['theta_hat'],\n index=['intercept', 'egg_weight', 'egg_length', 'egg_breadth']\n))\n\nprint(\"RMSE\", np.mean((Y - model.predict(X)) ** 2))\n\n\n\n\n\n\n\n\ntheta_hat\n\n\n\n\nintercept\n-4.605670\n\n\negg_weight\n0.431229\n\n\negg_length\n0.066570\n\n\negg_breadth\n0.215914\n\n\n\n\n\n\n\nRMSE 0.04547085380275766\n\n\nOur single sample of data gives us the value of \\(\\hat{\\theta}_1=0.431\\). To get a sense of how this estimate might vary if we were to draw different random samples, we will use bootstrapping. As a refresher, to construct a bootstrap sample, we will draw a resample from the collected data that:\n\nHas the same sample size as the collected data\nIs drawn with replacement (this ensures that we don’t draw the exact same sample every time!)\n\nWe draw a bootstrap sample, use this sample to fit a model, and record the result for \\(\\hat{\\theta}_1\\) on this bootstrapped sample. We then repeat this process many times to generate a bootstrapped empirical distribution of \\(\\hat{\\theta}_1\\). This gives us an estimate of what the true distribution of \\(\\hat{\\theta}_1\\) across all possible samples might look like.\n\n# Set a random seed so you generate the same random sample as staff\n# In the \"real world\", we wouldn't do this\nimport numpy as np\nnp.random.seed(1337)\n\n# Set the sample size of each bootstrap sample\nn = len(eggs)\n\n# Create a list to store all the bootstrapped estimates\nestimates = []\n\n# Generate a bootstrap resample from `eggs` and find an estimate for theta_1 using this sample. 
\n# Repeat 10000 times.\nfor i in range(10000):\n # draw a bootstrap sample\n bootstrap_resample = eggs.sample(n, replace=True)\n X_bootstrap = bootstrap_resample[[\"egg_weight\", \"egg_length\", \"egg_breadth\"]]\n Y_bootstrap = bootstrap_resample[\"bird_weight\"]\n \n # use bootstrapped sample to fit a model\n bootstrap_model = LinearRegression()\n bootstrap_model.fit(X_bootstrap, Y_bootstrap)\n bootstrap_thetas = bootstrap_model.coef_\n \n # record the result for theta_1\n estimates.append(bootstrap_thetas[0])\n \n# calculate the 95% confidence interval \nlower = np.percentile(estimates, 2.5, axis=0)\nupper = np.percentile(estimates, 97.5, axis=0)\nconf_interval = (lower, upper)\nconf_interval\n\n(-0.25864811956848754, 1.1034243854204049)\n\n\nOur bootstrapped 95% confidence interval for \\(\\theta_1\\) is \\([-0.259, 1.103]\\). Immediately, we can see that 0 is indeed contained in this interval – this means that we cannot conclude that \\(\\theta_1\\) is non-zero! More formally, we fail to reject the null hypothesis (that \\(\\theta_1\\) is 0) under a 5% p-value cutoff.\nWe can repeat this process to construct 95% confidence intervals for the other parameters of the model.\n\n\nCode\nnp.random.seed(1337)\n\ntheta_0_estimates = []\ntheta_1_estimates = []\ntheta_2_estimates = []\ntheta_3_estimates = []\n\n\nfor i in range(10000):\n bootstrap_resample = eggs.sample(n, replace=True)\n X_bootstrap = bootstrap_resample[[\"egg_weight\", \"egg_length\", \"egg_breadth\"]]\n Y_bootstrap = bootstrap_resample[\"bird_weight\"]\n \n bootstrap_model = LinearRegression()\n bootstrap_model.fit(X_bootstrap, Y_bootstrap)\n bootstrap_theta_0 = bootstrap_model.intercept_\n bootstrap_theta_1, bootstrap_theta_2, bootstrap_theta_3 = bootstrap_model.coef_\n \n theta_0_estimates.append(bootstrap_theta_0)\n theta_1_estimates.append(bootstrap_theta_1)\n theta_2_estimates.append(bootstrap_theta_2)\n theta_3_estimates.append(bootstrap_theta_3)\n \ntheta_0_lower, theta_0_upper = np.percentile(theta_0_estimates, 2.5), np.percentile(theta_0_estimates, 97.5)\ntheta_1_lower, theta_1_upper = np.percentile(theta_1_estimates, 2.5), np.percentile(theta_1_estimates, 97.5)\ntheta_2_lower, theta_2_upper = np.percentile(theta_2_estimates, 2.5), np.percentile(theta_2_estimates, 97.5)\ntheta_3_lower, theta_3_upper = np.percentile(theta_3_estimates, 2.5), np.percentile(theta_3_estimates, 97.5)\n\n# Make a nice table to view results\npd.DataFrame({\"lower\":[theta_0_lower, theta_1_lower, theta_2_lower, theta_3_lower], \"upper\":[theta_0_upper, \\\n theta_1_upper, theta_2_upper, theta_3_upper]}, index=[\"theta_0\", \"theta_1\", \"theta_2\", \"theta_3\"])\n\n\n\n\n\n\n\n\n\nlower\nupper\n\n\n\n\ntheta_0\n-15.278542\n5.161473\n\n\ntheta_1\n-0.258648\n1.103424\n\n\ntheta_2\n-0.099138\n0.208557\n\n\ntheta_3\n-0.257141\n0.758155\n\n\n\n\n\n\n\nSomething’s off here. Notice that 0 is included in the 95% confidence interval for every parameter of the model. Using the interpretation we outlined above, this would suggest that we can’t say for certain that any of the input variables impact the response variable! This makes it seem like our model can’t make any predictions – and yet, each model we fit in our bootstrap experiment above could very much make predictions of \\(Y\\).\nHow can we explain this result? Think back to how we first interpreted the parameters of a linear model. 
We treated each \\(\\theta_i\\) as a slope, where a unit increase in \\(x_i\\) leads to a \\(\\theta_i\\) increase in \\(Y\\), if all other variables are held constant. It turns out that this last assumption is very important. If variables in our model are somehow related to one another, then it might not be possible to have a change in one of them while holding the others constant. This means that our interpretation framework is no longer valid! In the models we fit above, we incorporated egg_length, egg_breadth, and egg_weight as input variables. These variables are very likely related to one another – an egg with large egg_length and egg_breadth will likely be heavy in egg_weight. This means that the model parameters cannot be meaningfully interpreted as slopes.\nTo support this conclusion, we can visualize the relationships between our feature variables. Notice the strong positive association between the features.\n\n\nCode\nimport seaborn as sns\nsns.pairplot(eggs[[\"egg_length\", \"egg_breadth\", \"egg_weight\", 'bird_weight']]);\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nThis issue is known as collinearity, sometimes also called multicollinearity. Collinearity occurs when one feature can be predicted fairly accurately by a linear combination of the other features, which happens when one feature is highly correlated with the others.\nWhy is collinearity a problem? Its consequences span several aspects of the modeling process:\n\nInference: Slopes can’t be interpreted for an inference task.\nModel Variance: If features strongly influence one another, even small changes in the sampled data can lead to large changes in the estimated slopes.\nUnique Solution: If one feature is a linear combination of the other features, the design matrix will not be full rank, and \\(\\mathbb{X}^{\\top}\\mathbb{X}\\) is not invertible. This means that least squares does not have a unique solution. See this section of Course Note 12 for more on this.\n\nThe take-home point is that we need to be careful with what features we select for modeling. 
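One quick diagnostic for collinearity (an aside, not from the original notes) is to inspect the pairwise correlations among the features, or to compute a variance inflation factor (VIF) for each feature by regressing it on the others; the sketch below reloads the snowy plover data used earlier.

```python
import pandas as pd
from sklearn.linear_model import LinearRegression

eggs = pd.read_csv("data/snowy_plover.csv")
features = eggs[["egg_weight", "egg_length", "egg_breadth"]]

# Pairwise correlations: values near 1 signal strongly related features
print(features.corr())

# Variance inflation factor: VIF_j = 1 / (1 - R^2_j), where R^2_j comes from
# regressing feature j on the remaining features. Large VIFs indicate collinearity.
for col in features.columns:
    X_others = features.drop(columns=[col])
    r2 = LinearRegression().fit(X_others, features[col]).score(X_others, features[col])
    print(f"VIF({col}) = {1 / (1 - r2):.2f}")
```

A VIF far above 1 for a feature means that much of its variation is already explained by the other features.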
If two features likely encode similar information, it is often a good idea to choose only one of them as an input variable.\n\n\n19.3.2 A Simpler Model\nLet us now consider a more interpretable model: we instead assume a true relationship using only egg weight:\n\\[f_\\theta(x) = \\theta_0 + \\theta_1 \\text{egg\\_weight} + \\epsilon\\]\n\n\nCode\nfrom sklearn.linear_model import LinearRegression\nX_int = eggs[[\"egg_weight\"]]\nY_int = eggs[\"bird_weight\"]\n\nmodel_int = LinearRegression()\n\nmodel_int.fit(X_int, Y_int)\n\n# This gives an array containing the fitted model parameter estimates\nthetas_int = model_int.coef_\n\n# Put the parameter estimates in a nice table for viewing\npd.DataFrame({\"theta_hat\":[model_int.intercept_, thetas_int[0]]}, index=[\"theta_0\", \"theta_1\"])\n\n\n\n\n\n\n\n\n\ntheta_hat\n\n\n\n\ntheta_0\n-0.058272\n\n\ntheta_1\n0.718515\n\n\n\n\n\n\n\n\n\nCode\nimport matplotlib.pyplot as plt\n\n# Set a random seed so you generate the same random sample as staff\n# In the \"real world\", we wouldn't do this\nnp.random.seed(1337)\n\n# Set the sample size of each bootstrap sample\nn = len(eggs)\n\n# Create a list to store all the bootstrapped estimates\nestimates_int = []\n\n# Generate a bootstrap resample from `eggs` and find an estimate for theta_1 using this sample. \n# Repeat 10000 times.\nfor i in range(10000):\n bootstrap_resample_int = eggs.sample(n, replace=True)\n X_bootstrap_int = bootstrap_resample_int[[\"egg_weight\"]]\n Y_bootstrap_int = bootstrap_resample_int[\"bird_weight\"]\n \n bootstrap_model_int = LinearRegression()\n bootstrap_model_int.fit(X_bootstrap_int, Y_bootstrap_int)\n bootstrap_thetas_int = bootstrap_model_int.coef_\n \n estimates_int.append(bootstrap_thetas_int[0])\n\nplt.figure(dpi=120)\nsns.histplot(estimates_int, stat=\"density\")\nplt.xlabel(r\"$\\hat{\\theta}_1$\")\nplt.title(r\"Bootstrapped estimates $\\hat{\\theta}_1$ Under the Interpretable Model\");\n\n\n/Users/xiaoruiliu/anaconda3/lib/python3.11/site-packages/seaborn/_oldcore.py:1119: FutureWarning:\n\nuse_inf_as_na option is deprecated and will be removed in a future version. 
Convert inf values to NaN before operating instead.\n\n\n\n\n\n\n\n\n\n\nNotice how the interpretable model performs almost as well as our other model:\n\n\nCode\nfrom sklearn.metrics import mean_squared_error\n\nrmse = mean_squared_error(Y, model.predict(X))\nrmse_int = mean_squared_error(Y_int, model_int.predict(X_int))\nprint(f'RMSE of Original Model: {rmse}')\nprint(f'RMSE of Interpretable Model: {rmse_int}')\n\n\nRMSE of Original Model: 0.04547085380275766\nRMSE of Interpretable Model: 0.046493941375556846\n\n\nYet, the confidence interval for the true parameter \\(\\theta_{1}\\) does not contain zero.\n\n\nCode\nlower_int = np.percentile(estimates_int, 2.5)\nupper_int = np.percentile(estimates_int, 97.5)\n\nconf_interval_int = (lower_int, upper_int)\nconf_interval_int\n\n\n(0.6029335250209633, 0.8208401738546206)\n\n\nIn retrospect, it’s no surprise that the weight of an egg best predicts the weight of a newly-hatched chick.\nA model with highly correlated variables prevents us from interpreting how the variables are related to the prediction.\n\n\n19.3.3 Reminder: Assumptions Matter\nKeep the following in mind: All inference assumes that the regression model holds.\n\nIf the model doesn’t hold, the inference might not be valid.\nIf the assumptions of the bootstrap don’t hold…\n\nSample size n is large\nSample is representative of population distribution (drawn i.i.d., unbiased)\n\n…then the results of the bootstrap might not be valid.", - "crumbs": [ - "19  Causal Inference and Confounding" - ] - }, - { - "objectID": "inference_causality/inference_causality.html#bonus-content", - "href": "inference_causality/inference_causality.html#bonus-content", - "title": "19  Causal Inference and Confounding", - "section": "19.4 [Bonus Content]", - "text": "19.4 [Bonus Content]\nNote: the content in this section is out of scope.\n\n\n19.4.1 Prediction vs Causation\nThe difference between correlation/prediction vs. causation is best illustrated through examples.\nSome questions about correlation / prediction include:\n\nAre homes with granite countertops worth more money?\nIs college GPA higher for students who win a certain scholarship?\nAre breastfed babies less likely to develop asthma?\nDo cancer patients given some aggressive treatment have a higher 5-year survival rate?\nAre people who smoke more likely to get cancer?\n\nWhile these may sound like causal questions, they are not! Questions about causality are about the effects of interventions (not just passive observation). For example:\n\nHow much do granite countertops raise the value of a house?\nDoes getting the scholarship improve students’ GPAs?\nDoes breastfeeding protect babies against asthma?\nDoes the treatment improve cancer survival?\nDoes smoking cause cancer?\n\nNote, however, that regression coefficients are sometimes called “effects”, which can be deceptive!\nWhen using data alone, predictive questions (i.e., are breastfed babies healthier?) can be answered, but causal questions (i.e., does breastfeeding improve babies’ health?) cannot. The reason for this is that there are many possible causes for our predictive question. 
For example, possible explanations for why breastfed babies are healthier on average include:\n\nCausal effect: breastfeeding makes babies healthier\nReverse causality: healthier babies more likely to successfully breastfeed\nCommon cause: healthier / richer parents have healthier babies and are more likely to breastfeed\n\nWe cannot tell which explanations are true (or to what extent) just by observing (\\(x\\),\\(y\\)) pairs. Additionally, causal questions implicitly involve counterfactuals, events that didn’t happen. For example, we could ask, would the same breastfed babies have been less healthy if they hadn’t been breastfed? Explanation 1 from above implies they would be, but explanations 2 and 3 do not.\n\n\n19.4.2 Confounders\nLet T represent a treatment (for example, alcohol use) and Y represent an outcome (for example, lung cancer).\n\nA confounder is a variable that affects both T and Y, distorting the correlation between them. Using the example above, rich parents could be a confounder for breastfeeding and a baby’s health. Confounders can be a measured covariate (a feature) or an unmeasured variable we don’t know about, and they generally cause problems, as the relationship between T and Y is affected by data we cannot see. We commonly assume that all confounders are observed (this is also called ignorability).\n\n\n19.4.3 How to perform causal inference?\nIn a randomized experiment, participants are randomly assigned into two groups: treatment and control. A treatment is applied only to the treatment group. We assume ignorability and gather as many measurements as possible so that we can compare them between the control and treatment groups to determine whether or not the treatment has a true effect or is just a confounding factor.\n\nHowever, often, randomly assigning treatments is impractical or unethical. For example, assigning a treatment of cigarettes to test the effect of smoking on the lungs would not only be impractical but also unethical.\nAn alternative to bypass this issue is to utilize observational studies. This can be done by obtaining two participant groups separated based on some identified treatment variable. Unlike randomized experiments, however, we cannot assume ignorability here: the participants could have separated into two groups based on other covariates! In addition, there could also be unmeasured confounders.", - "crumbs": [ - "19  Causal Inference and Confounding" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html", - "href": "logistic_regression_1/logistic_reg_1.html", - "title": "22  Logistic Regression I", - "section": "", - "text": "22.1 Classification\nIn the next two lectures, we’ll tackle the task of classification. A classification problem aims to classify data into categories. Unlike in regression, where we predicted a numeric output, classification involves predicting some categorical variable, or response, \\(y\\). Examples of classification tasks include:\nThere are a couple of different types of classification:\nWe can further combine multiple related classfication predictions (e.g., translation, voice recognition, etc.) 
to tackle complex problems through structured prediction tasks.\nIn Data 100, we will mostly deal with binary classification, where we are attempting to classify data into one of two classes.", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html#classification", - "href": "logistic_regression_1/logistic_reg_1.html#classification", - "title": "22  Logistic Regression I", - "section": "", - "text": "Predicting which team won from its turnover percentage\nPredicting the day of the week of a meal from the total restaurant bill\nPredicting the model of car from its horsepower\n\n\n\nBinary classification: classify data into two classes, and responses \\(y\\) are either 0 or 1\nMulticlass classification: classify data into multiple classes (e.g., image labeling, next word in a sentence, etc.)\n\n\n\n\n22.1.1 Modeling Process\nTo build a classification model, we need to modify our modeling workflow slightly. Recall that in regression we:\n\nCreated a design matrix of numeric features\nDefined our model as a linear combination of these numeric features\nUsed the model to output numeric predictions\n\nIn classification, however, we no longer want to output numeric predictions; instead, we want to predict the class to which a datapoint belongs. This means that we need to update our workflow. To build a classification model, we will:\n\nCreate a design matrix of numeric features.\nDefine our model as a linear combination of these numeric features, transformed by a non-linear sigmoid function. This outputs a numeric quantity.\nApply a decision rule to interpret the outputted quantity and decide a classification.\nOutput a predicted class.\n\nThere are two key differences: as we’ll soon see, we need to incorporate a non-linear transformation to capture the non-linear relationships hidden in our data. We do so by applying the sigmoid function to a linear combination of the features. Secondly, we must apply a decision rule to convert the numeric quantities computed by our model into an actual class prediction. This can be as simple as saying that any datapoint with a feature greater than some number \\(x\\) belongs to Class 1.\nRegression:\n\n\n\nClassification:\n\n\n\nThis was a very high-level overview. Let’s walk through the process in detail to clarify what we mean.", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html#deriving-the-logistic-regression-model", - "href": "logistic_regression_1/logistic_reg_1.html#deriving-the-logistic-regression-model", - "title": "22  Logistic Regression I", - "section": "22.2 Deriving the Logistic Regression Model", - "text": "22.2 Deriving the Logistic Regression Model\nThroughout this lecture, we will work with the games dataset, which contains information about games played in the NBA basketball league. Our goal will be to use a basketball team’s \"GOAL_DIFF\" to predict whether or not a given team won their game (\"WON\"). If a team wins their game, we’ll say they belong to Class 1. 
If they lose, they belong to Class 0.\nFor those who are curious, \"GOAL_DIFF\" represents the difference in successful field goal percentages between the two competing teams.\n\n\nCode\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\nimport numpy as np\nnp.seterr(divide='ignore')\n\ngames = pd.read_csv(\"data/games\").dropna()\ngames.head()\n\n\n\n\n\n\n\n\n\nGAME_ID\nTEAM_NAME\nMATCHUP\nWON\nGOAL_DIFF\nAST\n\n\n\n\n0\n21701216\nDallas Mavericks\nDAL vs. PHX\n0\n-0.251\n20\n\n\n1\n21700846\nPhoenix Suns\nPHX @ GSW\n0\n-0.237\n13\n\n\n2\n21700071\nSan Antonio Spurs\nSAS @ ORL\n0\n-0.234\n19\n\n\n3\n21700221\nNew York Knicks\nNYK @ TOR\n0\n-0.234\n17\n\n\n4\n21700306\nMiami Heat\nMIA @ NYK\n0\n-0.222\n21\n\n\n\n\n\n\n\nLet’s visualize the relationship between \"GOAL_DIFF\" and \"WON\" using the Seaborn function sns.stripplot. A strip plot automatically introduces a small amount of random noise to jitter the data. Recall that all values in the \"WON\" column are either 1 (won) or 0 (lost) – if we were to directly plot them without jittering, we would see severe overplotting.\n\n\nCode\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.stripplot(data=games, x=\"GOAL_DIFF\", y=\"WON\", orient=\"h\", hue='WON', alpha=0.7)\n# By default, sns.stripplot plots 0, then 1. We invert the y axis to reverse this behavior\nplt.gca().invert_yaxis();\n\n\n\n\n\n\n\n\n\nThis dataset is unlike anything we’ve seen before – our target variable contains only two unique values! (Remember that each y value is either 0 or 1; the plot above jitters the y data slightly for ease of reading.)\nThe regression models we have worked with always assumed that we were attempting to predict a continuous target. If we apply a linear regression model to this dataset, something strange happens.\n\n\nCode\nimport sklearn.linear_model as lm\n\nX, Y = games[[\"GOAL_DIFF\"]], games[\"WON\"]\nregression_model = lm.LinearRegression()\nregression_model.fit(X, Y)\n\nplt.plot(X.squeeze(), regression_model.predict(X), \"k\")\nsns.stripplot(data=games, x=\"GOAL_DIFF\", y=\"WON\", orient=\"h\", hue='WON', alpha=0.7)\nplt.gca().invert_yaxis();\n\n\n\n\n\n\n\n\n\nThe linear regression fit follows the data as closely as it can. However, this approach has a key flaw - the predicted output, \\(\\hat{y}\\), can be outside the range of possible classes (there are predictions above 1 and below 0). This means that the output can’t always be interpreted (what does it mean to predict a class of -2.3?).\nOur usual linear regression framework won’t work here. Instead, we’ll need to get more creative.\n\n22.2.1 Graph of Averages\nBack in Data 8, you gradually built up to the concept of linear regression by using the graph of averages. Before you knew the mathematical underpinnings of the regression line, you took a more intuitive approach: you bucketed the \\(x\\) data into bins of common values, then computed the average \\(y\\) for all datapoints in the same bin. The result gave you the insight needed to derive the regression fit.\nLet’s take the same approach as we grapple with our new classification task. 
In the cell below, we 1) bucket the \"GOAL_DIFF\" data into bins of similar values and 2) compute the average \"WON\" value of all datapoints in a bin.\n\n# bucket the GOAL_DIFF data into 20 bins\nbins = pd.cut(games[\"GOAL_DIFF\"], 20)\ngames[\"bin\"] = [(b.left + b.right) / 2 for b in bins]\nwin_rates_by_bin = games.groupby(\"bin\")[\"WON\"].mean()\n\n# plot the graph of averages\nsns.stripplot(data=games, x=\"GOAL_DIFF\", y=\"WON\", orient=\"h\", alpha=0.5, hue='WON') # alpha makes the points transparent\nplt.plot(win_rates_by_bin.index, win_rates_by_bin, c=\"tab:red\")\nplt.gca().invert_yaxis();\n\n\n\n\n\n\n\n\nInteresting: our result is certainly not like the straight line produced by finding the graph of averages for a linear relationship. We can make two observations:\n\nAll predictions on our line are between 0 and 1\nThe predictions are non-linear, following a rough “S” shape\n\nLet’s think more about what we’ve just done.\nTo find the average \\(y\\) value for each bin, we computed:\n\\[\\frac{1 \\text{(\\# Y = 1 in bin)} + 0 \\text{(\\# Y = 0 in bin)}}{\\text{\\# datapoints in bin}} = \\frac{\\text{\\# Y = 1 in bin}}{\\text{\\# datapoints in bin}} = P(\\text{Y = 1} | \\text{bin})\\]\nThis is simply the probability of a datapoint in that bin belonging to Class 1! This aligns with our observation from earlier: all of our predictions lie between 0 and 1, just as we would expect for a probability.\nOur graph of averages was really modeling the probability, \\(p\\), that a datapoint belongs to Class 1, or essentially that \\(\\text{Y = 1}\\) for a particular value of \\(\\text{x}\\).\n\\[ p = P(Y = 1 | \\text{ x} )\\]\nIn logistic regression, we have a new modeling goal. We want to model the probability that a particular datapoint belongs to Class 1 by approximating the S-shaped curve we plotted above. However, we’ve only learned about linear modeling techniques like Linear Regression and OLS.\n\n\n22.2.2 Handling Non-Linear Output\nFortunately for us, we’re already well-versed with a technique to model non-linear relationships – we can apply non-linear transformations like log or exponents to make a non-linear relationship more linear. Recall the steps we’ve applied previously:\n\nTransform the variables until we linearize their relationship\nFit a linear model to the transformed variables\n“Undo” our transformations to identify the underlying relationship between the original variables\n\nIn past examples, we used the bulge diagram to help us decide what transformations may be useful. The S-shaped curve we saw above, however, looks nothing like any relationship we’ve seen in the past. We’ll need to think carefully about what transformations will linearize this curve.\n\n22.2.2.1 1. Odds\nLet’s consider our eventual goal: determining if we should predict a Class of 0 or 1 for each datapoint. Rephrased, we want to decide if it seems more “likely” that the datapoint belongs to Class 0 or to Class 1. One way of deciding this is to see which class has the higher predicted probability for a given datapoint. The odds is defined as the probability of a datapoint belonging to Class 1 divided by the probability of it belonging to Class 0.\n\\[\\text{odds} = \\frac{P(Y=1|x)}{P(Y=0|x)} = \\frac{p}{1-p}\\]\nIf we plot the odds for each input \"GOAL_DIFF\" (\\(x\\)), we see something that looks more promising.\n\n\nCode\np = win_rates_by_bin\nodds = p/(1-p) \n\nplt.plot(odds.index, odds)\nplt.xlabel(\"x\")\nplt.ylabel(r\"Odds $= \\frac{p}{1-p}$\");\n\n\n\n\n\n\n\n\n\n\n\n22.2.2.2 2. 
Log\nIt turns out that the relationship between our input \"GOAL_DIFF\" and the odds is roughly exponential! Let’s linearize the exponential by taking the logarithm (as suggested by the Tukey-Mosteller Bulge Diagram). As a reminder, you should assume that any logarithm in Data 100 is the base \\(e\\) natural logarithm unless told otherwise.\n\n\nCode\nimport numpy as np\nlog_odds = np.log(odds)\nplt.plot(odds.index, log_odds, c=\"tab:green\")\nplt.xlabel(\"x\")\nplt.ylabel(r\"Log-Odds $= \\log{\\frac{p}{1-p}}$\");\n\n\n\n\n\n\n\n\n\n\n\n22.2.2.3 3. Putting it Together\nWe see something promising – the relationship between the log-odds and our input feature is approximately linear. This means that we can use a linear model to describe the relationship between the log-odds and \\(x\\). In other words:\n\\[\\begin{align}\n\\log{(\\frac{p}{1-p})} &= \\theta_0 + \\theta_1 x_1 + ... + \\theta_p x_p\\\\\n&= x^{\\top} \\theta\n\\end{align}\\]\nHere, we use \\(x^{\\top}\\) to represent an observation in our dataset, stored as a row vector. You can imagine it as a single row in our design matrix. \\(x^{\\top} \\theta\\) indicates a linear combination of the features for this observation (just as we used in multiple linear regression).\nWe’re in good shape! We have now derived the following relationship:\n\\[\\log{(\\frac{p}{1-p})} = x^{\\top} \\theta\\]\nRemember that our goal is to predict the probability of a datapoint belonging to Class 1, \\(p\\). Let’s rearrange this relationship to uncover the original relationship between \\(p\\) and our input data, \\(x^{\\top}\\).\n\\[\\begin{align}\n\\log{(\\frac{p}{1-p})} &= x^T \\theta\\\\\n\\frac{p}{1-p} &= e^{x^T \\theta}\\\\\np &= (1-p)e^{x^T \\theta}\\\\\np &= e^{x^T \\theta}- p e^{x^T \\theta}\\\\\np(1 + e^{x^T \\theta}) &= e^{x^T \\theta} \\\\\np &= \\frac{e^{x^T \\theta}}{1+e^{x^T \\theta}}\\\\\np &= \\frac{1}{1+e^{-x^T \\theta}}\\\\\n\\end{align}\\]\nPhew, that was a lot of algebra. What we’ve uncovered is the logistic regression model to predict the probability of a datapoint \\(x^{\\top}\\) belonging to Class 1. If we plot this relationship for our data, we see the S-shaped curve from earlier!\n\n\nCode\n# We'll discuss the `LogisticRegression` class next time\nxs = np.linspace(-0.3, 0.3)\n\nlogistic_model = lm.LogisticRegression(C=20)\nlogistic_model.fit(X, Y)\npredicted_prob = logistic_model.predict_proba(xs[:, np.newaxis])[:, 1]\n\nsns.stripplot(data=games, x=\"GOAL_DIFF\", y=\"WON\", orient=\"h\", alpha=0.5)\nplt.plot(xs, predicted_prob, c=\"k\", lw=3, label=\"Logistic regression model\")\nplt.plot(win_rates_by_bin.index, win_rates_by_bin, lw=2, c=\"tab:red\", label=\"Graph of averages\")\nplt.legend(loc=\"upper left\")\nplt.gca().invert_yaxis();\n\n\n\n\n\n\n\n\n\nThe S-shaped curve is formally known as the sigmoid function and is typically denoted by \\(\\sigma\\).\n\\[\\sigma(t) = \\frac{1}{1+e^{-t}}\\]\n\n\n\n\n\n\nProperties of the Sigmoid\n\n\n\n\nReflection/Symmetry: \\[1-\\sigma(t) = \\frac{e^{-t}}{1+e^{-t}}=\\sigma(-t)\\]\nInverse: \\[t=\\sigma^{-1}(p)=\\log{(\\frac{p}{1-p})}\\]\nDerivative: \\[\\frac{d}{dz} \\sigma(t) = \\sigma(t) (1-\\sigma(t))=\\sigma(t)\\sigma(-t)\\]\nDomain: \\(-\\infty < t < \\infty\\)\nRange: \\(0 < \\sigma(t) < 1\\)\n\n\n\nIn the context of our modeling process, the sigmoid is considered an activation function. 
It takes in a linear combination of the features and applies a non-linear transformation.", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html#the-logistic-regression-model", - "href": "logistic_regression_1/logistic_reg_1.html#the-logistic-regression-model", - "title": "22  Logistic Regression I", - "section": "22.3 The Logistic Regression Model", - "text": "22.3 The Logistic Regression Model\nTo predict a probability using the logistic regression model, we:\n\nCompute a linear combination of the features, \\(x^{\\top}\\theta\\)\nApply the sigmoid activation function, \\(\\sigma(x^{\\top} \\theta)\\).\n\nOur predicted probabilities are of the form \\(P(Y=1|x) = p = \\frac{1}{1+e^{-x^T \\theta}} = \\frac{1}{1+e^{-(\\theta_0 + \\theta_1 x_1 + \\theta_2 x_2 + \\ldots + \\theta_p x_p)}}\\)\nAn important note: despite its name, logistic regression is used for classification tasks, not regression tasks. In Data 100, we always apply logistic regression with the goal of classifying data.\nLet’s summarize our logistic regression modeling workflow:\n\n\n\nOur main takeaways from this section are:\n\nAssume log-odds is a linear combination of \\(x\\) and \\(\\theta\\)\nFit the “S” curve as best as possible\nThe curve models the probability: \\(P = (Y=1 | x)\\)\n\nPutting this together, we know that the estimated probability that response is 1 given the features \\(x\\) is equal to the logistic function \\(\\sigma()\\) at the value \\(x^{\\top}\\theta\\):\n\\[\\begin{align}\n\\hat{P}_{\\theta}(Y = 1 | x) = \\frac{1}{1 + e^{-x^{\\top}\\theta}}\n\\end{align}\\]\nMore commonly, the logistic regression model is written as:\n\\[\\begin{align}\n\\hat{P}_{\\theta}(Y = 1 | x) = \\sigma(x^{\\top}\\theta)\n\\end{align}\\]\n\n\n\n\n\n\nProperties of the Logistic Model\n\n\n\nConsider a logistic regression model with one feature and an intercept term:\n\\[\\begin{align}\np = P(Y = 1 | x) = \\frac{1}{1+e^{-(\\theta_0 + \\theta_1 x)}}\n\\end{align}\\]\nProperties:\n\n\\(\\theta_0\\) controls the position of the curve along the horizontal axis\nThe magnitude of \\(\\theta_1\\) controls the “steepness” of the sigmoid\nThe sign of \\(\\theta_1\\) controls the orientation of the curve\n\n\n\n\n\n\n\n\n\nExample Calculation\n\n\n\nSuppose we want to predict the probability that a team wins a game, given \"GOAL_DIFF\" (first feature) and the number of free throws (second feature). Let’s say we fit a logistic regression model (with no intercept) using the training data and estimate the optimal parameters. Now we want to predict the probability that a new team will win their game.\n\\[\\begin{align}\n\\hat{\\theta}^{\\top} = \\begin{matrix}[0.1 & -0.5]\\end{matrix}\n\\\\x^{\\top} = \\begin{matrix}[15 & 1]\\end{matrix}\n\\end{align}\\]\n\\[\\begin{align}\n\\hat{P}_{\\hat{\\theta}}(Y = 1 | x) = \\sigma(x^{\\top}\\hat{\\theta}) = \\sigma(0.1 \\cdot 15 + (-0.5) \\cdot 1) = \\sigma(1) = \\frac{1}{1+e^{-1}} \\approx 0.7311\n\\end{align}\\]\nWe see that the response is more likely to be 1 than 0, indicating that a reasonable prediction is \\(\\hat{y} = 1\\). 
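The example calculation above is easy to reproduce directly (a quick check, not from the notes):

```python
import numpy as np

def sigmoid(t):
    return 1 / (1 + np.exp(-t))

theta_hat = np.array([0.1, -0.5])   # estimated parameters from the example
x = np.array([15, 1])               # new observation: GOAL_DIFF and free throws

p_hat = sigmoid(x @ theta_hat)      # sigma(x^T theta) = sigma(1)
print(p_hat)                        # approximately 0.7311, so predict class 1
```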
We’ll dive deeper into this in the next lecture.", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html#cross-entropy-loss", - "href": "logistic_regression_1/logistic_reg_1.html#cross-entropy-loss", - "title": "22  Logistic Regression I", - "section": "22.4 Cross-Entropy Loss", - "text": "22.4 Cross-Entropy Loss\nTo quantify the error of our logistic regression model, we’ll need to define a new loss function.\n\n22.4.1 Why Not MSE?\nYou may wonder: why not use our familiar mean squared error? It turns out that the MSE is not well suited for logistic regression. To see why, let’s consider a simple, artificially generated toy dataset with just one feature (this will be easier to work with than the more complicated games data).\n\n\nCode\ntoy_df = pd.DataFrame({\n \"x\": [-4, -2, -0.5, 1, 3, 5],\n \"y\": [0, 0, 1, 0, 1, 1]})\ntoy_df.head()\n\n\n\n\n\n\n\n\n\nx\ny\n\n\n\n\n0\n-4.0\n0\n\n\n1\n-2.0\n0\n\n\n2\n-0.5\n1\n\n\n3\n1.0\n0\n\n\n4\n3.0\n1\n\n\n\n\n\n\n\nWe’ll construct a basic logistic regression model with only one feature and no intercept term. Our predicted probabilities take the form:\n\\[p=P(Y=1|x)=\\frac{1}{1+e^{-\\theta_1 x}}\\]\nIn the cell below, we plot the MSE for our model on the data.\n\n\nCode\ndef sigmoid(z):\n return 1/(1+np.e**(-z))\n \ndef mse_on_toy_data(theta):\n p_hat = sigmoid(toy_df['x'] * theta)\n return np.mean((toy_df['y'] - p_hat)**2)\n\nthetas = np.linspace(-15, 5, 100)\nplt.plot(thetas, [mse_on_toy_data(theta) for theta in thetas])\nplt.title(\"MSE on toy classification data\")\nplt.xlabel(r'$\\theta_1$')\nplt.ylabel('MSE');\n\n\n\n\n\n\n\n\n\nThis looks nothing like the parabola we found when plotting the MSE of a linear regression model! In particular, we can identify two flaws with using the MSE for logistic regression:\n\nThe MSE loss surface is non-convex. There is both a global minimum and a (barely perceptible) local minimum in the loss surface above. This means that there is the risk of gradient descent converging on the local minimum of the loss surface, missing the true optimum parameter \\(\\theta_1\\).\n\n\n\nSquared loss is bounded for a classification task. Recall that each true \\(y\\) has a value of either 0 or 1. This means that even if our model makes the worst possible prediction (e.g. predicting \\(p=0\\) for \\(y=1\\)), the squared loss for an observation will be no greater than 1: \\[(y-p)^2=(1-0)^2=1\\] The MSE does not strongly penalize poor predictions.\n\n\n\n\n\n\n22.4.2 Motivating Cross-Entropy Loss\nSuffice to say, we don’t want to use the MSE when working with logistic regression. Instead, we’ll consider what kind of behavior we would like to see in a loss function.\nLet \\(y\\) be the binary label (it can either be 0 or 1), and \\(p\\) be the model’s predicted probability of the label \\(y\\) being 1.\n\nWhen the true \\(y\\) is 1, we should incur low loss when the model predicts large \\(p\\)\nWhen the true \\(y\\) is 0, we should incur high loss when the model predicts large \\(p\\)\n\nIn other words, our loss function should behave differently depending on the value of the true class, \\(y\\).\nThe cross-entropy loss incorporates this changing behavior. We will use it throughout our work on logistic regression. 
Below, we write out the cross-entropy loss for a single datapoint (no averages just yet).\n\\[\\text{Cross-Entropy Loss} = \\begin{cases}\n -\\log{(p)} & \\text{if } y=1 \\\\\n -\\log{(1-p)} & \\text{if } y=0\n\\end{cases}\\]\nWhy does this (seemingly convoluted) loss function “work”? Let’s break it down.\n\n\n\n\n\n\n\nWhen \\(y=1\\)\nWhen \\(y=0\\)\n\n\n\n\n\n\n\n\nAs \\(p \\rightarrow 0\\), loss approaches \\(\\infty\\)\nAs \\(p \\rightarrow 0\\), loss approaches 0\n\n\nAs \\(p \\rightarrow 1\\), loss approaches 0\nAs \\(p \\rightarrow 1\\), loss approaches \\(\\infty\\)\n\n\n\n\nAll good – we are seeing the behavior we want for our logistic regression model.\nThe piecewise function we outlined above is difficult to optimize: we don’t want to constantly “check” which form of the loss function we should be using at each step of choosing the optimal model parameters. We can re-express cross-entropy loss in a more convenient way:\n\\[\\text{Cross-Entropy Loss} = -\\left(y\\log{(p)}+(1-y)\\log{(1-p)}\\right)\\]\nBy setting \\(y\\) to 0 or 1, we see that this new form of cross-entropy loss gives us the same behavior as the original formulation. Another way to think about this is that in either scenario (y being equal to 0 or 1), only one of the cross-entropy loss terms is activated, which gives us a convenient way to combine the two independent loss functions.\n\n\nWhen \\(y=1\\):\n\\[\\begin{align}\n\\text{CE} &= -\\left((1)\\log{(p)}+(1-1)\\log{(1-p)}\\right)\\\\\n&= -\\log{(p)}\n\\end{align}\\]\n\n\n\nWhen \\(y=0\\):\n\\[\\begin{align}\n\\text{CE} &= -\\left((0)\\log{(p)}+(1-0)\\log{(1-p)}\\right)\\\\\n&= -\\log{(1-p)}\n\\end{align}\\]\n\n\nThe empirical risk of the logistic regression model is then the mean cross-entropy loss across all datapoints in the dataset. When fitting the model, we want to determine the model parameter \\(\\theta\\) that leads to the lowest mean cross-entropy loss possible.\n\\[\n\\begin{align}\nR(\\theta) &= - \\frac{1}{n} \\sum_{i=1}^n \\left(y_i\\log{(p_i)}+(1-y_i)\\log{(1-p_i)}\\right) \\\\\n&= - \\frac{1}{n} \\sum_{i=1}^n \\left(y_i\\log{\\sigma(X_i^{\\top}\\theta)}+(1-y_i)\\log{(1-\\sigma(X_i^{\\top}\\theta))}\\right)\n\\end{align}\n\\]\nThe optimization problem is therefore to find the estimate \\(\\hat{\\theta}\\) that minimizes \\(R(\\theta)\\):\n\\[\n\\hat{\\theta} = \\underset{\\theta}{\\arg\\min} - \\frac{1}{n} \\sum_{i=1}^n \\left(y_i\\log{(\\sigma(X_i^{\\top}\\theta))}+(1-y_i)\\log{(1-\\sigma(X_i^{\\top}\\theta))}\\right)\n\\]\nPlotting the cross-entropy loss surface for our toy dataset gives us a more encouraging result – our loss function is now convex. This means we can optimize it using gradient descent. Computing the gradient of the logistic model is fairly challenging, so we’ll let sklearn take care of this for us. 
You won’t need to compute the gradient of the logistic model in Data 100.\n\n\nCode\ndef cross_entropy(y, p_hat):\n return - y * np.log(p_hat) - (1 - y) * np.log(1 - p_hat)\n\ndef mean_cross_entropy_on_toy_data(theta):\n p_hat = sigmoid(toy_df['x'] * theta)\n return np.mean(cross_entropy(toy_df['y'], p_hat))\n\nplt.plot(thetas, [mean_cross_entropy_on_toy_data(theta) for theta in thetas], color = 'green')\nplt.ylabel(r'Mean Cross-Entropy Loss($\\theta$)')\nplt.xlabel(r'$\\theta$');", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_1/logistic_reg_1.html#maximum-likelihood-estimation", - "href": "logistic_regression_1/logistic_reg_1.html#maximum-likelihood-estimation", - "title": "22  Logistic Regression I", - "section": "22.5 Maximum Likelihood Estimation", - "text": "22.5 Maximum Likelihood Estimation\nIt may have seemed like we pulled cross-entropy loss out of thin air. How did we know that taking the negative logarithms of our probabilities would work so well? It turns out that cross-entropy loss is justified by probability theory.\nThe following section is out of scope, but is certainly an interesting read!\n\n22.5.1 Building Intuition: The Coin Flip\nTo build some intuition for logistic regression, let’s look at an introductory example to classification: the coin flip. Suppose we observe some outcomes of a coin flip (1 = Heads, 0 = Tails).\n\nflips = [0, 0, 1, 1, 1, 1, 0, 0, 0, 0]\nflips\n\n[0, 0, 1, 1, 1, 1, 0, 0, 0, 0]\n\n\nA reasonable model is to assume all flips are IID (independent and identically distributed). In other words, each flip has the same probability of returning a 1 (or heads). Let’s define a parameter \\(\\theta\\), the probability that the next flip is a heads. We will use this parameter to inform our decision for \\(\\hat y\\) (predicting either 0 or 1) of the next flip. If \\(\\theta \\ge 0.5, \\hat y = 1, \\text{else } \\hat y = 0\\).\nYou may be inclined to say \\(0.5\\) is the best choice for \\(\\theta\\). However, notice that we made no assumption about the coin itself. The coin may be biased, so we should make our decision based only on the data. We know that exactly \\(\\frac{4}{10}\\) of the flips were heads, so we might guess \\(\\hat \\theta = 0.4\\). In the next section, we will mathematically prove why this is the best possible estimate.\n\n\n22.5.2 Likelihood of Data\nLet’s call the result of the coin flip a random variable \\(Y\\). This is a Bernoulli random variable with two outcomes. \\(Y\\) has the following distribution:\n\\[P(Y = y) = \\begin{cases}\n p, \\text{if } y=1\\\\\n 1 - p, \\text{if } y=0\n \\end{cases} \\]\n\\(p\\) is unknown to us. But we can find the \\(p\\) that makes the data we observed the most likely.\nThe probability of observing 4 heads and 6 tails follows the binomial distribution.\n\\[\\binom{10}{4} (p)^4 (1-p)^6\\]\nWe define the likelihood of obtaining our observed data as a quantity proportional to the probability above. To find it, simply multiply the probabilities of obtaining each coin flip.\n\\[(p)^{4} (1-p)^6\\]\nThe technique known as maximum likelihood estimation finds the \\(p\\) that maximizes the above likelihood. 
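As a small numerical sketch (a grid search we add here for illustration; it is not part of the original derivation), we can confirm that this likelihood is largest near \\(p = 0.4\\):

import numpy as np

p_grid = np.linspace(0.001, 0.999, 1000)   # candidate values of p
likelihood = p_grid**4 * (1 - p_grid)**6   # likelihood of observing 4 heads and 6 tails
print(p_grid[np.argmax(likelihood)])       # approximately 0.4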
You can find this maximum by taking the derivative of the likelihood, but we’ll provide a more intuitive graphical solution.\n\nthetas = np.linspace(0, 1)\nplt.plot(thetas, (thetas**4)*(1-thetas)**6)\nplt.xlabel(r\"$\\theta$\")\nplt.ylabel(\"Likelihood\");\n\n\n\n\n\n\n\n\nMore generally, the likelihood for some Bernoulli(\\(p\\)) random variable \\(Y\\) is:\n\\[P(Y = y) = \\begin{cases}\n 1, \\text{with probability } p\\\\\n 0, \\text{with probability } 1 - p\n \\end{cases} \\]\nEquivalently, this can be written in a compact way:\n\\[P(Y=y) = p^y(1-p)^{1-y}\\]\n\nWhen \\(y = 1\\), this reads \\(P(Y=y) = p\\)\nWhen \\(y = 0\\), this reads \\(P(Y=y) = (1-p)\\)\n\nIn our example, a Bernoulli random variable is analogous to a single data point (e.g., one instance of a basketball team winning or losing a game). All together, our games data consists of many IID Bernoulli(\\(p\\)) random variables. To find the likelihood of independent events in succession, simply multiply their likelihoods.\n\\[\\prod_{i=1}^{n} p^{y_i} (1-p)^{1-y_i}\\]\nAs with the coin example, we want to find the parameter \\(p\\) that maximizes this likelihood. Earlier, we gave an intuitive graphical solution, but let’s take the derivative of the likelihood to find this maximum.\nAt a first glance, this derivative will be complicated! We will have to use the product rule, followed by the chain rule. Instead, we can make an observation that simplifies the problem.\nFinding the \\(p\\) that maximizes \\[\\prod_{i=1}^{n} p^{y_i} (1-p)^{1-y_i}\\] is equivalent to the \\(p\\) that maximizes \\[\\text{log}(\\prod_{i=1}^{n} p^{y_i} (1-p)^{1-y_i})\\]\nThis is because \\(\\text{log}\\) is a strictly increasing function. It won’t change the maximum or minimum of the function it was applied to. From \\(\\text{log}\\) properties, \\(\\text{log}(a*b)\\) = \\(\\text{log}(a) + \\text{log}(b)\\). We can apply this to our equation above to get:\n\\[\\underset{p}{\\text{argmax}} \\sum_{i=1}^{n} \\text{log}(p^{y_i} (1-p)^{1-y_i})\\]\n\\[= \\underset{p}{\\text{argmax}} \\sum_{i=1}^{n} (\\text{log}(p^{y_i}) + \\text{log}((1-p)^{1-y_i}))\\]\n\\[= \\underset{p}{\\text{argmax}} \\sum_{i=1}^{n} (y_i\\text{log}(p) + (1-y_i)\\text{log}(1-p))\\]\nWe can add a constant factor of \\(\\frac{1}{n}\\) out front. It won’t affect the \\(p\\) that maximizes our likelihood.\n\\[=\\underset{p}{\\text{argmax}} \\frac{1}{n} \\sum_{i=1}^{n} y_i\\text{log}(p) + (1-y_i)\\text{log}(1-p)\\]\nOne last “trick” we can do is change this to a minimization problem by negating the result. This works because we are dealing with a concave function, which can be made convex.\n\\[= \\underset{p}{\\text{argmin}} -\\frac{1}{n} \\sum_{i=1}^{n} y_i\\text{log}(p) + (1-y_i)\\text{log}(1-p)\\]\nNow let’s say that we have data that are independent with different probability \\(p_i\\). Then, we would want to find the \\(p_1, p_2, \\dots, p_n\\) that maximize \\[\\prod_{i=1}^{n} p_i^{y_i} (1-p_i)^{1-y_i}\\]\nSetting up and simplifying the optimization problems as we did above, we ultimately want to find:\n\\[= \\underset{p}{\\text{argmin}} -\\frac{1}{n} \\sum_{i=1}^{n} y_i\\text{log}(p_i) + (1-y_i)\\text{log}(1-p_i)\\]\nFor logistic regression, \\(p_i = \\sigma(x^{\\top}\\theta)\\). Plugging that in, we get:\n\\[= \\underset{p}{\\text{argmin}} -\\frac{1}{n} \\sum_{i=1}^{n} y_i\\text{log}(\\sigma(x^{\\top}\\theta)) + (1-y_i)\\text{log}(1-\\sigma(x^{\\top}\\theta))\\]\nThis is exactly our average cross-entropy loss minimization problem from before!\nWhy did we do all this complicated math? 
We have shown that minimizing cross-entropy loss is equivalent to maximizing the likelihood of the training data.\n\nBy minimizing cross-entropy loss, we are choosing the model parameters that are “most likely” for the data we observed.\n\nNote that this is under the assumption that all data is drawn independently from the same logistic regression model with parameter \\(\\theta\\). In fact, many of the model + loss combinations we’ve seen can be motivated using MLE (e.g., OLS, Ridge Regression, etc.). In probability and ML classes, you’ll get the chance to explore MLE further.", - "crumbs": [ - "22  Logistic Regression I" - ] - }, - { - "objectID": "logistic_regression_2/logistic_reg_2.html", - "href": "logistic_regression_2/logistic_reg_2.html", - "title": "23  Logistic Regression II", - "section": "", - "text": "23.1 Decision Boundaries\nIn logistic regression, we model the probability that a datapoint belongs to Class 1.\nLast week, we developed the logistic regression model to predict that probability, but we never actually made any classifications for whether our prediction \\(y\\) belongs in Class 0 or Class 1.\n\\[ p = P(Y=1 | x) = \\frac{1}{1 + e^{-x^{\\top}\\theta}}\\]\nA decision rule tells us how to interpret the output of the model to make a decision on how to classify a datapoint. We commonly make decision rules by specifying a threshold, \\(T\\). If the predicted probability is greater than or equal to \\(T\\), predict Class 1. Otherwise, predict Class 0.\n\\[\\hat y = \\text{classify}(x) = \\begin{cases}\n 1, & P(Y=1|x) \\ge T\\\\\n 0, & \\text{otherwise }\n \\end{cases}\\]\nThe threshold is often set to \\(T = 0.5\\), but not always. We’ll discuss why we might want to use other thresholds \\(T \\neq 0.5\\) later in this lecture.\nUsing our decision rule, we can define a decision boundary as the “line” that splits the data into classes based on its features. For logistic regression, since we are working in \\(p\\) dimensions, the decision boundary is a hyperplane – a linear combination of the features in \\(p\\)-dimensions – and we can recover it from the final logistic regression model. For example, if we have a model with 2 features (2D), we have \\(\\theta = [\\theta_0, \\theta_1, \\theta_2]\\) including the intercept term, and we can solve for the decision boundary like so:\n\\[\n\\begin{align}\nT &= \\frac{1}{1 + e^{-(\\theta_0 + \\theta_1 * \\text{feature1} + \\theta_2 * \\text{feature2})}} \\\\\n1 + e^{-(\\theta_0 + \\theta_1 \\cdot \\text{feature1} + \\theta_2 \\cdot \\text{feature2})} &= \\frac{1}{T} \\\\\ne^{-(\\theta_0 + \\theta_1 \\cdot \\text{feature1} + \\theta_2 \\cdot \\text{feature2})} &= \\frac{1}{T} - 1 \\\\\n\\theta_0 + \\theta_1 \\cdot \\text{feature1} + \\theta_2 \\cdot \\text{feature2} &= -\\log(\\frac{1}{T} - 1)\n\\end{align}\n\\]\nFor a model with 2 features, the decision boundary is a line in terms of its features. To make it easier to visualize, we’ve included an example of a 1-dimensional and a 2-dimensional decision boundary below. Notice how the decision boundary predicted by our logistic regression model perfectly separates the points into two classes. Here the color is the predicted class, rather than the true class.\nIn real life, however, that is often not the case, and we often see some overlap between points of different classes across the decision boundary. The true classes of the 2D data are shown below:\nAs you can see, the decision boundary predicted by our logistic regression does not perfectly separate the two classes. 
There’s a “muddled” region near the decision boundary where our classifier predicts the wrong class. What would the data have to look like for the classifier to make perfect predictions?", - "crumbs": [ - "23  Logistic Regression II" - ] - }, - { - "objectID": "logistic_regression_2/logistic_reg_2.html#linear-separability-and-regularization", - "href": "logistic_regression_2/logistic_reg_2.html#linear-separability-and-regularization", - "title": "23  Logistic Regression II", - "section": "23.2 Linear Separability and Regularization", - "text": "23.2 Linear Separability and Regularization\nA classification dataset is said to be linearly separable if there exists a hyperplane among input features \\(x\\) that separates the two classes \\(y\\).\nLinear separability in 1D can be found with a rugplot of a single feature where a point perfectly separates the classes (Remember that in 1D, our decision boundary is just a point). For example, notice how the plot on the bottom left is linearly separable along the vertical line \\(x=0\\). However, no such line perfectly separates the two classes on the bottom right.\n\n\n\nThis same definition holds in higher dimensions. If there are two features, the separating hyperplane must exist in two dimensions (any line of the form \\(y=mx+b\\)). We can visualize this using a scatter plot.\n\n\n\nThis sounds great! When the dataset is linearly separable, a logistic regression classifier can perfectly assign datapoints into classes. Can it achieve 0 cross-entropy loss?\n\\[-(y \\log(p) + (1 - y) \\log(1 - p))\\]\nCross-entropy loss is 0 if \\(p = 1\\) when \\(y = 1\\), and \\(p = 0\\) when \\(y = 0\\). Consider a simple model with one feature and no intercept.\n\\[P_{\\theta}(Y = 1|x) = \\sigma(\\theta x) = \\frac{1}{1 + e^{-\\theta x}}\\]\nWhat \\(\\theta\\) will achieve 0 loss if we train on the datapoint \\(x = 1, y = 1\\)? We would want \\(p = 1\\) which occurs when \\(\\theta \\rightarrow \\infty\\).\nHowever, (unexpected) complications may arise. When data is linearly separable, the optimal model parameters diverge to \\(\\pm \\infty\\). The sigmoid can never output exactly 0 or 1, so no finite optimal \\(\\theta\\) exists. This can be a problem when using gradient descent to fit the model. Consider a simple, linearly separable “toy” dataset with two datapoints.\n\n\n\nLet’s also visualize the mean cross entropy loss along with the direction of the gradient (how this loss surface is calculated is out of scope).\n\n\n\nIt’s nearly impossible to see, but the plateau to the right is slightly tilted. Because gradient descent follows the tilted loss surface downwards, it never converges.\nThe diverging weights cause the model to be overconfident. Say we add a new point \\((x, y) = (-0.5, 1)\\). Following the behavior above, our model will incorrectly predict \\(p=0\\), and thus, \\(\\hat y = 0\\).\n\n\n\n The loss incurred by this misclassified point is infinite.\n\\[-(y\\text{ log}(p) + (1-y)\\text{ log}(1-p))=1 * \\text{log}(0)\\]\nThus, diverging weights (\\(|\\theta| \\rightarrow \\infty\\)) occur with linearly separable data. “Overconfidence”, as shown here, is a particularly dangerous version of overfitting.\n\n23.2.1 Regularized Logistic Regression\nTo avoid large weights and infinite loss (particularly on linearly separable data), we use regularization. 
The same principles apply as with linear regression - make sure to standardize your features first.\nFor example, \\(L2\\) (Ridge) Logistic Regression takes on the form:\n\\[\\min_{\\theta} -\\frac{1}{n} \\sum_{i=1}^{n} (y_i \\text{log}(\\sigma(X_i^T\\theta)) + (1-y_i)\\text{log}(1-\\sigma(X_i^T\\theta))) + \\lambda \\sum_{j=1}^{d} \\theta_j^2\\]\nNow, let us compare the loss functions of un-regularized and regularized logistic regression.\n\n\n\n\n\n\nAs we can see, \\(L2\\) regularization helps us prevent diverging weights and deters against “overconfidence.”\nsklearn’s logistic regression defaults to \\(L2\\) regularization and C=1.0; C is the inverse of \\(\\lambda\\): \\[C = \\frac{1}{\\lambda}\\] Setting C to a large value, for example, C=300.0, results in minimal regularization.\n# sklearn defaults\nmodel = LogisticRegression(penalty = 'l2', C = 1.0, ...)\nmodel.fit()\nNote that in Data 100, we only use sklearn to fit logistic regression models. There is no closed-form solution to the optimal theta vector, and the gradient is a little messy (see the bonus section below for details).\nFrom here, the .predict function returns the predicted class \\(\\hat y\\) of the point. In the simple binary case where the threshold is 0.5,\n\\[\\hat y = \\begin{cases}\n 1, & P(Y=1|x) \\ge 0.5\\\\\n 0, & \\text{otherwise }\n \\end{cases}\\]", - "crumbs": [ - "23  Logistic Regression II" - ] - }, - { - "objectID": "logistic_regression_2/logistic_reg_2.html#performance-metrics", - "href": "logistic_regression_2/logistic_reg_2.html#performance-metrics", - "title": "23  Logistic Regression II", - "section": "23.3 Performance Metrics", - "text": "23.3 Performance Metrics\nYou might be thinking, if we’ve already introduced cross-entropy loss, why do we need additional ways of assessing how well our models perform? In linear regression, we made numerical predictions and used a loss function to determine how “good” these predictions were. In logistic regression, our ultimate goal is to classify data – we are much more concerned with whether or not each datapoint was assigned the correct class using the decision rule. As such, we are interested in the quality of classifications, not the predicted probabilities.\nThe most basic evaluation metric is accuracy, that is, the proportion of correctly classified points.\n\\[\\text{accuracy} = \\frac{\\# \\text{ of points classified correctly}}{\\# \\text{ of total points}}\\]\nTranslated to code:\ndef accuracy(X, Y):\n return np.mean(model.predict(X) == Y)\n \nmodel.score(X, y) # built-in accuracy function\nYou can find the sklearn documentation here.\nHowever, accuracy is not always a great metric for classification. To understand why, let’s consider a classification problem with 100 emails where only 5 are truly spam, and the remaining 95 are truly ham. We’ll investigate two models where accuracy is a poor metric.\n\nModel 1: Our first model classifies every email as non-spam. The model’s accuracy is high (\\(\\frac{95}{100} = 0.95\\)), but it doesn’t detect any spam emails. Despite the high accuracy, this is a bad model.\nModel 2: The second model classifies every email as spam. The accuracy is low (\\(\\frac{5}{100} = 0.05\\)), but the model correctly labels every spam email. 
Unfortunately, it also misclassifies every non-spam email.\n\nAs this example illustrates, accuracy is not always a good metric for classification, particularly when your data could exhibit class imbalance (e.g., very few 1’s compared to 0’s).\n\n23.3.1 Types of Classification\nThere are 4 different classifications that our model might make:\n\nTrue positive: correctly classify a positive point as being positive (\\(y=1\\) and \\(\\hat{y}=1\\))\nTrue negative: correctly classify a negative point as being negative (\\(y=0\\) and \\(\\hat{y}=0\\))\nFalse positive: incorrectly classify a negative point as being positive (\\(y=0\\) and \\(\\hat{y}=1\\))\nFalse negative: incorrectly classify a positive point as being negative (\\(y=1\\) and \\(\\hat{y}=0\\))\n\nThese classifications can be concisely summarized in a confusion matrix.\n\n\n\nAn easy way to remember this terminology is as follows:\n\nLook at the second word in the phrase. Positive means a prediction of 1. Negative means a prediction of 0.\nLook at the first word in the phrase. True means our prediction was correct. False means it was incorrect.\n\nWe can now write the accuracy calculation as \\[\\text{accuracy} = \\frac{TP + TN}{n}\\]\nIn sklearn, we use the following syntax to plot a confusion matrix:\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y_true, Y_pred)\n\n\n\n\n\n23.3.2 Accuracy, Precision, and Recall\nThe purpose of our discussion of the confusion matrix was to motivate better performance metrics for classification problems with class imbalance - namely, precision and recall.\nPrecision is defined as\n\\[\\text{precision} = \\frac{\\text{TP}}{\\text{TP + FP}}\\]\nPrecision answers the question: “Of all observations that were predicted to be \\(1\\), what proportion was actually \\(1\\)?” It measures how accurate the classifier is when its predictions are positive.\nRecall (or sensitivity) is defined as\n\\[\\text{recall} = \\frac{\\text{TP}}{\\text{TP + FN}}\\]\nRecall aims to answer: “Of all observations that were actually \\(1\\), what proportion was predicted to be \\(1\\)?” It measures how many of the truly positive observations the classifier missed.\nHere’s a helpful graphic that summarizes our discussion above.\n\n\n\n\n\n23.3.3 Example Calculation\nIn this section, we will calculate the accuracy, precision, and recall performance metrics for our earlier spam classification example. As a reminder, we had 100 emails, 5 of which were spam. We designed two models:\n\nModel 1: Predict that every email is non-spam\nModel 2: Predict that every email is spam\n\n\n23.3.3.1 Model 1\nFirst, let’s begin by creating the confusion matrix.\n\n\n\n\n\n\n\n\n\n0\n1\n\n\n\n\n0\nTrue Negative: 95\nFalse Positive: 0\n\n\n1\nFalse Negative: 5\nTrue Positive: 0\n\n\n\n\\[\\text{accuracy} = \\frac{95}{100} = 0.95\\] \\[\\text{precision} = \\frac{0}{0 + 0} = \\text{undefined}\\] \\[\\text{recall} = \\frac{0}{0 + 5} = 0\\]\nNotice how our precision is undefined because we never predicted class \\(1\\). 
Our recall is 0 for the same reason – the numerator is 0 (we had no positive predictions).\n\n\n23.3.3.2 Model 2\nThe confusion matrix for Model 2 is:\n\n\n\n\n\n\n\n\n\n0\n1\n\n\n\n\n0\nTrue Negative: 0\nFalse Positive: 95\n\n\n1\nFalse Negative: 0\nTrue Positive: 5\n\n\n\n\\[\\text{accuracy} = \\frac{5}{100} = 0.05\\] \\[\\text{precision} = \\frac{5}{5 + 95} = 0.05\\] \\[\\text{recall} = \\frac{5}{5 + 0} = 1\\]\nOur precision is low because we have many false positives, and our recall is perfect - we correctly classified all spam emails (we never predicted class \\(0\\)).\n\n\n\n23.3.4 Precision vs. Recall\nPrecision (\\(\\frac{\\text{TP}}{\\text{TP} + \\textbf{ FP}}\\)) penalizes false positives, while recall (\\(\\frac{\\text{TP}}{\\text{TP} + \\textbf{ FN}}\\)) penalizes false negatives. In fact, precision and recall are inversely related. This is evident in our second model – we observed a high recall and low precision. Usually, there is a tradeoff in these two (most models can either minimize the number of FP or FN; and in rare cases, both).\nThe specific performance metric(s) to prioritize depends on the context. In many medical settings, there might be a much higher cost to missing positive cases. For instance, in our breast cancer example, it is more costly to misclassify malignant tumors (false negatives) than it is to incorrectly classify a benign tumor as malignant (false positives). In the case of the latter, pathologists can conduct further studies to verify malignant tumors. As such, we should minimize the number of false negatives. This is equivalent to maximizing recall.\n\n\n23.3.5 Three More Metrics\nThe True Positive Rate (TPR) is defined as\n\\[\\text{true positive rate} = \\frac{\\text{TP}}{\\text{TP + FN}}\\]\nYou’ll notice this is equivalent to recall. In the context of our spam email classifier, it answers the question: “What proportion of spam did I mark correctly?”. We’d like this to be close to \\(1\\).\nThe True Negative Rate (TNR) is defined as\n\\[\\text{true negative rate} = \\frac{\\text{TN}}{\\text{TN + FP}}\\]\nAnother word for TNR is specificity. This answers the question: “What proportion of ham did I mark correctly?”. We’d like this to be close to \\(1\\).\nThe False Positive Rate (FPR) is defined as\n\\[\\text{false positive rate} = \\frac{\\text{FP}}{\\text{FP + TN}}\\]\nFPR is equal to 1 - specificity, or 1 - TNR. This answers the question: “What proportion of regular email did I mark as spam?”. We’d like this to be close to \\(0\\).\nAs we increase threshold \\(T\\), both TPR and FPR decrease. We’ve plotted this relationship below for some model on a toy dataset.", - "crumbs": [ - "23  Logistic Regression II" - ] - }, - { - "objectID": "logistic_regression_2/logistic_reg_2.html#adjusting-the-classification-threshold", - "href": "logistic_regression_2/logistic_reg_2.html#adjusting-the-classification-threshold", - "title": "23  Logistic Regression II", - "section": "23.4 Adjusting the Classification Threshold", - "text": "23.4 Adjusting the Classification Threshold\nOne way to minimize the number of FP vs. FN (equivalently, maximizing precision vs. recall) is by adjusting the classification threshold \\(T\\).\n\\[\\hat y = \\begin{cases}\n 1, & P(Y=1|x) \\ge T\\\\\n 0, & \\text{otherwise }\n \\end{cases}\\]\nThe default threshold in sklearn is \\(T = 0.5\\). 
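If we want a threshold other than the default, one simple option is to threshold the predicted probabilities ourselves. Here is a minimal sketch, assuming a fitted sklearn LogisticRegression called model and a feature matrix X (both hypothetical names, not objects defined in these notes):

import numpy as np

T = 0.7                               # hypothetical stricter threshold
probs = model.predict_proba(X)[:, 1]  # predicted P(Y = 1 | x) for each row of X
y_hat = (probs >= T).astype(int)      # predict class 1 only when the probability clears T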
As we increase the threshold \\(T\\), we “raise the standard” of how confident our classifier needs to be to predict 1 (i.e., “positive”).\n\n\n\nAs you may notice, the choice of threshold \\(T\\) impacts our classifier’s performance.\n\nHigh \\(T\\): Most predictions are \\(0\\).\n\nLots of false negatives\nFewer false positives\n\nLow \\(T\\): Most predictions are \\(1\\).\n\nLots of false positives\nFewer false negatives\n\n\nIn fact, we can choose a threshold \\(T\\) based on our desired number, or proportion, of false positives and false negatives. We can do so using a few different tools. We’ll touch on two of the most important ones in Data 100.\n\nPrecision-Recall Curve (PR Curve)\n“Receiver Operating Characteristic” Curve (ROC Curve)\n\n\n23.4.1 Precision-Recall Curves\nA Precision-Recall Curve (PR Curve) is an alternative to the ROC curve that displays the relationship between precision and recall for various threshold values. In this curve, we test out many different possible thresholds and for each one we compute the precision and recall of the classifier.\nLet’s first consider how precision and recall change as a function of the threshold \\(T\\). We know this quite well from earlier – precision will generally increase, and recall will decrease.\n\n\n\nDisplayed below is the PR Curve for the same toy dataset. Notice how threshold values increase as we move to the left.\n\n\n\nOnce again, the perfect classifier will resemble the orange curve, this time facing the opposite direction.\n\n\n\nWe want our PR curve to be as close to the “top right” of this graph as possible. Again, we use the AUC to determine “closeness”, with the perfect classifier exhibiting an AUC = 1 (and the worst with an AUC = 0.5).\n\n\n23.4.2 The ROC Curve\nThe “Receiver Operating Characteristic” Curve (ROC Curve) plots the tradeoff between FPR and TPR. Notice how the far-left of the curve corresponds to higher threshold \\(T\\) values. At lower thresholds, the FPR and TPR are both high as there are many positive predictions, while at higher thresholds the FPR and TPR are both low as there are fewer positive predictions.\n\n\n\nThe “perfect” classifier is the one that has a TPR of 1 and an FPR of 0. This is achieved at the top-left of the plot below. More generally, its ROC curve resembles the curve in orange.\n\n\n\nWe want our model to be as close to this orange curve as possible. How do we quantify “closeness”?\nWe can compute the area under the curve (AUC) of the ROC curve. Notice how the perfect classifier has an AUC = 1. The closer our model’s AUC is to 1, the better it is.\n\n23.4.2.1 (Extra) What is the “worst” AUC, and why is it 0.5?\nOn the other hand, a terrible model will have an AUC closer to 0.5. Random predictors randomly predict \\(P(Y = 1 | x)\\) to be uniformly between 0 and 1. 
This indicates the classifier is not able to distinguish between positive and negative classes, and thus, randomly predicts one of the two.\n\n\n\nWe can also illustrate this by comparing different thresholds and seeing their points on the ROC curve.", - "crumbs": [ - "23  Logistic Regression II" - ] - }, - { - "objectID": "logistic_regression_2/logistic_reg_2.html#bonus-gradient-descent-for-logistic-regression", - "href": "logistic_regression_2/logistic_reg_2.html#bonus-gradient-descent-for-logistic-regression", - "title": "23  Logistic Regression II", - "section": "23.5 (Bonus) Gradient Descent for Logistic Regression", - "text": "23.5 (Bonus) Gradient Descent for Logistic Regression\nLet’s define the following terms: \\[\n\\begin{align}\nt_i &= \\phi(x_i)^T \\theta \\\\\np_i &= \\sigma(t_i) \\\\\nt_i &= \\log(\\frac{p_i}{1 - p_i}) \\\\\n1 - \\sigma(t_i) &= \\sigma(-t_i) \\\\\n\\frac{d}{dt} \\sigma(t) &= \\sigma(t) \\sigma(-t)\n\\end{align}\n\\]\nNow, we can simplify the cross-entropy loss \\[\n\\begin{align}\ny_i \\log(p_i) + (1 - y_i) \\log(1 - p_i) &= y_i \\log(\\frac{p_i}{1 - p_i}) + \\log(1 - p_i) \\\\\n&= y_i \\phi(x_i)^T + \\log(\\sigma(-\\phi(x_i)^T \\theta))\n\\end{align}\n\\]\nHence, the optimal \\(\\hat{\\theta}\\) is \\[\\text{argmin}_{\\theta} - \\frac{1}{n} \\sum_{i=1}^n (y_i \\phi(x_i)^T + \\log(\\sigma(-\\phi(x_i)^T \\theta)))\\]\nWe want to minimize \\[L(\\theta) = - \\frac{1}{n} \\sum_{i=1}^n (y_i \\phi(x_i)^T + \\log(\\sigma(-\\phi(x_i)^T \\theta)))\\]\nSo we take the derivative \\[\n\\begin{align}\n\\triangledown_{\\theta} L(\\theta) &= - \\frac{1}{n} \\sum_{i=1}^n \\triangledown_{\\theta} y_i \\phi(x_i)^T + \\triangledown_{\\theta} \\log(\\sigma(-\\phi(x_i)^T \\theta)) \\\\\n&= - \\frac{1}{n} \\sum_{i=1}^n y_i \\phi(x_i) + \\triangledown_{\\theta} \\log(\\sigma(-\\phi(x_i)^T \\theta)) \\\\\n&= - \\frac{1}{n} \\sum_{i=1}^n y_i \\phi(x_i) + \\frac{1}{\\sigma(-\\phi(x_i)^T \\theta)} \\triangledown_{\\theta} \\sigma(-\\phi(x_i)^T \\theta) \\\\\n&= - \\frac{1}{n} \\sum_{i=1}^n y_i \\phi(x_i) + \\frac{\\sigma(-\\phi(x_i)^T \\theta)}{\\sigma(-\\phi(x_i)^T \\theta)} \\sigma(\\phi(x_i)^T \\theta)\\triangledown_{\\theta} \\sigma(-\\phi(x_i)^T \\theta) \\\\\n&= - \\frac{1}{n} \\sum_{i=1}^n (y_i - \\sigma(\\phi(x_i)^T \\theta)\\phi(x_i))\n\\end{align}\n\\]\nSetting the derivative equal to 0 and solving for \\(\\hat{\\theta}\\), we find that there’s no general analytic solution. Therefore, we must solve using numeric methods.\n\n23.5.1 Gradient Descent Update Rule\n\\[\\theta^{(0)} \\leftarrow \\text{initial vector (random, zeros, ...)} \\]\nFor \\(\\tau\\) from 0 to convergence: \\[ \\theta^{(\\tau + 1)} \\leftarrow \\theta^{(\\tau)} - \\rho(\\tau)\\left( \\frac{1}{n} \\sum_{i=1}^n \\triangledown_{\\theta} L_i(\\theta) \\mid_{\\theta = \\theta^{(\\tau)}}\\right) \\]\n\n\n23.5.2 Stochastic Gradient Descent Update Rule\n\\[\\theta^{(0)} \\leftarrow \\text{initial vector (random, zeros, ...)} \\]\nFor \\(\\tau\\) from 0 to convergence, let \\(B\\) ~ \\(\\text{Random subset of indices}\\). 
\\[ \\theta^{(\\tau + 1)} \\leftarrow \\theta^{(\\tau)} - \\rho(\\tau)\\left( \\frac{1}{|B|} \\sum_{i \\in B} \\triangledown_{\\theta} L_i(\\theta) \\mid_{\\theta = \\theta^{(\\tau)}}\\right) \\]", - "crumbs": [ - "23  Logistic Regression II" - ] - }, - { - "objectID": "pca_1/pca_1.html", - "href": "pca_1/pca_1.html", - "title": "24  PCA I", - "section": "", - "text": "24.1 Visualization (Revisited)\nVisualization can help us identify clusters or patterns in our dataset, and it can give us an intuition about our data and how to clean it for the model. For this demo, we’ll return to the MPG dataset from Lecture 19 and see how far we can push visualization for multiple features.\nCode\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\nimport plotly.express as px\nimport seaborn as sns\nCode\nmpg = sns.load_dataset(\"mpg\").dropna()\nmpg.head()\n\n\n\n\n\n\n\n\n\nmpg\ncylinders\ndisplacement\nhorsepower\nweight\nacceleration\nmodel_year\norigin\nname\n\n\n\n\n0\n18.0\n8\n307.0\n130.0\n3504\n12.0\n70\nusa\nchevrolet chevelle malibu\n\n\n1\n15.0\n8\n350.0\n165.0\n3693\n11.5\n70\nusa\nbuick skylark 320\n\n\n2\n18.0\n8\n318.0\n150.0\n3436\n11.0\n70\nusa\nplymouth satellite\n\n\n3\n16.0\n8\n304.0\n150.0\n3433\n12.0\n70\nusa\namc rebel sst\n\n\n4\n17.0\n8\n302.0\n140.0\n3449\n10.5\n70\nusa\nford torino\nWe can plot one feature as a histogram to see it’s distribution. Since we only plot one feature, we consider this a 1-dimensional plot.\nCode\npx.histogram(mpg, x=\"displacement\")\nWe can also visualize two features (2-dimensional scatter plot):\nCode\npx.scatter(mpg, x=\"displacement\", y=\"horsepower\")\nThree features (3-dimensional scatter plot):\nCode\nfig = px.scatter_3d(mpg, x=\"displacement\", y=\"horsepower\", z=\"weight\",\n width=800, height=800)\nfig.update_traces(marker=dict(size=3))\nWe can even push to 4 features using a 3D scatter plot and a colorbar:\nCode\nfig = px.scatter_3d(mpg, x=\"displacement\", \n y=\"horsepower\", \n z=\"weight\", \n color=\"model_year\",\n width=800, height=800, \n opacity=.7)\nfig.update_traces(marker=dict(size=5))\nVisualizing 5 features is also possible if we make the scatter dots unique to the datapoint’s origin.\nCode\nfig = px.scatter_3d(mpg, x=\"displacement\", \n y=\"horsepower\", \n z=\"weight\", \n color=\"model_year\",\n size=\"mpg\",\n symbol=\"origin\",\n width=900, height=800, \n opacity=.7)\n# hide color scale legend on the plotly fig\nfig.update_layout(coloraxis_showscale=False)\nHowever, adding more features to our visualization can make our plot look messy and uninformative, and it can also be near impossible if we have a large number of features. The problem is that many datasets come with more than 5 features —— hundreds, even. Is it still possible to visualize all those features?", - "crumbs": [ - "24  PCA I" - ] - }, - { - "objectID": "pca_1/pca_1.html#dimensionality", - "href": "pca_1/pca_1.html#dimensionality", - "title": "24  PCA I", - "section": "24.2 Dimensionality", - "text": "24.2 Dimensionality\nSuppose we have a dataset of:\n\n\\(N\\) observations (datapoints/rows)\n\\(d\\) attributes (features/columns)\n\nLet’s “rename” this in terms of linear algebra so that we can be more clear with our wording. Using linear algebra, we can view our matrix as:\n\n\\(N\\) row vectors in a \\(d\\)-Dimensional space, OR\n\\(d\\) column vectors in an \\(N\\)-Dimensions space\n\nThe intrinsic dimension of a dataset is the minimal set of dimensions needed to approximately represent the data. 
In linear algebra terms, it is the dimension of the column space of a matrix, or the number of linearly independent columns in a matrix; this is equivalently called the rank of a matrix.\nIn the examples below, Dataset 1 has 2 dimensions because it has 2 linearly independent columns. Similarly, Dataset 2 has 3 dimensions because it has 3 linearly independent columns.\n\n\n\n\nWhat about Dataset 4 below?\n\n\n\nIt may be tempting to say that it has 4 dimensions, but the Weight (lbs) column is actually just a linear transformation of the Weight (kg) column. Thus, no new information is captured, and the matrix of our dataset has a (column) rank of 3! Therefore, despite having 4 columns, we still say that this data is 3-dimensional.\nPlotting the weight columns together reveals the key visual intuition. While the two columns visually span a 2D space as a line, the data does not deviate at all from that singular line. This means that one of the weight columns is redundant! Even given the option to cover the whole 2D space, the data below does not. It might as well not have this dimension, which is why we still do not consider the data below to span more than 1 dimension.\n\n\n\nWhat happens when there are outliers? Below, we’ve added one outlier point to the dataset above, and just that one point is enough to change the rank of the matrix from 1 to 2 dimensions. However, the data is still approximately 1-dimensional.\n\n\n\nDimensionality reduction is generally an approximation of the original data that’s achieved by projecting the data onto a desired dimension. In the example below, our original datapoints (blue dots) are 2-dimensional. We have a few choices if we want to project them down to 1-dimension: project them onto the \\(x\\)-axis (left), project them onto the \\(y\\)-axis (middle), or project them to a line \\(mx + b\\) (right). The resulting datapoints after the projection is shown in red. Which projection do you think is better? How can we calculate that?\n\n\n\n\nIn general, we want the projection which is the best approximation for the original data (the graph on the right). In other words, we want the projection that captures the most variance of the original data. In the next section, we’ll see how this is calculated.", - "crumbs": [ - "24  PCA I" - ] - }, - { - "objectID": "pca_1/pca_1.html#matrix-decomposition-factorization", - "href": "pca_1/pca_1.html#matrix-decomposition-factorization", - "title": "24  PCA I", - "section": "24.3 Matrix Decomposition (Factorization)", - "text": "24.3 Matrix Decomposition (Factorization)\nOne linear technique for dimensionality reduction is matrix decomposition, which is closely tied to matrix multiplication. 
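To make the rank discussion above concrete before we move on to decomposition, here is a small NumPy sketch with made-up weights, showing that a pounds column equal to 2.2 times the kilograms column leaves the rank unchanged:

import numpy as np

kg = np.array([40.0, 55.0, 72.0, 90.0])  # hypothetical weights in kilograms
X = np.column_stack([kg, 2.2 * kg])      # add a redundant pounds column
print(np.linalg.matrix_rank(X))          # 1: the extra column adds no new information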
In this section, we will decompose our data matrix \\(X\\) into a lower-dimensional matrix \\(Z\\) that approximately recovers the original data when multiplied by \\(W\\).\n\n\n\nFirst, consider the matrix multiplication example below:\n\n\n\n\nFor table 1, each row of the fruits matrix represents one bowl of fruit; for example, the first bowl/row has 2 apples, 2 lemons, and 2 melons.\nFor table 2, each column of the dollars matrix represents the cost of fruit at a store; for example, the first store/column charges 2 dollars for an apple, 1 dollar for a lemon, and 4 dollars for a melon.\nThe output is the cost of each bowl at each store.\n\n\n\n\n\n\n\nLinear Algebra Review: Matrix Multiplication\n\n\n\nIn general, there are two ways to interpret matrix multiplication:\n\nEach datapoint in our output is a dot product between a row in the data matrix and a column in the operations matrix. In this view, we perform multiple linear operations on the data\n\n\n\nEach column in our output is a linear transformation of the original columns based on a column in the transformation matrix\n\n\n\n\nWe will use the second interpretation to link matrix multiplication with matrix decomposition, where we receive a lower dimensional representation of data along with a transformation matrix.\n\n\nMatrix decomposition (a.k.a matrix factorization) is the opposite of matrix multiplication. Instead of multiplying two matrices, we want to decompose a single matrix into 2 separate matrices. Just like with real numbers, there are infinite ways to decompose a matrix into a product of two matrices. For example, \\(9.9\\) can be decomposed as \\(1.1 * 9\\), \\(3.3 * 3.3\\), \\(1 * 9.9\\), etc. Additionally, the sizes of the 2 decomposed matrices can vary drastically. In the example below, the first factorization (top) multiplies a \\(3x2\\) matrix by a \\(2x3\\) matrix while the second factorization (bottom) multiplies a \\(3x3\\) matrix by a \\(3x3\\) matrix; both result in the original matrix on the right.\n\n\n\n\nWe can even expand the \\(3x3\\) matrices to \\(3x4\\) and \\(4x3\\) (shown below as the factorization on top), but this defeats the point of dimensionality reduction since we’re adding more “useless” dimensions. On the flip side, we also can’t reduce the dimension to \\(3x1\\) and \\(1x3\\) (shown below as the factorization on the bottom); since the rank of the original matrix is greater than 1, this decomposition will not result in the original matrix.\n\n\n\n In practice, we often work with datasets containing many features, so we usually want to construct decompositions where the dimensionality is below the rank of the original matrix. While this does not recover the data exactly, we can still provide approximate reconstructions of the matrix.\nIn the next section, we will discuss a method to automatically and approximately factorize data. This avoids redundant features and makes computation easier because we can train on less data. 
Since some approximations are better than others, we will also discuss how the method helps us capture a lot of information in a low number of dimensions.", - "crumbs": [ - "24  PCA I" - ] - }, - { - "objectID": "pca_1/pca_1.html#principal-component-analysis-pca", - "href": "pca_1/pca_1.html#principal-component-analysis-pca", - "title": "24  PCA I", - "section": "24.4 Principal Component Analysis (PCA)", - "text": "24.4 Principal Component Analysis (PCA)\nIn PCA, our goal is to transform observations from high-dimensional data down to low dimensions (often 2, as most visualizations are 2D) through linear transformations. In other words, we want to find a linear transformation that creates a low-dimension representation that captures as much of the original data’s total variance as possible.\n\n\n\nWe often perform PCA during the Exploratory Data Analysis (EDA) stage of our data science lifecycle when we don’t know what model to use. It helps us with:\n\nVisually identifying clusters of similar observations in high dimensions.\nRemoving irrelevant dimensions if we suspect that the dataset is inherently low rank. For example, if the columns are collinear, there are many attributes, but only a few mostly determine the rest through linear associations.\nCreating a transformed dataset of decorrelated features.\n\n\n\n\nThere are two equivalent ways of framing PCA:\n\nFinding directions of maximum variability in the data.\nFinding the low dimensional (rank) matrix factorization that best approximates the data.\n\nTo execute the first approach of variance maximization framing (more common), we can find the variances of each attribute with np.var and then keep the \\(k\\) attributes with the highest variance. However, this approach limits us to work with attributes individually; it cannot resolve collinearity, and we cannot combine features.\nThe second approach uses PCA to construct principal components with the most variance in the data (even higher than the first approach) using linear combinations of features. We’ll describe the procedure in the next section.\n\n24.4.1 PCA Procedure (Overview)\nTo perform PCA on a matrix:\n\nCenter the data matrix by subtracting the mean of each attribute column.\nTo find the \\(i\\)-th principal component, \\(v_i\\):\n\n\\(v\\) is a unit vector that linearly combines the attributes.\n\\(v\\) gives a one-dimensional projection of the data.\n\\(v\\) is chosen to maximize the variance along the projection onto \\(v\\). This is equivalent to minimizing the sum of squared distances between each point and its projection onto \\(v\\).\nChoose \\(v\\) such that it is orthogonal to all previous principal components.\n\n\nThe \\(k\\) principal components capture the most variance of any \\(k\\)-dimensional reduction of the data matrix.\nIn practice, however, we don’t carry out the procedures in step 2 because they take too long to compute. Instead, we use singular value decomposition (SVD) to find all principal components efficiently.\n\n\n24.4.2 Deriving PCA as Error Minimization\nIn this section, we will derive PCA keeping the following goal in mind: minimize the reconstruction loss for our matrix factorization model. 
You are not expected to be able to be able to redo this derivation, but understanding the derivation may help with future assignments.\nGiven a matrix \\(X\\) with \\(n\\) rows and \\(d\\) columns, our goal is to find its best decomposition such that \\[X \\approx Z W\\] Z has \\(n\\) rows and \\(k\\) columns; W has \\(k\\) rows and \\(d\\) columns.\n\n\n\nTo measure the accuracy of our reconstruction, we define the reconstruction loss below, where \\(X_i\\) is the row vector of \\(X\\), and \\(Z_i\\) is the row vector of \\(Z\\):\n\n\n\n\nThere are many solutions to the above, so let’s constrain our model such that \\(W\\) is a row-orthonormal matrix (i.e. \\(WW^T=I\\)) where the rows of \\(W\\) are our principal components.\nIn our derivation, let’s first work with the case where \\(k=1\\). Here Z will be an \\(n \\times 1\\) vector and W will be a \\(1 \\times d\\) vector.\n\\[\\begin{aligned}\nL(z,w) &= \\frac{1}{n}\\sum_{i=1}^{n}(X_i - z_{i}w)(X_i - z_{i}w)^T \\\\\n&= \\frac{1}{n}\\sum_{i=1}^{n}(X_{i}X_{i}^T - 2z_{i}X_{i}w^T + z_{i}^{2}ww^T) & \\text{(expand the loss)} \\\\\n= \\frac{1}{n}\\sum_{i=1}^{n}(-2z_{i}X_{i}w^T + z_{i}^{2}) & \\text{(First term is constant and }ww^T=1\\text{ by orthonormality)} \\\\\n\\end{aligned}\\]\nNow, we can take the derivative with respect to \\(Z_i\\). \\[\\begin{aligned}\n\\frac{\\partial{L(Z,W)}}{\\partial{z_i}} &= \\frac{1}{n}(-2X_{i}w^T + 2z_{i}) \\\\\nz_i &= X_iw^T & \\text{(Setting derivative equal to 0 and solving for }z_i\\text{)}\\end{aligned}\\]\nWe can now substitute our solution for \\(z_i\\) in our loss function:\n\\[\\begin{aligned}\nL(z,w) &= \\frac{1}{n}\\sum_{i=1}^{n}(-2z_{i}X_{i}w^T + z_{i}^{2}) \\\\\nL(z=X_iw^T,w) &= \\frac{1}{n}\\sum_{i=1}^{n}(-2X_iw^TX_{i}w^T + (X_iw^T)^{2}) \\\\\n&= \\frac{1}{n}\\sum_{i=1}^{n}(-X_iw^TX_{i}w^T) \\\\\n&= \\frac{1}{n}\\sum_{i=1}^{n}(-wX_{i}^TX_{i}w^T) \\\\\n&= -w\\frac{1}{n}\\sum_{i=1}^{n}(X_i^TX_{i})w^T \\\\\n&= -w\\Sigma w^T\n\\end{aligned}\\]\nNow, we need to minimize our loss with respect to \\(w\\). Since we have a negative sign, one way we can do this is by making \\(w\\) really big. However, we also have the orthonormality constraint \\(ww^T=1\\). To incorporate this constraint into the equation, we can add a Lagrange multiplier, \\(\\lambda\\). Note that lagrangian multipliers are out of scope for Data 100.\n\\[\nL(w,\\lambda) = -w\\Sigma w^T + \\lambda(ww^T-1)\n\\]\nTaking the derivative with respect to \\(w\\), \\[\\begin{aligned}\n\\frac{\\partial{L(w,\\lambda)}}{w} &= -2\\Sigma w^T + 2\\lambda w^T \\\\\n2\\Sigma w^T - 2\\lambda w^T &= 0 & \\text{(Setting derivative equal to 0)} \\\\\n\\Sigma w^T &= \\lambda w^T \\\\\n\\end{aligned}\\]\nThis result implies that:\n\n\\(w\\) is a unitary eigenvector of the covariance matrix. This means that \\(||w||^2 = ww^T = 1\\)\nThe error is minimized when \\(w\\) is the eigenvector with the largest eigenvalue \\(\\lambda\\).\n\nThis derivation can inductively be used for the next (second) principal component (not shown).\nThe final takeaway from this derivation is that the principal components are the eigenvectors with the largest eigenvalues of the covariance matrix. These are the directions of the maximum variance of the data. 
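As a quick numerical check of this takeaway (a sketch on synthetic data, not part of the original derivation), we can verify that the eigenvector of the covariance matrix with the largest eigenvalue matches the first right singular vector of the centered data, up to sign:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3)) * np.array([3.0, 1.0, 0.2])  # synthetic data with unequal spread
Xc = X - X.mean(axis=0)                                    # center each column

cov = Xc.T @ Xc / len(Xc)               # covariance matrix of the centered data
eigvals, eigvecs = np.linalg.eigh(cov)  # eigh returns eigenvalues in ascending order
v_eig = eigvecs[:, -1]                  # eigenvector with the largest eigenvalue

U, S, Vt = np.linalg.svd(Xc, full_matrices=False)
v_svd = Vt[0]                           # first right singular vector

print(np.allclose(np.abs(v_eig), np.abs(v_svd)))  # True (identical up to sign)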
We can construct the latent factors (the Z matrix) by projecting the centered data X onto the principal component vectors:", - "crumbs": [ - "24  PCA I" - ] - }, - { - "objectID": "clustering/clustering.html", - "href": "clustering/clustering.html", - "title": "26  Clustering", - "section": "", - "text": "26.1 Review: Taxonomy of Machine Learning", - "crumbs": [ - "26  Clustering" - ] - }, - { - "objectID": "clustering/clustering.html#review-taxonomy-of-machine-learning", - "href": "clustering/clustering.html#review-taxonomy-of-machine-learning", - "title": "26  Clustering", - "section": "", - "text": "26.1.1 Supervised Learning\nIn supervised learning, our goal is to create a function that maps inputs to outputs. Each model is learned from example input/output pairs (training set), validated using input/output pairs, and eventually tested on more input/output pairs. Each pair consists of:\n\nInput vector\nOutput value (label)\n\nIn regression, our output value is quantitative, and in classification, our output value is categorical.\n\n\n\nML taxonomy\n\n\n\n\n26.1.2 Unsupervised Learning\nIn unsupervised learning, our goal is to identify patterns in unlabeled data. In this type of learning, we do not have input/output pairs. Sometimes, we may have labels but choose to ignore them (e.g. PCA on labeled data). Instead, we are more interested in the inherent structure of the data we have rather than trying to simply predict a label using that structure of data. For example, if we are interested in dimensionality reduction, we can use PCA to reduce our data to a lower dimension.\nNow, let’s consider a new problem: clustering.\n\n\n26.1.3 Clustering Examples\n\n26.1.3.1 Example 1\nConsider this figure from Fall 2019 Midterm 2. The original dataset had 8 dimensions, but we have used PCA to reduce our data down to 2 dimensions.\n\n\n\nEach point represents the 1st and 2nd principal component of how much time patrons spent at 8 different zoo exhibits. Visually and intuitively, we could potentially guess that this data belongs to 3 groups: one for each cluster. The goal of clustering is now to assign each point (in the 2 dimensional PCA representation) to a cluster.\n\n\n\nThis is an unsupervised task, as:\n\nWe don’t have labels for each visitor.\nWe want to infer patterns even without labels.\n\n\n\n26.1.3.2 Example 2: Netflix\nNow suppose you’re Netflix and are looking at information on customer viewing habits. Clustering can come in handy here. We can assign each person or show to a “cluster.” (Note: while we don’t know for sure that Netflix actually uses ML clustering to identify these categories, they could, in principle.)\nKeep in mind that with clustering, we don’t need to define clusters in advance; it discovers groups automatically. On the other hand, with classification, we have to decide labels in advance. This marks one of the key differences between clustering and classification.\n\n\n26.1.3.3 Example 3: Education\nLet’s say we’re working with student-generated materials and pass them into the S-BERT module to extract sentence embeddings. Features from clusters are extracted to:\n\nDetect anomalies in group activities\nPredict the group’s median quiz grade\n\n\n\n\nHere we can see the outline of the anomaly detection module. 
It consists of:\n\nS-BERT feature extraction\nTopic extraction\nFeature extraction\n16D \\(\\rightarrow\\) 2D PCA dimensionality reduction and 2D \\(\\rightarrow\\) 16D reconstruction\nAnomaly detection based on reconstruction error\n\nLooking more closely at our clustering, we can better understand the different components, which are represented by the centers. Below we have two examples.\n\n\n\nNote that the details for this example are not in scope.\n\n\n26.1.3.4 Example 4: Reverse Engineering Biology\nNow, consider the plot below:\n\n\n\nThe rows of this plot are conditions (e.g., a row might be: “poured acid on the cells”), and the columns are genes. The green coloration indicates that the gene was “off” whereas red indicates the gene was “on”. For example, the ~9 genes in the top left corner of the plot were all turned off by the 6 experiments (rows) at the top.\nIn a clustering lens, we might be interested in clustering similar observations together based on the reactions (on/off) to certain experiments.\nFor example, here is a look at our data before and after clustering.\n\n\n\nNote: apologies if you can’t differentiate red from green by eye! Historical visualizations are not always the best.", - "crumbs": [ - "26  Clustering" - ] - }, - { - "objectID": "clustering/clustering.html#taxonomy-of-clustering-approaches", - "href": "clustering/clustering.html#taxonomy-of-clustering-approaches", - "title": "26  Clustering", - "section": "26.2 Taxonomy of Clustering Approaches", - "text": "26.2 Taxonomy of Clustering Approaches\n\n\n\nThere are many types of clustering algorithms, and they all have strengths, inherent weaknesses, and different use cases. We will first focus on a partitional approach: K-Means clustering.", - "crumbs": [ - "26  Clustering" - ] - }, - { - "objectID": "clustering/clustering.html#k-means-clustering", - "href": "clustering/clustering.html#k-means-clustering", - "title": "26  Clustering", - "section": "26.3 K-Means Clustering", - "text": "26.3 K-Means Clustering\nThe most popular clustering approach is K-Means. The algorithm itself entails the following:\n\nPick an arbitrary \\(k\\), and randomly place \\(k\\) “centers”, each a different color.\nRepeat until convergence:\n\nColor points according to the closest center.\nMove the center for each color to the center of points with that color.\n\n\nConsider the following data with an arbitrary \\(k = 2\\) and randomly placed “centers” denoted by the different colors (blue, orange):\n\n\n\nNow, we will follow the rest of the algorithm. First, let us color each point according to the closest center:\n\n\n\nNext, we will move the center for each color to the center of points with that color. Notice how the centers are generally well-centered amongst the data that shares its color.\n\n\n\nAssume this process (re-color and re-set centers) repeats for a few more iterations. We eventually reach this state.\n\n\n\nAfter this iteration, the center stays still and does not move at all. Thus, we have converged, and the clustering is complete!\n\n26.3.0.1 A Quick Note\nK-Means is a completely different algorithm than K-Nearest Neighbors. K-means is used for clustering, where each point is assigned to one of \\(K\\) clusters. On the other hand, K-Nearest Neighbors is used for classification (or, less often, regression), and the predicted value is typically the most common class among the \\(K\\)-nearest data points in the training set. 
The names may be similar, but there isn’t really anything in common.", - "crumbs": [ - "26  Clustering" - ] - }, - { - "objectID": "clustering/clustering.html#minimizing-inertia", - "href": "clustering/clustering.html#minimizing-inertia", - "title": "26  Clustering", - "section": "26.4 Minimizing Inertia", - "text": "26.4 Minimizing Inertia\nConsider the following example where \\(K = 4\\):\n\n\n\nDue to the randomness of where the \\(K\\) centers initialize/start, you will get a different output/clustering every time you run K-Means. Consider three possible K-Means outputs; the algorithm has converged, and the colors denote the final cluster they are clustered as.\n\n\n\n Which clustering output is the best? To evaluate different clustering results, we need a loss function.\nThe two common loss functions are:\n\nInertia: Sum of squared distances from each data point to its center.\nDistortion: Weighted sum of squared distances from each data point to its center.\n\n\n\n\nIn the example above:\n\nCalculated inertia: \\(0.47^2 + 0.19^2 + 0.34^2 + 0.25^2 + 0.58^2 + 0.36^2 + 0.44^2\\)\nCalculated distortion: \\(\\frac{0.47^2 + 0.19^2 + 0.34^2}{3} + \\frac{0.25^2 + 0.58^2 + 0.36^2 + 0.44^2}{4}\\)\n\nSwitching back to the four-cluster example at the beginning of this section, random.seed(25) had an inertia of 44.96, random.seed(29) had an inertia of 45.95, and random.seed(40) had an inertia of 54.35. It seems that the best clustering output was random.seed(25) with an inertia of 44.96!\nIt turns out that the function K-Means is trying to minimize is inertia, but often fails to find global optimum. Why does this happen? We can think of K-means as a pair of optimizers that take turns. The first optimizer holds center positions constant and optimizes data colors. The second optimizer holds data colors constant and optimizes center positions. Neither optimizer gets full control!\nThis is a hard problem: give an algorithm that optimizes inertia FOR A GIVEN \\(K\\); \\(K\\) is picked in advance. Your algorithm should return the EXACT best centers and colors, but you don’t need to worry about runtime.\nNote: This is a bit of a CS61B/CS70/CS170 problem, so do not worry about completely understanding the tricky predicament we are in too much!\nA potential algorithm:\n\nFor all possible \\(k^n\\) colorings:\n\nCompute the \\(k\\) centers for that coloring.\nCompute the inertia for the \\(k\\) centers.\n\nIf current inertia is better than best known, write down the current centers and coloring and call that the new best known.\n\n\n\nNo better algorithm has been found for solving the problem of minimizing inertia exactly.", - "crumbs": [ - "26  Clustering" - ] - }, - { - "objectID": "clustering/clustering.html#hierarchical-agglomerative-clustering", - "href": "clustering/clustering.html#hierarchical-agglomerative-clustering", - "title": "26  Clustering", - "section": "26.5 Hierarchical Agglomerative Clustering", - "text": "26.5 Hierarchical Agglomerative Clustering\nNow, let us consider hierarchical agglomerative clustering.\n\n\n\n Consider the following results of two K-Means clustering outputs:\n\n\n\n Which clustering result do you like better? It seems K-Means likes the one on the right better because it has lower inertia (the sum of squared distances from each data point to its center), but this raises some questions:\n\nWhy is the inertia on the right lower? K-Means optimizes for distance, not “blobbiness”.\nIs clustering on the right “wrong”? 
  {
    "objectID": "clustering/clustering.html#hierarchical-agglomerative-clustering",
    "href": "clustering/clustering.html#hierarchical-agglomerative-clustering",
    "title": "26  Clustering",
    "section": "26.5 Hierarchical Agglomerative Clustering",
    "text": "26.5 Hierarchical Agglomerative Clustering\nNow, let us consider hierarchical agglomerative clustering.\n\n\n\n Consider the following results of two K-Means clustering outputs:\n\n\n\n Which clustering result do you like better? It seems K-Means prefers the one on the right because it has lower inertia (the sum of squared distances from each data point to its center), but this raises some questions:\n\nWhy is the inertia on the right lower? K-Means optimizes for distance, not “blobbiness”.\nIs the clustering on the right “wrong”? Good question!\n\nNow, let us introduce Hierarchical Agglomerative Clustering! We start with every data point in a separate cluster and keep merging the most similar pairs of data points/clusters until we have one big cluster left. This is called a bottom-up or agglomerative method.\nThere are various ways to decide the order in which clusters are combined, known as linkage criteria:\n\nSingle linkage (similarity of the most similar): the distance between two clusters is the minimum distance between a point in the first cluster and a point in the second.\nComplete linkage (similarity of the least similar): the distance between two clusters is the maximum distance between a point in the first cluster and a point in the second.\nAverage linkage: the distance between two clusters is the average distance over all pairs of points, one from each cluster.\n\nThe linkage criterion decides how we measure the “distance” between two clusters. Regardless of the criterion we choose, the aim is to combine the two clusters that have the minimum “distance” between them, with the distance computed as per that criterion. In the case of complete linkage, for example, that means picking the two clusters that minimize the maximum distance between a point in the first cluster and a point in the second.\n\n\n\nWhen the algorithm starts, every data point is in its own cluster. In the plot below, there are 12 data points, so the algorithm starts with 12 clusters. As the clustering begins, it assesses which clusters are the closest together.\n\n\n\nThe closest clusters are 10 and 11, so they are merged together.\n\n\n\nNext, points 0 and 4 are merged together because they are closest.\n\n\n\nAt this point, we have 10 clusters: 8 with a single point (clusters 1, 2, 3, 4, 5, 6, 7, 8, and 9) and 2 with two points (clusters 0 and 10).\nAlthough clusters 0 and 3 are not the closest, let us consider what it would take to merge them. A tricky question arises: what is the “distance” between clusters 0 and 3? Under the complete linkage criterion, the “distance” between two groups is the maximum distance among all pairs of points, one from each group, and the pair of clusters with the smallest such “distance” is merged next.\n\n\n\nLet us assume the algorithm runs a little longer, and we have reached the following state. Clusters 0 and 7 are up next, but why? The maximum distance between any member of cluster 0 and any member of cluster 6 is longer than the maximum distance between any member of cluster 0 and any member of cluster 7.\n\n\n\nThus, 0 and 7 are merged into 0, as they are closer under the complete linkage criterion.\nAfter more iterations, we finally converge to the plot on the left. There are two clusters (0, 1), and the agglomerative algorithm has converged.\n\n\n\n Notice that on the full dataset, our agglomerative clustering algorithm achieves the more “correct” output.\n\n26.5.1 Clustering, Dendrograms, and Intuition\nAgglomerative clustering is one form of “hierarchical clustering.” It is interpretable because we can keep track of when two clusters got merged (each cluster is a tree), and we can visualize the merging hierarchy, resulting in a “dendrogram.” We won’t discuss dendrograms any further in this course, but you might see them in the wild. Here are some examples:\n\n \n\nSome professors use agglomerative clustering for grading bins; if there is a big gap between two people, draw a grading threshold there. The idea is that grade clustering should look more like the figure below on the left, not the right.",
    "crumbs": [
      "26  Clustering"
    ]
  },
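For readers who want to see agglomerative clustering and a dendrogram in code, here is a minimal sketch using SciPy. The toy data and the choice of complete linkage are assumptions made for illustration; this is not tied to any specific figure above.

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, fcluster, linkage

# Hypothetical toy data: 12 points in 2D, loosely forming two groups.
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, size=(6, 2)), rng.normal(5, 1, size=(6, 2))])

# Merge clusters bottom-up using the complete linkage criterion
# (cluster distance = max distance between a point in one cluster and a point in the other).
Z = linkage(X, method="complete")

# Cut the hierarchy into 2 flat clusters.
labels = fcluster(Z, t=2, criterion="maxclust")

# Visualize the merging hierarchy as a dendrogram.
dendrogram(Z)
plt.show()
```

scikit-learn's `AgglomerativeClustering` offers the same linkage choices if you prefer to stay within `sklearn`.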
  {
    "objectID": "clustering/clustering.html#picking-k",
    "href": "clustering/clustering.html#picking-k",
    "title": "26  Clustering",
    "section": "26.6 Picking K",
    "text": "26.6 Picking K\nThe algorithms we’ve discussed require us to pick a \(K\) before we start. But how do we pick \(K\)? Often, the best \(K\) is subjective. For example, consider the state plot below.\n\n\n\nHow many clusters are there here? For K-Means, one approach is to plot inertia against many different values of \(K\) and pick the \(K\) at the elbow, where we get diminishing returns afterward. Note that big, complicated datasets often lack an elbow, so this method is not foolproof. Here, we would likely select \(K = 2\).\n\n\n\n\n26.6.1 Silhouette Scores\nTo evaluate how “well-clustered” a specific data point is, we can use the silhouette score, also termed the silhouette width. A high silhouette score indicates that a point is near the other points in its cluster; a low score means that it’s far from the other points in its cluster.\n\n\n\nFor a data point \(X\), the score \(S\) is: \[S =\frac{B - A}{\max(A, B)}\] where \(A\) is the average distance to the other points in \(X\)’s cluster, and \(B\) is the average distance to the points in the closest other cluster.\nConsider what the highest possible value of \(S\) is and how that value can occur. The highest possible value of \(S\) is 1, which happens if every point in \(X\)’s cluster is right on top of \(X\): the average distance to other points in \(X\)’s cluster is \(0\), so \(A = 0\) and \(S = \frac{B}{\max(0, B)} = \frac{B}{B} = 1\). \(S\) will also be close to 1 whenever \(B\) is much greater than \(A\) (we denote this as \(B >> A\)).\nCan \(S\) be negative? Yes. If the average distance to \(X\)’s clustermates (\(A\)) is larger than the average distance to the points in the closest cluster (\(B\)), then the numerator \(B - A\) is negative. For example, the “low score” point on the right of the image above has \(S = -0.13\).\n\n\n26.6.2 Silhouette Plot\nWe can plot the silhouette scores for all of our data points. The x-axis represents the silhouette score, and the y-axis tells us which cluster each point belongs to, as well as the number of points within each cluster. Points with large silhouette widths are deeply embedded in their cluster; the red dotted line shows the average. Below, we plot the silhouette scores for our plot with \(K=2\).\n\n \n\nSimilarly, we can plot the silhouette scores for the same dataset but with \(K=3\):\n\n \n\nThe average silhouette score is lower with 3 clusters, so \(K=2\) is the better choice. This aligns with our visual intuition as well.\n\n\n26.6.3 Picking K: Real World Metrics\nSometimes you can rely on real-world metrics to guide your choice of \(K\). For t-shirts, we can either:\n\nCluster heights and weights of customers with \(K = 3\) to design Small, Medium, and Large shirts\nCluster heights and weights of customers with \(K = 5\) to design XS, S, M, L, and XL shirts\n\nTo choose \(K\), consider the projected costs and sales for the two different \(K\)s and select the one that maximizes profit.",
    "crumbs": [
      "26  Clustering"
    ]
  },
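As a rough sketch of how the elbow method and silhouette scores described above might be computed in practice (the data matrix `X` is hypothetical and the range of candidate \(K\)s is arbitrary):

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Hypothetical data matrix with one row per observation.
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, size=(50, 2)), rng.normal(5, 1, size=(50, 2))])

for k in range(2, 7):
    km = KMeans(n_clusters=k, n_init=10, random_state=25).fit(X)
    avg_sil = silhouette_score(X, km.labels_)  # average silhouette width
    print(f"K={k}: inertia={km.inertia_:.2f}, avg silhouette={avg_sil:.3f}")
```

Plotting the inertias against \(K\) gives the elbow plot, while `sklearn.metrics.silhouette_samples` returns the per-point scores used to draw the silhouette plot itself.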
  {
    "objectID": "clustering/clustering.html#conclusion",
    "href": "clustering/clustering.html#conclusion",
    "title": "26  Clustering",
    "section": "26.7 Conclusion",
    "text": "26.7 Conclusion\nWe’ve now discussed a new machine learning goal, clustering, and explored two solutions:\n\nK-Means Clustering tries to minimize a loss function called inertia (there is no known efficient algorithm for finding the optimal answer).\nHierarchical Agglomerative Clustering builds clusters bottom-up by merging clusters that are “close” to each other, where “close” is determined by the choice of linkage.\n\nOur versions of these algorithms required a hyperparameter \(K\). There are three ways to pick \(K\): the elbow method, silhouette scores, and real-world metrics.\nThere are many machine learning problems, each of which can be addressed by many different solution techniques and evaluated with many different metrics for success or loss. Many techniques can be applied to more than one problem type; for example, linear models can be used for both regression and classification.\nWe’ve only scratched the surface and haven’t discussed many important ideas, such as neural networks and deep learning. In the last lecture, we’ll provide some specific course recommendations on how to explore these topics further.",
    "crumbs": [
      "26  Clustering"
    ]
  }
]
\ No newline at end of file