IST 718 LAB 3 | EXPLORE, MODEL, INTERPRET

OUTPUT: Models

E: EXPLORE

In [97]:
import pandas as pd
data = pd.read_csv("v2_coaches2019.csv")
# data = pd.read_csv("v2_coaches9.csv")
data.head()
Out[97]:
   rank     school     conf         coach  school_pay  total_pay  max_bonus  bonus_paid   asst_pay      buyout
0     1    Clemson      ACC  Dabo Swinney   9255000.0  9315600.0  1125000.0   1075000.0  7410000.0  50000000.0
1     2    Alabama      SEC    Nick Saban   8707000.0  8857000.0  1100000.0    875000.0  7541277.0  34100000.0
2     3   Michigan  Big Ten  Jim Harbaugh   7504000.0  7504000.0  1325000.0    350000.0  6005000.0  11687500.0
3     4  Texas A&M      SEC  Jimbo Fisher   7500000.0  7500000.0  1500000.0    250000.0  7145215.0  60625000.0
4     5    Georgia      SEC   Kirby Smart   6703600.0  6871600.0  1150000.0    275000.0  6212935.0  24239584.0
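
A quick check of dtypes and missing values (a sketch, not part of the original run) helps motivate the imputer used later in the pipeline:
In [ ]:
# Sketch: column dtypes, non-null counts, and missing values per column.
data.info()
print(data.isna().sum())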
In [98]:
%matplotlib inline
import matplotlib.pyplot as plt
data.hist(bins=50, figsize=(20, 15))
plt.show()
In [99]:
from pandas.plotting import scatter_matrix
scatter_matrix(data, figsize=(12,8))
plt.show()
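
The scatter matrix suggests total_pay is almost identical to school_pay. A quick numeric check (a sketch; this cell is not in the original run) shows why total_pay and rank are dropped in the next cell:
In [ ]:
# Sketch: correlation of each numeric column with the target.
# total_pay is school_pay plus small supplements, so it is nearly
# collinear with the target, and rank is just pay order reversed.
data.select_dtypes('number').corr()['school_pay'].sort_values(ascending=False)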
In [100]:
# total_pay is nearly collinear with the target school_pay, and rank is
# just pay order, so keeping either would leak the answer into the model.
data.drop(['total_pay', 'rank'], axis=1, inplace=True)
In [101]:
from pandas.plotting import scatter_matrix
scatter_matrix(data, figsize=(12,8))
plt.show()

M: MODEL

In [102]:
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)

data = train_set.drop('school_pay', axis=1)
data_labels = train_set['school_pay'].copy()
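
Before preprocessing, a quick sanity check (a sketch, not in the original run) confirms the 80/20 split and the feature/label separation:
In [ ]:
# Sketch: verify split sizes and that the label column was removed
# from the feature frame.
print(len(train_set), 'train rows /', len(test_set), 'test rows')
print(data.shape, data_labels.shape)
print('school_pay' in data.columns)  # should be False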
In [103]:
from sklearn.compose import ColumnTransformer

# Separate the numeric columns from the categorical ones for the pipeline.
data_num = data.drop(['school', 'conf', 'coach'], axis=1)
num_attribs = list(data_num)
cat_attribs = ['school', 'conf', 'coach']
In [104]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer

num_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy="median")),  # fill missing values
    ('std_scaler', StandardScaler()),               # zero mean, unit variance
])
In [105]:
full_pipeline = ColumnTransformer([
    ('num', num_pipeline, num_attribs),
    # handle_unknown='ignore' lets the fitted pipeline transform test rows
    # whose school/coach names were never seen during fit (the default
    # OneHotEncoder would raise an error on them).
    ('cat', OneHotEncoder(handle_unknown='ignore'), cat_attribs),
])
In [106]:
data_prepared = full_pipeline.fit_transform(data)
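
Since OneHotEncoder expands school, conf, and coach into indicator columns, data_prepared is much wider than the raw frame. A quick shape check (a sketch; exact numbers depend on the data) makes that visible:
In [ ]:
# Sketch: the ColumnTransformer output is the scaled numeric columns
# followed by the one-hot indicator columns, possibly as a sparse matrix.
print(type(data_prepared))
print(data_prepared.shape)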

LINEAR REGRESSION

In [107]:
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(data_prepared, data_labels)
Out[107]:
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)
In [108]:
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
import numpy as np

def display_scores(scores):
    print("Scores:", scores)
    print("Mean:", scores.mean())
    print("Stardard Deviation:", scores.std())
In [109]:
scores = cross_val_score(lin_reg, data_prepared, data_labels, scoring="neg_mean_squared_error", cv=10)
lin_reg_rmse_scores = np.sqrt(-scores)
display_scores(lin_reg_rmse_scores)
Scores: [ 863391.43623717 1329276.35896028  940871.66390862 1258097.36176848
  823246.34433711  871677.13226375  501034.63415128  905209.97976486
 1056012.22089929 1167760.28656792]
Mean: 971657.7418858772
Standard Deviation: 229369.45582631155
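
Cross-validation puts the linear model's RMSE near $972K. Scoring the held-out test set would look like the sketch below; it relies on handle_unknown='ignore' in the encoder above, since the test set contains coach names the pipeline never saw during fit:
In [ ]:
# Sketch: held-out evaluation (these variable names are new, not from the lab).
X_test = test_set.drop('school_pay', axis=1)
y_test = test_set['school_pay'].copy()
X_test_prepared = full_pipeline.transform(X_test)  # transform only, no refit
test_rmse = np.sqrt(mean_squared_error(y_test, lin_reg.predict(X_test_prepared)))
print(test_rmse)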

DECISION TREE

In [110]:
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(data_prepared, data_labels)
Out[110]:
DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=None,
                      max_leaf_nodes=None, min_impurity_decrease=0.0,
                      min_impurity_split=None, min_samples_leaf=1,
                      min_samples_split=2, min_weight_fraction_leaf=0.0,
                      presort=False, random_state=None, splitter='best')
In [111]:
scores = cross_val_score(tree_reg, data_prepared, data_labels, scoring="neg_mean_squared_error", cv=10)
tree_reg_rmse_scores = np.sqrt(-scores)
display_scores(tree_reg_rmse_scores)
Scores: [1469853.78732636 1985939.9550611  1607669.72348535 2011720.40863393
 1215687.98128237  982787.80966926  584613.1934606   805628.22551125
 1407292.11675043 1569107.42329036]
Mean: 1364030.0624470995
Standard Deviation: 448041.9923430583
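
The tree's cross-validated RMSE (~$1.36M) is worse than the linear model's, the classic signature of an unconstrained tree overfitting. Comparing against training error makes that explicit (a sketch, not run in the original):
In [ ]:
# Sketch: an unpruned tree memorizes the training set, so training RMSE
# is ~0 even though cross-validated RMSE is large.
train_pred = tree_reg.predict(data_prepared)
print(np.sqrt(mean_squared_error(data_labels, train_pred)))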

RANDOM FOREST

In [112]:
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(data_prepared, data_labels)
/Users/danielcaraway/anaconda3/lib/python3.7/site-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
  "10 in version 0.20 to 100 in 0.22.", FutureWarning)
Out[112]:
RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
                      max_features='auto', max_leaf_nodes=None,
                      min_impurity_decrease=0.0, min_impurity_split=None,
                      min_samples_leaf=1, min_samples_split=2,
                      min_weight_fraction_leaf=0.0, n_estimators=10,
                      n_jobs=None, oob_score=False, random_state=None,
                      verbose=0, warm_start=False)
In [113]:
scores = cross_val_score(forest_reg, data_prepared, data_labels, scoring="neg_mean_squared_error", cv=10)
forest_reg_rmse_scores = np.sqrt(-scores)
display_scores(forest_reg_rmse_scores)
Scores: [1119857.44839398 1287429.28953944 1390971.85418215 1796434.34057579
  850164.3655399   679468.08656183  553168.3106628  1030410.36380279
 1093633.48200284 1352072.98180393]
Mean: 1115361.0523065452
Standard Deviation: 347787.8257380748
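
The forest beats the single tree but still trails the linear model here. A natural next step is tuning instead of accepting defaults; a minimal GridSearchCV sketch (the grid values are illustrative assumptions, not from the lab):
In [ ]:
from sklearn.model_selection import GridSearchCV

# Sketch: small illustrative grid; values are assumptions, not tuned choices.
param_grid = {'n_estimators': [10, 50, 100], 'max_features': [4, 8, 'sqrt']}
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
                           scoring='neg_mean_squared_error')
grid_search.fit(data_prepared, data_labels)
print(grid_search.best_params_)
print(np.sqrt(-grid_search.best_score_))  # best cross-validated RMSE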