!pip install ipython-autotime
%load_ext autotime
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
## mount your Google Drive folder
from google.colab import drive
drive.mount('/content/drive')
## change into the directory where your data is
os.chdir("drive/My Drive/data")
app_train = pd.read_csv('home-credit-default-risk/application_train.csv')
app_train
app_test= pd.read_csv('home-credit-default-risk/application_test.csv')
app_train['TARGET']
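## the TARGET classes are far from balanced (defaults are the minority),
## so it is worth checking the split before modeling
app_train['TARGET'].value_counts()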
print(app_train.shape)
print(app_test.shape)
## one-hot encode the categorical columns
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
print(app_train.shape)
print(app_test.shape)
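## get_dummies leaves the training frame with columns the test frame lacks
## (categories seen only in train, plus TARGET); a quick way to inspect the mismatch:
print(sorted(set(app_train.columns) - set(app_test.columns))[:10])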
## align both frames to their shared columns; TARGET exists only in train,
## so save it first and reattach it after the align
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join = 'inner', axis = 1)
app_train['TARGET'] = train_labels
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
app_train_labels = app_train['TARGET']
app_train_nolabels = app_train.drop('TARGET', axis=1)
## one-hot encode the binary TARGET so it matches a two-unit softmax output
from keras.utils import to_categorical
train_labels = to_categorical(app_train_labels)
train_labels.shape
(trainX, testX, trainY, testY) = train_test_split(app_train_nolabels,
train_labels, test_size=0.25, random_state=42)
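## The Home Credit tables contain missing values and features on very different scales,
## which a plain SGD-trained network handles poorly. A minimal sketch of one way to
## impute and rescale (not part of the original listing), fitting on the training split
## only and overwriting trainX/testX so the rest of the code runs unchanged:
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler

imputer = SimpleImputer(strategy="median")
scaler = MinMaxScaler(feature_range=(0, 1))
trainX = scaler.fit_transform(imputer.fit_transform(trainX))
testX = scaler.transform(imputer.transform(testX))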
## trainY and testY are already one-hot indicator matrices from to_categorical,
## so no further label binarization is needed
## a small fully connected network: two sigmoid hidden layers and a softmax output
model = Sequential()
model.add(Dense(1024, input_shape=(trainX.shape[1],), activation="sigmoid"))
model.add(Dense(512, activation="sigmoid"))
model.add(Dense(trainY.shape[1], activation="softmax"))
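## optionally inspect the layer shapes and parameter counts before training
model.summary()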
INIT_LR = 0.01
EPOCHS = 75
print("[INFO] training network...")
## recent Keras/TensorFlow releases use `learning_rate`; older ones accept `lr`
opt = SGD(learning_rate=INIT_LR)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
epochs=EPOCHS, batch_size=32)
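## After training, the classification_report and matplotlib imports from the top of the
## listing can be used to evaluate and visualize. A sketch assuming the variable names
## above; class names follow the competition's TARGET definition (0 = repaid,
## 1 = payment difficulties), and the history keys are "acc"/"val_acc" on older Keras.
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=["repaid", "default"]))

## plot training/validation loss and accuracy from the Keras History object
N = np.arange(0, EPOCHS)
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()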