from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# Confirm which TensorFlow version is running.
print(tf.__version__)
# Download the Fashion-MNIST dataset of 28x28 grayscale clothing images;
# labels are integers 0-9 (one per class in class_names below).
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
type(fashion_mnist)
# Human-readable names for the integer labels; index == label value.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Notebook-style sanity checks: array shape, label distribution, count.
train_images.shape
# train_images
np.unique(train_labels, return_counts=True)
len(train_labels)
# Display one raw sample; the colorbar shows pixel values are 0-255
# before normalization.
plt.figure()
plt.imshow(train_images[3])
plt.colorbar()
plt.grid(False)
plt.show()
# Scale pixel values to [0, 1]; apply the same scaling to train and test.
train_images = train_images/255.0
test_images = test_images/255.0
# Display the first 25 training images with their class names to verify the
# data and labels line up. (Loop-body indentation restored: it was lost in
# the notebook-to-script conversion, which made this a syntax error.)
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
# Flatten each 28x28 image into a 1D vector (784 values) so the images can
# be loaded into a tabular DataFrame below.
train_images_long = [img.flatten() for img in train_images]
len(train_images_long)
# Pixel-level analysis with pandas: for class 0 (T-shirt/top), find which
# pixels are most frequently "on" across the class's training images.
import pandas as pd

# One row per training image, one column per pixel.
df = pd.DataFrame(train_images_long)

# Binarize: any non-zero (already-normalized) pixel value becomes 1, so
# sums below count "how many images have this pixel on".
df_bw = df.copy()
df_bw[(df_bw != 0)] = 1

# Attach labels so rows can be filtered per class.
df_labeled = df_bw.copy()
df_labeled['label'] = train_labels

# One frame per class of interest: 0=T-shirt/top, 1=Trouser, 2=Pullover, 3=Dress.
df_labeled_0 = df_labeled[df_labeled['label'] == 0]
df_labeled_1 = df_labeled[df_labeled['label'] == 1]
df_labeled_2 = df_labeled[df_labeled['label'] == 2]
df_labeled_3 = df_labeled[df_labeled['label'] == 3]

# Transpose so each row is a pixel; 'sum' counts images with that pixel on,
# 'perc' expresses it as a percentage of the class's images.
df_labeled_0_t = df_labeled_0.T
df_labeled_0_t['sum'] = df_labeled_0_t.sum(axis=1)
# Use the actual class size instead of the hard-coded 6000 so this keeps
# working if the class distribution or dataset size changes.
df_labeled_0_t['perc'] = (df_labeled_0_t['sum'] / len(df_labeled_0)) * 100
df_labeled_0_t

df = df_labeled_0_t.copy()
# NOTE(review): sort_values returns a NEW frame; the result is not assigned,
# so this only displays the sorted view in a notebook.
df.sort_values(by="perc", ascending=False)
# Deep learning = chaining simple layers together.
# Explanation of the model below:
#   Flatten() simply reformats the data -- it takes a 2D array (28x28) and turns it into a 1D array (784).
#   Dense() has parameters that are learned during training. Two are used here: the first has 128 nodes
#   (neurons); the second is a 10-node layer that returns one raw logit score per class (a softmax is
#   applied later to convert these into probabilities that sum to 1).
# Sequential model: Flatten reformats each 28x28 image into a flat vector,
# Dense(128, relu) is the hidden layer, and Dense(10) emits one raw logit
# per class (no softmax here; see the loss/probability_model below).
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28,28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
# Before we can train, we need to add a few more settings -- the COMPILE step:
#   Loss function -- measures how accurate the model is during training. NOTE: You want to minimize this
#   to "steer" the model in the right direction.
#   Optimizer -- how the model is updated based on the data it sees and its loss function.
#   Metrics -- used to monitor the training and testing steps. Accuracy is the fraction of images that
#   are correctly classified.
# Compile: adam optimizer; sparse categorical cross-entropy taken on raw
# logits (from_logits=True, matching the missing softmax on the last Dense
# layer); track accuracy during training and evaluation.
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# Training the nn model requires these 4 steps:
#   1. Feed the training data (train_images, train_labels) to the model.
#   2. The model learns to associate images with labels.
#   3. Ask the model to make predictions about a test set (test_images).
#   4. Verify that the predictions match the labels from test_labels.
## model.fit starts the training because it "fits" the model to the training data
model.fit(train_images, train_labels, epochs=10)
## Now, compare performance
# evaluate() returns the loss plus the metrics configured at compile time
# (here: accuracy), computed on the held-out test set.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\n Test Accuracy:', test_acc)
# NOTE: The accuracy on the test data is less than on the training data due to OVERFITTING.
# This essentially means the model has "memorized" the training data.
## Now, make predictions!
## The model produces logits and we add a softmax layer to convert the logits to probabilities (easier to interpret)
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
# predictions[0] is a length-10 probability vector for the first test image.
predictions[0]
# Get index of highest prediction probability from the array of all possible predictions
# (prediction is an array of 10 numbers, each being the probability that object in question
# is a top, trouser, pullover etc...)
np.argmax(predictions[0])
# Compare against the ground-truth label for the same image.
test_labels[0]
# CORRECT PREDICTION!! The predicted class index matches the true label.
# Graph the prediction array to get a full picture
def plot_image(i, predictions_array, true_label, img):
    """Show test image *i* with its predicted class, confidence, and true class.

    Args:
        i: index into the true_label/img arrays.
        predictions_array: length-10 array of class probabilities for image i.
        true_label: full array of integer labels (indexed by i here).
        img: full array of images (indexed by i here).

    The x-label is blue when the prediction is correct, red otherwise.
    """
    # Body indentation restored (lost in notebook-to-script conversion);
    # the original also redundantly re-assigned predictions_array to itself.
    true_label, img = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    # Blue means the prediction matches the ground truth, red means it doesn't.
    color = 'blue' if predicted_label == true_label else 'red'

    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-chart the 10 class probabilities for test example *i*.

    Args:
        i: index into the true_label array.
        predictions_array: length-10 array of class probabilities.
        true_label: full array of integer labels (indexed by i here).

    The predicted class's bar is red and the true class's bar is blue; when
    the prediction is correct the blue assignment wins (it is applied last).
    """
    # Body indentation restored (lost in notebook-to-script conversion).
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0,1])
    predicted_label = np.argmax(predictions_array)

    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
# Show image 0 alongside its full prediction-probability bar chart.
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Repeat for image index 12.
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first num_images test images, each paired with its probability
# bar chart: odd subplot positions hold images, even ones hold bar charts.
# (Loop-body indentation restored: it was lost in the notebook-to-script
# conversion, which made this a syntax error.)
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i + 1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i + 2)
    plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# Grab image from test set
img = test_images[9]
print(img.shape)
# NOTE: tf.keras models are used to making BATCH predictions so you still
# need to add the single image to a list
img = (np.expand_dims(img,0))
print(img.shape)
# Predict on the batch of one; predictions_single holds one length-10 row.
predictions_single = probability_model.predict(img)
print(predictions_single)
plot_value_array(9, predictions_single[0], test_labels)
# Relabel the x-axis ticks with class names for readability.
_ = plt.xticks(range(10), class_names, rotation=45)
# Index of the most likely class for the single image.
np.argmax(predictions_single[0])