Osomaki67's Blog

I intend to use this as a personal memo and diary.

First Steps with AI 11: Get Started with TensorFlow: classic MNIST

It turned out that tf.keras.datasets.mnist, which appears in the opening code of Get Started with TensorFlow, is the classic MNIST dataset, so I put together the Jupyter Notebook below.
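(As a quick sanity check, separate from the notebook cells below, here is a minimal sketch confirming that tf.keras.datasets.mnist is the classic handwritten-digit dataset: 60,000 training and 10,000 test images of size 28x28 with integer labels 0 through 9. The printed shapes and label values match the cell outputs further down.)

import numpy as np
import tensorflow as tf

# Load the classic MNIST digits and inspect the split sizes and label values.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, x_test.shape)  # (60000, 28, 28) (10000, 28, 28)
print(np.unique(y_train))           # [0 1 2 3 4 5 6 7 8 9]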


In [1]:
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras  # I wonder if the latest TensorFlow bundles keras?

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)
 
1.11.0
In [2]:
classic_mnist = tf.keras.datasets.mnist

(train_images, train_labels), (test_images, test_labels) = classic_mnist.load_data()
In [3]:
class_names = ['zero', 'one', 'two', 'three', 'four', 
               'five', 'six', 'seven', 'eight', 'nine']
In [4]:
train_images.shape
Out[4]:
(60000, 28, 28)
In [5]:
len(train_labels)
Out[5]:
60000
In [6]:
train_labels
Out[6]:
array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)
In [7]:
test_images.shape
Out[7]:
(10000, 28, 28)
In [8]:
len(test_labels)
Out[8]:
10000
In [9]:
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
[Figure: the first training image displayed with a colorbar]
In [10]:
train_images = train_images / 255.0

test_images = test_images / 255.0
In [11]:
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
[Figure: 5x5 grid of the first 25 training images with their class labels]
In [12]:
# I tried changing the model here.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# I also changed this part.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
model.evaluate(test_images, test_labels)
 
Epoch 1/5
60000/60000 [==============================] - 14s 237us/step - loss: 0.2000 - acc: 0.9412
Epoch 2/5
60000/60000 [==============================] - 14s 227us/step - loss: 0.0802 - acc: 0.9754
Epoch 3/5
60000/60000 [==============================] - 13s 224us/step - loss: 0.0537 - acc: 0.9830
Epoch 4/5
60000/60000 [==============================] - 14s 225us/step - loss: 0.0366 - acc: 0.9884
Epoch 5/5
60000/60000 [==============================] - 13s 224us/step - loss: 0.0265 - acc: 0.9916
10000/10000 [==============================] - 0s 40us/step
Out[12]:
[0.07169422345789499, 0.9801]
In [13]:
predictions = model.predict(test_images)
In [14]:
predictions[0]
Out[14]:
array([4.8529500e-11, 4.6674481e-10, 3.1813300e-08, 1.2909177e-06,
       9.0128634e-15, 2.9170492e-11, 2.8064065e-15, 9.9999869e-01,
       6.5376677e-09, 7.0768231e-09], dtype=float32)
In [15]:
np.argmax(predictions[0])
Out[15]:
7
In [16]:
test_labels[0]
Out[16]:
7
In [17]:
def plot_image(i, predictions_array, true_label, img):
  predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
  plt.grid(False)
  plt.xticks([])
  plt.yticks([])
  
  plt.imshow(img, cmap=plt.cm.binary)

  predicted_label = np.argmax(predictions_array)
  if predicted_label == true_label:
    color = 'blue'
  else:
    color = 'red'
  
  plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                100*np.max(predictions_array),
                                class_names[true_label]),
                                color=color)

def plot_value_array(i, predictions_array, true_label):
  predictions_array, true_label = predictions_array[i], true_label[i]
  plt.grid(False)
  plt.xticks([])
  plt.yticks([])
  thisplot = plt.bar(range(10), predictions_array, color="#777777")
  plt.ylim([0, 1]) 
  predicted_label = np.argmax(predictions_array)
 
  thisplot[predicted_label].set_color('red')
  thisplot[true_label].set_color('blue')
In [18]:
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions,  test_labels)
[Figure: test image 0 with its predicted label and class-probability bar chart]
In [19]:
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions,  test_labels)
[Figure: test image 12 with its predicted label and class-probability bar chart]
In [20]:
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
  plt.subplot(num_rows, 2*num_cols, 2*i+1)
  plot_image(i, predictions, test_labels, test_images)
  plt.subplot(num_rows, 2*num_cols, 2*i+2)
  plot_value_array(i, predictions, test_labels)
[Figure: the first 15 test images with predicted labels (blue = correct, red = incorrect) and their probability bars]
In [21]:
# Grab an image from the test dataset
img = test_images[0]

print(img.shape)
 
(28, 28)
In [22]:
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))

print(img.shape)
 
(1, 28, 28)
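(The cell In [23], which should define predictions_single, did not survive the copy. Judging from the surrounding cells, it runs the model on the single-image batch created above; a minimal sketch of what it presumably contained:)

# Run the trained model on the batch that contains only this one image.
predictions_single = model.predict(img)
print(predictions_single)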
In [24]:
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
[Figure: class-probability bar chart for the single-image batch, with class names on the x axis]
In [25]:
np.argmax(predictions_single[0])
Out[25]:
7
In [26]:
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.