TensorFlow MNIST For ML Beginners

I got stuck on the basics and never made it to what I actually wanted to do...

MNIST For ML Beginners – TensorFlow

# MNIST For ML Beginners
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from sklearn.metrics import classification_report


mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

# Hand-rolled cross-entropy on the softmax output
# (reduction_indices is the deprecated alias of axis)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#loss_function = tf.losses.softmax_cross_entropy(y_, y)

train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)
#train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss_function)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
         precision    recall  f1-score   support

      0       0.93      0.98      0.95       980
      1       0.95      0.97      0.96      1135
      2       0.91      0.85      0.88      1032
      3       0.88      0.89      0.89      1010
      4       0.88      0.92      0.90       982
      5       0.89      0.81      0.85       892
      6       0.93      0.93      0.93       958
      7       0.91      0.89      0.90      1028
      8       0.85      0.86      0.86       974
      9       0.87      0.87      0.87      1009

avg / total       0.90      0.90      0.90     10000

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

# Note: tf.losses.softmax_cross_entropy expects raw logits as its second
# argument, but y is already a softmax output, so softmax is applied twice here.
loss_function = tf.losses.softmax_cross_entropy(y_, y)

train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss_function)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
          precision    recall  f1-score   support

      0       0.86      0.97      0.91       980
      1       0.89      0.97      0.93      1135
      2       0.86      0.83      0.85      1032
      3       0.67      0.89      0.77      1010
      4       0.84      0.86      0.85       982
      5       0.00      0.00      0.00       892
      6       0.85      0.93      0.89       958
      7       0.86      0.86      0.86      1028
      8       0.69      0.83      0.75       974
      9       0.78      0.84      0.81      1009

avg / total       0.74      0.81      0.77     10000

UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

loss_function = tf.losses.softmax_cross_entropy(y_, y)

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss_function)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
         precision    recall  f1-score   support

      0       0.87      0.97      0.92       980
      1       0.92      0.97      0.94      1135
      2       0.90      0.84      0.87      1032
      3       0.69      0.90      0.78      1010
      4       0.84      0.90      0.87       982
      5       0.00      0.00      0.00       892
      6       0.86      0.95      0.90       958
      7       0.88      0.88      0.88      1028
      8       0.67      0.86      0.75       974
      9       0.83      0.85      0.84      1009

avg / total       0.76      0.82      0.79     10000

UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

# tf.nn.softmax_cross_entropy_with_logits likewise expects logits; passing the
# already-softmaxed y computes softmax(softmax(...)) and flattens the gradients.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
         precision    recall  f1-score   support

      0       0.86      0.97      0.91       980
      1       0.92      0.97      0.95      1135
      2       0.90      0.84      0.87      1032
      3       0.70      0.90      0.79      1010
      4       0.83      0.91      0.87       982
      5       0.00      0.00      0.00       892
      6       0.87      0.94      0.90       958
      7       0.87      0.88      0.88      1028
      8       0.67      0.87      0.75       974
      9       0.84      0.85      0.84      1009

avg / total       0.76      0.82      0.79     10000

UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cost)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
         precision    recall  f1-score   support

      0       0.95      0.97      0.96       980
      1       0.96      0.97      0.96      1135
      2       0.92      0.87      0.89      1032
      3       0.91      0.90      0.90      1010
      4       0.89      0.93      0.91       982
      5       0.90      0.81      0.85       892
      6       0.90      0.95      0.93       958
      7       0.91      0.91      0.91      1028
      8       0.84      0.89      0.86       974
      9       0.90      0.88      0.89      1009

avg / total       0.91      0.91      0.91     10000

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder(tf.float32, [None, 10])

loss_function = tf.losses.softmax_cross_entropy(y_, y)

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss_function)
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


pred = sess.run(tf.argmax(y,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
y_test = sess.run(tf.argmax(y_,1), feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(classification_report(y_test, pred))
         precision    recall  f1-score   support

      0       0.93      0.98      0.95       980
      1       0.96      0.97      0.96      1135
      2       0.92      0.87      0.89      1032
      3       0.92      0.89      0.90      1010
      4       0.89      0.92      0.91       982
      5       0.91      0.80      0.85       892
      6       0.90      0.96      0.93       958
      7       0.91      0.91      0.91      1028
      8       0.83      0.89      0.86       974
      9       0.89      0.88      0.88      1009

avg / total       0.91      0.91      0.91     10000

I hadn't paid much attention to the learning rate (figuring anything around 0.01–0.05 would do), so I didn't expect it to have this much impact. With AdamOptimizer, convergence is good as long as you crank through enough steps, whereas GradientDescentOptimizer converges fastest of all when things go well (Adam can't quite get there in 1000 steps) and is a reasonable choice especially for simple models, but tuning the learning rate is essential. (Part of the sensitivity here is presumably the double softmax noted in the comments above, which flattens the gradients; the larger rate compensates for it.)
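For comparison, a minimal sketch (my addition, not one of the runs above) of the same model trained with AdamOptimizer; its default rate of 0.001 needs little tuning, but typically wants more than 1000 steps to match SGD's best result here:

# Same softmax regression, trained with Adam instead of plain SGD.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b        # keep the raw logits for the loss
y = tf.nn.softmax(logits)
y_ = tf.placeholder(tf.float32, [None, 10])
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.AdamOptimizer(0.001).minimize(cost)  # Adam's default rate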

import numpy as np
from sklearn import svm

train_label = np.dot(mnist.train.labels, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
test_label = np.dot(mnist.test.labels, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

cls = svm.SVC()
cls.fit(mnist.train.images, train_label)
pred = cls.predict(mnist.test.images)
print(classification_report(test_label, pred))
         precision    recall  f1-score   support

    0.0       0.96      0.99      0.97       980
    1.0       0.97      0.99      0.98      1135
    2.0       0.94      0.93      0.93      1032
    3.0       0.93      0.94      0.93      1010
    4.0       0.93      0.95      0.94       982
    5.0       0.93      0.91      0.92       892
    6.0       0.95      0.97      0.96       958
    7.0       0.96      0.93      0.94      1028
    8.0       0.94      0.92      0.93       974
    9.0       0.94      0.92      0.93      1009

avg / total       0.94      0.94      0.94     10000

With an RBF-kernel SVM, default parameters reach 94% with no grid search and no tuning whatsoever.
But it is slow. It takes longer than expected; why is it this slow?
Really slow: about 17 minutes, which is hard to fathom
(the TensorFlow version finishes in a few seconds). One possible workaround is sketched below.
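If fitting time matters, a linear SVM scales far better at this data size (my suggestion, not something tried above; accuracy will dip without the RBF kernel):

from sklearn import svm

# LinearSVC is backed by liblinear, which handles 55,000 x 784 inputs in
# seconds rather than the many minutes kernel SVC needs.
lin_cls = svm.LinearSVC()
lin_cls.fit(mnist.train.images, train_label)
lin_pred = lin_cls.predict(mnist.test.images)
print(classification_report(test_label, lin_pred))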

To see how other learners fare, let's try a random forest.

from sklearn.ensemble import RandomForestClassifier


clf = RandomForestClassifier()
clf.fit(mnist.train.images, train_label)
pred = clf.predict(mnist.test.images)
print(classification_report(test_label, pred))
         precision    recall  f1-score   support

    0.0       0.96      0.99      0.97       980
    1.0       0.97      0.99      0.98      1135
    2.0       0.94      0.93      0.93      1032
    3.0       0.93      0.94      0.93      1010
    4.0       0.93      0.95      0.94       982
    5.0       0.93      0.91      0.92       892
    6.0       0.95      0.97      0.96       958
    7.0       0.96      0.93      0.94      1028
    8.0       0.94      0.92      0.93       974
    9.0       0.94      0.92      0.93      1009

avg / total       0.94      0.94      0.94     10000

The result came back in an instant (just a few seconds).

Next, XGBoost.

import xgboost as xgb

xgb_model = xgb.XGBClassifier()
xgb_model.fit(mnist.train.images, train_label)
pred = xgb_model.predict(mnist.test.images)
print(classification_report(test_label, pred))
         precision    recall  f1-score   support

    0.0       0.96      0.99      0.97       980
    1.0       0.97      0.99      0.98      1135
    2.0       0.94      0.93      0.93      1032
    3.0       0.93      0.94      0.93      1010
    4.0       0.93      0.95      0.94       982
    5.0       0.93      0.91      0.92       892
    6.0       0.95      0.97      0.96       958
    7.0       0.96      0.93      0.94      1028
    8.0       0.94      0.92      0.93       974
    9.0       0.94      0.92      0.93      1009

avg / total       0.94      0.94      0.94     10000

It took about 24 minutes.

Maybe it is a matter of strengths and weaknesses, with convergence depending heavily on how well the learner suits the data. As an SVM enthusiast I have used SVMs on all kinds of data, and while tuning has sometimes taken a while, fitting has never taken this long, so this was a bit of a surprise. Random forests, on the other hand, are fast. Generalization aside, the fit is unquestionably good, and given the speed (and that a forest doubles as a feature selector, as sketched below), a random forest seems the reasonable first choice.
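On the feature-selection point, a minimal sketch (my addition) using the forest fitted above:

import numpy as np

# feature_importances_ is available after fit(); on MNIST the border pixels
# get near-zero importance, so the ranking doubles as a crude pixel selector.
importances = clf.feature_importances_
top_pixels = np.argsort(importances)[::-1][:20]
print(top_pixels)   # indices of the 20 most informative pixels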

import tensorflow as tf
from tensorflow.contrib.layers import real_valued_column
from tensorflow.contrib.learn import DNNClassifier
from tensorflow.contrib.learn.python import SKCompat


train_label_int = train_label.astype(int)
test_label_int = test_label.astype(int)
n_classes = len(set(train_label_int))
feature_columns = [real_valued_column("", dimension=mnist.train.images.shape[1])]
cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 20, 10],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=1000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.96      0.95      0.95       980
      1       0.95      0.98      0.97      1135
      2       0.92      0.89      0.90      1032
      3       0.91      0.86      0.88      1010
      4       0.89      0.93      0.91       982
      5       0.84      0.86      0.85       892
      6       0.89      0.95      0.92       958
      7       0.93      0.91      0.92      1028
      8       0.88      0.83      0.86       974
      9       0.86      0.89      0.87      1009

avg / total       0.91      0.91      0.91     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[110, 970],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=1000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.97      0.96      0.97       980
      1       0.98      0.98      0.98      1135
      2       0.96      0.96      0.96      1032
      3       0.97      0.92      0.94      1010
      4       0.95      0.95      0.95       982
      5       0.92      0.96      0.94       892
      6       0.90      0.98      0.94       958
      7       0.97      0.93      0.95      1028
      8       0.97      0.89      0.93       974
      9       0.91      0.95      0.93      1009

avg / total       0.95      0.95      0.95     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[110, 220],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=1000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.97      0.98      0.97       980
      1       0.98      0.98      0.98      1135
      2       0.96      0.95      0.96      1032
      3       0.97      0.92      0.95      1010
      4       0.95      0.95      0.95       982
      5       0.94      0.95      0.94       892
      6       0.92      0.98      0.95       958
      7       0.96      0.93      0.95      1028
      8       0.95      0.92      0.94       974
      9       0.91      0.96      0.93      1009

avg / total       0.95      0.95      0.95     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[110],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=1000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.96      0.98      0.97       980
      1       0.98      0.98      0.98      1135
      2       0.94      0.94      0.94      1032
      3       0.95      0.93      0.94      1010
      4       0.95      0.93      0.94       982
      5       0.93      0.93      0.93       892
      6       0.93      0.97      0.95       958
      7       0.96      0.93      0.94      1028
      8       0.94      0.90      0.92       974
      9       0.90      0.94      0.92      1009

avg / total       0.94      0.94      0.94     10000

To raise accuracy, it is the number of units in the first hidden layer that matters; the second and third layers apparently have far less effect. Layers beyond the first are said to contribute mainly to model capacity (handling nonlinearity and improving generalization).

In the end the unit counts can only be chosen heuristically, but there seem to be plenty of formulas of the "this tends to work" variety; one simple example follows.
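One of the simplest such rules of thumb (my example, not the formula arrived at below) is the geometric mean of the input and output dimensions:

import math

# Geometric-mean rule of thumb: hidden units ~ sqrt(n_in * n_out).
print(round(math.sqrt(784 * 10)))   # -> 89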

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[178, 256],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=1000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.97      0.98       980
      1       0.98      0.99      0.98      1135
      2       0.96      0.96      0.96      1032
      3       0.98      0.93      0.95      1010
      4       0.96      0.96      0.96       982
      5       0.95      0.96      0.95       892
      6       0.93      0.98      0.96       958
      7       0.97      0.95      0.96      1028
      8       0.96      0.93      0.95       974
      9       0.93      0.96      0.94      1009

avg / total       0.96      0.96      0.96     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[178, 256],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.97      0.98      0.98      1032
      3       0.97      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.98      0.97      0.98       892
      6       0.98      0.98      0.98       958
      7       0.98      0.98      0.98      1028
      8       0.97      0.97      0.97       974
      9       0.97      0.98      0.97      1009

avg / total       0.98      0.98      0.98     10000

A good range for the hidden-unit count can be written in terms of the input and output dimensions. For the first layer in particular (in this case, with 784 inputs and 12 = 10 output classes + 2), choosing from

[(12*784)^(1/2) + 2*(784/12)^(1/2), (12*784)^(1/2) + 10*(784/12)^(1/2)] ≈ [113, 178]

works out pretty well as a guide.
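Plugging in the numbers as a quick check of that range:

import math

m, d = 10, 784                            # output classes, input dimension
base = math.sqrt((m + 2) * d)             # ~ 97
lo = base + 2 * math.sqrt(d / (m + 2))    # ~ 113
hi = base + m * math.sqrt(d / (m + 2))    # ~ 178, the first-layer width used above
print(round(lo), round(hi))               # -> 113 178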

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[130, 260],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.98       980
      1       1.00      0.99      0.99      1135
      2       0.98      0.98      0.98      1032
      3       0.97      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.98      0.98      0.98       892
      6       0.98      0.98      0.98       958
      7       0.98      0.98      0.98      1028
      8       0.97      0.98      0.97       974
      9       0.98      0.97      0.97      1009

avg / total       0.98      0.98      0.98     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[178, 356],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.98       980
      1       1.00      0.99      0.99      1135
      2       0.98      0.98      0.98      1032
      3       0.98      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.98      0.98      0.98       892
      6       0.98      0.98      0.98       958
      7       0.99      0.98      0.98      1028
      8       0.98      0.97      0.98       974
      9       0.97      0.98      0.98      1009

avg / total       0.98      0.98      0.98     10000

A coefficient of around log2(input dimension) + 1 or log2(output dimension) + 1 seems to fit fairly well. For now the second hidden layer is simply twice the first; since the first layer dominates accuracy and the second can still move it by a few percent, the second layer deserves consideration of its own, though plain doubling already works decently.
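Checking those coefficients against the same formula (my arithmetic, under the reading above):

import math

m, d = 10, 784
base = math.sqrt((m + 2) * d)
for c in (math.log2(d) + 1, math.log2(m) + 1):
    print(round(c, 2), round(base + c * math.sqrt(d / (m + 2))))
# -> 10.61 183   (close to the 178 used above)
# -> 4.32 132    (close to the 130 used above)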


What follows is a digression.


cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[178, 356],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=30000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.99      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.97      0.98      0.98      1032
      3       0.97      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.98      0.97      0.98       892
      6       0.98      0.98      0.98       958
      7       0.98      0.98      0.98      1028
      8       0.98      0.97      0.98       974
      9       0.98      0.97      0.98      1009

avg / total       0.98      0.98      0.98     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[178],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.98      0.98      0.98      1032
      3       0.97      0.98      0.98      1010
      4       0.98      0.97      0.98       982
      5       0.99      0.97      0.98       892
      6       0.98      0.98      0.98       958
      7       0.97      0.98      0.98      1028
      8       0.97      0.98      0.97       974
      9       0.97      0.97      0.97      1009

avg / total       0.98      0.98      0.98     10000

First, to narrow things down, I ran 1000 steps for each hidden_units = [unit_n, unit_n*2] with unit_n ∈ [100, 784] in steps of 10, then tried the promising candidates at 10,000 steps (the sweep is sketched below, followed by those runs).
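A minimal sketch of that coarse sweep (my reconstruction of the loop, reusing the SKCompat setup above):

# 1000 steps per candidate is enough to rank the widths before the
# longer 10,000-step runs on the survivors.
for unit_n in range(100, 785, 10):
    cand = SKCompat(DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[unit_n, unit_n * 2],
        n_classes=n_classes))
    cand.fit(mnist.train.images, train_label_int, steps=1000)
    pred = cand.predict(mnist.test.images)['classes']
    print(unit_n, (pred == test_label_int).mean())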

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[220, 440],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.98      0.97      0.98      1032
      3       0.97      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.99      0.98      0.98       892
      6       0.98      0.98      0.98       958
      7       0.98      0.99      0.98      1028
      8       0.97      0.98      0.97       974
      9       0.98      0.97      0.98      1009

avg / total       0.98      0.98      0.98     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[330, 660],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.99      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.98      0.98      0.98      1032
      3       0.97      0.99      0.98      1010
      4       0.98      0.99      0.98       982
      5       0.99      0.98      0.98       892
      6       0.99      0.98      0.98       958
      7       0.98      0.98      0.98      1028
      8       0.98      0.97      0.98       974
      9       0.98      0.98      0.98      1009

avg / total       0.98      0.98      0.98     10000

cls_tf_dnn = SKCompat(
    DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[356, 712],
        n_classes=n_classes
    )
)

cls_tf_dnn.fit(mnist.train.images, train_label_int, steps=10000)

tf_dnn_pred = cls_tf_dnn.predict(mnist.test.images)

print(classification_report(test_label_int, tf_dnn_pred['classes']))
         precision    recall  f1-score   support

      0       0.98      0.99      0.99       980
      1       0.99      0.99      0.99      1135
      2       0.98      0.98      0.98      1032
      3       0.98      0.98      0.98      1010
      4       0.98      0.98      0.98       982
      5       0.99      0.98      0.98       892
      6       0.98      0.98      0.98       958
      7       0.98      0.98      0.98      1028
      8       0.97      0.98      0.98       974
      9       0.97      0.98      0.98      1009

avg / total       0.98      0.98      0.98     10000
