import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

EPOCH = 40
batch_size = 50
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
tf.reset_default_graph()

# placeholders for flattened 28x28 images and one-hot labels
input_X = tf.placeholder(tf.float32, shape=[None, 784])
input_y = tf.placeholder(tf.int64, shape=[None, 10])

# fully connected network: 784 -> 784 -> 256 -> 256 -> 10
input_layer = tf.layers.dense(input_X, 784, activation=tf.nn.sigmoid)
hidden1 = tf.layers.dense(input_layer, 256, activation=tf.nn.sigmoid)
hidden2 = tf.layers.dense(hidden1, 256, activation=tf.nn.sigmoid)
output = tf.layers.dense(hidden2, units=10)
output = tf.nn.softmax(output)

# loss and training step
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=input_y, logits=output)
loss = tf.reduce_mean(entropy)
step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# correct = tf.nn.in_top_k(tf.argmax(output, y, 1))  # this attempt raised a rank error
correct = tf.equal(tf.argmax(output, 1), tf.argmax(input_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# training loop
for epoch in range(EPOCH):
    for i in range(1000):
        X, y = mnist.train.next_batch(batch_size)
        sess.run(loss, feed_dict={input_X: X, input_y: y})
    acc_train = accuracy.eval(feed_dict={input_X: X, input_y: y})
    acc_val = accuracy.eval(feed_dict={input_X: mnist.validation.images,
                                       input_y: mnist.validation.labels})
    print(epoch, "Train accuracy: ", acc_train, "\n Val accuracy: ", acc_val)
The output looks like this:
0 Train accuracy: 0.1
Val accuracy: 0.0986
1 Train accuracy: 0.14
Val accuracy: 0.0986
2 Train accuracy: 0.06
Val accuracy: 0.0986
3 Train accuracy: 0.06
Val accuracy: 0.0986
I based my code on parts of the TensorFlow examples in the O'Reilly textbook.
Edit: the O'Reilly guide I followed is available online here: https://github.com/ageron/handson-ml/blob/master/10_introduction_to_artificial_neural_networks.ipynb
Its code is similar to mine, but it trains fine.
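For comparison, here is roughly the pattern I remember from that notebook, condensed from memory. The layer sizes (300/100), the learning rate, and names like training_op, X_batch, y_batch are my approximations and may not match the notebook exactly:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data")  # no one_hot: labels are class indices

tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, 784), name="X")
y = tf.placeholder(tf.int64, shape=(None,), name="y")

# two ReLU hidden layers; the output layer is kept as raw logits (no softmax applied)
hidden1 = tf.layers.dense(X, 300, activation=tf.nn.relu, name="hidden1")
hidden2 = tf.layers.dense(hidden1, 100, activation=tf.nn.relu, name="hidden2")
logits = tf.layers.dense(hidden2, 10, name="outputs")

# cross-entropy is computed from the raw logits and the integer labels
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
training_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(40):
        for i in range(mnist.train.num_examples // 50):
            X_batch, y_batch = mnist.train.next_batch(50)
            # the training op itself is run on every batch
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_val = accuracy.eval(feed_dict={X: mnist.validation.images,
                                           y: mnist.validation.labels})
        print(epoch, "Val accuracy:", acc_val)

The structural differences I can see against my code are that the notebook feeds raw logits (and integer labels) into the cross-entropy op and runs training_op inside the batch loop rather than only evaluating the loss, but I am not sure whether either of these explains why my accuracy stays around 10%.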
