import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST data set
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
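# read_data_sets returns the standard MNIST split (55,000 training, 5,000
# validation and 10,000 test images), each image flattened to a 784-dim
# float vector, with one-hot encoded labels because one_hot=True.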
# Each input image is 28x28 pixels; the LSTM reads it one 28-pixel row at a time
n_input = 28        # pixels per row (input size at each time step)
max_time = 28       # number of rows (time steps)
lstm_size = 100     # number of hidden units in the LSTM cell
n_class = 10        # 10 classes (digits 0-9)
batch_size = 50     # 50 samples per batch
n_batches = mnist.train.num_examples // batch_size  # number of batches per epoch
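# With the standard 55,000-image training split this works out to
# 55000 // 50 = 1100 batches per epoch.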
# Create the placeholders; None leaves the first (batch) dimension free
x = tf.placeholder(tf.float32, [None, 28 * 28])  # flattened input images
# Ground-truth labels, one-hot encoded
y = tf.placeholder(tf.float32, [None, 10])
# Initialise the output-layer weights; stddev is the standard deviation
weight = tf.Variable(tf.truncated_normal([lstm_size, n_class], stddev=0.1))
# Initialise the output-layer biases
biases = tf.Variable(tf.constant(0.1, shape=[n_class]))
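# This weight matrix maps the LSTM's final hidden state (lstm_size = 100) to
# the 10 class scores, so the classifier reads only the last hidden output.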
# Define the RNN network
def RNN(X, weights, biases):
    # The raw data is [batch_size, 28*28]; reshape it to
    # [batch_size, max_time, n_input] so each image row is one time step
    inputs = tf.reshape(X, [-1, max_time, n_input])
    # Define the basic LSTM cell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # final_state[0] is the cell state
    # final_state[1] is the hidden state
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
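    # outputs has shape [batch_size, max_time, lstm_size] (the hidden state at
    # every time step); final_state is an LSTMStateTuple (c, h) whose h equals
    # outputs[:, -1, :], i.e. the hidden state after the last row is read.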
    # Return the raw logits; softmax_cross_entropy_with_logits below expects
    # unscaled logits and applies the softmax itself, so applying tf.nn.softmax
    # here as well would be wrong
    results = tf.matmul(final_state[1], weights) + biases
    return results
# Build the graph: prediction holds the logits produced by the RNN
prediction = RNN(x, weight, biases)
# Loss function: softmax cross-entropy between the logits and the one-hot labels
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
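# softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy
# into one numerically stable op, which is why RNN returns raw logits.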
# Optimise with AdamOptimizer at a learning rate of 1e-4
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
# Boolean vector marking which predictions match the labels
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
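# argmax is invariant under softmax, so comparing the raw logits against the
# one-hot labels gives exactly the same result as comparing probabilities.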
# Accuracy: cast the booleans to floats and take the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialise all variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(6):
        for batch in range(n_batches):
            # Fetch the next batch of training data
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
            if batch % 100 == 0:
                print(str(batch) + "/" + str(n_batches))
        # Evaluate on the full test set once per epoch
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy = " + str(acc))