1-1. Input and output layers only, no hidden layer
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# load the dataset
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# size of each mini-batch
batch_size = 100
# number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# define two placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# build a simple network: softmax regression, no hidden layer
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# variable initializer
init = tf.global_variables_initializer()

# boolean tensor: does each prediction match its label?
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# accuracy = mean of the boolean tensor cast to float
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(20):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
Results after 20 training epochs:
Iter 0,Testing Accuracy 0.8303
Iter 1,Testing Accuracy 0.8712
Iter 2,Testing Accuracy 0.8808
Iter 3,Testing Accuracy 0.8887
Iter 4,Testing Accuracy 0.8944
Iter 5,Testing Accuracy 0.8973
Iter 6,Testing Accuracy 0.8992
Iter 7,Testing Accuracy 0.9032
Iter 8,Testing Accuracy 0.9034
Iter 9,Testing Accuracy 0.905
Iter 10,Testing Accuracy 0.9068
Iter 11,Testing Accuracy 0.9074
Iter 12,Testing Accuracy 0.9083
Iter 13,Testing Accuracy 0.9096
Iter 14,Testing Accuracy 0.9101
Iter 15,Testing Accuracy 0.9109
Iter 16,Testing Accuracy 0.9118
Iter 17,Testing Accuracy 0.9121
Iter 18,Testing Accuracy 0.9127
Iter 19,Testing Accuracy 0.9133
1-2. Using the cross-entropy cost function
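Why cross-entropy speeds things up: with the quadratic cost, the gradient with respect to a unit's pre-activation z carries a factor of the activation's derivative, which is nearly zero once the output saturates, so learning crawls; with cross-entropy that factor cancels and the gradient is just the prediction error. A standard derivation sketch for a single sigmoid unit a = σ(z) (the softmax multi-class case behaves the same way):

$$C_{\text{quad}} = \tfrac{1}{2}(a-y)^2 \quad\Rightarrow\quad \frac{\partial C_{\text{quad}}}{\partial z} = (a-y)\,\sigma'(z)$$

$$C_{\text{CE}} = -\,y\ln a - (1-y)\ln(1-a) \quad\Rightarrow\quad \frac{\partial C_{\text{CE}}}{\partial z} = a-y$$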
# # quadratic cost function
# loss = tf.reduce_mean(tf.square(y-prediction))
# define the cost with cross-entropy instead, which speeds up convergence
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
Training results:
Iter 0,Testing Accuracy 0.8239
Iter 1,Testing Accuracy 0.892
Iter 2,Testing Accuracy 0.9011
Iter 3,Testing Accuracy 0.9052
Iter 4,Testing Accuracy 0.9086
Iter 5,Testing Accuracy 0.9098
Iter 6,Testing Accuracy 0.912
……
Iter 15,Testing Accuracy 0.921
Iter 16,Testing Accuracy 0.9207
Iter 17,Testing Accuracy 0.9221
Iter 18,Testing Accuracy 0.9213
Iter 19,Testing Accuracy 0.922
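One caveat about the snippet above: tf.nn.softmax_cross_entropy_with_logits expects unnormalized logits, but prediction has already been through tf.nn.softmax, so softmax ends up applied twice. It still trains (the results above were produced this way), but the numerically stable form keeps the raw logits separate. A minimal sketch of that change, replacing the corresponding lines above:

# raw logits; softmax is applied only for reporting, not inside the loss
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# cross-entropy on the raw logits (the op applies softmax internally, once)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))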
2-1. A neural network with two hidden layers (784-500-300-10)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# load the dataset
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# size of each mini-batch
batch_size = 100
# number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# define the placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)

# build the network: two hidden layers, 784 -> 500 -> 300 -> 10
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)

# quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# variable initializer
init = tf.global_variables_initializer()

# boolean tensor: does each prediction match its label?
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(51):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob=1.0 keeps every activation, i.e. dropout is effectively off here
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        # training-set accuracy, computed for comparison (not printed below)
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(test_acc))
Results after 50 training epochs:
Iter 0,Testing Accuracy 0.8852
Iter 1,Testing Accuracy 0.9103
Iter 2,Testing Accuracy 0.9179
Iter 3,Testing Accuracy 0.9257
……
Iter 13,Testing Accuracy 0.9502
Iter 14,Testing Accuracy 0.9504
……
Iter 24,Testing Accuracy 0.9605
Iter 25,Testing Accuracy 0.9603
……
Iter 49,Testing Accuracy 0.9699
Iter 50,Testing Accuracy 0.9698
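Note that the training feed above uses keep_prob: 1.0, so dropout never actually fires in this run. To exercise dropout, feed a keep probability below 1 while training and 1.0 when evaluating. A minimal sketch, replacing the corresponding lines of the training loop (0.7 is an arbitrary illustrative value):

# train with dropout active: each hidden activation is kept with probability 0.7
sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
# evaluate with dropout off so every unit contributes
test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})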
2-2. Switching to the cross-entropy cost function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
After 50 training epochs:
Iter 0,Testing Accuracy 0.9239
……
Iter 4,Testing Accuracy 0.9529
……
Iter 7,Testing Accuracy 0.9619
……
Iter 14,Testing Accuracy 0.9714
……
Iter 49,Testing Accuracy 0.9769
Iter 50,Testing Accuracy 0.9765
2-3. Trying other optimizers
- Using AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)
Iter 0,Testing Accuracy 0.9255
……
Iter 50,Testing Accuracy 0.9255
(accuracy stalls at 0.9255 from the first epoch on; a 1e-2 learning rate appears too large for Adam on this model)
- Using MomentumOptimizer
train_step = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(loss)
Iter 0,Testing Accuracy 0.9422
Iter 1,Testing Accuracy 0.9573
……
Iter 5,Testing Accuracy 0.9708
……
Iter 50,Testing Accuracy 0.9793
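For reference, momentum keeps a running velocity of past gradients, which is what lets it converge faster than plain gradient descent here. As documented for tf.train.MomentumOptimizer, the update is

$$v_t = \mu\, v_{t-1} + \nabla_\theta L, \qquad \theta_t = \theta_{t-1} - \eta\, v_t$$

with momentum μ = 0.9 and learning rate η = 0.1 in the call above.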
2-4. Optimizing to 98%
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# load the dataset
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# size of each mini-batch
batch_size = 100
# number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# define the placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
lr = tf.Variable(0.001, dtype=tf.float32)  # learning rate

# build the network: two hidden layers, 784 -> 500 -> 300 -> 10
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)

# cross-entropy cost (note: the op expects raw logits; prediction is already
# softmaxed, so softmax is effectively applied twice -- see the note in 1-2)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# train with Adam at a decaying learning rate
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# variable initializer
init = tf.global_variables_initializer()

# boolean tensor: does each prediction match its label?
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(51):
        # decay the learning rate by 5% each epoch
        sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        learning_rate = sess.run(lr)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print("Iter " + str(epoch) + ",Testing Accuracy= " + str(acc) + " Learning Rate= " + str(learning_rate))
Training results:
Iter 0,Testing Accuracy= 0.9497 Learning Rate= 0.001
Iter 1,Testing Accuracy= 0.9629 Learning Rate= 0.00095
Iter 2,Testing Accuracy= 0.971 Learning Rate= 0.0009025
Iter 3,Testing Accuracy= 0.9664 Learning Rate= 0.000857375
Iter 4,Testing Accuracy= 0.973 Learning Rate= 0.00081450626
Iter 5,Testing Accuracy= 0.9766 Learning Rate= 0.0007737809
Iter 6,Testing Accuracy= 0.9756 Learning Rate= 0.0007350919
Iter 7,Testing Accuracy= 0.9753 Learning Rate= 0.0006983373
Iter 8,Testing Accuracy= 0.9767 Learning Rate= 0.0006634204
Iter 9,Testing Accuracy= 0.9786 Learning Rate= 0.0006302494
Iter 10,Testing Accuracy= 0.9782 Learning Rate= 0.0005987369
Iter 11,Testing Accuracy= 0.9781 Learning Rate= 0.0005688001
Iter 12,Testing Accuracy= 0.9767 Learning Rate= 0.0005403601
Iter 13,Testing Accuracy= 0.9796 Learning Rate= 0.0005133421
Iter 14,Testing Accuracy= 0.9807 Learning Rate= 0.000487675
Iter 15,Testing Accuracy= 0.9809 Learning Rate= 0.00046329122
……
Iter 49,Testing Accuracy= 0.9818 Learning Rate= 8.099471e-05
Iter 50,Testing Accuracy= 0.9825 Learning Rate= 7.6944976e-05
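The manual tf.assign(lr, 0.001 * (0.95 ** epoch)) above can also be written with TensorFlow's built-in schedule. A minimal sketch using tf.train.exponential_decay, assuming (as in this script) that one epoch is n_batch steps, so the rate drops by 5% per epoch:

global_step = tf.Variable(0, trainable=False)
# decay from 0.001 by a factor of 0.95 once per epoch (every n_batch steps)
lr = tf.train.exponential_decay(0.001, global_step,
                                decay_steps=n_batch, decay_rate=0.95,
                                staircase=True)
train_step = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)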
3. Image recognition with Inception-v3
Code:
import tensorflow as tf
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

class NodeLookup(object):
    def __init__(self):
        label_lookup_path = 'inception_model/imagenet_2012_challenge_label_map_proto.pbtxt'
        uid_lookup_path = 'inception_model/imagenet_synset_to_human_label_map.txt'
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        # read the uid -> human-readable-name file line by line
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        for line in proto_as_ascii_lines:
            # strip the trailing newline and split on '\t'
            line = line.strip('\n')
            parsed_items = line.split('\t')
            # class id string (n********)
            uid = parsed_items[0]
            # human-readable class name
            human_string = parsed_items[1]
            # map the n******** string to the class name
            uid_to_human[uid] = human_string

        # load the file mapping class ids 1-1000 to n******** strings
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                # class id in 1-1000
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # n******** string; strip the surrounding quotes and newline
                target_class_string = line.split(': ')[1]
                node_id_to_uid[target_class] = target_class_string[1:-2]

        # build the mapping from class id 1-1000 to class name
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            name = uid_to_human[val]
            node_id_to_name[key] = name
        return node_id_to_name

    # given a class id in 1-1000, return the class name
    def id_to_string(self, node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]

# build a graph holding Google's pretrained model
with tf.gfile.FastGFile('inception_model/classify_image_graph_def.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # build the id -> name lookup once, not per image
    node_lookup = NodeLookup()
    # walk the image directory
    for root, dirs, files in os.walk('images/'):
        for file in files:
            # load the image (must be JPEG, since it feeds DecodeJpeg)
            image_path = os.path.join(root, file)
            image_data = tf.gfile.FastGFile(image_path, 'rb').read()
            predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)  # flatten the result to 1-D
            # print the image path, then display the image
            print(image_path)
            img = Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()
            # indices of the five highest-scoring classes, best first
            top_k = predictions.argsort()[-5:][::-1]
            for node_id in top_k:
                # class name and its confidence
                human_string = node_lookup.id_to_string(node_id)
                score = predictions[node_id]
                print('%s (score = %.5f)' % (human_string, score))
            print()
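The script assumes the pretrained graph and the two label-map files already sit under inception_model/. They ship in the tarball used by TensorFlow's classify_image tutorial; a minimal fetch-and-extract sketch (the URL is the tutorial's, so verify it is still live):

import os
import tarfile
import urllib.request

# tarball containing classify_image_graph_def.pb and both label-map files
URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
DEST = 'inception_model'

os.makedirs(DEST, exist_ok=True)
archive = os.path.join(DEST, os.path.basename(URL))
if not os.path.exists(archive):
    urllib.request.urlretrieve(URL, archive)
tarfile.open(archive, 'r:gz').extractall(DEST)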
Recognition results (top-5 predictions for each test image):
images/man.jpg
Windsor tie (score = 0.73132)
suit, suit of clothes (score = 0.19451)
stole (score = 0.01027)
jean, blue jean, denim (score = 0.00458)
wool, woolen, woollen (score = 0.00434)

airliner (score = 0.80073)
space shuttle (score = 0.05414)
wing (score = 0.02091)
airship, dirigible (score = 0.00550)
warplane, military plane (score = 0.00169)

lakeside, lakeshore (score = 0.93102)
breakwater, groin, groyne, mole, bulwark, seawall, jetty (score = 0.01073)
goose (score = 0.00414)
boathouse (score = 0.00362)
valley, vale (score = 0.00271)

acoustic guitar (score = 0.97403)
banjo (score = 0.00399)
electric guitar (score = 0.00208)
pick, plectrum, plectron (score = 0.00061)
garter snake, grass snake (score = 0.00035)

sports car, sport car (score = 0.62812)
convertible (score = 0.14632)
racer, race car, racing car (score = 0.13355)
car wheel (score = 0.00704)
limousine, limo (score = 0.00574)

golden retriever (score = 0.94793)
Labrador retriever (score = 0.00335)
flat-coated retriever (score = 0.00066)
Tibetan mastiff (score = 0.00063)
Leonberg (score = 0.00061)