Implementing a Convolutional Neural Network Model with TensorFlow

Posted by 獵手家園 on 2017-05-10

First, a quick look at the overall structure of a convolutional neural network model:

A convolutional neural network (CNN) is built from an input layer, convolutional layers, activation functions, pooling layers, and fully connected layers, i.e. INPUT-CONV-RELU-POOL-FC.
Pooling layer: a layer introduced to reduce the amount of computation and the dimensionality of the data, as the short sketch below illustrates.
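For example, a single 2x2 max-pooling step with stride 2 already halves both spatial dimensions. Here is a minimal sketch (the placeholder and tensor names are illustrative, not part of the original post) showing that effect:

import tensorflow as tf

# a batch of 28x28 single-channel images
imgs   = tf.placeholder("float", [None, 28, 28, 1])
pooled = tf.nn.max_pool(imgs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
print(pooled.get_shape().as_list())   # [None, 14, 14, 1] -- a quarter of the original activations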

 

The code is as follows:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download/read MNIST into a local 'data/' directory, with one-hot labels
mnist = input_data.read_data_sets('data/', one_hot=True)

n_input  = 784        # 28*28 grayscale images
n_output = 10         # a 10-way classification task
weights  = {
    #'name': tf.Variable(tf.random_normal([filter H, filter W, input depth, number of output feature maps], stddev=0.1)),
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.1)),
    #fully connected layers: tf.Variable(tf.random_normal([feature map H * feature map W * depth, number of output units], stddev=0.1)),
    'wd1': tf.Variable(tf.random_normal([7*7*128, 1024], stddev=0.1)),
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
biases   = {
    #'name': tf.Variable(tf.random_normal([number of bias terms in that layer], stddev=0.1)),
    'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
    'bc2': tf.Variable(tf.random_normal([128], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
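As a sanity check on the 7*7*128 figure used for 'wd1': with SAME padding and stride 1 the convolutions keep the 28x28 spatial size, and each 2x2, stride-2 max pool halves it, so two pooling layers give 28 -> 14 -> 7. A small sketch (the helper name pooled_size is my own, not from the post) to verify the arithmetic:

def pooled_size(size, num_pools, pool_stride=2):
    # SAME padding with stride 2: output size is ceil(input / 2)
    for _ in range(num_pools):
        size = (size + pool_stride - 1) // pool_stride
    return size

side = pooled_size(28, num_pools=2)   # 7
print(side * side * 128)              # 6272, matching the first dimension of weights['wd1']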

#Convolutional neural network
def conv_basic(_input, _w, _b, _keepratio):
    #Reshape the flat input into a 4-D tensor in [batch, H, W, channels] format
    #_input_r = tf.reshape(input, shape=[batch size, H, W, depth]); -1 lets TensorFlow infer the batch size
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])

    #First convolutional layer
    #_conv1 = tf.nn.conv2d(input, weights, strides=[batch, H, W, channels], padding='SAME' is usually a good choice)
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    #_conv1 = non-linear activation(add the bias to the convolution output)
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))
    #First pooling layer
    #_pool1 = tf.nn.max_pool(_conv1, ksize=[batch, H, W, channels], strides=[1, 2, 2, 1], padding='SAME')
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    #Dropout: randomly drop some nodes so that not every neuron takes part in training
    #_pool_dr1 = tf.nn.dropout(_pool1, keep probability)
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    
    #Second convolutional layer
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    
    #Fully connected layers
    #Flatten the pooled feature maps into a 2-D tensor
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    #First fully connected layer
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    #Second fully connected layer (logits)
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    #Return all intermediate tensors
    out = { 'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool1_dr1': _pool_dr1,
        'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'dense1': _dense1,
        'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out
    }
    return out
print ("CNN READY")
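To confirm the shapes flowing through the network before training, one option is to build the graph once with a dummy placeholder and print the static shape of each tensor that conv_basic returns. This is only a sketch (the names _x and _probe are illustrative, and 0.7 is an arbitrary keep ratio):

_x = tf.placeholder("float", [None, n_input])
_probe = conv_basic(_x, weights, biases, 0.7)
for name in ['input_r', 'conv1', 'pool1', 'conv2', 'pool2', 'dense1', 'fc1', 'out']:
    print(name, _probe[name].get_shape().as_list())
# Expected: conv1 [None, 28, 28, 64], pool1 [None, 14, 14, 64],
#           conv2 [None, 14, 14, 128], pool2 [None, 7, 7, 128], out [None, 10]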

#Set up the loss function & optimizer (detailed explanation omitted; see the earlier post)
learning_rate = 0.001
x         = tf.placeholder("float", [None, n_input])
y         = tf.placeholder("float", [None, n_output])
keepratio = tf.placeholder("float")

pred   = conv_basic(x, weights, biases, keepratio)['out']
cost   = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optm   = tf.train.AdamOptimizer(learning_rate).minimize(cost) # Adam optimizer
accr   = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1), tf.argmax(y,1)), tf.float32))
init   = tf.global_variables_initializer()
print ("Network Ready!")

#Training (detailed explanation omitted; see the earlier post)
training_epochs = 5
batch_size      = 16
display_step    = 1
sess = tf.Session()
sess.run(init)
print ("Start optimization")
for epoch in range(training_epochs):
    avg_cost = 0.
    #total_batch = int(mnist.train.num_examples/batch_size)
    total_batch = 100
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data (keep 70% of the nodes during training)
        feeds = {x: batch_xs, y: batch_ys, keepratio: 0.7}
        sess.run(optm, feed_dict=feeds)
        # Compute average loss
        avg_cost += sess.run(cost, feed_dict=feeds)/total_batch
    # Display logs per epoch step
    if epoch % display_step == 0: 
        print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        # Evaluate with dropout disabled (keep ratio 1.0)
        feeds = {x: batch_xs, y: batch_ys, keepratio: 1.}
        train_acc = sess.run(accr, feed_dict=feeds)
        print (" Training accuracy: %.3f" % (train_acc))
        # Evaluate on a slice of the test set (kept small to limit memory use)
        feeds = {x: mnist.test.images[:1000], y: mnist.test.labels[:1000], keepratio: 1.}
        test_acc = sess.run(accr, feed_dict=feeds)
        print (" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished.")        

 
