Dive into Deep Learning (2): Multilayer Perceptron from Scratch


Author: Tyan
Blog: noahsnail.com  |  CSDN  |  簡書

Note: This post contains my course notes for Mu Li's Dive into Deep Learning!

# Import mxnet
import mxnet as mx

# Set the random seed for reproducibility
mx.random.seed(2)

from mxnet import gluon
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import image
from utils import load_data_fashion_mnist, accuracy, evaluate_accuracy, SGD

Getting the Data

# Batch size
batch_size = 256

# Get the training and test data iterators
train_data, test_data = load_data_fashion_mnist(batch_size)
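
load_data_fashion_mnist is a helper from the course's utils module. A minimal sketch of what it might look like, assuming gluon's built-in FashionMNIST dataset (the book's actual helper may differ slightly):

def load_data_fashion_mnist_sketch(batch_size):
    # Hypothetical reimplementation of the utils helper
    def transform(data, label):
        # Scale pixel values to [0, 1] and convert types
        return data.astype('float32') / 255, label.astype('float32')
    mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
    mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)
    train_iter = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
    test_iter = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)
    return train_iter, test_iter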

Multilayer Perceptron

# Input size: each image is 28 x 28 pixels
num_inputs = 28 * 28
# Output size: 10 classes
num_outputs = 10

# Number of hidden units
num_hidden = 256

# Standard deviation of the normal distribution used for initialization
weight_scale = 0.01

# Randomly initialize the input-to-hidden weights
W1 = nd.random_normal(shape=(num_inputs, num_hidden), scale=weight_scale)
b1 = nd.zeros(num_hidden)

# Randomly initialize the hidden-to-output weights
W2 = nd.random_normal(shape=(num_hidden, num_outputs), scale=weight_scale)
b2 = nd.zeros(num_outputs)

# Parameter list
params = [W1, b1, W2, b2]

# Attach gradient buffers so autograd can compute gradients for each parameter
for param in params:
    param.attach_grad()
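
As a quick sanity check (illustrative), the parameter shapes and total parameter count can be inspected:

# Expected: [(784, 256), (256,), (256, 10), (10,)]
print([param.shape for param in params])
# Total: 784*256 + 256 + 256*10 + 10 = 203530
print(sum(param.size for param in params))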

Activation Function

# Use ReLU as the activation function: relu(x) = max(x, 0)
def relu(X):
    return nd.maximum(X, 0)
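
A quick check (illustrative) that relu zeroes out negative entries and leaves positive ones unchanged:

x = nd.array([[-1.0, 0.0], [2.0, -3.0]])
print(relu(x))  # Expected values: [[0, 0], [2, 0]]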

Defining the Model

def net(X):
    # Flatten each image into a vector of length num_inputs
    X = X.reshape((-1, num_inputs))
    # Compute the hidden layer activations
    h1 = relu(nd.dot(X, W1) + b1)
    # Compute the output logits
    output = nd.dot(h1, W2) + b2
    return output
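
Feeding a dummy batch through the network verifies the output shape (illustrative):

X = nd.random_normal(shape=(4, 1, 28, 28))
print(net(X).shape)  # (4, 10): one row of 10 class scores per image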

Softmax and Cross-Entropy Loss

# Define the cross-entropy loss; gluon fuses softmax and cross-entropy into one op
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
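
The fused loss is preferred over computing softmax and then log separately, because exp() can overflow for large logits. A minimal sketch of the underlying computation using the log-sum-exp trick (illustrative, not gluon's actual implementation):

def stable_softmax_cross_entropy(output, label):
    # Subtract the row-wise max so exp() never overflows
    shifted = output - nd.max(output, axis=1, keepdims=True)
    # Log-probabilities via the log-sum-exp trick
    log_prob = shifted - nd.log(nd.sum(nd.exp(shifted), axis=1, keepdims=True))
    # Negative log-probability of the true class for each sample
    return -nd.pick(log_prob, label)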

Training

# Number of epochs
epochs = 5

# Learning rate
learning_rate = 0.1

# Training loop
for epoch in range(epochs):
    # Cumulative training loss
    train_loss = 0.0
    # Cumulative training accuracy
    train_acc = 0.0
    # Iterate over mini-batches
    for data, label in train_data:
        # Record the computation graph for automatic differentiation
        with autograd.record():
            # Forward pass
            output = net(data)
            # Compute the loss
            loss = softmax_cross_entropy(output, label)
        # Backpropagate to compute gradients
        loss.backward()
        # Gradient descent step; divide the learning rate by the batch size
        # because the gradients are summed over the batch
        SGD(params, learning_rate/batch_size)
        # Accumulate training loss
        train_loss += nd.mean(loss).asscalar()
        # Accumulate training accuracy
        train_acc += accuracy(output, label)

    # Accuracy on the test set
    test_acc = evaluate_accuracy(test_data, net)

    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
Epoch 0. Loss: 1.042064, Train acc 0.630976, Test acc 0.776142
Epoch 1. Loss: 0.601578, Train acc 0.788862, Test acc 0.815204
Epoch 2. Loss: 0.525148, Train acc 0.816556, Test acc 0.835136
Epoch 3. Loss: 0.486619, Train acc 0.829427, Test acc 0.833033
Epoch 4. Loss: 0.459395, Train acc 0.836104, Test acc 0.835136
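
For completeness, the SGD, accuracy, and evaluate_accuracy helpers imported from utils are not shown above. Plausible definitions, consistent with how they are called here (the course's actual utils may differ):

def SGD(params, lr):
    # In-place SGD update on each parameter
    for param in params:
        param[:] = param - lr * param.grad

def accuracy(output, label):
    # Fraction of samples whose highest-scoring class matches the label
    return nd.mean(output.argmax(axis=1) == label).asscalar()

def evaluate_accuracy(data_iterator, net):
    # Average accuracy over all batches of an iterator
    acc = 0.0
    for data, label in data_iterator:
        output = net(data)
        acc += accuracy(output, label)
    return acc / len(data_iterator)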
