1.迭代次數對精確度的影響

老秦子弟發表於2021-01-05

程式碼環境:win10+anaconda2020.02+TensorFlow1.14.0+Keras2.2.5

原始碼如下

from __future__ import print_function
import numpy as np
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPool2D
from keras.utils import np_utils
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
# Configure TensorFlow 1.x to allocate GPU memory on demand rather than
# reserving the whole card up front, then hand the session to Keras.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
set_session(tf.Session(config=config))
# Seed NumPy for reproducible shuffling/augmentation.
# NOTE(review): TensorFlow's own RNG is not seeded here, so runs are still
# not fully deterministic — confirm whether that matters for the experiment.
np.random.seed(42)
print("Initialized!")



# Training hyper-parameters and network geometry.
batch_size = 512                # samples per gradient step
nb_classes = 10                 # CIFAR-10 has 10 categories
nb_epoch = 5000                 # deliberately long run; the article studies accuracy vs. epoch count
img_rows, img_cols = 32, 32     # CIFAR-10 image size
nb_filters = [32, 32, 64, 64]   # filter counts for the four Conv2D layers, in order
pool_size = (2,2)               # max-pooling window
kernel_size = (3, 3)            # convolution kernel size

# Load CIFAR-10 (auto-downloaded to ~/.keras/datasets — on Windows that is
# C:\Users\<user>\.keras\datasets) and scale pixel values into [0, 1].
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255

input_shape = (img_rows, img_cols, 3)  # (height, width, channels); kept for reference
# One-hot encode the integer labels to match categorical_crossentropy.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)


# Upstream: generator-based input pipeline with light training-set
# augmentation — random 10% shifts plus horizontal flips. All normalisation
# and whitening options stay off (pixels were already scaled to [0, 1]).
augmentation = dict(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=0,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=False,
)
datagen = ImageDataGenerator(**augmentation)

# fit() computes dataset statistics used only by the featurewise/ZCA options,
# all of which are disabled here; kept to follow the standard Keras workflow.
datagen.fit(X_train)


# Assemble the network: two conv blocks (conv-relu-conv-relu-pool-dropout)
# followed by a fully connected head with softmax over the 10 classes.
model = Sequential([
    Conv2D(nb_filters[0], kernel_size, padding='same',
           input_shape=X_train.shape[1:]),
    Activation('relu'),
    Conv2D(nb_filters[1], kernel_size),
    Activation('relu'),
    MaxPool2D(pool_size=pool_size),
    Dropout(0.25),

    Conv2D(nb_filters[2], kernel_size, padding='same'),
    Activation('relu'),
    Conv2D(nb_filters[3], kernel_size),
    Activation('relu'),
    MaxPool2D(pool_size=pool_size),
    Dropout(0.25),

    Flatten(),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(nb_classes),
    Activation('softmax'),
])


# Downstream: compile with Adam at a small fixed learning rate.
# Categorical cross-entropy matches the one-hot labels; accuracy is
# tracked for reporting.
optimizer = Adam(lr=0.0001)
model.compile(
    optimizer=optimizer,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)


# Finally, train the model and evaluate its accuracy.
# Training feeds the augmented generator, checkpoints the best weights by
# validation loss, and logs to TensorBoard.
callbacks = [
    ModelCheckpoint("cifar10_best.h5", monitor='val_loss', verbose=0,
                    save_best_only=True),
    TensorBoard(log_dir="./logs"),
]
model.fit_generator(
    datagen.flow(X_train, Y_train, batch_size=batch_size),
    steps_per_epoch=X_train.shape[0] // batch_size,
    epochs=nb_epoch,
    verbose=1,
    validation_data=(X_test, Y_test),
    callbacks=callbacks,
)

# Score the final model on the untouched test set and report the results.
loss_and_acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', loss_and_acc[0])
print("Accuracy: %.2f%%" % (loss_and_acc[1]*100))
print("Compiled!")

程式碼來自《深度學習技術影像處理入門》第八章。
迭代次數截圖

將近5000次左右的準確度情況:
將近5000次左右的準確度情況

迭代3000多次時準確度的情況
迭代3000多次時準確度的情況

在300次左右,準確率就到達82%。
迭代300次左右時準確度的情況截圖

想要說明
(1)在3000多次後,繼續迭代,準確度基本上到達87%這個瓶頸了,如果要想繼續提升精確度,就要從其他地方下手了。
(2)在300次後,為了提升精確度,提升迭代次數取得的效果很小。僅僅從82%提升到87%。但是迭代300次,和5000次的時間相差了很多。戰神筆記本rtx2060顯示卡,計算300次大約75分鐘左右。迭代5000次卻需要21個小時左右,時間上是很不划算的。

相關文章