Reposted from: https://www.cnblogs.com/miraclepbc/p/14360807.html
ResNet pretrained model
Unlike the VGG model in the earlier notes, the ResNet model requires us to overwrite its final fully connected layer directly.
First, take a look at the structure of the ResNet model:
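The structure was shown as a screenshot in the original post; to reproduce it yourself, a minimal sketch that loads the pretrained weights and prints the model:

from torchvision import models

model = models.resnet101(pretrained=True)
print(model)     # the final module in the printout is the fc classifier head
print(model.fc)  # Linear(in_features=2048, out_features=1000, bias=True)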
We first need to set requires_grad = False on all of the parameters, then define a new fc layer to overwrite the original one. The newly defined fc layer has requires_grad = True by default:
# freeze all pretrained parameters
for p in model.parameters():
    p.requires_grad = False

# replace the head with a 4-class fully connected layer
in_f = model.fc.in_features
model.fc = nn.Linear(in_f, 4)
When defining the optimizer, note that the parameters passed in are those of the fc layer only, not of all the layers:

optimizer = torch.optim.Adam(model.fc.parameters(), lr=0.001)
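Before training, a quick sanity check (not in the original post) confirms that only the new head is trainable:

# only fc's weight and bias should require grad: in_f * 4 + 4 parameters
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print('trainable:', trainable, '/', total)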
Fine-tuning
The general steps of fine-tuning are:
- Redefine the fully connected layer
- Train the newly defined fully connected layer
- Unfreeze some of the other layers (a sketch of selective unfreezing follows the code below)
- Train the whole model
Note: fine-tuning can only be done after the new fully connected layer has been trained, so in effect the whole model is trained twice. At this point the optimizer receives the parameters of the entire model.
Code:

# unfreeze all parameters so the whole network is trained
for param in model.parameters():
    param.requires_grad = True

extend_epoch = 30

# use a lower learning rate so the pretrained weights are only adjusted gently
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
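The code above unfreezes every layer. To follow the "unfreeze some of the other layers" step literally, you can restrict training to the last residual stage plus the head; a minimal sketch, assuming the standard torchvision ResNet attribute names (layer4, fc):

# unfreeze only the last residual stage and the classifier head
for name, param in model.named_parameters():
    param.requires_grad = name.startswith('layer4') or name.startswith('fc')

# pass only the trainable parameters to the optimizer
optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=0.0001
)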
Full code
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
import os
import shutil
%matplotlib inline
train_transform = transforms.Compose([
    transforms.Resize(224),
    transforms.RandomCrop(192),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(0.2),
    transforms.ColorJitter(brightness=0.5),
    transforms.ColorJitter(contrast=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
test_transform = transforms.Compose([
    transforms.Resize((192, 192)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
train_ds = datasets.ImageFolder(
    "E:/datasets2/29-42/29-42/dataset2/4weather/train",
    transform=train_transform
)
test_ds = datasets.ImageFolder(
    "E:/datasets2/29-42/29-42/dataset2/4weather/test",
    transform=test_transform
)
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=8, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=8)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = models.resnet101(pretrained=True)
for p in model.parameters():
    p.requires_grad = False           # freeze the pretrained backbone
in_f = model.fc.in_features
model.fc = nn.Linear(in_f, 4)         # new 4-class head, trainable by default
model = model.to(device)              # move the model to the same device as the data
loss_func = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.fc.parameters(), lr=0.001)
epochs = 30
# decay the learning rate by a factor of 10 every 7 epochs
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
def fit(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0

    # training phase
    model.train()
    for x, y in trainloader:
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        loss = loss_func(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
    exp_lr_scheduler.step()
    epoch_acc = correct / total
    epoch_loss = running_loss / len(trainloader.dataset)

    # evaluation phase
    test_correct = 0
    test_total = 0
    test_running_loss = 0
    model.eval()
    with torch.no_grad():
        for x, y in testloader:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_func(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
    epoch_test_acc = test_correct / test_total
    epoch_test_loss = test_running_loss / len(testloader.dataset)

    print('epoch: ', epoch,
          'loss: ', round(epoch_loss, 3),
          'accuracy: ', round(epoch_acc, 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy: ', round(epoch_test_acc, 3))

    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc
train_loss = []
train_acc = []
test_loss = []
test_acc = []

# stage 1: train only the new fc layer
for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch, model, train_dl, test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
# stage 2: unfreeze all parameters and fine-tune the whole model with a lower lr
for param in model.parameters():
    param.requires_grad = True
extend_epoch = 30
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
# re-create the scheduler so it steps the new optimizer rather than the discarded one
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in range(extend_epoch):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch, model, train_dl, test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)
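The notebook imports matplotlib and turns on %matplotlib inline, but the plotting cells did not make it into the post. A minimal sketch for visualizing the fine-tuning curves from the lists collected above:

# plot loss and accuracy curves for the fine-tuning stage
epochs_range = range(1, extend_epoch + 1)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_loss, label='train_loss')
plt.plot(epochs_range, test_loss, label='test_loss')
plt.xlabel('epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_acc, label='train_acc')
plt.plot(epochs_range, test_acc, label='test_acc')
plt.xlabel('epoch')
plt.legend()
plt.show()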