Customizing Neural Network Convolution Kernel Weights in PyTorch

Published by 撫琴塵世客 on 2020-05-02

1. Customizing neural network convolution kernel weights

  Neural networks are beloved by deep learning practitioners, and one big reason is their convenience: you can assemble a network like stacking building blocks, and while assembling it you only need to think about things like kernel size, input and output channel counts, and the convolution mode.

  Once you are used to the built-in parameters, though, the moment you need to define your own kernel weights it suddenly feels like there is nowhere to start. I ran into exactly this problem when I was new to neural networks and stepped into plenty of pits along the way. Worse, I searched everywhere and never found anyone sharing a solution. So I am writing my approach down here, hoping it helps you.

  Enough talk, let's get started...

2. Defining the convolution kernel weights

  In my case the custom weights are DTT coefficient kernels. Here is the weight code:

2.1 DTT coefficient weight code

  def dtt_matrix(n): this function builds the n*n DTT coefficient matrix; I use an 8*8 matrix.

  def dtt_kernel(out_channels, in_channels, kernel_size): this function builds the weight tensor, which has four dimensions (output channels, input channels, kernel height, kernel width). There are quite a few details to get right here, and the best way to learn them is to step into the pits yourself, so I won't dig into every one of them.

import numpy as np
import torch
import torch.nn as nn


# ================================
# DTT coefficient matrix of n * n
# ================================
def dtt_matrix(n):
    dtt_coe = np.zeros([n, n], dtype='float32')
    for i in range(0, n):
        dtt_coe[0, i] = 1/np.sqrt(n)
        dtt_coe[1, i] = (2*i + 1 - n)*np.sqrt(3/(n*(np.power(n, 2) - 1)))
    for i in range(1, n-1):
        dtt_coe[i+1, 0] = -np.sqrt((n-i-1)/(n+i+1)) * np.sqrt((2*(i+1)+1)/(2*(i+1)-1)) * dtt_coe[i, 0]
        dtt_coe[i+1, 1] = (1 + (i+1)*(i+2)/(1-n)) * dtt_coe[i+1, 0]
        dtt_coe[i+1, n-1] = np.power(-1, i+1) * dtt_coe[i+1, 0]
        dtt_coe[i+1, n-2] = np.power(-1, i+1) * dtt_coe[i+1, 1]
        for j in range(2, int(n/2)):
            t1 = (-(i+1) * (i+2) - (2*j-1) * (j-n-1) - j)/(j*(n-j))
            t2 = ((j-1) * (j-n-1))/(j * (n-j))
            dtt_coe[i+1, j] = t1 * dtt_coe[i+1, j-1] + t2 * dtt_coe[i+1, j-2]
            dtt_coe[i+1, n-j-1] = np.power(-1, i-1) * dtt_coe[i+1, j]
    return dtt_coe


# ===============================================================
# DTT coefficient matrix of (out_channels * in_channels * n * n)
# ===============================================================
def dtt_kernel(out_channels, in_channels, kernel_size):
    dtt_coe = dtt_matrix(kernel_size)
    dtt_coe = np.array(dtt_coe)

    dtt_weight = np.zeros([out_channels, in_channels, kernel_size, kernel_size], dtype='float32')
    temp = np.zeros([out_channels, in_channels, kernel_size, kernel_size], dtype='float32')

    # Each position in the kernel_size*kernel_size grid gets one 2-D DTT basis
    # (the outer product of a row and a column of the coefficient matrix).
    # Note that this indexing assumes out_channels == kernel_size * kernel_size.
    order = 0
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            dtt_row = dtt_coe[i, :]
            dtt_col = dtt_coe[:, j]
            dtt_row = dtt_row.reshape(len(dtt_row), 1)
            dtt_col = dtt_col.reshape(1, len(dtt_col))
            # print("dtt_row: ", dtt_row)
            # print("dtt_col: ", dtt_col)
            # print("i:", i, "j: ", j)
            temp[order, 0, :, :] = np.dot(dtt_row, dtt_col)
            order = order + 1
    for i in range(0, in_channels):
        for j in range(0, out_channels):
            # dtt_weight[j, i, :, :] = flip_180(temp[j, 0, :, :])
            dtt_weight[j, i, :, :] = temp[j, 0, :, :]
    return torch.tensor(dtt_weight)
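  If you want to convince yourself the weights come out as intended, a minimal sanity check like the one below can help. This snippet is my own addition, assuming dtt_matrix and dtt_kernel from above are in scope: the DTT basis is expected to be roughly orthonormal, and dtt_kernel should return a tensor of shape (out_channels, in_channels, kernel_size, kernel_size).

# Sanity check (my addition): DTT rows should be roughly orthonormal,
# and the kernel tensor should have the expected 4-D shape.
coe = dtt_matrix(8)
print(np.round(np.dot(coe, coe.T), 3))   # expect something close to np.eye(8)

w = dtt_kernel(64, 2, 8)
print(w.shape)                           # torch.Size([64, 2, 8, 8])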

 

2.2 'same'-style convolution

  If you need the feature map size to stay the same before and after the convolution, i.e. 'same'-style convolution, you can use the following module directly. (A note: I borrowed this from another author and didn't record the source at the time; my thanks to them, and if you recognize it as yours, please leave a comment.)

import torch.utils.data
from torch.nn import functional as F
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _single, _pair, _triple

class _ConvNd(Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)

class Conv2d(_ConvNd):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)
    def forward(self, input):
        return conv2d_same_padding(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

# Custom conv2d, because older PyTorch versions don't have a padding='same' option.

def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=1):
    # Compute the asymmetric padding needed so that, at the given stride,
    # the output spatial size matches TensorFlow-style 'same' padding.
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    out_rows = (input_rows + stride[0] - 1) // stride[0]

    input_cols = input.size(3)
    filter_cols = weight.size(3)
    out_cols = (input_cols + stride[1] - 1) // stride[1]

    padding_rows = max(0, (out_rows - 1) * stride[0] +
                       (filter_rows - 1) * dilation[0] + 1 - input_rows)
    rows_odd = (padding_rows % 2 != 0)
    padding_cols = max(0, (out_cols - 1) * stride[1] +
                       (filter_cols - 1) * dilation[1] + 1 - input_cols)
    cols_odd = (padding_cols % 2 != 0)
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride,
                  padding=(padding_rows // 2, padding_cols // 2),
                  dilation=dilation, groups=groups)
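  A quick way to see the 'same' behaviour (this check is my own addition): with stride 1, the output keeps the input's spatial size even for an 8*8 kernel.

# Usage sketch (my addition): spatial size is preserved with stride 1.
x = torch.randn(1, 2, 32, 32)
conv = Conv2d(2, 64, kernel_size=8)   # the Conv2d defined above, not torch.nn.Conv2d
print(conv(x).shape)                  # expect torch.Size([1, 64, 32, 32])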

 

2.3 Assigning the weights to the convolution kernel

  This is probably the part you care about most, so let's get right to it.

  Here is a simple network model (one fixed convolution plus three fully connected layers, where the fully connected layers are 1*1 Conv2d). The code is commented, so it should be easy to follow.

import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dtt_kernel
import util
import paddingSame

# Define the weights
dtt_weight1 = dtt_kernel.dtt_kernel(64, 2, 8)


class DttNet(nn.Module):
    def __init__(self):
        super(DttNet, self).__init__()
        self.conv1 = paddingSame.Conv2d(2, 64, 8)
        # Assign the custom weights to the convolution kernel and freeze them
        self.conv1.weight = nn.Parameter(dtt_weight1, requires_grad=False)
        self.fc1 = util.fc(64, 512, 1)
        self.fc2 = util.fc(512, 128, 1)
        self.fc3 = util.fc(128, 2, 1, last=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
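  The key line is self.conv1.weight = nn.Parameter(dtt_weight1, requires_grad=False): it both installs the custom weights and freezes them. If you don't need the 'same' padding, the same pattern works with a plain nn.Conv2d; here is a minimal, self-contained sketch (my own addition, with random stand-in weights rather than the DTT kernel):

import torch
import torch.nn as nn

# Minimal sketch of the same pattern with a plain nn.Conv2d (my addition).
conv = nn.Conv2d(2, 64, kernel_size=8, bias=False)
custom_w = torch.randn(64, 2, 8, 8)                        # shape must match conv.weight
conv.weight = nn.Parameter(custom_w, requires_grad=False)  # assign and freeze the kernel

x = torch.randn(1, 2, 32, 32)
print(conv(x).shape)              # torch.Size([1, 64, 25, 25]) -- no 'same' padding here
print(conv.weight.requires_grad)  # False: this kernel will not be updated during training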

 

2.4 My util module, for completeness

import torch.nn as nn


def conv(in_channels, out_channels, kernel_size, stride=1, dilation=1, batch_norm=True):
    if batch_norm:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.ReLU()
        )


def fc(in_channels, out_channels, kernel_size, stride=1, bias=True, last=False):
    if last:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size // 2)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
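  For reference, fc(64, 512, 1) just wraps a 1*1 Conv2d (plus BatchNorm and ReLU unless last=True), so it behaves like a per-pixel fully connected layer on 4-D feature maps. A quick check (my own addition):

import torch

# Quick check (my addition): a 1*1 conv block only changes the channel count.
block = fc(64, 512, 1)
y = block(torch.randn(1, 64, 32, 32))
print(y.shape)   # torch.Size([1, 512, 32, 32])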

 

 

3. Summary

  And that's a wrap. I hope you got something out of this. If anything is unclear, leave a comment below; I check my blog regularly. If I've missed anything, let me know in the comments as well. See you next time.

 
