【PyTorch基礎教程7】多維特徵input(學不會來打我啊)

專注的阿熊發表於2021-10-19

# -*- coding: utf-8 -*-

"""

Created on Mon Oct 18 10:18:24 2021

@author: 86493

"""

import torch

import torch.nn as nn

import numpy as np

import matplotlib.pyplot as plt

# float32 is enough here — no need for double precision (most GPUs are
# far slower in float64 anyway).
# NOTE(review): delimiter is a single space, but the usual diabetes.csv
# from this tutorial series is comma-separated — confirm against the file.
xy = np.loadtxt(
    'diabetes.csv',
    delimiter=' ',
    dtype=np.float32,
)

# Every column except the last one is an input feature.
x_data = torch.from_numpy(xy[:, :-1])

# Indexing the last column with [-1] (a list) keeps it as an (N, 1)
# matrix; plain -1 would yield a 1-D vector instead.
y_data = torch.from_numpy(xy[:, [-1]])

# Per-epoch training losses, collected for plotting after training.
losslst = []

class Model(nn.Module):
    """Fully connected 9 -> 6 -> 4 -> 1 network with a sigmoid after
    every layer, so the output is a single value in (0, 1)."""

    def __init__(self):
        super(Model, self).__init__()
        # Attribute names kept stable so state_dict keys do not change.
        self.linear1 = nn.Linear(9, 6)
        self.linear2 = nn.Linear(6, 4)
        self.linear3 = nn.Linear(4, 1)
        # nn.Sigmoid is a parameter-free Module — less error-prone than
        # the functional form used in the earlier logistic example.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map a (batch, 9) feature tensor to a (batch, 1) probability."""
        for layer in (self.linear1, self.linear2, self.linear3):
            x = self.sigmoid(layer(x))
        return x

model = Model()

# Binary cross-entropy loss, summed over the batch.
# Fix: size_average=False is long deprecated (and removed in modern
# PyTorch); reduction='sum' is the equivalent modern spelling.
criterion = nn.BCELoss(reduction='sum')

optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Single source of truth for the epoch count (was duplicated as a
# literal 10 in both the training loop and the plot below).
NUM_EPOCHS = 10

# Full-batch training — no mini-batches here; DataLoader comes in a
# later lesson of this tutorial series.
for epoch in range(NUM_EPOCHS):
    y_predict = model(x_data)
    loss = criterion(y_predict, y_data)
    # loss is a 0-dim tensor; .item() extracts the Python float.
    print(epoch, loss.item())
    losslst.append(loss.item())

    # Zero the accumulated gradients, backpropagate, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Plot the training curve.
plt.plot(range(NUM_EPOCHS), losslst)
plt.ylabel('Loss')
plt.xlabel('epoch')
plt.show()


來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/69946337/viewspace-2838188/,如需轉載,請註明出處,否則將追究法律責任。

相關文章