機器學習之迴歸分析--預測值

i see the future發表於2020-11-19

結論:在一般資料上,標準迴歸、嶺迴歸、前向逐步迴歸效果差不多。可利用交叉分析比較出相對較優的模型。一般來說,訓練資料的相關係數會高於測試資料的相關係數。

1.引入regression.py和匯入資料

# 引入regression.py
import regression
import pandas as pd
from numpy import *   # 提供 mat、corrcoef 等函式

df = pd.read_excel('Row_data-array.xlsx')
xArr = df.iloc[:,:-1].values
yArr = df.iloc[:,-1].values

print(xArr.shape)
print(yArr.shape)
#(1007, 9)
#(1007,)

df.head()

(此處原有資料表 df.head() 的截圖,未能嵌入。)

2. 最小二乘法的標準迴歸分析

ws = regression.standRegres(xArr,yArr)
ws
#matrix([[ 9.87108037e+00],
#        [ 1.33547703e+01],
#        [ 5.53508696e-02],
#        [ 2.54700518e-01],
#        [-2.48875176e-01],
#        [-1.42790710e+00],
#        [ 2.72100042e-03],
#        [ 2.76569295e-02],
#        [ 5.47646170e-01]])

# 求相關係數
xMat = mat(xArr);yMat = mat(yArr)
yHat = xMat*ws
corrcoef(yHat.T,yMat)
#array([[1.        , 0.76793521],
#       [0.76793521, 1.        ]])

# 預測值
yHat1 = mat(xArr[0])*ws
yHat1
#matrix([[327.12552844]])

3. 嶺迴歸

df = pd.read_excel('Row_data-array.xlsx')
xArr = df.iloc[:,:-1].values
yArr = df.iloc[:,-1].values

ridgeWeights = regression.ridgeTest2(xArr,yArr)

# 觀察係數的變化
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(ridgeWeights)
plt.show()

(此處原有嶺迴歸係數隨 lambda 變化的折線圖,未能嵌入。)

ridgeWeights[0]
#array([ 9.87107951e+00,  1.33547677e+01,  5.53508773e-02,  2.54700520e-01,
#       -2.48877259e-01, -1.42790686e+00,  2.72100114e-03,  2.76569309e-02,
#        5.47646173e-01])

# 預測值
xMat = mat(xArr)
yMat = mat(yArr).T
print(xMat[:2])
#[[  1.    2.  570.  380.    1.    8.  225.  275.  302.5]
# [  1.    3.  640.  640.    0.    9.  360.  340.  475. ]]
print(yMat[:2])
#[[345]
# [455]]

yHat = xMat*mat(ridgeWeights[0]).T
yHat
#matrix([[327.12552905],
#        [506.03196428],
#        [348.00154736],
#        ...,
#        [333.09528552],
#        [336.24559748],
#        [560.58028032]])

# 計算嶺迴歸的相關係數
corrcoef(yHat.T,yMat.T)
#array([[1.        , 0.76793521],
#       [0.76793521, 1.        ]])

4.前向逐步迴歸

df = pd.read_excel('Row_data-array.xlsx')
xArr = df.iloc[:,:-1].values
yArr = df.iloc[:,-1].values

4.1 第1種引數:步長0.05,迭代1000次

# 第1種引數:步長0.05,迭代1000次
wMat = regression.stageWise2(xArr,yArr,0.05,1000)
# 列印前向逐步迴歸的係數變化
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(wMat)
plt.legend(range(wMat.shape[1]))
plt.show()

(此處原有前向逐步迴歸係數變化圖,未能嵌入。)

# 列印係數w
wMat[-1]
#array([ 8.7 , -1.2 ,  0.65,  0.1 , -2.65, -4.35,  0.1 ,  0.  ,  0.05])

# 預測值與真實值之間的相關程度(注意:此處沿用第2節最小二乘法的 ws,而非本節的 wMat[-1])
xMat = mat(xArr);yMat = mat(yArr)
yHat = xMat*ws
corrcoef(yHat.T,yMat)
#array([[1.        , 0.76793521],
#       [0.76793521, 1.        ]])

# 預測值
yHat1 = mat(xArr[0])*ws
yHat1.A[0][0]
#327.12552843866297

4.2 第2種引數:步長0.1,迭代1000次

# 第2種引數:步長0.1,迭代1000次
wMat = regression.stageWise2(xArr,yArr,0.1,1000)
# 列印前向逐步迴歸的係數變化
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(wMat)
plt.legend(range(wMat.shape[1]))
plt.show()

(此處原有前向逐步迴歸係數變化圖,未能嵌入。)

# 列印係數w
wMat[-1]
#array([ 8.8, -2.3,  0.7,  0.1, -3.6, -4.8,  0.1,  0. ,  0. ])

# 預測值與真實值之間的相關程度(注意:此處沿用第2節最小二乘法的 ws,而非本節的 wMat[-1])
xMat = mat(xArr);yMat = mat(yArr)
yHat = xMat*ws
corrcoef(yHat.T,yMat)
#array([[1.        , 0.76793521],
#       [0.76793521, 1.        ]])

# 預測值
yHat1 = mat(xArr[0])*ws
yHat1.A[0][0]
#327.12552843866297

4.3 第3種引數:步長0.01,迭代1000次

# 第3種引數:步長0.01,迭代1000次
wMat = regression.stageWise2(xArr,yArr,0.01,1000)
# 列印前向逐步迴歸的係數變化
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(wMat)
plt.legend(range(wMat.shape[1]))
plt.show()

(此處原有前向逐步迴歸係數變化圖,未能嵌入。)

# 列印係數w
wMat[-1]
#array([ 4.64,  0.  ,  0.45,  0.11, -1.58, -2.34,  0.11,  0.05,  0.22])

# 預測值與真實值之間的相關程度(注意:此處沿用第2節最小二乘法的 ws,而非本節的 wMat[-1])
xMat = mat(xArr);yMat = mat(yArr)
yHat = xMat*ws
corrcoef(yHat.T,yMat)
#array([[1.        , 0.76793521],
#       [0.76793521, 1.        ]])

# 預測值
yHat1 = mat(xArr[0])*ws
yHat1.A[0][0]
#327.12552843866297

注意:三組逐步迴歸的相關係數與預測值完全相同(327.12552843866297),是因為上述程式片段沿用了第2節最小二乘法求得的 ws,而非各自的 wMat[-1];此外,在交叉驗證之前也未區分訓練資料和測試資料。

5. 交叉驗證

# 匯入資料
df = pd.read_excel('Row_data-array.xlsx')
xArr = df.iloc[:,:-1].values
yArr = df.iloc[:,-1].values

# 區分訓練資料和測試資料
trainX,trainY,testX,testY = regression.crossValidation2(xArr,yArr,traning_rate=0.9)

mattrainX = mat(trainX); mattrainY=mat(trainY).T
mattestX = mat(testX);mattestY = mat(testY).T

5.1 交叉驗證標準迴歸分析

ws = regression.standRegres(mattrainX,mattrainY.T)

# 預測值與真實值之間的相關程度
yHat = mattestX*ws
corrcoef(yHat.T,mattestY.T)[0][1]
#0.6376006824230555

5.2 交叉驗證嶺迴歸

ws = regression.ridgeRegres(mattrainX,mattrainY)
# 預測值與真實值之間的相關程度
yHat = mattestX*ws
corrcoef(yHat.T,mattestY.T)[0][1]
#0.6376050542255823

5.3 交叉驗證前向逐步迴歸

ws = regression.stageWise2(mattrainX,mattrainY,eps=0.1,numIt=500)
# 預測值與真實值之間的相關程度
yHat = mattestX*mat(ws[-1]).T
corrcoef(yHat.T,mattestY.T)[0][1]
#0.5642825810183151

regression.py

from numpy import *
import pandas as pd

def standRegres(xArr, yArr):
    """Ordinary least squares regression.

    Solves w = (X^T X)^-1 X^T y via the normal equations.

    Args:
        xArr: feature matrix-like, shape (m, n).
        yArr: target vector-like, length m.

    Returns:
        (n, 1) numpy matrix of weights, or None (after printing a
        message) when X^T X is singular and cannot be inverted.
    """
    X = mat(xArr)
    y = mat(yArr).T
    gram = X.T * X
    if linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return gram.I * (X.T * y)

def rssError(yArr, yHatArr):
    """Return the residual sum of squares between two numpy arrays."""
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()

def ridgeRegres(xMat, yMat, lam=0.2):
    """Ridge regression: w = (X^T X + lam * I)^-1 X^T y.

    Args:
        xMat: (m, n) numpy matrix of features.
        yMat: (m, 1) numpy matrix of targets.
        lam: ridge penalty strength (default 0.2).

    Returns:
        (n, 1) weight matrix, or None (after printing a message) when
        the penalized Gram matrix is still singular (e.g. lam == 0).
    """
    penalized = xMat.T * xMat + eye(shape(xMat)[1]) * lam
    if linalg.det(penalized) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return penalized.I * (xMat.T * yMat)
    
def ridgeTest2(xArr, yArr, numTestPts=30):
    """Run ridge regression across a log-spaced range of penalties.

    Row i of the result holds the weights for lam = exp(i - 10), so the
    penalty sweeps from exp(-10) (nearly OLS) up to exp(numTestPts - 11).

    Args:
        xArr: feature matrix-like, shape (m, n).
        yArr: target vector-like, length m.
        numTestPts: number of lambda values to try (default 30, matching
            the original hard-coded value).

    Returns:
        (numTestPts, n) ndarray of weight vectors, one row per lambda.

    NOTE(review): unlike the textbook ridgeTest, the inputs are NOT
    mean-centered or variance-normalized here — confirm this is intended,
    since ridge penalties are scale-sensitive.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    wMat = zeros((numTestPts, shape(xMat)[1]))
    for i in range(numTestPts):
        ws = ridgeRegres(xMat, yMat, exp(i - 10))
        wMat[i, :] = ws.T
    return wMat

def regularize(xMat):
    """Standardize a matrix column by column.

    Each column has its mean subtracted and is then divided by its
    VARIANCE — note: variance, not standard deviation; this mirrors the
    original implementation's behavior exactly.

    Returns a new matrix; the input is not modified.
    """
    out = xMat.copy()
    colMeans = mean(out, 0)
    colVars = var(out, 0)
    return (out - colMeans) / colVars

def stageWise2(xArr, yArr, eps=0.01, numIt=100, verbose=False):
    """Forward stagewise linear regression.

    Starting from all-zero weights, each iteration tries moving every
    single coefficient by +/- eps and keeps the one move that yields the
    lowest residual sum of squares.

    Args:
        xArr: feature matrix-like, shape (m, n).
        yArr: target vector-like, length m.
        eps: step size applied to one coefficient per iteration.
        numIt: number of iterations (one coefficient update each).
        verbose: when True, print the weight vector at each iteration.
            (The original code printed unconditionally, flooding stdout
            with numIt lines; the default is now quiet.)

    Returns:
        (numIt, n) ndarray; row i is the weight vector after iteration i.
    """
    xMat = mat(xArr)
    yMat = mat(yArr).T
    m, n = shape(xMat)
    returnMat = zeros((numIt, n))
    ws = zeros((n, 1))
    wsMax = ws.copy()
    for i in range(numIt):
        if verbose:
            print(ws.T)
        lowestError = inf
        for j in range(n):
            for sign in [-1, 1]:
                wsTest = ws.copy()
                wsTest[j] += eps * sign
                yTest = xMat * wsTest
                # Residual sum of squares (same computation as rssError).
                rssE = ((yMat.A - yTest.A) ** 2).sum()
                if rssE < lowestError:
                    lowestError = rssE
                    wsMax = wsTest
        ws = wsMax.copy()
        returnMat[i, :] = ws.T
    return returnMat

def crossValidation2(xArr, yArr, traning_rate=0.9):
    """Randomly split a dataset into training and test subsets.

    Shuffles the sample indices, then assigns roughly the first
    `traning_rate` fraction (by shuffled position) to the training set
    and the remainder to the test set.

    Args:
        xArr: indexable collection of feature rows.
        yArr: indexable collection of targets, same length as xArr.
        traning_rate: fraction of samples used for training (default 0.9).

    Returns:
        (trainX, trainY, testX, testY) as four Python lists.
    """
    m = len(yArr)
    order = list(range(m))
    random.shuffle(order)
    threshold = m * traning_rate
    trainX, trainY = [], []
    testX, testY = [], []
    for rank, idx in enumerate(order):
        if rank < threshold:
            trainX.append(xArr[idx])
            trainY.append(yArr[idx])
        else:
            testX.append(xArr[idx])
            testY.append(yArr[idx])
    return trainX, trainY, testX, testY
    
    

相關文章