Andrew Ng's Machine Learning Course: Programming Exercises | (2) ex2 - Logistic Regression
1. logistic-regression
"""
邏輯迴歸
案例:根據學生的兩門學生成績,預測該學生是否會被大學錄取
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def get_Xy(data):
    data.insert(0, 'ones', 1)  # prepend a column of ones as the intercept term
    X = np.array(data.iloc[:, 0:-1])
    y = np.array(data.iloc[:, -1])
    return X, y
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
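# A quick sanity check on sigmoid (illustrative values, easy to verify):
# sigmoid(0) = 0.5 exactly, and the output saturates towards 0 and 1, e.g.
# sigmoid(np.array([-10, 0, 10])) ≈ [4.54e-05, 0.5, 1 - 4.54e-05].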
def cost_function(theta, X, y):  # theta is a 1-D array of length 3
    # first has shape (100,): 1-D, length 100. On ndarrays, * multiplies
    # element-wise; it performs matrix multiplication only on np.matrix objects.
    first = y * np.log(sigmoid(X @ theta))
    second = (1 - y) * np.log(1 - sigmoid(X @ theta))
    # print(first.shape, second.shape)
    return -np.sum(first + second) / len(X)
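# Handy check: with theta = np.zeros(3), sigmoid(X @ theta) is 0.5 everywhere,
# so every sample contributes -log(0.5) and the cost is ln(2) ≈ 0.6931
# regardless of the data -- the initial cost printed below should match this.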
def gradient_descent(X, y, theta, epoch, alpha):  # theta is a 1-D array of length 3
    m = len(X)
    costs = []
    for i in range(epoch):
        A = sigmoid(X @ theta)
        # Vectorized update: theta := theta - (alpha/m) * X^T (h - y)
        theta = theta - (alpha / m) * X.T @ (A - y)
        cost = cost_function(theta, X, y)
        costs.append(cost)
        if i % 1000 == 0:
            print(cost)
    return costs, theta
def gradient(theta, X, y):  # gradient for a single step; theta is a 1-D array of length 3
    parameters = int(theta.ravel().shape[0])  # ravel flattens the array
    grad = np.zeros(parameters)  # same shape as theta, (3,); .T on a 1-D array is a no-op, so it is omitted
    error = sigmoid(X @ theta) - y
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        grad[i] = np.sum(term) / len(X)
    return grad
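# The loop above is equivalent to the one-line vectorized form (illustrative):
# X.T @ (sigmoid(X @ theta) - y) / len(X)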
def predict(theta, X):  # theta is a 1-D array of length 3
    probability = sigmoid(X @ theta)
    return [1 if x >= 0.5 else 0 for x in probability]
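# An equivalent vectorized form (illustrative):
# (sigmoid(X @ theta) >= 0.5).astype(int)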
if __name__ == "__main__":
    data = pd.read_csv("ex2data1.txt", names=['Exam 1', 'Exam 2', 'Admitted'])
    positive = data[data['Admitted'].isin([1])]
    negative = data[data['Admitted'].isin([0])]
    fig, ax = plt.subplots(figsize=(8, 6))
    # s: scalar or array-like of shape (n,), marker size in points squared; c sets the color
    ax.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='b', marker='o', label='Admitted')
    ax.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='r', marker='x', label='Not Admitted')
    # Alternatively:
    # ax.scatter(data[data['Admitted'] == 0]['Exam 1'], data[data['Admitted'] == 0]['Exam 2'], c='r', marker='x', label='y=0')
    # ax.scatter(data[data['Admitted'] == 1]['Exam 1'], data[data['Admitted'] == 1]['Exam 2'], c='b', marker='o', label='y=1')
    ax.legend()
    ax.set_xlabel('Exam 1 Score')
    ax.set_ylabel('Exam 2 Score')
    X, y = get_Xy(data)
    print(X.shape, y.shape)
    theta = np.zeros(3,)  # 1-D array of shape (3,), not 1×3
    θ = theta.T  # .T is a no-op on a 1-D array, so θ is effectively an alias for theta
    # ***************** choosing the learning rate α ******************************
    cost_init = cost_function(θ, X, y)
    print("Initial cost function value: {}".format(cost_init))
    epoch = 200000
    alpha = 0.004
    costs, final_theta = gradient_descent(X, y, θ, epoch, alpha)
    print(final_theta)
    # accuracy check
    y_ = np.array(predict(final_theta, X))
    print(y_.shape, y.shape)
    acc = np.mean(y_ == y)
    print('accuracy = {0}'.format(acc))
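    # Optional (a minimal sketch, not in the original exercise): plot the
    # training curve to confirm that alpha = 0.004 drives the cost down
    # smoothly; fig_c and ax_c are illustrative names.
    fig_c, ax_c = plt.subplots(figsize=(8, 6))
    ax_c.plot(np.arange(epoch), costs, 'r')
    ax_c.set(xlabel='epoch', ylabel='cost', title='Training cost vs. epoch')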
print("-" * 30, "我是分割線", "-" * 30)
    # ********** calling an advanced optimizer -- it chooses the step size automatically **********
    import scipy.optimize as opt
    result = opt.fmin_tnc(func=cost_function, x0=θ, fprime=gradient, args=(X, y))
    print(result)
    print("Final cost function value: {}".format(cost_function(result[0], X, y)))
    # accuracy check
    y_1 = np.array(predict(result[0], X))
    print(y_1.shape, y.shape)
    acc1 = np.mean(y_1 == y)
    print('accuracy_1 = {0}'.format(acc1))
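    # Optional (a minimal sketch, not in the original exercise): with linear
    # features the decision boundary is the line θ0 + θ1·x1 + θ2·x2 = 0;
    # coef, x1_line and x2_line are illustrative names, and 30-100 roughly
    # matches the range of the exam scores.
    coef = result[0]
    x1_line = np.linspace(30, 100, 100)
    x2_line = -(coef[0] + coef[1] * x1_line) / coef[2]
    ax.plot(x1_line, x2_line, 'g', label='Decision boundary')
    ax.legend()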
    plt.show()
2. logistic_regression regularization
"""
邏輯迴歸-正則化
案例:案例:設想你是工廠的生產主管,你要決定是否晶片要被接受或拋棄
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def feature_mapping(x1, x2, power):
    data = {}
    for i in np.arange(power + 1):
        for j in np.arange(i + 1):
            data['F{}{}'.format(i - j, j)] = np.power(x1, i - j) * np.power(x2, j)
    return pd.DataFrame(data)
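# For power=6 this mapping yields (6+1)*(6+2)/2 = 28 polynomial terms
# (1, x1, x2, x1^2, x1*x2, x2^2, ..., x2^6), which is why theta is
# initialized with 28 entries further below.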
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
def cost_function(theta, X, y, λ):
    first = y.T @ np.log(sigmoid(X @ theta))
    second = (1 - y.T) @ np.log(1 - sigmoid(X @ theta))
    reg = (λ / (2 * len(X))) * np.sum(np.power(theta[1:], 2))  # theta[1:] excludes theta0 from the penalty
    # print(first.shape, second.shape, reg)
    return -np.sum(first + second) / len(X) + reg
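# In formula form: J(θ) = -(1/m)·Σ[y·log(h) + (1-y)·log(1-h)] + (λ/(2m))·Σ_{j>=1} θj²,
# with h = sigmoid(X @ θ) and the bias θ0 left unpenalized.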
def gradient_descent(theta, X, y, α, epoch, λ):
    costs = []
    for i in range(epoch):
        reg = theta[1:] * (λ / len(X))
        reg = np.insert(reg, 0, values=0, axis=0)  # theta0 is not regularized
        # Regularized update θ := θ - α·[(1/m)·Xᵀ(h-y) + (λ/m)·θ]; note that α
        # must scale the regularization term as well.
        theta = theta - α * ((X.T @ (sigmoid(X @ theta) - y)) / len(X) + reg)
        cost = cost_function(theta, X, y, λ)
        costs.append(cost)
        if i % 1000 == 0:
            print(cost)
    return theta, costs
def predict(theta, X):  # theta here has shape (28, 1)
    probability = sigmoid(X @ theta)
    return [1 if x >= 0.5 else 0 for x in probability]
if __name__ == "__main__":
    data = pd.read_csv("ex2data2.txt", names=['Test 1', 'Test 2', 'Accepted'])
    fig, ax = plt.subplots()
    ax.scatter(data[data['Accepted'] == 0]['Test 1'], data[data['Accepted'] == 0]['Test 2'], c='r', marker='x',
               label='y=0')
    ax.scatter(data[data['Accepted'] == 1]['Test 1'], data[data['Accepted'] == 1]['Test 2'], c='b', marker='o',
               label='y=1')
    ax.legend()
    ax.set(xlabel='Test1', ylabel='Test2')
    x1 = data['Test 1']
    x2 = data['Test 2']
    data2 = feature_mapping(x1, x2, 6)
    X = np.array(data2.values)
    y = np.array(data.iloc[:, -1].values).reshape(len(X), 1)
    print(X.shape, y.shape)
    theta = np.zeros((28, 1))
    cost_init = cost_function(theta, X, y, λ=1)
    print("Initial cost function value: {}".format(cost_init))
    α = 0.001
    epoch = 200000
    final_theta, costs = gradient_descent(theta, X, y, α, epoch, λ=0.1)
    print("final_theta:{}".format(final_theta))
    # accuracy check
    y_ = np.array(predict(final_theta, X)).reshape(len(X), 1)
    print(y_.shape, y.shape)  # note: the two arrays must have exactly the same shape -- (118,) and (118, 1) differ
    acc = np.mean(y_ == y)
    print('accuracy = {0}'.format(acc))
    plt.show()
3. logistic_regression with an advanced optimization function
"""
基於高階優化函式的邏輯迴歸-正則化
案例:案例:設想你是工廠的生產主管,你要決定是否晶片要被接受或拋棄
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def feature_mapping(x1, x2, power):
    data = {}
    for i in np.arange(power + 1):
        for j in np.arange(i + 1):
            data['F{}{}'.format(i - j, j)] = np.power(x1, i - j) * np.power(x2, j)
    return pd.DataFrame(data)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
def cost_function(theta, X, y, λ):
    first = y.T @ np.log(sigmoid(X @ theta))
    second = (1 - y.T) @ np.log(1 - sigmoid(X @ theta))
    reg = (λ / (2 * len(X))) * np.sum(np.power(theta[1:], 2))
    # print(first.shape, second.shape, reg)
    return -np.sum(first + second) / len(X) + reg
def gradient(theta, X, y, λ):  # regularized gradient (a single evaluation, for the optimizer)
    theta = np.mat(theta)
    X = np.mat(X)
    y = np.mat(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    # print(theta)
    for i in range(parameters):
        term = np.multiply(error, X[:, i])  # X[:, i] selects column i of X
        if i == 0:
            grad[i] = np.sum(term) / len(X)  # theta0 is not regularized
        else:
            grad[i] = (np.sum(term) / len(X)) + ((λ / len(X)) * theta[0, i])  # scalar theta[0, i], not the (1,1) matrix theta[:, i]
    return grad
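# An equivalent vectorized form without the deprecated np.mat (a sketch;
# gradient_vec is an illustrative name, not part of the original exercise):
# def gradient_vec(theta, X, y, λ):
#     error = sigmoid(X @ theta) - y.ravel()  # theta arrives 1-D from fmin_tnc
#     grad = X.T @ error / len(X)
#     grad[1:] += (λ / len(X)) * theta[1:]    # leave theta0 unpenalized
#     return grad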
def predict(theta, X):  # theta here has 28 entries
    probability = sigmoid(X @ theta)
    return [1 if x >= 0.5 else 0 for x in probability]
if __name__ == "__main__":
    data = pd.read_csv("ex2data2.txt", names=['Test 1', 'Test 2', 'Accepted'])
    fig, ax = plt.subplots()
    ax.scatter(data[data['Accepted'] == 0]['Test 1'], data[data['Accepted'] == 0]['Test 2'], c='r', marker='x',
               label='y=0')
    ax.scatter(data[data['Accepted'] == 1]['Test 1'], data[data['Accepted'] == 1]['Test 2'], c='b', marker='o',
               label='y=1')
    ax.legend()
    ax.set(xlabel='Test1', ylabel='Test2')
    x1 = data['Test 1']
    x2 = data['Test 2']
    data2 = feature_mapping(x1, x2, 6)
    X = np.array(data2.values)
    y = np.array(data.iloc[:, -1].values.reshape(len(X), 1))
    # print(X.shape, y.shape)
    theta = np.zeros((28, 1))
    import scipy.optimize as opt
    λ = 1
    result = opt.fmin_tnc(func=cost_function, x0=theta, fprime=gradient, args=(X, y, λ))
    print(result[0])
    # accuracy check
    y_ = np.array(predict(result[0], X)).reshape(len(X), 1)
    print(y_.shape, y.shape)  # note: the two arrays must have exactly the same shape -- (118,) and (118, 1) differ
    acc = np.mean(y_ == y)
    print('accuracy = {0}'.format(acc))
    # sklearn implementation
    from sklearn import linear_model  # sklearn's linear-model module
    model = linear_model.LogisticRegression(penalty='l2', C=1.0)
    model.fit(X, y.ravel())
    print("sklearn_accuracy={}".format(model.score(X, y)))
    # plot the decision boundary
    x = np.linspace(-1.2, 1.2, 200)
    # meshgrid builds coordinate matrices from coordinate vectors, e.g. x-values
    # 1,2,3 and y-values 7,8 give the grid points (1,7)(2,7)(3,7)(1,8)(2,8)(3,8)
    xx, yy = np.meshgrid(x, x)
    z = feature_mapping(xx.ravel(), yy.ravel(), 6).values
    zz = z @ result[0]
    zz = zz.reshape(xx.shape)
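    # The boundary is the set of points where the mapped features dotted with θ
    # equal 0, so drawing the zero level of zz traces it; passing levels=[0] to
    # plt.contour below would name that level explicitly.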
    fig, ax = plt.subplots()
    ax.scatter(data[data['Accepted'] == 0]['Test 1'], data[data['Accepted'] == 0]['Test 2'], c='r', marker='x',
               label='y=0')
    ax.scatter(data[data['Accepted'] == 1]['Test 1'], data[data['Accepted'] == 1]['Test 2'], c='b', marker='o',
               label='y=1')
    ax.legend()
    ax.set(xlabel='Test1', ylabel='Test2')
    plt.contour(xx, yy, zz, 0)
    plt.show()