模仿sklearn進行機器學習演算法的封裝

farsun發表於2021-09-09

程式碼中用到的公式（原文圖片未能顯示，重建如下）：
kNN 歐氏距離 d(x, x') = sqrt(Σᵢ (xᵢ − x'ᵢ)²)；正規方程 θ = (XᵀX)⁻¹ Xᵀ y；R² = 1 − Σ(y − ŷ)² / Σ(y − ȳ)²


#每一個Class分別在一個單獨的py檔案下

#KNN.py
import numpy as np
from math import sqrt 
from collections import Counter

class KNNClassifier:
	def __init__(self, k):
		"""Initialize the kNN classifier.

		k: number of nearest neighbors to vote; must be >= 1.
		"""
		assert k >= 1, "k must be valid"
		self.k = k
		# Leading underscores mark the memorized training data as private.
		self._X_train = None
		self._y_train = None

	def fit(self, X_train, y_train):
		"""Train the classifier on X_train / y_train (kNN just memorizes the data).

		Returns self, following the sklearn fluent-interface convention.
		"""
		# Multi-line asserts need an explicit continuation; the original
		# version was a SyntaxError without the trailing backslash.
		assert X_train.shape[0] == y_train.shape[0], \
			"the size of X_train must be equal to the size of y_train"
		assert self.k <= X_train.shape[0], \
			"the size of X_train must be at least k."
		self._X_train = X_train
		self._y_train = y_train
		return self

	# Like sklearn, predict() takes a 2-D matrix and returns an np.array.
	def predict(self, X_predict):
		"""Predict a label for each row of the 2-D array X_predict."""
		assert self._X_train is not None and self._y_train is not None, \
			"must fit before predict!"
		assert X_predict.shape[1] == self._X_train.shape[1], \
			"the feature number of X_predict must be equal to X_train"

		y_predict = [self._predict(x) for x in X_predict]
		return np.array(y_predict)

	# Private single-sample predictor.
	def _predict(self, x):
		"""Return the predicted label for a single sample x (1-D feature vector)."""
		assert x.shape[0] == self._X_train.shape[1], \
			"the feature number of x must be equal to X_train"
		# Euclidean distance from x to every training sample.
		distances = [
			sqrt(np.sum((x_train - x) ** 2))
			for x_train in self._X_train
		]
		# Indices of training samples sorted by ascending distance.
		nearest = np.argsort(distances)
		topK_y = [self._y_train[i] for i in nearest[:self.k]]
		# Majority vote among the k nearest labels.
		votes = Counter(topK_y)
		return votes.most_common(1)[0][0]

	def __repr__(self):
		return "KNN(k = %d)"  % self.k

    
    	


#KNN.py的用法:
"""
1.基本用法
knn_clf = KNNClassifier(k = 6)
knn_clf.fit(X_train,y_train)

y_predict = knn_clf.predict(X_predict)
"""

"""
2.使用網格搜尋來對KNN進行調參
#先對需要調參的超引數建立一個字典
param_grid = [
	{
		'weights':['uniform'],
		'n_neighbors':[i for i in range(1,11)]
	},
	{
		'weights':['distance'],
		'n_neighbors':[i for i in range(1,11)],
		'p': [i for i in range(1,6)]
	}
]

#注意:param_grid 中的超引數(weights/n_neighbors/p)對應的是 sklearn 的 KNeighborsClassifier;
#本文自製的 KNNClassifier 未實現 get_params/set_params 估計器介面,無法直接傳入 GridSearchCV
knn_clf = KNNClassifier(k = 6)
knn_clf.fit(X_train,y_train)


from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(knn_clf,param_grid)
#使用grid_search來fit
grid_search.fit(X_train,y_train)
#過程比較耗時
grid_search.best_estimator_
#返回最佳模型對應的超引數
grid_search.best_score_
#返回最佳模型的評分
grid_search.best_params_
#返回最佳引數
knn_clf = grid_search.best_estimator_
#拿到最佳引數對應的分類器
y_predict = knn_clf.predict(X_test)

"""


#model_selection.py
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
	"""Randomly split X and y into train/test subsets.

	X: 2-D feature matrix; y: 1-D label vector of the same length.
	test_ratio: fraction of samples assigned to the test set (0.0-1.0).
	seed: optional RNG seed for reproducible splits.
	Returns (X_train, X_test, y_train, y_test).
	"""
	# The original multi-line asserts were missing the backslash continuation.
	assert X.shape[0] == y.shape[0], \
		"the size of X must be equal to the size of y"
	assert 0.0 <= test_ratio <= 1.0, \
		"test_ratio must be valid"

	# `if seed:` would silently ignore seed=0, which is a valid seed;
	# compare against None explicitly.
	if seed is not None:
		np.random.seed(seed)

	# permutation(len(X)) returns a random ordering of the row indices.
	shuffled_indexes = np.random.permutation(len(X))

	test_size = int(len(X) * test_ratio)
	test_indexes = shuffled_indexes[:test_size]
	train_indexes = shuffled_indexes[test_size:]

	X_train = X[train_indexes]
	y_train = y[train_indexes]

	X_test = X[test_indexes]
	y_test = y[test_indexes]

	return X_train, X_test, y_train, y_test


#基於正規方程的多元線性迴歸
#LinearRegression.py
import numpy as np 
from sklearn.metrics import r2_score
class LinearRegression:
	def __init__(self):
		"""Initialize the Linear Regression model."""
		self.coef_ = None          # feature weights (theta[1:])
		self.intercept_ = None     # bias term (theta[0])
		# The original code set `interception_` in fit but read `intercept_`
		# in predict, crashing every prediction; `interception_` is kept only
		# as a backward-compatible alias.
		self.interception_ = None
		self._theta = None         # full parameter vector [intercept, coefs...]

	# Training via the normal equation: theta = (X^T X)^-1 X^T y
	def fit_normal(self, X_train, y_train):
		"""Fit the model by solving the normal equation. Returns self."""
		assert X_train.shape[0] == y_train.shape[0], \
			"the size of X_train must be equal to the size of y_train"
		# Prepend a column of ones so theta[0] acts as the intercept.
		X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
		# np.linalg.inv computes the matrix inverse for the closed form.
		self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
		self.intercept_ = self._theta[0]
		self.interception_ = self.intercept_  # legacy alias
		self.coef_ = self._theta[1:]

		return self

	def predict(self, X_predict):
		"""Return predictions for each row of the 2-D array X_predict."""
		assert self.intercept_ is not None and self.coef_ is not None, \
			"must fit before predict!"
		assert X_predict.shape[1] == len(self.coef_), \
			"the feature number of X_predict must be equal to X_train"

		X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
		return X_b.dot(self._theta)

	def score(self, X_test, y_test):
		"""Return the R^2 score: 1 - SS_res / SS_tot.

		Computed directly with numpy (identical to sklearn's r2_score for
		1-D targets), avoiding the sklearn dependency.
		"""
		y_predict = self.predict(X_test)
		ss_res = np.sum((y_test - y_predict) ** 2)
		ss_tot = np.sum((y_test - np.mean(y_test)) ** 2)
		return 1.0 - ss_res / ss_tot

	def __repr__(self):
		return "LinearRegression()"





來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/4289/viewspace-2822867/,如需轉載,請註明出處,否則將追究法律責任。

相關文章