第一关:
# encoding=utf8
import numpy as np
from collections import Counter


class kNNClassifier(object):
    """A minimal k-nearest-neighbors classifier using Euclidean distance.

    Ties among the k neighbors' majority labels are broken by choosing the
    label whose tied neighbors have the smallest summed distance to the
    query point (the original implementation left this case unhandled).
    """

    def __init__(self, k):
        """
        :param k: number of neighbors consulted per prediction
        """
        self.k = k
        # Training data, ndarray of shape (n_samples, n_features); set by fit().
        self.train_feature = None
        # Training labels, ndarray of shape (n_samples,); set by fit().
        self.train_label = None

    def fit(self, feature, label):
        """Store the training set (kNN is a lazy learner; no work happens here).

        :param feature: training data, ndarray of shape (n_samples, n_features)
        :param label: training labels, ndarray of shape (n_samples,)
        :return: None
        """
        self.train_feature = feature
        self.train_label = label

    def predict(self, feature):
        """Predict a label for each row of `feature`.

        :param feature: test data, ndarray of shape (m_samples, n_features)
        :return: predicted labels as a list of length m_samples
        """
        result = []
        for sample in feature:
            # Euclidean distance from this query point to every training point.
            dist = np.sqrt(np.sum((self.train_feature - sample) ** 2, axis=1))
            # Indices of the k nearest training points.
            neighbor = np.argsort(dist)[:self.k]
            k_labels = [self.train_label[i] for i in neighbor]
            counts = Counter(k_labels)
            top_count = counts.most_common(1)[0][1]
            tied = [lab for lab, c in counts.items() if c == top_count]
            if len(tied) == 1:
                result.append(tied[0])
            else:
                # Tie-break: among equally frequent labels, pick the one whose
                # tied neighbors lie closest (smallest summed distance) overall.
                best = min(
                    tied,
                    key=lambda lab: sum(
                        dist[i] for i in neighbor if self.train_label[i] == lab
                    ),
                )
                result.append(best)
        return result
第二关:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler


def classification(train_feature, train_label, test_feature):
    """Classify wine samples in `test_feature` with a k-nearest-neighbors model.

    Features are standardized (zero mean, unit variance) using statistics
    computed on the training set only, then applied to the test set.

    :param train_feature: training data, ndarray of shape (n_samples, n_features)
    :param train_label: training labels, ndarray of shape (n_samples,)
    :param test_feature: test data, ndarray of shape (m_samples, n_features)
    :return: predicted labels for the test set
    """
    # Fit the scaler on the training data and reuse its statistics for test data.
    scaler = StandardScaler()
    scaled_train = scaler.fit_transform(train_feature)
    scaled_test = scaler.transform(test_feature)

    # Train a default kNN classifier and predict on the standardized test set.
    model = KNeighborsClassifier()
    model.fit(scaled_train, train_label)
    return model.predict(scaled_test)
标签:kNN,educoder,self,feature,param,label,---,train,ndarray From: https://www.cnblogs.com/kafuuchino/p/18278862