def forward(self, x):
    """Forward pass: two sigmoid hidden units feeding one sigmoid output.

    x: an indexable pair (x[0], x[1]) of input features.
    Returns a scalar prediction in (0, 1).
    """
    # FIX: the hidden sums must go through sigmoid — train() computes
    # h1 = sigmoid(sum_h1) and h2 = sigmoid(sum_h2), so inference has to
    # use the same activations or predictions diverge from the trained model.
    h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
    h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
    return sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
神经网络模型
import numpy as np


def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))


def sigmoid_2(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x))."""
    # Evaluate sigmoid once instead of twice.
    s = sigmoid(x)
    return s * (1 - s)


def mes_loss(x, y):
    """Mean squared error between x and y.

    NOTE(review): "mes" looks like a typo for "mse"; the name is kept
    unchanged because other code refers to it.
    """
    return ((x - y) ** 2).mean()


class NeuralNetwork:
    """Tiny 2-input / 2-hidden-unit / 1-output network trained with
    hand-derived stochastic gradient descent (one sample at a time)."""

    def __init__(self):
        # Initialise every weight and bias from the standard normal N(0, 1).
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        self.w3 = np.random.normal()
        self.w4 = np.random.normal()
        self.w5 = np.random.normal()
        self.w6 = np.random.normal()
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()

    def forward(self, x):
        """Forward pass; x is an indexable pair (x[0], x[1]).

        Returns a scalar prediction in (0, 1).
        """
        # FIX: hidden sums must pass through sigmoid — train() computes
        # h1 = sigmoid(sum_h1), so inference has to match the training
        # forward pass.
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        return sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)

    def train(self, data, all_y_trues):
        """Train with per-sample SGD on MSE loss.

        data: array-like of shape (n_samples, 2).
        all_y_trues: array-like of n_samples binary targets.
        """
        learn_rate = 0.1
        epochs = 1000
        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                # --- forward pass, keeping pre-activation sums for backprop ---
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)
                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)
                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1

                # --- backward pass: chain-rule partial derivatives ---
                d_L_d_ypred = -2 * (y_true - y_pred)

                d_ypred_d_w5 = h1 * sigmoid_2(sum_o1)
                d_ypred_d_w6 = h2 * sigmoid_2(sum_o1)
                d_ypred_d_b3 = sigmoid_2(sum_o1)
                d_ypred_d_h1 = self.w5 * sigmoid_2(sum_o1)
                d_ypred_d_h2 = self.w6 * sigmoid_2(sum_o1)

                d_h1_d_w1 = x[0] * sigmoid_2(sum_h1)
                d_h1_d_w2 = x[1] * sigmoid_2(sum_h1)
                d_h1_d_b1 = sigmoid_2(sum_h1)

                d_h2_d_w3 = x[0] * sigmoid_2(sum_h2)
                d_h2_d_w4 = x[1] * sigmoid_2(sum_h2)
                d_h2_d_b2 = sigmoid_2(sum_h2)

                # --- SGD parameter updates ---
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3

            # FIX: report the loss once per epoch, AFTER the pass over all
            # samples — the captured log printed "Epoch 0 loss" once per
            # sample, showing the original had this inside the inner loop.
            if epoch % 10 == 0:
                y_preds = np.apply_along_axis(self.forward, 1, data)
                loss = mes_loss(all_y_trues, y_preds)
                print("Epoch %d loss:%.3f" % (epoch, loss))


if __name__ == "__main__":
    # Demo: classify (weight-ish, height-ish) feature pairs into two types.
    data = np.array([
        [1.78, 1.14],
        [1.96, 1.18],
        [1.86, 1.20],
        [1.72, 1.24],
        [2.00, 1.26],
        [2.00, 1.28],
        [1.96, 1.30],
        [1.74, 1.36],
        [1.64, 1.38],
        [1.82, 1.38],
        [1.90, 1.38],
        [1.70, 1.40],
        [1.82, 1.48],
        [1.82, 1.54],
        [2.08, 1.56],
    ])
    label = np.array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])

    network = NeuralNetwork()
    network.train(data, label)

    # Predictions on unseen points; > 0.5 means class "Apf", else "Af".
    test1 = np.array([1.24, 1.80])
    test2 = np.array([1.28, 1.84])
    test3 = np.array([1.40, 2.04])
    print("test1: %.3f" % network.forward(test1))
    print("test2: %.3f" % network.forward(test2))
    print("test3: %.3f" % network.forward(test3))
    for i in (test1, test2, test3):
        if network.forward(i) > 0.5:
            print("test类型是:Apf")
        else:
            print("test类型是:Af")
结果
Epoch 0 loss:0.336 Epoch 0 loss:0.343 Epoch 0 loss:0.349 Epoch 0 loss:0.355 Epoch 0 loss:0.360 Epoch 0 loss:0.366 Epoch 0 loss:0.371 Epoch 0 loss:0.365 Epoch 0 loss:0.358 Epoch 0 loss:0.351 Epoch 0 loss:0.345 Epoch 0 loss:0.338 Epoch 0 loss:0.332 Epoch 0 loss:0.326 Epoch 0 loss:0.320 Epoch 10 loss:0.302 Epoch 10 loss:0.308 Epoch 10 loss:0.314 Epoch 10 loss:0.320 Epoch 10 loss:0.327 Epoch 10 loss:0.334 Epoch 10 loss:0.340 Epoch 10 loss:0.334 Epoch 10 loss:0.327 Epoch 10 loss:0.321 ... test3: 0.000 test类型是:Af test类型是:Af test类型是:Af
标签:loss,ypred,h2,self,h1,Epoch,实验 From: https://www.cnblogs.com/zeus-1116/p/16910147.html