1. Algorithm principle
y = w*x + b + ε
loss = Σ_i (w*x_i + b - y_i)^2
∂loss/∂w = Σ_i 2*(w*x_i + b - y_i)*x_i
∂loss/∂b = Σ_i 2*(w*x_i + b - y_i)
w' = w - lr * ∂loss/∂w   # gradient-descent update for w
b' = b - lr * ∂loss/∂b   # gradient-descent update for b
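To make the update rule concrete, here is a minimal sketch of a single gradient-descent step on a tiny dataset. The data points and learning rate are made-up values for illustration only; they are not from the original post.

# One hand-rolled gradient-descent step on toy data (hypothetical values)
xs = [1.0, 2.0, 3.0]
ys = [2.1, 3.9, 6.2]   # roughly y = 2x, so w should move toward 2
w, b, lr = 0.0, 0.0, 0.01
n = len(xs)

# Partial derivatives of the summed squared error, as in the formulas above
grad_w = sum(2 * (w * x + b - y) * x for x, y in zip(xs, ys))
grad_b = sum(2 * (w * x + b - y) for x, y in zip(xs, ys))

# One update step
w = w - lr * grad_w
b = b - lr * grad_b
print(w, b)   # w moves from 0 toward 2; b shifts slightly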
2. Implementation with numpy

import numpy as np

def compute_loss(b, w, points):
    # Mean squared error over all points
    loss = 0
    n = len(points)
    for i in range(n):
        x = points[i, 0]
        y = points[i, 1]
        loss += (y - (w * x + b)) ** 2
    return loss / n

def step_gradient(current_b, current_w, points, learning_rate):
    # Accumulate the averaged gradients, then take one update step
    b_gradient = 0
    w_gradient = 0
    n = len(points)
    for i in range(n):
        x = points[i, 0]
        y = points[i, 1]
        # grad_b = (2/n) * (w*x + b - y)
        b_gradient += (2 / n) * ((current_w * x + current_b) - y)
        # grad_w = (2/n) * (w*x + b - y) * x
        w_gradient += (2 / n) * x * ((current_w * x + current_b) - y)
    current_b = current_b - learning_rate * b_gradient
    current_w = current_w - learning_rate * w_gradient
    return current_b, current_w

def gradient_descent_runner(points, start_b, start_w, learning_rate, num):
    # Run num iterations of gradient descent from the given starting point
    b = start_b
    w = start_w
    for _ in range(num):
        b, w = step_gradient(b, w, np.array(points), learning_rate)
    return b, w

def run():
    points = np.genfromtxt("data.csv", delimiter=",")
    learning_rate = 0.0001
    b = 0
    w = 0
    num = 1000
    loss = compute_loss(b, w, points)
    print(w, b, loss)
    b, w = gradient_descent_runner(points, b, w, learning_rate, num)
    loss = compute_loss(b, w, points)
    print(w, b, loss)

if __name__ == "__main__":
    run()
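The per-point Python loops above can also be written in vectorized numpy form. The sketch below assumes the same data layout as the loop version (a two-column array with x in the first column and y in the second); the function name is mine, not from the original post.

import numpy as np

def step_gradient_vectorized(b, w, points, learning_rate):
    # Same update as step_gradient, expressed with numpy array operations
    x = points[:, 0]
    y = points[:, 1]
    n = len(points)
    error = w * x + b - y
    w_gradient = (2 / n) * np.sum(error * x)
    b_gradient = (2 / n) * np.sum(error)
    return b - learning_rate * b_gradient, w - learning_rate * w_gradient

Vectorizing the step avoids the Python-level loop over points, which is usually noticeably faster once the dataset has more than a few thousand rows.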