Machine Learning: Gradient Descent for Simple Linear Regression
Let's go straight to the code:
import numpy as np
import matplotlib.pyplot as plt
# Load the data, split on commas
data = np.genfromtxt(r"G:\work\python\jupyter_notebook_work\机器学习\回归\data.csv", delimiter=",")
print(data)
Partial output:
[[ 32.50234527 31.70700585]
[ 56.86890066 83.14274979]
[ 34.3331247 55.72348926]
[ 59.04974121 77.63418251]
[ 57.78822399 99.05141484]
[ 54.28232871 79.12064627]
[ 68.31936082 97.91982104]
[ 50.03017434 81.53699078]
[ 49.23976534 72.11183247]
[ 50.03957594 85.23200734]
[ 48.14985889 66.22495789]
[ 25.12848465 53.45439421]]
# Take the first column as x_data
x_data = data[:,0]
# Take the second column as y_data
y_data = data[:,1]
plt.scatter(x_data,y_data)
plt.show()
# Learning rate
lr = 0.0001
# Intercept
b = 0
# Slope
k = 0
# Maximum number of iterations
epochs = 50
# Least-squares cost: mean squared error divided by 2
def compute_error(b, k, x_data, y_data):
    totalError = 0
    for i in range(0, len(x_data)):
        totalError += (y_data[i] - (k * x_data[i] + b)) ** 2
    return totalError / float(len(x_data)) / 2.0
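For reference, with the hypothesis y = kx + b this function computes the cost

J(b, k) = \frac{1}{2m} \sum_{i=1}^{m} \left( y_i - (k x_i + b) \right)^2

where m is the number of samples. The division by 2 is conventional: it cancels the factor of 2 that appears when differentiating the square, so the gradient expressions below come out clean.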
def gradient_descent_runner(x_data, y_data, b, k, lr, epochs):
    # Total number of samples
    m = float(len(x_data))
    # Iterate epochs times
    for i in range(epochs):
        b_grad = 0
        k_grad = 0
        # Sum the gradients over all samples, then average
        for j in range(0, len(x_data)):
            # Partial derivative with respect to theta0 (the intercept b)
            b_grad += (1/m) * (((k * x_data[j]) + b) - y_data[j])
            # Partial derivative with respect to theta1 (the slope k)
            k_grad += (1/m) * x_data[j] * (((k * x_data[j]) + b) - y_data[j])
        # Update b and k
        b = b - (lr * b_grad)
        k = k - (lr * k_grad)
        # Plot the current fit every 5 iterations
        if i % 5 == 0:
            print("epochs : ", i)
            plt.plot(x_data, y_data, 'b.')
            plt.plot(x_data, k*x_data + b, 'r')
            plt.show()
    return b, k
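The inner loop accumulates exactly the averaged partial derivatives of J, and each epoch takes one step against the gradient:

\frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^{m} \left( (k x_i + b) - y_i \right), \qquad \frac{\partial J}{\partial k} = \frac{1}{m} \sum_{i=1}^{m} x_i \left( (k x_i + b) - y_i \right)

b \leftarrow b - lr \cdot \frac{\partial J}{\partial b}, \qquad k \leftarrow k - lr \cdot \frac{\partial J}{\partial k}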
Either of the two snippets below can be used to inspect the model's result.
Option 1: train and print the error
print("Starting b = {0}, k = {1}, error = {2}".format(b, k, compute_error(b, k, x_data, y_data)))
print("Running...")
b, k = gradient_descent_runner(x_data, y_data, b, k, lr, epochs)
print("After {0} iterations b = {1}, k = {2}, error = {3}".format(epochs, b, k, compute_error(b, k, x_data, y_data)))
Option 2: plot the final fitted line
plt.plot(x_data, y_data, 'b.')
plt.plot(x_data, k*x_data + b, 'r')
plt.show()
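As a sanity check, the same update can be written in vectorized NumPy, replacing the inner Python loop with array operations. This is just a sketch for comparison (the name gradient_descent_vectorized is mine, not part of the code above); given the same starting values it should return essentially the same b and k as gradient_descent_runner:

def gradient_descent_vectorized(x, y, b=0.0, k=0.0, lr=0.0001, epochs=50):
    m = float(len(x))
    for _ in range(epochs):
        residual = (k * x + b) - y           # prediction error for every sample at once
        b_grad = residual.sum() / m          # dJ/db, averaged over all samples
        k_grad = (x * residual).sum() / m    # dJ/dk, averaged over all samples
        b -= lr * b_grad
        k -= lr * k_grad
    return b, k

b_v, k_v = gradient_descent_vectorized(x_data, y_data)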