[机器学习算法] 梯度下降 python实现(线性回归)

算法思路

根据吴恩达老师视频中的以下内容编写代码
（此处原为吴恩达课程视频截图，提取后仅保留了文章标题作为占位文字）

代码

import matplotlib.pyplot as plt
import numpy as np

class Linear_Regression():
    """Univariate linear regression fitted by batch gradient descent.

    Model: h(x) = theta0 + theta1 * x, trained on ``data`` given as a
    sequence of (x, y) pairs.
    """

    def __init__(self, data, theta0, theta1, learning_rate):
        self.data = data                    # sequence of (x, y) samples
        self.theta0 = theta0                # intercept, updated by gd()
        self.theta1 = theta1                # slope, updated by gd()
        self.learning_rate = learning_rate  # gradient-descent step size
        self.m = len(data)                  # number of samples

    # hypothesis
    def h_theta(self, x):
        """Return the predicted y for input x."""
        return self.theta0 + self.theta1 * x

    # cost function
    def J(self):
        """Mean-squared-error cost: 1/(2m) * sum((h(x) - y)^2)."""
        return sum((self.h_theta(x) - y) ** 2 for x, y in self.data) / (2 * self.m)

    # partial derivative of J w.r.t. theta0
    def pd_theta0_J(self):
        return sum(self.h_theta(x) - y for x, y in self.data) / self.m

    # partial derivative of J w.r.t. theta1
    def pd_theta1_J(self):
        # Bug fix: the original read the module-level global ``data``
        # instead of ``self.data``, which broke any instance constructed
        # from a differently-named variable.
        return sum((self.h_theta(x) - y) * x for x, y in self.data) / self.m

    # gradient descent
    def gd(self):
        """Run batch gradient descent until the cost falls below a fixed
        threshold or a maximum number of iterations is reached.

        Returns the fitted (theta0, theta1).
        """
        min_cost = 0.01
        max_iteration = 100
        iteration = 1  # renamed: ``round`` shadowed the builtin
        while min_cost < abs(self.J()) and iteration <= max_iteration:
            # Simultaneous update: compute both new values before assigning,
            # so theta1's gradient uses the old theta0.
            temp0 = self.theta0 - self.learning_rate * self.pd_theta0_J()
            temp1 = self.theta1 - self.learning_rate * self.pd_theta1_J()
            self.theta0 = temp0
            self.theta1 = temp1
            print('round', iteration, ':\ttheta0=%.16f' % self.theta0,
                  '\ttheta1=%.16f' % self.theta1)
            iteration += 1
        return self.theta0, self.theta1

if __name__ == '__main__':
    # Training samples (x, y) and hyper-parameters.
    data = [[1, 2], [2, 5], [4, 8], [5, 9], [8, 15]]
    theta0 = 0
    theta1 = 0
    learning_rate = 0.01

    # Scatter plot of the raw samples (comprehensions instead of the
    # original index loop over range(len(data))).
    x = [point[0] for point in data]
    y = [point[1] for point in data]
    plt.scatter(x, y)

    # Fit theta0/theta1 by gradient descent.
    linear_regression = Linear_Regression(data, theta0, theta1, learning_rate)
    theta0, theta1 = linear_regression.gd()

    # Overlay the fitted line y = theta0 + theta1 * x on [0, 10).
    x = np.arange(0, 10, 0.01)
    y = theta0 + theta1 * x
    plt.plot(x, y)
    plt.show()

效果图

（此处原为程序运行效果截图，提取后仅保留了文章标题作为占位文字）