tensorflow笔记-----1-----tensorflow实现线性回归
先借助公式y=0.1X+0.3随机生成1000个点,再用均方误差和随机梯度下降求W和b
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Linear regression with the TensorFlow 1.x graph API.

Generates 1000 samples from y = 0.1*x + 0.3 plus Gaussian noise, then fits
scalar weight W and bias b by minimizing mean squared error with plain
gradient descent, printing the parameters after every step and finally
plotting the data with the fitted line.
"""
__author__ = "chunming"

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# --- Data: 1000 points of y = 0.1*x + 0.3 + noise -------------------------
# Vectorized with numpy instead of the original Python loop that built a
# list of [x, y] pairs and then unzipped it with two comprehensions.
num = 1000
xdata = np.random.normal(0.0, 1.0, num).astype(np.float32)
ydata = (xdata * 0.1 + 0.3
         + np.random.normal(0.0, 0.03, num)).astype(np.float32)

# --- Model: y = W*x + b, loss = mean squared error -------------------------
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="W")
b = tf.Variable(tf.zeros([1]), name="b")
y = W * xdata + b
loss = tf.reduce_mean(tf.square(y - ydata), name="loss")
optimizer = tf.train.GradientDescentOptimizer(0.05)  # learning rate 0.05
train = optimizer.minimize(loss, name="train")

# --- Training --------------------------------------------------------------
# Context manager guarantees the session is closed (the original leaked it).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("初始化W:", sess.run(W), "初始化b:", sess.run(b),
          "初始化loss:", sess.run(loss))
    for i in range(1000):
        sess.run(train)
        print("第", i, "次的W:", sess.run(W), "b:", sess.run(b),
              "loss:", sess.run(loss))
    # To inspect the graph in TensorBoard, use the TF1.x API
    # (tf.train.SummaryWriter was removed in TF 1.0):
    # writer = tf.summary.FileWriter("./linerR", sess.graph)
    W_fit, b_fit = sess.run(W), sess.run(b)

# --- Visualization ---------------------------------------------------------
plt.scatter(xdata, ydata, c="r")
plt.plot(xdata, xdata * W_fit + b_fit)
plt.show()