task4: Multi-layer Neural Network -- PyTorch Implementation
1 Plotting a figure (not related to the main topic, by the way)
import numpy as np
import matplotlib.pyplot as plt

m = 200
X = np.random.randn(2, m)  # draw 2*200 Gaussian samples, mean 0, variance 1
Y = (X[0, :] > 0) * (X[1, :] > 0) * 1.0 + (X[0, :] < 0) * (X[1, :] < 0)  # label 1 in the first and third quadrants

# visualization (in a Jupyter notebook environment)
%matplotlib inline
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral)
2 Building a multi-layer neural network
2.1 Importing libraries
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
%matplotlib inline
2.2 Loading the dataset
The Pima-Indians-Diabetes dataset; you can find it on GitHub.
import pandas as pd

xy = pd.read_csv('diabetes.csv', delimiter=',', dtype=np.float32)
# print(xy.head())
xy_numpy = xy.to_numpy()  # convert the pandas DataFrame to numpy, so it can be turned into a tensor below
x = xy_numpy[:, 0:-1]  # x has shape 768*8
y = xy_numpy[:, -1].reshape(-1, 1)  # reshape so y is 768*1 rather than a flat vector of length 768
Convert the numpy arrays to tensors:
x_data = torch.from_numpy(x)  # note: the numpy array's dtype must be np.float32 (or another float type) here
y_data = torch.from_numpy(y)
Check the shapes:
print(x_data.shape)
print(y_data.shape)
torch.Size([768, 8])
torch.Size([768, 1])
2.3 Building the multi-layer neural network
A three-layer neural network (mine also adds two dropout layers and a sigmoid layer). The figure here was just a schematic of the three Linear layers; see the code below (the extra dropout and sigmoid layers were not drawn).
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # define the layers of the network
        self.fc1 = torch.nn.Linear(8, 6)
        self.fc2 = torch.nn.Linear(6, 4)
        self.fc3 = torch.nn.Linear(4, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))                           # layer 1: 8 -> 6
        x = F.dropout(x, p=0.5, training=self.training)   # dropout 1 (only active in training mode)
        x = F.relu(self.fc2(x))                           # layer 2: 6 -> 4
        x = F.dropout(x, p=0.5, training=self.training)   # dropout 2
        y_pred = torch.sigmoid(self.fc3(x))               # layer 3 + sigmoid: 4 -> 1
        # note: nn.functional.sigmoid is deprecated; use torch.sigmoid instead
        return y_pred
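For reference, the same architecture could also be expressed with nn.Sequential, using nn.Dropout modules instead of F.dropout (they switch themselves off in eval mode automatically). This is just an equivalent sketch, not the model used in the rest of this post:

# Equivalent sketch with nn.Sequential; model_seq is a hypothetical name
model_seq = torch.nn.Sequential(
    torch.nn.Linear(8, 6),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(6, 4),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(4, 1),
    torch.nn.Sigmoid(),
)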
Custom weight initialization function:
def weight_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        print("hi")  # debug print: confirms each Linear layer gets re-initialized
        m.weight.data = torch.randn(m.weight.data.size()[0], m.weight.data.size()[1])
        m.bias.data = torch.randn(m.bias.data.size()[0])
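Since torch.nn.init was already imported as init in section 2.1 (but never used), the same standard-normal initialization can be written with its in-place helpers. A minimal sketch; weight_init_v2 is a hypothetical name:

# Equivalent sketch using the torch.nn.init helpers
def weight_init_v2(m):
    if isinstance(m, nn.Linear):
        init.normal_(m.weight)  # standard normal, same as torch.randn above
        init.normal_(m.bias)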
2.4 Instantiating the model
model = Model()
model.apply(weight_init)
2.5 Defining the loss function and optimizer
criterion = torch.nn.BCELoss()  # binary cross-entropy loss
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # the learning rate is a hyperparameter; 0.01 here, tune it yourself
Loss = []
print(x.shape)
out:
(768, 8)
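A side note: BCELoss is the right match here because the model's forward already ends in a sigmoid. An alternative pattern (a sketch, not what this post does) is to return raw logits from forward and pair them with BCEWithLogitsLoss, which fuses the sigmoid into the loss for better numerical stability:

# Sketch of the alternative pairing (assumes forward returns raw logits, no sigmoid)
criterion_logits = torch.nn.BCEWithLogitsLoss()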
2.6 Training
for epoch in range(2000):
    y_pred = model(x_data)
    # compute the loss
    loss = criterion(y_pred, y_data)
    # record the loss every epoch
    Loss.append(loss.item())
    # print the loss every 100 epochs
    if epoch % 100 == 0:
        print('[%d, %5d] loss: %.3f' %
              (epoch + 1, 2000, loss.item()))
    # zero the gradients
    optimizer.zero_grad()
    # backpropagate
    loss.backward()
    # update the parameters
    optimizer.step()
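The Loss list recorded above is never visualized in the original; a minimal sketch for plotting it, reusing the matplotlib import from section 1:

# Sketch: plot the recorded training loss curve
plt.plot(Loss)
plt.xlabel('epoch')
plt.ylabel('BCE loss')
plt.show()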
Since the model predicts probabilities, y_pred has to be converted to the same form as y_data.
y_data is 1 or 0 (stored as floats); for binary classification, a sigmoid output above 0.5 maps to 1 and one below 0.5 maps to 0.
for i in range(len(y_pred)):
    if y_pred[i] > 0.5:
        y_pred[i] = 1.0
    else:
        y_pred[i] = 0.0
# print(y_pred)
type(y_pred)
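The element-wise loop works, but the same thresholding can be done in one vectorized line. A sketch; y_pred_label is a hypothetical name:

# Vectorized equivalent of the loop above
y_pred_label = (y_pred > 0.5).float()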
Accuracy:
(y_pred == y_data).sum().item() / len(y_data)  # uses torch.Tensor.sum()
out:
0.6536458333333334
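One caveat: the y_pred used above is the last forward pass from the training loop, so dropout was still active when it was computed, which depresses the measured accuracy. A sketch of evaluating with dropout disabled (this relies on the training=self.training fix in forward above; y_prob and acc are hypothetical names):

# Sketch: evaluate with dropout switched off
model.eval()                  # eval mode sets self.training = False
with torch.no_grad():         # no gradients needed for evaluation
    y_prob = model(x_data)
acc = ((y_prob > 0.5).float() == y_data).float().mean().item()
print(acc)
model.train()                 # switch back if you continue training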