Contents
1. Define the model
 1.1 Visualize the model
 1.2 Model parameters
2. Forward pass
3. Backward pass
4. Compute the loss
5. Update the parameters
6. Complete minimal code

Reference: http://pytorch123.com/
1. Define the model
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net_model(nn.Module):
    def __init__(self):
        super(Net_model, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)  # convolution
        # signature: in_channels, out_channels, kernel_size, stride=1,
        # padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # fully connected layers
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, (2, 2))
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = x.view(-1, self.num_flat_features(x))  # flatten
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

model = Net_model()
print(model)
```

Output:
```
Net_model(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)
```
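The in_features of fc1 is 16*5*5 = 400, which you can derive by tracing the shapes: a 1×32×32 input becomes 6×28×28 after conv1 (5×5 kernel, no padding), 6×14×14 after the 2×2 max pool, 16×10×10 after conv2, and 16×5×5 after the second pool. A quick sketch to verify this (this shape printout is illustrative, not part of the original post):

```python
# verify intermediate shapes layer by layer (illustrative sketch)
x = torch.randn(1, 1, 32, 32)
x = F.max_pool2d(F.relu(model.conv1(x)), (2, 2))
print(x.shape)  # torch.Size([1, 6, 14, 14])
x = F.max_pool2d(F.relu(model.conv2(x)), 2)
print(x.shape)  # torch.Size([1, 16, 5, 5]) -> flattens to 400 features
```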
1.1 Visualize the model

```python
from torchviz import make_dot

# note: `input` must already be defined (see section 2, a 1x1x32x32 tensor)
vis_graph = make_dot(model(input), params=dict(model.named_parameters()))
vis_graph.view()
```
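make_dot returns a graphviz Digraph, so besides .view() (which opens the rendered PDF) you can also write the graph to a file; the filename and format below are arbitrary choices, not from the original post:

```python
# save the computation graph to net_graph.png (name/format are examples)
vis_graph.render("net_graph", format="png")
```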
1.2 Model parameters

```python
params = list(model.parameters())
print(len(params))
for i in range(len(params)):
    print(params[i].size())
```

Output:

```
10
torch.Size([6, 1, 5, 5])
torch.Size([6])
torch.Size([16, 6, 5, 5])
torch.Size([16])
torch.Size([120, 400])
torch.Size([120])
torch.Size([84, 120])
torch.Size([84])
torch.Size([10, 84])
torch.Size([10])
```
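The 10 tensors are simply the weight and bias of each of the five layers. To see which size belongs to which layer, named_parameters() is handy:

```python
# print each parameter together with its layer name
for name, p in model.named_parameters():
    print(name, p.size())  # e.g. conv1.weight torch.Size([6, 1, 5, 5])
```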
2. Forward pass

```python
input = torch.randn(1, 1, 32, 32)
out = model(input)
print(out)
```

Output:

```
tensor([[-0.1100,  0.0273,  0.1260,  0.0713, -0.0744, -0.1442, -0.0068, -0.0965,
         -0.0601, -0.0463]], grad_fn=<AddmmBackward>)
```
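Note that torch.nn layers expect a batch dimension: the input here is nSamples × nChannels × Height × Width. If you have a single sample, unsqueeze(0) adds a fake batch dimension, as in this sketch:

```python
# a single 1x32x32 sample needs a leading batch dimension before the forward pass
single = torch.randn(1, 32, 32)
out_single = model(single.unsqueeze(0))  # shape becomes (1, 1, 32, 32)
```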
3. Backward pass

```python
model.zero_grad()                 # zero the gradient buffers
out.backward(torch.randn(1, 10))  # backprop with a random gradient
```
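out is a (1, 10) tensor rather than a scalar, so backward() needs a gradient argument of the same shape (the vector in autograd's vector-Jacobian product); a scalar loss needs no argument. The all-ones gradient below is just an illustration of the equivalent explicit form:

```python
# explicit gradient argument for a non-scalar output (illustrative)
model.zero_grad()
out = model(input)
out.backward(torch.ones_like(out))  # weight every output component equally
```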
4. Compute the loss

```python
output = model(input)
target = torch.randn(10)     # a dummy target, just for the example
target = target.view(1, -1)  # reshape to match output
criterion = nn.MSELoss()     # choose the loss
loss = criterion(output, target)
print(loss)
# tensor(0.5048, grad_fn=<MseLossBackward>)
```
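Following loss.grad_fn backwards walks the computation graph that autograd will differentiate; the exact class names in the printout vary by PyTorch version:

```python
# inspect a few steps of the autograd graph behind the loss
print(loss.grad_fn)                                            # MSELoss
print(loss.grad_fn.next_functions[0][0])                       # Linear (fc3)
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
```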
Testing the effect of .zero_grad(), which clears the gradient buffers:

```python
model.zero_grad()
print(model.conv1.bias.grad)  # zeros after clearing
loss.backward()
print(model.conv1.bias.grad)  # populated by backprop
```

Output:

```
tensor([0., 0., 0., 0., 0., 0.])
tensor([-0.0067,  0.0114,  0.0033, -0.0013,  0.0076,  0.0010])
```

5. Update the parameters
```python
learning_rate = 0.01
for f in model.parameters():
    f.data.sub_(f.grad.data * learning_rate)  # SGD: w <- w - lr * grad
```
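Mutating .data works but bypasses autograd's safety checks; an equivalent sketch in the more current idiom (not from the original post) wraps the in-place update in torch.no_grad():

```python
# equivalent manual SGD step without touching .data
with torch.no_grad():
    for f in model.parameters():
        f -= learning_rate * f.grad
```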
6. Complete minimal code

```python
import torch.optim as optim

criterion = nn.MSELoss()                           # choose the loss
optimizer = optim.SGD(model.parameters(), lr=0.1)  # parameters to optimize and the learning rate

# run the following inside the training loop
optimizer.zero_grad()             # clear the gradient buffers
output = model(input)             # forward pass
loss = criterion(output, target)  # compute the loss
loss.backward()                   # backward pass
optimizer.step()                  # update the parameters
```