Study notes on the b站小土堆 PyTorch tutorial.

The loss is used to update the model's parameters:
1. It measures the gap between the actual output and the target.
2. It provides a basis for updating the output (backpropagation).

1 MSELoss
import torch
from torch.nn import L1Loss
from torch import nn

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

inputs = torch.reshape(inputs, (-1, 1, 1, 3))
targets = torch.reshape(targets, (-1, 1, 1, 3))

loss = L1Loss()
result = loss(inputs, targets)

loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)

print(result)
print(result_mse)

tensor(0.6667)
tensor(1.3333)
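These two numbers can be checked by hand: L1Loss averages the absolute errors, (|1-1| + |2-2| + |3-5|) / 3 = 2/3 ≈ 0.6667, while MSELoss averages the squared errors, (0 + 0 + 4) / 3 = 4/3 ≈ 1.3333. A minimal sketch of the same computation without the loss modules:

import torch

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

# mean absolute error (what L1Loss computes) and mean squared error (what MSELoss computes)
print((inputs - targets).abs().mean())    # tensor(0.6667)
print(((inputs - targets) ** 2).mean())   # tensor(1.3333)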
2 CrossEntropyLoss

x = torch.tensor([0.1, 0.2, 0.3])  # the input must be reshaped to the required (batch_size, num_classes) shape
y = torch.tensor([1])              # the target already has the required (batch_size,) shape, no reshape needed

x = torch.reshape(x, (-1, 3))

loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)

tensor(1.1019)
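The value 1.1019 follows from the CrossEntropyLoss formula, which applies log-softmax followed by negative log-likelihood: loss = -x[class] + log(sum_j exp(x[j])). A minimal sketch that reproduces it by hand:

import torch

x = torch.tensor([0.1, 0.2, 0.3])
y = 1  # the target class index

# -x[class] + log(sum(exp(x))): the log-sum-exp form of cross entropy
manual = -x[y] + torch.log(torch.exp(x).sum())
print(manual)  # tensor(1.1019)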
3 Using the loss in a concrete neural network
import torch
import torchvision.datasets
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=1)

class Han(nn.Module):
    def __init__(self):
        super(Han, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()
han = Han()

for data in dataloader:
    imgs, target = data
    output = han(imgs)
    # print(target)
    # print(output)
    result_loss = loss(output, target)
    print(result_loss)

tensor([7])
tensor([[ 0.0057, -0.0201, -0.0796,  0.0556, -0.0625,  0.0125, -0.0413, -0.0056,  0.0624, -0.1072]], grad_fn=<AddmmBackward0>)
...
tensor(2.2664, grad_fn=<NllLossBackward0>)
...
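The scalar loss only becomes useful for training once backward() turns it into gradients: after the call, every parameter of the network holds a .grad tensor that an optimizer can read. A minimal sketch, reusing the han, dataloader and loss objects defined above (model1[0] is the first Conv2d layer):

# take one batch, compute the loss and backpropagate it
imgs, target = next(iter(dataloader))
result_loss = loss(han(imgs), target)
result_loss.backward()

# before backward() the gradients are None; afterwards each parameter has one
print(han.model1[0].weight.grad.shape)  # torch.Size([32, 3, 5, 5])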
4 Backpropagation and the optimizer
Workflow: define the optimizer; zero the gradient of every parameter to be updated; call the loss's backward() to compute the gradient at every node; then call step() to tune each of the model's parameters (sketched below for plain SGD).
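For SGD with lr=0.01 and no momentum, step() amounts to subtracting the learning rate times each parameter's gradient from that parameter: param <- param - lr * param.grad. A toy sketch of that update (the names w and toy_loss are only for illustration):

import torch

lr = 0.01
w = torch.tensor([2.0, -1.0], requires_grad=True)

toy_loss = (w ** 2).sum()   # gradient with respect to w is 2 * w
toy_loss.backward()

with torch.no_grad():
    w -= lr * w.grad        # the update performed by optim.step() for vanilla SGD
print(w)                    # tensor([ 1.9600, -0.9800], requires_grad=True)

The complete example from the tutorial then wraps these four calls in a training loop over the CIFAR10 dataloader: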
import torch
import torchvision.datasets
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Han(nn.Module):
    def __init__(self):
        super(Han, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()
han = Han()
optim = torch.optim.SGD(han.parameters(), lr=0.01)

for epoch in range(5):
    running_loss = 0.0                      # sum of the losses over one epoch
    for data in dataloader:
        imgs, target = data
        output = han(imgs)
        result_loss = loss(output, target)  # loss for this iteration
        optim.zero_grad()                   # reset the gradient of every trainable parameter in the network to zero
        result_loss.backward()              # the optimizer needs each parameter's gradient, which backpropagation computes
        optim.step()                        # update each parameter
        running_loss = running_loss + result_loss
    print(running_loss)

Files already downloaded and verified
tensor(361.0316, grad_fn=<AddBackward0>)
tensor(357.6938, grad_fn=<AddBackward0>)
tensor(343.0560, grad_fn=<AddBackward0>)
tensor(321.8132, grad_fn=<AddBackward0>)
tensor(313.3173, grad_fn=<AddBackward0>)
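One small refinement, not part of the tutorial code above: running_loss is accumulated as a tensor, which keeps every batch's computation graph referenced until the epoch ends. Taking the plain Python number instead avoids that:

running_loss = running_loss + result_loss.item()  # .item() detaches the scalar from the graph
print(running_loss)                               # now a plain Python float, no grad_fn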