Learning rate decay is what we usually call a scheduler in English. This article implements a custom learning rate schedule in two ways: one using a lambda, the other by subclassing PyTorch's lr_scheduler.
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.optim import *
from torch.optim import lr_scheduler  # used below as lr_scheduler.CosineAnnealingLR
from torchvision import models

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc = nn.Linear(1, 10)

    def forward(self, x):
        return self.fc(x)

Cosine annealing
With T_max = 20:
lrs = []
model = Net()
LR = 0.01
epochs = 100
optimizer = Adam(model.parameters(), lr=LR)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=20, eta_min=1e-9)
for epoch in range(epochs):
    optimizer.step()
    lrs.append(optimizer.state_dict()['param_groups'][0]['lr'])
    scheduler.step()
plt.figure(figsize=(10, 6))
plt.plot(lrs, color='r')
plt.text(0, lrs[0], str(lrs[0]))
plt.text(epochs, lrs[-1], str(lrs[-1]))
plt.show()

When T_max = epochs, we get the cosine annealing scheduler we use most often. Here is that case, and after it we will look at the version with warm-up:
lrs = []
model = Net()
LR = 0.01
epochs = 100
optimizer = Adam(model.parameters(), lr=LR)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=1e-9)
for epoch in range(epochs):
    optimizer.step()
    lrs.append(optimizer.state_dict()['param_groups'][0]['lr'])
    scheduler.step()
plt.figure(figsize=(10, 6))
plt.plot(lrs, color='r')
plt.text(0, lrs[0], str(lrs[0]))
plt.text(epochs, lrs[-1], str(lrs[-1]))
plt.show()

Warm-up
Let's look at the cosine annealing formula as defined in PyTorch:

$$
\begin{aligned}
\eta_t &= \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), & T_{cur} &\neq (2k+1)T_{max}; \\
\eta_{t+1} &= \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), & T_{cur} &= (2k+1)T_{max}.
\end{aligned}
$$

In practice the update follows the closed form below; when T_cur = T_max, the (1 + cos) term is 0, so the learning rate equals \eta_{min}:

$$
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)
$$
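Substituting T_cur = T_max into the closed form makes this boundary case explicit:

$$
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\pi\right) = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min}) \cdot 0 = \eta_{min}
$$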
Here we plot it directly from the definition of the formula:
etas = []
epochs = 100
eta_max = 1e-4
eta_min = 1e-9
t_max = epochs
for i in range(epochs):
    t_cur = i
    eta = eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * t_cur / t_max))
    etas.append(eta)
plt.figure(figsize=(10, 6))
plt.plot(range(len(etas)), etas, color='r')
plt.text(epochs, etas[-1], str(etas[-1]))
plt.show()

From the plot this matches the cosine annealing curve above. The sharp-eyed will notice, though, that the final lr is not equal to eta_min = 1e-9.
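That gap is expected: the loop only evaluates T_cur = 0 … epochs − 1, so the cosine never actually reaches π and the curve bottoms out slightly above eta_min. A quick check with the closed form, just plugging in the values used above:

import numpy as np

# Last value the loop produces: t_cur stops at epochs - 1, not at T_max.
epochs, eta_max, eta_min = 100, 1e-4, 1e-9
last = eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * (epochs - 1) / epochs))
print(last)  # roughly 2.5e-8, noticeably larger than eta_min = 1e-9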
Defining it with a lambda
There is a minor quirk, not really a bug, which is pointed out in the docstring below.
def warm_up_cosine_lr_scheduler(optimizer, epochs=100, warm_up_epochs=5, eta_min=1e-9):
    """
    Description:
        - Warm-up cosine learning rate scheduler; the first epoch's lr is very small
    Arguments:
        - optimizer: input optimizer for the training
        - epochs: int, total epochs for your training, default is 100.
          NOTE: you should pass the correct epochs for your training
        - warm_up_epochs: int, default is 5, which means the lr will warm up for 5 epochs.
          If warm_up_epochs <= 0, no warm-up is done and this acts as a plain cosine lr scheduler
        - eta_min: float, eta_min passed to CosineAnnealingLR when warm_up_epochs <= 0
    Returns:
        - scheduler
    """
    if warm_up_epochs <= 0:
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=eta_min)
    else:
        warm_up_with_cosine_lr = lambda epoch: eta_min + (epoch / warm_up_epochs) if epoch <= warm_up_epochs \
            else 0.5 * (np.cos((epoch - warm_up_epochs) / (epochs - warm_up_epochs) * np.pi) + 1)
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warm_up_with_cosine_lr)
    return scheduler

# warm-up cosine lr scheduler
lrs = []
model = Net()
LR = 1e-4
warm_up_epochs = 30
epochs = 100
optimizer = SGD(model.parameters(), lr=LR)
scheduler = warm_up_cosine_lr_scheduler(optimizer, warm_up_epochs=warm_up_epochs, eta_min=1e-9)
for epoch in range(epochs):
    optimizer.step()
    lrs.append(optimizer.state_dict()['param_groups'][0]['lr'])
    scheduler.step()
plt.figure(figsize=(10, 6))
plt.plot(lrs, color='r')
plt.text(0, lrs[0], str(lrs[0]))
plt.text(epochs, lrs[-1], str(lrs[-1]))
plt.show()

From the plot, the first lr is extremely small, so the first epoch of training barely updates the weights.
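The reason is how LambdaLR works: it multiplies the optimizer's base lr by the factor returned by the lambda, so at epoch 0 the factor is eta_min + 0 / warm_up_epochs = 1e-9 and the effective lr becomes LR * 1e-9. A minimal sketch of that arithmetic, using the LR = 1e-4 and eta_min = 1e-9 values from above:

LR, eta_min, warm_up_epochs = 1e-4, 1e-9, 30

# LambdaLR scales the base lr by the lambda's return value, so the epoch-0
# warm-up factor of eta_min + 0 / warm_up_epochs makes the first lr vanishingly small.
factor_epoch0 = eta_min + 0 / warm_up_epochs   # 1e-9
print(LR * factor_epoch0)                      # 1e-13: essentially no weight update in epoch 0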
Subclassing lr_scheduler
class WarmupCosineLR(lr_scheduler._LRScheduler):
    def __init__(self, optimizer, lr_min, lr_max, warm_up=0, T_max=10, start_ratio=0.1):
        """
        Description:
            - get a warm-up cosine lr scheduler
        Arguments:
            - optimizer: (torch.optim.*), torch optimizer
            - lr_min: (float), minimum learning rate
            - lr_max: (float), maximum learning rate
            - warm_up: (int), warm_up epoch or iteration
            - T_max: (int), maximum epoch or iteration
            - start_ratio: (float), controls the epoch-0 lr; if ratio=0, the epoch-0 lr is lr_min
        Example:
            epochs = 100
            warm_up = 5
            cosine_lr = WarmupCosineLR(optimizer, 1e-9, 1e-3, warm_up, epochs)
            lrs = []
            for epoch in range(epochs):
                optimizer.step()
                lrs.append(optimizer.state_dict()['param_groups'][0]['lr'])
                cosine_lr.step()
            plt.plot(lrs, color='r')
            plt.show()
        """
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.warm_up = warm_up
        self.T_max = T_max
        self.start_ratio = start_ratio
        self.cur = 0    # current epoch or iteration
        super().__init__(optimizer, -1)

    def get_lr(self):
        if (self.warm_up == 0) and (self.cur == 0):
            lr = self.lr_max
        elif (self.warm_up != 0) and (self.cur <= self.warm_up):
            if self.cur == 0:
                lr = self.lr_min + (self.lr_max - self.lr_min) * (self.cur + self.start_ratio) / self.warm_up
            else:
                lr = self.lr_min + (self.lr_max - self.lr_min) * self.cur / self.warm_up
            # print(f'{self.cur} -> {lr}')
        else:
            # this works fine
            lr = self.lr_min + (self.lr_max - self.lr_min) * 0.5 * \
                 (np.cos((self.cur - self.warm_up) / (self.T_max - self.warm_up) * np.pi) + 1)
        self.cur += 1
        return [lr for base_lr in self.base_lrs]
# class
epochs = 100
warm_up = 5
cosine_lr = WarmupCosineLR(optimizer, 1e-9, 1e-3, warm_up, epochs, 0.1)
lrs = []
for epoch in range(epochs):
    optimizer.step()
    lrs.append(optimizer.state_dict()['param_groups'][0]['lr'])
    cosine_lr.step()
plt.figure(figsize=(10, 6))
plt.plot(lrs, color='r')
plt.text(0, lrs[0], str(lrs[0]))
plt.text(epochs, lrs[-1], str(lrs[-1]))
plt.show()

From the plot, the first epoch's lr is no longer extremely small, which is what we wanted. Of course, if you actually want the first epoch's lr to be very small (e.g. 1e-8), you can experiment with other values of start_ratio yourself.
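For reference, the warm-up branch of get_lr() gives epoch 0 an lr of lr_min + (lr_max − lr_min) * start_ratio / warm_up, so the values used here land around 2e-5; shrinking start_ratio moves it back toward lr_min. A quick check with the numbers above:

lr_min, lr_max, warm_up, start_ratio = 1e-9, 1e-3, 5, 0.1

# Epoch-0 lr from the warm-up branch of WarmupCosineLR.get_lr()
epoch0_lr = lr_min + (lr_max - lr_min) * (0 + start_ratio) / warm_up
print(epoch0_lr)   # about 2e-5; with start_ratio = 0 this collapses to lr_min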