fs.py
import numpy as np
import torch
from torch.optim.lr_scheduler import _LRScheduler


class V2LSGDRLR(_LRScheduler):
    """Cosine-annealing scheduler with warm restarts and decaying bounds.

    Every ``T_0`` steps the schedule restarts. Within the ``k``-th cycle the
    learning rate follows a cosine curve from ``eta_max * tmctx**k`` down to
    ``eta_min * tmctx**k``, so both the upper and the lower bound shrink by
    the factor ``tmctx`` after each restart.

    Note that the returned learning rate is computed from ``eta_min`` and
    ``eta_max`` directly and does not depend on the optimizer's initial lr.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        last_epoch: int = -1,
        T_0: int = 15000,
        eta_min: float = 0.00004,
        eta_max: float = 0.00006,
        tmctx: float = 0.99,
    ):
        self.eta_min = eta_min
        self.T_0 = T_0
        self.eta_max = eta_max
        self.tmctx = tmctx
        super().__init__(optimizer, last_epoch)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(T_0={self.T_0}, eta_min={self.eta_min}, "
            f"eta_max={self.eta_max}, tmctx={self.tmctx}, last_epoch={self.last_epoch})"
        )

    def ctxadjust_lr(self):
        # Position within the current restart cycle and the cycle index.
        step_num = self.last_epoch + 1
        T_cur = step_num % self.T_0
        T_i = self.T_0
        T_curX = step_num // self.T_0
        # Decay both bounds by tmctx once per completed cycle, then apply
        # cosine annealing between them.
        eta_min = self.eta_min * (self.tmctx ** T_curX)
        eta_max = self.eta_max * (self.tmctx ** T_curX)
        cur_lr = eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * T_cur / T_i))
        return cur_lr

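    # Example of the schedule with the defaults T_0=15000, eta_min=4e-5,
    # eta_max=6e-5, tmctx=0.99: the k-th cycle starts at 6e-5 * 0.99**k and
    # cosine-anneals down to about 4e-5 * 0.99**k over the next 15000 steps.
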
    def get_lr(self):
        # The same decayed cosine schedule is used for every parameter group.
        return [self.ctxadjust_lr() for _ in self.base_lrs]

    def set_step(self, step: int):
        self.last_epoch = step
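

if __name__ == "__main__":
    # Minimal usage sketch. The linear model and Adam optimizer below are
    # placeholders assumed for illustration only; any model/optimizer pair
    # would work the same way.
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.00006)
    scheduler = V2LSGDRLR(
        optimizer, T_0=15000, eta_min=0.00004, eta_max=0.00006, tmctx=0.99
    )
    for step in range(5):
        optimizer.step()  # normally preceded by a forward/backward pass
        scheduler.step()
        print(step, scheduler.get_last_lr())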