allow SGDR to anneal the optimizer's learning rate
e.g. YellowFin
parent dbd6c31ea5
commit 0b9c1fe117
1 changed file with 2 additions and 1 deletion
@@ -1262,9 +1262,10 @@ class SGDR(Learner):
             raise Exception('this should never happen.')
 
     def rate_at(self, epoch):
+        base_rate = self.start_rate if self.start_rate is not None else self.optim.lr
         restart, sub_epoch, next_restart = self.split_num(max(1, epoch))
         x = _f(sub_epoch - 1) / _f(next_restart)
-        return self.start_rate * self.decay**_f(restart) * cosmod(x)
+        return base_rate * self.decay**_f(restart) * cosmod(x)
 
     def next(self):
         if not super().next():
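For context, a minimal self-contained sketch of what rate_at computes: cosine annealing with warm restarts (SGDR), where the base rate now falls back to the optimizer's own lr (useful with self-tuning optimizers such as YellowFin) when no start_rate is given. The cosmod form, the restart bookkeeping, and the FakeOptim object below are assumptions standing in for the repo's cosmod, split_num, _f, and optimizer; they are not the repo's actual helpers.

import math

def cosmod(x):
    # assumed half-cosine: 1.0 at x = 0, falling toward 0.0 as x -> 1
    return 0.5 * (1.0 + math.cos(x * math.pi))

def sgdr_rate(epoch, restart_lengths, base_rate, decay=0.5):
    # Cosine-annealed learning rate with warm restarts (SGDR-style sketch).
    # restart_lengths: epochs in each restart period, e.g. [10, 20, 40];
    # this loop plays the role of the repo's split_num bookkeeping.
    remaining = max(1, epoch)
    restart = 0
    for length in restart_lengths:
        if remaining <= length:
            break
        remaining -= length
        restart += 1
    else:
        # past the last scheduled restart: keep repeating the final period
        length = restart_lengths[-1]
        while remaining > length:
            remaining -= length
            restart += 1
    x = (remaining - 1) / length
    # the commit's change: base_rate may come from the optimizer itself
    return base_rate * decay**restart * cosmod(x)

class FakeOptim:
    # hypothetical stand-in for a self-tuning optimizer such as YellowFin
    lr = 0.05

start_rate = None  # user gave no explicit starting rate
optim = FakeOptim()
base_rate = start_rate if start_rate is not None else optim.lr

for epoch in (1, 5, 10, 11, 30):
    print(epoch, round(sgdr_rate(epoch, [10, 20, 40], base_rate), 5))

With start_rate left as None, the schedule anneals from optim.lr (0.05 here) within each period and scales by decay**restart after each warm restart, which mirrors the intent of the one-line fallback added in this commit.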