We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent c5de0e6 · commit f3be98d (copy full SHA: f3be98d)
mortal/lr_scheduler.py
@@ -1,4 +1,4 @@
1
-import numpy as np
+import math
2
from torch.optim.lr_scheduler import LambdaLR
3
4
class LinearWarmUpCosineAnnealingLR(LambdaLR):
@@ -25,5 +25,5 @@ def _step_inner(self, steps):
25
if steps < self.max_steps:
26
cos_steps = steps - self.warm_up_steps
27
cos_max_steps = self.max_steps - self.warm_up_steps
28
- return self.final + 0.5 * (self.peak - self.final) * (1 + np.cos(cos_steps / cos_max_steps * np.pi))
+ return self.final + 0.5 * (self.peak - self.final) * (1 + math.cos(cos_steps / cos_max_steps * math.pi))
29
return self.final
0 commit comments