-
Notifications
You must be signed in to change notification settings - Fork 28
/
Optimizer.cpp
33 lines (27 loc) · 1.12 KB
/
Optimizer.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
#include "Optimizer.hpp"
// Vanilla stochastic gradient descent step for matrix parameters:
// param <- param - learningRate * grad.
void Optimizer::sgd(const MatD& grad, const Real learningRate, MatD& param){
  param = param - learningRate*grad;
}
// Vanilla stochastic gradient descent step for vector parameters:
// param <- param - learningRate * grad.
void Optimizer::sgd(const VecD& grad, const Real learningRate, VecD& param){
  param = param - learningRate*grad;
}
// AdaGrad update for matrix parameters: accumulate squared gradients in
// gradHist, then scale each coordinate's step by the inverse root of its
// history before applying a plain SGD step.
// Note: `grad` is scaled in place, matching the original contract.
void Optimizer::adagrad(MatD& grad, const Real learningRate, MatD& gradHist, MatD& param){
  gradHist.array() += grad.array().square();

  // Epsilon guards the division: without it, a coordinate whose accumulated
  // history is still zero (e.g. a zero gradient on the first step) yields
  // 0/0 = NaN, which then permanently poisons `param`.
  grad.array() /= (gradHist.array().sqrt() + static_cast<Real>(1.0e-8));

  Optimizer::sgd(grad, learningRate, param);
}
// AdaGrad update for vector parameters: accumulate squared gradients in
// gradHist, then scale each coordinate's step by the inverse root of its
// history before applying a plain SGD step.
// Note: `grad` is scaled in place, matching the original contract.
void Optimizer::adagrad(VecD& grad, const Real learningRate, VecD& gradHist, VecD& param){
  gradHist.array() += grad.array().square();

  // Epsilon guards the division: without it, a coordinate whose accumulated
  // history is still zero (e.g. a zero gradient on the first step) yields
  // 0/0 = NaN, which then permanently poisons `param`.
  grad.array() /= (gradHist.array().sqrt() + static_cast<Real>(1.0e-8));

  Optimizer::sgd(grad, learningRate, param);
}
// Classical momentum update for matrix parameters.
// gradHist holds the velocity: v <- m*v + learningRate*grad, then
// param <- param - v. Written directly instead of routing the two steps
// through sgd() with a negated learning rate; the arithmetic is identical.
void Optimizer::momentum(MatD& grad, const Real learningRate, const Real m, MatD& gradHist, MatD& param){
  gradHist = m*gradHist + learningRate*grad;
  param -= gradHist;
}
// Classical momentum update for vector parameters.
// gradHist holds the velocity: v <- m*v + learningRate*grad, then
// param <- param - v. Written directly instead of routing the two steps
// through sgd() with a negated learning rate; the arithmetic is identical.
void Optimizer::momentum(VecD& grad, const Real learningRate, const Real m, VecD& gradHist, VecD& param){
  gradHist = m*gradHist + learningRate*grad;
  param -= gradHist;
}