Commit 1c04a52

Merge reversion of variance computation PR #7 from mikucionisaau/revert-variance

Revert the new variance computation
mikucionisaau authored Nov 1, 2023
2 parents 9358116 + 8341ca8 commit 1c04a52
Showing 6 changed files with 63 additions and 64 deletions.
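
Context for the diffs below (editorial note, not part of the commit): PR #7 had qvar_t keep a running mean of squares (_sq) and derive the variance on demand as E[x²] − E[x]²; this merge reverts to the original scheme, which stores a running variance (_variance) and updates it incrementally. A minimal, self-contained sketch of the two bookkeeping schemes, using hypothetical struct names:

```cpp
#include <algorithm>

// Scheme removed by this merge (PR #7): track E[x^2], derive variance on demand.
struct sq_based {
    double avg = 0, sq = 0, cnt = 0;
    void add(double d) {
        cnt += 1;
        avg += (d - avg) / cnt;      // running mean of x
        sq  += (d * d - sq) / cnt;   // running mean of x^2
    }
    double variance() const {
        return std::max(0.0, sq - avg * avg); // clamped at 0, as in the old variance()
    }
};

// Scheme restored by this merge: maintain the variance estimate directly.
struct var_based {
    double avg = 0, var = 0, cnt = 0;
    void add(double d) {
        const double oa = avg;       // mean before the update
        cnt += 1;
        avg += (d - avg) / cnt;
        var = (cnt == 1) ? 0.0 : var + ((d - oa) * (d - avg) - var) / cnt;
    }
};
```

For unweighted points both schemes track the population variance of the data seen so far.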
28 changes: 13 additions & 15 deletions src/MLearning.cpp
@@ -255,10 +255,10 @@ namespace prlearn {
                     auto c = clouds[s._cloud]._nodes[s._nodes[i]]._q.avg();
                     fut = std::min(fut, c);
                     if (c == best)
-                        var = std::min(var, clouds[s._cloud]._nodes[s._nodes[i]]._q.variance());
+                        var = std::min(var, clouds[s._cloud]._nodes[s._nodes[i]]._q._variance);
                     else if ((c < best && minimize) || (c > best && !minimize)) {
                         best = c;
-                        var = clouds[s._cloud]._nodes[s._nodes[i]]._q.variance();
+                        var = clouds[s._cloud]._nodes[s._nodes[i]]._q._variance;
                     }
                 }
             }
@@ -275,8 +275,8 @@ namespace prlearn {
                 auto v = s._variance[d];
                 v.first.avg() += best;
                 v.second.avg() += best;
-                v.first.set_variance(std::max(v.first.variance(), var));
-                v.second.set_variance(std::max(v.second.variance(), var));
+                v.first._variance = std::max(v.first._variance, var);
+                v.second._variance = std::max(v.second._variance, var);
                 tmpq[d].first.addPoints(v.first.cnt(), v.first.avg());
                 tmpq[d].second.addPoints(v.second.cnt(), v.second.avg());
                 mean.addPoints(v.first.cnt(), v.first.avg());
@@ -288,8 +288,8 @@ namespace prlearn {
                 auto v = s._old[d];
                 v.first.avg() += best;
                 v.second.avg() += best;
-                v.first.set_variance(std::max(v.first.variance(), var));
-                v.second.set_variance(std::max(v.second.variance(), var));
+                v.first._variance = std::max(v.first._variance, var);
+                v.second._variance = std::max(v.second._variance, var);
                 old_mean.addPoints(v.first.cnt(), v.first.avg());
                 old_mean.addPoints(v.second.cnt(), v.second.avg());
                 old_var.push_back(v.first);
@@ -305,7 +305,7 @@ namespace prlearn {
             for (auto& s : sample_qvar) {
                 {
                     const auto dif = std::abs(s.avg() - mean._avg);
-                    const auto std = std::sqrt(s.variance());
+                    const auto std = std::sqrt(s._variance);
                     auto var = (std::pow(dif + std, 2.0) + std::pow(dif - std, 2.0)) / 2.0;
                     svar.addPoints(s.cnt(), var);
                 }
@@ -317,7 +317,7 @@ namespace prlearn {
                 }
                 {
                     const auto dif = std::abs(s.avg() - dmin);
-                    const auto std = std::sqrt(s.variance());
+                    const auto std = std::sqrt(s._variance);
                     auto var = (std::pow(dif + std, 2.0) + std::pow(dif - std, 2.0)) / 2.0;
                     vars[id].addPoints(s.cnt(), var);
                 }
@@ -328,20 +328,18 @@ namespace prlearn {

             for (auto& s : old_var) {
                 const auto dif = std::abs(s.avg() - old_mean._avg);
-                const auto std = std::sqrt(s.variance());
+                const auto std = std::sqrt(s._variance);
                 auto var = (std::pow(dif + std, 2.0) + std::pow(dif - std, 2.0)) / 2.0;
                 ovar.addPoints(s.cnt(), var);
             }

             for (size_t i = 0; i < dimen; ++i) {
-                tmpq[i].first.set_variance(vars[i]._avg);
-                tmpq[i].second.set_variance(vars[i + dimen]._avg);
+                tmpq[i].first._variance = vars[i]._avg;
+                tmpq[i].second._variance = vars[i + dimen]._avg;
             }

-            qvar_t nq(mean._avg, mean._cnt / (dimen * 2), 0);
-            nq.set_variance(svar._avg);
-            qvar_t oq(old_mean._avg, old_mean._cnt / (dimen * 2), 0);
-            oq.set_variance(ovar._avg);
+            qvar_t nq(mean._avg, mean._cnt / (dimen * 2), svar._avg);
+            qvar_t oq(old_mean._avg, old_mean._cnt / (dimen * 2), ovar._avg);
             return std::make_pair(nq, oq);
         }
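A side note on the recurring expression above (plain algebra, not part of the commit): with dif = |s.avg() − m| for a reference point m and std = √variance, the quantity (std::pow(dif + std, 2.0) + std::pow(dif - std, 2.0)) / 2.0 is exactly the sample's second moment about m:

```latex
\frac{(d+s)^2 + (d-s)^2}{2} = d^2 + s^2,
\qquad\text{i.e.}\qquad
\mathbb{E}\!\left[(X-m)^2\right] = (\mu - m)^2 + \sigma^2 .
```

Feeding these second moments into addPoints with the sample counts as weights therefore accumulates a count-weighted mixture variance about the shared reference point; qvar_t::approximate in structs.cpp below uses the same decomposition.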
6 changes: 3 additions & 3 deletions src/RefinementTree.cpp
@@ -69,7 +69,7 @@ namespace prlearn {
                 return qvar_t(std::numeric_limits<double>::quiet_NaN(), 0, 0);
             auto n = _nodes[res->_nid].get_leaf(point, res->_nid, _nodes);
             auto& node = _nodes[n];
-            return qvar_t(node._predictor._q.avg(), node._predictor._cnt, node._predictor._q.squared());
+            return qvar_t(node._predictor._q.avg(), node._predictor._cnt, node._predictor._q._variance);
         }

         double RefinementTree::getBestQ(const double* point, bool minimization, size_t* next_labels, size_t n_labels) const {
@@ -231,12 +231,12 @@ namespace prlearn {
                 if (nodes[slow]._predictor._q.cnt() == 0) {
                     nodes[slow]._predictor._q.cnt() = 1;
                     nodes[slow]._predictor._q.avg() = oq.avg();
-                    nodes[slow]._predictor._q.squared() = std::pow(oq.avg(), 2.0);
+                    nodes[slow]._predictor._q._variance = 0;
                 }
                 if (nodes[shigh]._predictor._q.cnt() == 0) {
                     nodes[shigh]._predictor._q.cnt() = 1;
                     nodes[shigh]._predictor._q.avg() = oq.avg();
-                    nodes[shigh]._predictor._q.squared() = std::pow(oq.avg(), 2.0);
+                    nodes[shigh]._predictor._q._variance = 0;
                 }
             }
             nodes[shigh]._predictor._cnt = nodes[shigh]._predictor._q.cnt();
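Both sides of the second hunk above encode the same fresh-leaf state: under the reverted E[x²] representation, setting squared() to std::pow(oq.avg(), 2.0) makes the derived variance zero, and the restored code writes that zero into _variance directly.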
4 changes: 2 additions & 2 deletions src/SimpleMLearning.cpp
@@ -110,14 +110,14 @@ namespace prlearn {
                 for(auto& s : n._succssors)
                 {
                     const auto dif = std::abs(s._cost.avg() - nq._avg);
-                    const auto std = std::sqrt(s._cost.variance());
+                    const auto std = std::sqrt(s._cost._variance);
                     auto var = (std::pow(dif + std, 2.0) + std::pow(dif - std, 2.0)) / 2.0;
                     nv.addPoints(s._cost.cnt(), var);
                 }
                 n._q = qvar_t(nq._avg, nq._cnt, nv._avg);
                 if ((minimization && n._q.avg() <= rq.avg()) ||
                     (!minimization && n._q.avg() >= rq.avg())) {
-                    if(n._q.avg() != rq.avg() || n._q.variance() < rq.variance() || n._q.cnt() > rq.cnt())
+                    if(n._q.avg() != rq.avg() || n._q._variance < rq._variance || n._q.cnt() > rq.cnt())
                         rq = n._q;
                 }
             }
2 changes: 1 addition & 1 deletion src/SimpleRegressor.h
@@ -47,7 +47,7 @@ namespace prlearn {
             auto res = std::lower_bound(std::begin(_labels), std::end(_labels), lf);

             if (res != std::end(_labels) && res->_label == label)
-                return qvar_t{res->_value.avg(), (double)res->_cnt, res->_value.squared()};
+                return qvar_t{res->_value.avg(), (double)res->_cnt, res->_value._variance};
             else
                 return qvar_t{std::numeric_limits<double>::quiet_NaN(), 0, 0};
         }
49 changes: 36 additions & 13 deletions src/structs.cpp
@@ -1,21 +1,21 @@
 /*
  * Copyright Peter G. Jensen
- * 
+ *
  * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
- * 
+ *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
- * 
+ *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

-/* 
+/*
 * File: structs.cpp
 * Author: Peter G. Jensen
 *
@@ -44,7 +44,7 @@ namespace prlearn {
     void qvar_t::print(std::ostream& stream) const {
         stream << "[";
         stream << (*(avg_t*)this);
-        stream << ", " << variance() << "]";
+        stream << ", " << _variance << "]";
     }

     std::ostream& operator<<(std::ostream& o, const qvar_t& v) {
@@ -59,25 +59,48 @@ namespace prlearn {
             return a;
         qvar_t res = a;
         res.addPoints(b._cnt, b._avg);
-        res._sq = (a._sq * (a._cnt / res._cnt)) + (b._sq * (b._cnt / res._cnt));
+        const auto adif = std::abs(res._avg - a._avg);
+        const auto bdif = std::abs(res._avg - b._avg);
+        const auto astd = std::sqrt(a._variance);
+        const auto bstd = std::sqrt(b._variance);
+        auto ca = std::pow(adif + astd, 2.0) + std::pow(adif - astd, 2.0);
+        auto cb = std::pow(bdif + bstd, 2.0) + std::pow(bdif - bstd, 2.0);
+        avg_t tmp;
+        tmp.addPoints(a._cnt, ca / 2.0);
+        tmp.addPoints(b._cnt, cb / 2.0);
+        res._variance = tmp._avg;
         return res;
     }

     qvar_t& qvar_t::operator+=(double d) {
         assert(!std::isinf(d));
         avg_t::operator+=(d);
-        auto diff = std::pow(d, 2.0) - _sq;
-        _sq += diff / _cnt;
+        auto nvar = std::pow(d - _avg, 2.0);
+        assert(!std::isinf(nvar));
+        if (_cnt == 1) _variance = nvar;
+        else {
+            nvar -= _variance;
+            _variance += nvar / _cnt;
+        }
         return *this;
     }

     void qvar_t::addPoints(double weight, double d) {
         assert(weight >= 0);
         assert(_cnt >= 0);
         if (weight == 0) return;
+        auto oa = _avg;
         avg_t::addPoints(weight, d);
-        auto diff = std::pow(d, 2.0) - _sq;
-        _sq += diff * (weight / _cnt);
+        auto nvar = std::abs((d - oa)*(d - _avg));
+        assert(!std::isinf(nvar));
+        if (_cnt == weight) _variance = nvar;
+        else {
+            nvar -= _variance;
+            _variance += (nvar * weight) / _cnt;
+        }
+        assert(_variance >= 0);
+        assert(!std::isnan(_variance));
+        assert(!std::isinf(_variance));
     }

     double triangular_cdf(double mid, double width, double point) {
@@ -94,10 +94,10 @@ namespace prlearn {
         constexpr double minvar = 0.0001;
         if (std::min(a.cnt(), b.cnt()) <= 1)
             return;
-        if (a.variance() == b.variance() && a.avg() == b.avg())
+        if (a._variance == b._variance && a.avg() == b.avg())
             return;
-        auto vara = std::max(minvar, a.variance());
-        auto varb = std::max(minvar, b.variance());
+        auto vara = std::max(minvar, a._variance);
+        auto varb = std::max(minvar, b._variance);

         double tval = std::abs(a.avg() - b.avg()) / std::sqrt(((vara * a.cnt()) + (varb * b.cnt())) / (a.cnt() * b.cnt()));

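As a sanity check on the weighted update restored in qvar_t::addPoints above, here is a minimal standalone program (illustrative, not from the repository) that applies the same rule and compares it with the directly computed statistics:

```cpp
#include <cmath>
#include <iostream>

// Weighted incremental mean/variance, mirroring the restored qvar_t::addPoints.
struct running_t {
    double avg = 0, var = 0, cnt = 0;
    void addPoints(double weight, double d) {
        if (weight == 0) return;
        const double oa = avg;                        // mean before the update
        cnt += weight;
        avg += (d - avg) * (weight / cnt);            // weighted mean update
        double nvar = std::abs((d - oa) * (d - avg)); // Welford-style cross term
        if (cnt == weight) var = nvar;                // first batch seeds the estimate
        else var += (nvar - var) * (weight / cnt);
    }
};

int main() {
    running_t r;
    for (double x : {1.0, 2.0, 3.0, 4.0})
        r.addPoints(1.0, x);
    // Population stats of {1, 2, 3, 4}: mean 2.5, variance 1.25.
    std::cout << r.avg << " " << r.var << "\n"; // prints: 2.5 1.25
}
```

For unit weights this is exactly Welford's online algorithm, so var converges to the population variance; for batch weights it is the analogous approximation that treats the whole batch as a point mass at d.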
38 changes: 8 additions & 30 deletions src/structs.h
@@ -1,21 +1,21 @@
 /*
  * Copyright Peter G. Jensen
- * 
+ *
  * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
- * 
+ *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
- * 
+ *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

-/* 
+/*
 * File: structs.h
 * Author: Peter G. Jensen
 *
Expand All @@ -33,8 +33,6 @@
#include <cassert>
#include <vector>
#include <ostream>
#include <iostream>

namespace prlearn {

struct avg_t {
@@ -56,7 +54,7 @@ namespace prlearn {
             } else {
                 _cnt += weight;
                 double diff = d - _avg;
-                _avg += diff * (weight / _cnt); // add only "share" of difference
+                _avg += ((diff * weight) / (double) _cnt); // add only "share" of difference
             }
             assert(!std::isnan(_avg));
         }
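The removed and restored expressions in this hunk are algebraically identical, (diff × weight) / _cnt; the revert only restores the original floating-point evaluation order and the explicit cast of _cnt.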
@@ -98,14 +96,15 @@ namespace prlearn {

         qvar_t() = default;

-        qvar_t(double d, double w, double squared) {
+        qvar_t(double d, double w, double v) {
             _avg = d;
             _cnt = w;
-            _sq = squared;
+            _variance = v;
         };
         // this is a dirty hijack!
         qvar_t& operator+=(double d);
         void addPoints(double weight, double d);
+        double _variance = 0;

         auto& avg() {
             return _avg;
@@ -128,27 +127,6 @@ namespace prlearn {
         }
         void print(std::ostream& stream) const;
         static qvar_t approximate(const qvar_t& a, const qvar_t& b);
-        double variance() const {
-            auto pow = std::pow(_avg, 2.0);
-            if(pow >= _sq)
-                return 0;
-            return _sq - pow;
-        }
-
-        void set_variance(double var) {
-            _sq = std::pow(_avg, 2.0) + var;
-        }
-
-        double& squared() {
-            return _sq;
-        }
-
-        const double& squared() const {
-            return _sq;
-        }
-
-    private:
-        double _sq = 0;
     };

     struct splitfilter_t {
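A quick usage check of the post-revert qvar_t interface (illustrative only; the include path is an assumption):

```cpp
#include "structs.h" // hypothetical path to the prlearn header above
#include <iostream>

int main() {
    prlearn::qvar_t q(0.0, 0.0, 0.0); // avg, count, variance
    q.addPoints(1.0, 5.0);
    q.addPoints(1.0, 7.0);
    // Each point sits 1 away from the mean 6, so the population variance of {5, 7} is 1.
    std::cout << q.avg() << " " << q._variance << "\n"; // prints: 6 1
}
```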
