From 39cb136507001cdd58945502cdf0fdf56cd27b61 Mon Sep 17 00:00:00 2001 From: Geod24 Date: Tue, 14 Jun 2022 16:03:48 +0200 Subject: [PATCH] Replace usage of approxEqual with isClose approxEqual is deprecated, isClose is the replacement. We also update the CI to use GDC-12 which has a recent frontend, as gdc-11 has v2.076 which doesn't include `isClose`, but gdc-12 has `-Werror=deprecated` on by default. --- .github/workflows/d.yml | 20 +- source/dstats/base.d | 26 +- source/dstats/cor.d | 131 +++--- source/dstats/distrib.d | 298 ++++++------- source/dstats/infotheory.d | 67 ++- source/dstats/kerneldensity.d | 10 +- source/dstats/pca.d | 5 +- source/dstats/regress.d | 338 +++++++-------- source/dstats/summary.d | 68 +-- source/dstats/tests.d | 763 +++++++++++++++++----------------- 10 files changed, 868 insertions(+), 858 deletions(-) diff --git a/.github/workflows/d.yml b/.github/workflows/d.yml index 1723f34..2381ab6 100644 --- a/.github/workflows/d.yml +++ b/.github/workflows/d.yml @@ -14,8 +14,8 @@ jobs: os: [ubuntu-latest, macos-latest] dc: [ldc-latest, dmd-latest] include: - - os: ubuntu-latest - dc: gdc + - os: ubuntu-22.04 + dc: gdc-12 steps: - uses: actions/checkout@v2 @@ -27,18 +27,20 @@ jobs: compiler: ${{ matrix.dc }} - name: Install GDC - if: matrix.dc == 'gdc' + if: matrix.dc == 'gdc-12' run: | - sudo apt install gdc + sudo apt-get install gdc-12 wget https://github.com/dlang/dub/releases/download/v1.23.0/dub-v1.23.0-linux-x86_64.tar.gz tar xvf dub-v1.23.0-linux-x86_64.tar.gz - name: 'Build & Test' + if: matrix.dc != 'gdc-12' run: | - export PATH=$PATH:`pwd` # for GDC build - # Build the project, with its main file included, without unittests dub build - # Build and run tests, as defined by `unittest` configuration - # In this mode, `mainSourceFile` is excluded and `version (unittest)` are included - # See https://dub.pm/package-format-json.html#configurations dub test + + - name: 'Build & Test' + if: matrix.dc == 'gdc-12' + run: | + ./dub build --compiler=${{ matrix.dc }} + ./dub test --compiler=${{ matrix.dc }} diff --git a/source/dstats/base.d b/source/dstats/base.d index 79f2c77..9a0715b 100644 --- a/source/dstats/base.d +++ b/source/dstats/base.d @@ -811,8 +811,8 @@ unittest { // Values worked out by hand on paper. If you don't believe me, work // them out yourself. assert(auroc([4,5,6], [1,2,3]) == 1); - assert(approxEqual(auroc([8,6,7,5,3,0,9], [3,6,2,4,3,6]), 0.6904762)); - assert(approxEqual(auroc([2,7,1,8,2,8,1,8], [3,1,4,1,5,9,2,6]), 0.546875)); + assert(isClose(auroc([8,6,7,5,3,0,9], [3,6,2,4,3,6]), 0.6904762, 1e-2, 1e-2)); + assert(isClose(auroc([2,7,1,8,2,8,1,8], [3,1,4,1,5,9,2,6]), 0.546875, 1e-2, 1e-2)); } /// @@ -848,8 +848,8 @@ unittest { assert(cast(uint) round(exp(logFactorial(7)))==5040); assert(cast(uint) round(exp(logFactorial(3)))==6); // Gamma branch. - assert(approxEqual(logFactorial(12000), 1.007175584216837e5, 1e-14)); - assert(approxEqual(logFactorial(14000), 1.196610688711534e5, 1e-14)); + assert(isClose(logFactorial(12000), 1.007175584216837e5, 1e-14, 1e-2)); + assert(isClose(logFactorial(14000), 1.196610688711534e5, 1e-14, 1e-2)); } ///Log of (n choose k). 
@@ -1481,13 +1481,13 @@ unittest { scope(exit) std.file.remove("NumericFileTestDeleteMe.txt"); auto myFile = File("NumericFileTestDeleteMe.txt"); auto rng = toNumericRange(myFile.byLine()); - assert(approxEqual(rng.front, 3.14)); + assert(isClose(rng.front, 3.14)); rng.popFront; - assert(approxEqual(rng.front, 2.71)); + assert(isClose(rng.front, 2.71)); rng.popFront; - assert(approxEqual(rng.front, 8.67)); + assert(isClose(rng.front, 8.67)); rng.popFront; - assert(approxEqual(rng.front, 362435)); + assert(isClose(rng.front, 362435, 1e-2, 1e-2)); assert(!rng.empty); rng.popFront; assert(rng.empty); @@ -1622,9 +1622,9 @@ version(scid) { auto mat = [[1.0, 2, 3], [4.0, 7, 6], [7.0, 8, 9]]; auto toMat = [new double[3], new double[3], new double[3]]; invert(mat, toMat); - assert(approxEqual(toMat[0], [-0.625, -0.25, 0.375])); - assert(approxEqual(toMat[1], [-0.25, 0.5, -0.25])); - assert(approxEqual(toMat[2], [0.708333, -0.25, 0.041667])); + assert(isClose(toMat[0], [-0.625, -0.25, 0.375], 1e-2, 1e-2)); + assert(isClose(toMat[1], [-0.25, 0.5, -0.25], 1e-2, 1e-2)); + assert(isClose(toMat[2], [0.708333, -0.25, 0.041667], 1e-2, 1e-2)); } void solve(DoubleMatrix mat, double[] vec) { @@ -1685,12 +1685,12 @@ version(scid) { auto mat = [[2.0, 1, -1], [-3.0, -1, 2], [-2.0, 1, 2]]; auto vec = [8.0, -11, -3]; solve(mat, vec); - assert(approxEqual(vec, [2, 3, -1])); + assert(isClose(vec, [2, 3, -1], 1e-2, 1e-2)); auto mat2 = [[1.0, 2, 3], [4.0, 7, 6], [7.0, 8, 9]]; auto vec2 = [8.0, 6, 7]; solve(mat2, vec2); - assert(approxEqual(vec2, [-3.875, -0.75, 4.45833])); + assert(isClose(vec2, [-3.875, -0.75, 4.45833], 1e-2, 1e-2)); } // Cholesky decomposition functions adapted from Don Clugston's MathExtra diff --git a/source/dstats/cor.d b/source/dstats/cor.d index 44f8ef4..84cd035 100644 --- a/source/dstats/cor.d +++ b/source/dstats/cor.d @@ -134,9 +134,9 @@ if(doubleInput!(T) && doubleInput!(U)) { } unittest { - assert(approxEqual(pearsonCor([1,2,3,4,5][], [1,2,3,4,5][]).cor, 1)); - assert(approxEqual(pearsonCor([1,2,3,4,5][], [10.0, 8.0, 6.0, 4.0, 2.0][]).cor, -1)); - assert(approxEqual(pearsonCor([2, 4, 1, 6, 19][], [4, 5, 1, 3, 2][]).cor, -.2382314)); + assert(isClose(pearsonCor([1,2,3,4,5][], [1,2,3,4,5][]).cor, 1, 1e-2, 1e-2)); + assert(isClose(pearsonCor([1,2,3,4,5][], [10.0, 8.0, 6.0, 4.0, 2.0][]).cor, -1, 1e-2, 1e-2)); + assert(isClose(pearsonCor([2, 4, 1, 6, 19][], [4, 5, 1, 3, 2][]).cor, -.2382314, 1e-2, 1e-2)); // Make sure everything works with lowest common denominator range type. static struct Count { @@ -156,7 +156,7 @@ unittest { Count a, b; a.upTo = 100; b.upTo = 100; - assert(approxEqual(pearsonCor(a, b).cor, 1)); + assert(isClose(pearsonCor(a, b).cor, 1, 1e-2, 1e-2)); PearsonCor cor1 = pearsonCor([1,2,4][], [2,3,5][]); PearsonCor cor2 = pearsonCor([4,2,9][], [2,8,7][]); @@ -165,11 +165,11 @@ unittest { cor1.put(cor2); foreach(ti, elem; cor1.tupleof) { - assert(approxEqual(elem, combined.tupleof[ti])); + assert(isClose(elem, combined.tupleof[ti], 1e-2, 1e-2)); } - assert(approxEqual(pearsonCor([1,2,3,4,5,6,7,8,9,10][], - [8,6,7,5,3,0,9,3,6,2][]).cor, -0.4190758)); + assert(isClose(pearsonCor([1,2,3,4,5,6,7,8,9,10][], + [8,6,7,5,3,0,9,3,6,2][]).cor, -0.4190758, 1e-2, 1e-2)); foreach(iter; 0..1000) { // Make sure results for the ILP-optimized and non-optimized versions @@ -183,7 +183,7 @@ unittest { } foreach(ti, elem; res1.tupleof) { - assert(approxEqual(elem, res2.tupleof[ti])); + assert(isClose(elem, res2.tupleof[ti], 1e-2, 1e-2)); } PearsonCor resCornerCase; // Test where one N is zero. 
@@ -313,7 +313,7 @@ if(doubleInput!(T) && doubleInput!(U)) { } unittest { - assert(approxEqual(covariance([1,4,2,6,3].dup, [3,1,2,6,2].dup), 2.05)); + assert(isClose(covariance([1,4,2,6,3].dup, [3,1,2,6,2].dup), 2.05, 1e-2, 1e-2)); } /**Spearman's rank correlation. Non-parametric. This is essentially the @@ -358,18 +358,18 @@ is(typeof(input2.front < input2.front) == bool)) { unittest { //Test against a few known values. - assert(approxEqual(spearmanCor([1,2,3,4,5,6].dup, [3,1,2,5,4,6].dup), 0.77143)); - assert(approxEqual(spearmanCor([3,1,2,5,4,6].dup, [1,2,3,4,5,6].dup ), 0.77143)); - assert(approxEqual(spearmanCor([3,6,7,35,75].dup, [1,63,53,67,3].dup), 0.3)); - assert(approxEqual(spearmanCor([1,63,53,67,3].dup, [3,6,7,35,75].dup), 0.3)); - assert(approxEqual(spearmanCor([1.5,6.3,7.8,4.2,1.5].dup, [1,63,53,67,3].dup), .56429)); - assert(approxEqual(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,4.2,1.5].dup), .56429)); - assert(approxEqual(spearmanCor([1.5,6.3,7.8,7.8,1.5].dup, [1,63,53,67,3].dup), .79057)); - assert(approxEqual(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,7.8,1.5].dup), .79057)); - assert(approxEqual(spearmanCor([1.5,6.3,7.8,6.3,1.5].dup, [1,63,53,67,3].dup), .63246)); - assert(approxEqual(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,6.3,1.5].dup), .63246)); - assert(approxEqual(spearmanCor([3,4,1,5,2,1,6,4].dup, [1,3,2,6,4,2,6,7].dup), .6829268)); - assert(approxEqual(spearmanCor([1,3,2,6,4,2,6,7].dup, [3,4,1,5,2,1,6,4].dup), .6829268)); + assert(isClose(spearmanCor([1,2,3,4,5,6].dup, [3,1,2,5,4,6].dup), 0.77143, 1e-2, 1e-2)); + assert(isClose(spearmanCor([3,1,2,5,4,6].dup, [1,2,3,4,5,6].dup ), 0.77143, 1e-2, 1e-2)); + assert(isClose(spearmanCor([3,6,7,35,75].dup, [1,63,53,67,3].dup), 0.3, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1,63,53,67,3].dup, [3,6,7,35,75].dup), 0.3, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1.5,6.3,7.8,4.2,1.5].dup, [1,63,53,67,3].dup), .56429, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,4.2,1.5].dup), .56429, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1.5,6.3,7.8,7.8,1.5].dup, [1,63,53,67,3].dup), .79057, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,7.8,1.5].dup), .79057, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1.5,6.3,7.8,6.3,1.5].dup, [1,63,53,67,3].dup), .63246, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1,63,53,67,3].dup, [1.5,6.3,7.8,6.3,1.5].dup), .63246, 1e-2, 1e-2)); + assert(isClose(spearmanCor([3,4,1,5,2,1,6,4].dup, [1,3,2,6,4,2,6,7].dup), .6829268, 1e-2, 1e-2)); + assert(isClose(spearmanCor([1,3,2,6,4,2,6,7].dup, [3,4,1,5,2,1,6,4].dup), .6829268, 1e-2, 1e-2)); uint[] one = new uint[1000], two = new uint[1000]; foreach(i; 0..100) { //Further sanity checks for things like commutativity. 
size_t lowerBound = uniform(0, one.length); @@ -396,10 +396,10 @@ unittest { two[lowerBound..upperBound].reverse(); double sFive = spearmanCor(one[lowerBound..upperBound], two[lowerBound..upperBound]); - assert(approxEqual(sOne, sTwo) || (isNaN(sOne) && isNaN(sTwo))); - assert(approxEqual(sTwo, sThree) || (isNaN(sThree) && isNaN(sTwo))); - assert(approxEqual(sThree, sFour) || (isNaN(sThree) && isNaN(sFour))); - assert(approxEqual(sFour, sFive) || (isNaN(sFour) && isNaN(sFive))); + assert(isClose(sOne, sTwo, 1e-2, 1e-2) || (isNaN(sOne) && isNaN(sTwo))); + assert(isClose(sTwo, sThree, 1e-2, 1e-2) || (isNaN(sThree) && isNaN(sTwo))); + assert(isClose(sThree, sFour, 1e-2, 1e-2) || (isNaN(sThree) && isNaN(sFour))); + assert(isClose(sFour, sFive, 1e-2, 1e-2) || (isNaN(sFour) && isNaN(sFive))); } // Test input ranges. @@ -420,7 +420,7 @@ unittest { Count a, b; a.upTo = 100; b.upTo = 100; - assert(approxEqual(spearmanCor(a, b), 1)); + assert(isClose(spearmanCor(a, b), 1)); } version(unittest) { @@ -759,9 +759,9 @@ in { unittest { //Test against known values. - assert(approxEqual(kendallCor([1,2,3,4,5].dup, [3,1,7,4,3].dup), 0.1054093)); - assert(approxEqual(kendallCor([3,6,7,35,75].dup,[1,63,53,67,3].dup), 0.2)); - assert(approxEqual(kendallCor([1.5,6.3,7.8,4.2,1.5].dup, [1,63,53,67,3].dup), .3162287)); + assert(isClose(kendallCor([1,2,3,4,5].dup, [3,1,7,4,3].dup), 0.1054093, 1e-2, 1e-2)); + assert(isClose(kendallCor([3,6,7,35,75].dup,[1,63,53,67,3].dup), 0.2, 1e-2, 1e-2)); + assert(isClose(kendallCor([1.5,6.3,7.8,4.2,1.5].dup, [1,63,53,67,3].dup), .3162287, 1e-2, 1e-2)); static void doKendallTest(T)() { T[] one = new T[1000], two = new T[1000]; @@ -781,7 +781,7 @@ unittest { kendallCor(one[lowerBound..upperBound], two[lowerBound..upperBound]); double kTwo = kendallCorSmallN(one[lowerBound..upperBound], two[lowerBound..upperBound]); - assert(approxEqual(kOne, kTwo) || (isNaN(kOne) && isNaN(kTwo))); + assert(isClose(kOne, kTwo) || (isNaN(kOne) && isNaN(kTwo))); } } @@ -807,12 +807,12 @@ unittest { Count a, b; a.upTo = 100; b.upTo = 100; - assert(approxEqual(kendallCor(a, b), 1)); + assert(isClose(kendallCor(a, b), 1)); // This test will fail if there are overflow bugs, especially in tie // handling. auto rng = chain(repeat(0, 100_000), repeat(1, 100_000)); - assert(approxEqual(kendallCor(rng, rng), 1)); + assert(isClose(kendallCor(rng, rng), 1)); // Test the case where we have one range sorted already. 
assert(kendallCor(iota(5), [3, 1, 2, 5, 4]) == @@ -823,7 +823,7 @@ unittest { kendallCor([3, 1, 2, 5, 4], assumeSorted(iota(5))) ); - assert(approxEqual( + assert(isClose( kendallCor(assumeSorted(iota(5)), assumeSorted(iota(5))), 1 )); @@ -929,11 +929,11 @@ unittest { uint[] consumerFear = [1, 2, 3, 4, 5, 6, 7]; double partialCor = partial!pearsonCor(stock1Price, stock2Price, [economicHealth, consumerFear][]); - assert(approxEqual(partialCor, -0.857818)); + assert(isClose(partialCor, -0.857818, 1e-2, 1e-2)); double spearmanPartial = partial!spearmanCor(stock1Price, stock2Price, economicHealth, consumerFear); - assert(approxEqual(spearmanPartial, -0.7252)); + assert(isClose(spearmanPartial, -0.7252, 1e-2, 1e-2)); } private __gshared TaskPool emptyPool; @@ -1001,7 +1001,7 @@ auto input = [[8.0, 6, 7, 5], [3.0, 0, 9, 3], [1.0, 4, 1, 5]]; auto pearson = pearsonMatrix(input); -assert(approxEqual(pearson[0, 0], 1)); +assert(isClose(pearson[0, 0], 1, 1e-2, 1e-2)); --- */ SymmetricMatrix!double pearsonMatrix(RoR)(RoR mat, TaskPool pool = null) @@ -1058,7 +1058,7 @@ Examples: --- auto pearsonRoR = [[0.0], [0.0, 0.0], [0.0, 0.0, 0.0]]; pearsonMatrix(input, pearsonRoR); -assert(approxEqual(pearsonRoR[1][1], 1)); +assert(isClose(pearsonRoR[1][1], 1, 1e-2, 1e-2)); --- */ void pearsonMatrix(RoR, Ret)(RoR mat, ref Ret ans, TaskPool pool = null) @@ -1298,39 +1298,38 @@ unittest { // Values from R. - alias approxEqual ae; // Save typing. - assert(ae(pearsonRoR[0][0], 1)); - assert(ae(pearsonRoR[1][1], 1)); - assert(ae(pearsonRoR[2][2], 1)); - assert(ae(pearsonRoR[1][0], 0.3077935)); - assert(ae(pearsonRoR[2][0], -0.9393364)); - assert(ae(pearsonRoR[2][1], -0.6103679)); - - assert(ae(spearmanRoR[0][0], 1)); - assert(ae(spearmanRoR[1][1], 1)); - assert(ae(spearmanRoR[2][2], 1)); - assert(ae(spearmanRoR[1][0], 0.3162278)); - assert(ae(spearmanRoR[2][0], -0.9486833)); - assert(ae(spearmanRoR[2][1], -0.5)); - - assert(ae(kendallRoR[0][0], 1)); - assert(ae(kendallRoR[1][1], 1)); - assert(ae(kendallRoR[2][2], 1)); - assert(ae(kendallRoR[1][0], 0.1825742)); - assert(ae(kendallRoR[2][0], -0.9128709)); - assert(ae(kendallRoR[2][1], -0.4)); - - assert(ae(covRoR[0][0], 1.66666)); - assert(ae(covRoR[1][1], 14.25)); - assert(ae(covRoR[2][2], 4.25)); - assert(ae(covRoR[1][0], 1.5)); - assert(ae(covRoR[2][0], -2.5)); - assert(ae(covRoR[2][1], -4.75)); + assert(isClose(pearsonRoR[0][0], 1, 1e-2, 1e-2)); + assert(isClose(pearsonRoR[1][1], 1, 1e-2, 1e-2)); + assert(isClose(pearsonRoR[2][2], 1, 1e-2, 1e-2)); + assert(isClose(pearsonRoR[1][0], 0.3077935, 1e-2, 1e-2)); + assert(isClose(pearsonRoR[2][0], -0.9393364, 1e-2, 1e-2)); + assert(isClose(pearsonRoR[2][1], -0.6103679, 1e-2, 1e-2)); + + assert(isClose(spearmanRoR[0][0], 1, 1e-2, 1e-2)); + assert(isClose(spearmanRoR[1][1], 1, 1e-2, 1e-2)); + assert(isClose(spearmanRoR[2][2], 1, 1e-2, 1e-2)); + assert(isClose(spearmanRoR[1][0], 0.3162278, 1e-2, 1e-2)); + assert(isClose(spearmanRoR[2][0], -0.9486833, 1e-2, 1e-2)); + assert(isClose(spearmanRoR[2][1], -0.5, 1e-2, 1e-2)); + + assert(isClose(kendallRoR[0][0], 1, 1e-2, 1e-2)); + assert(isClose(kendallRoR[1][1], 1, 1e-2, 1e-2)); + assert(isClose(kendallRoR[2][2], 1, 1e-2, 1e-2)); + assert(isClose(kendallRoR[1][0], 0.1825742, 1e-2, 1e-2)); + assert(isClose(kendallRoR[2][0], -0.9128709, 1e-2, 1e-2)); + assert(isClose(kendallRoR[2][1], -0.4, 1e-2, 1e-2)); + + assert(isClose(covRoR[0][0], 1.66666, 1e-2, 1e-2)); + assert(isClose(covRoR[1][1], 14.25, 1e-2, 1e-2)); + assert(isClose(covRoR[2][2], 4.25, 1e-2, 1e-2)); + 
assert(isClose(covRoR[1][0], 1.5, 1e-2, 1e-2)); + assert(isClose(covRoR[2][0], -2.5, 1e-2, 1e-2)); + assert(isClose(covRoR[2][1], -4.75, 1e-2, 1e-2)); version(scid) { static bool test(double[][] a, SymmetricMatrix!double b) { foreach(i; 0..3) foreach(j; 0..i + 1) { - if(!ae(a[i][j], b[i, j])) return false; + if(!isClose(a[i][j], b[i, j], 1e-2, 1e-2)) return false; } return true; diff --git a/source/dstats/distrib.d b/source/dstats/distrib.d index d240841..b479ec9 100644 --- a/source/dstats/distrib.d +++ b/source/dstats/distrib.d @@ -117,8 +117,6 @@ enum SQ2PI = 2.50662827463100050241576528481104525300698674060993831662992; version(unittest) { import std.stdio, std.random; - - alias std.math.approxEqual ae; } /**Takes a distribution function (CDF or PDF/PMF) as a template argument, and @@ -147,7 +145,7 @@ double delegate(ParameterTypeTuple!(distrib)[0]) unittest { // Just basically see if this compiles. auto stdNormal = parametrize!normalCDF(0, 1); - assert(approxEqual(stdNormal(2.5), normalCDF(2.5, 0, 1))); + assert(isClose(stdNormal(2.5), normalCDF(2.5, 0, 1))); } /// @@ -183,7 +181,7 @@ ParamFunctor!(distrib) paramFunctor(alias distrib) unittest { // Just basically see if this compiles. auto stdNormal = paramFunctor!normalCDF(0, 1); - assert(approxEqual(stdNormal(2.5), normalCDF(2.5, 0, 1))); + assert(isClose(stdNormal(2.5), normalCDF(2.5, 0, 1))); } /// @@ -218,7 +216,7 @@ double poissonPMF(ulong k, double lambda) { } unittest { - assert(approxEqual(poissonPMF(1, .1), .0904837)); + assert(isClose(poissonPMF(1, .1), .0904837, 1e-2, 1e-2)); } enum POISSON_NORMAL = 1UL << 12; // Where to switch to normal approx. @@ -254,13 +252,13 @@ unittest { return ret; } - assert(approxEqual(poissonCDF(1, 0.5), pmfSum(1, 0.5))); - assert(approxEqual(poissonCDF(3, 0.7), pmfSum(3, 0.7))); + assert(isClose(poissonCDF(1, 0.5), pmfSum(1, 0.5), 1e-2, 1e-2)); + assert(isClose(poissonCDF(3, 0.7), pmfSum(3, 0.7), 1e-2, 1e-2)); // Absurdly huge values: Test normal approximation. // Values from R. double ans = poissonCDF( (1UL << 50) - 10_000_000, 1UL << 50); - assert(approxEqual(ans, 0.3828427)); + assert(isClose(ans, 0.3828427, 1e-2, 1e-2)); // Make sure cutoff is reasonable, i.e. make sure gamma incomplete branch // and normal branch get roughly the same answer near the cutoff. @@ -304,13 +302,13 @@ unittest { return ret; } - assert(approxEqual(poissonCDFR(1, 0.5), 1 - pmfSum(0, 0.5))); - assert(approxEqual(poissonCDFR(3, 0.7), 1 - pmfSum(2, 0.7))); + assert(isClose(poissonCDFR(1, 0.5), 1 - pmfSum(0, 0.5), 1e-2, 1e-2)); + assert(isClose(poissonCDFR(3, 0.7), 1 - pmfSum(2, 0.7), 1e-2, 1e-2)); // Absurdly huge value to test normal approximation. // Values from R. double ans = poissonCDFR( (1UL << 50) - 10_000_000, 1UL << 50); - assert(approxEqual(ans, 0.6171573)); + assert(isClose(ans, 0.6171573, 1e-2, 1e-2)); // Make sure cutoff is reasonable, i.e. make sure gamma incomplete branch // and normal branch get roughly the same answer near the cutoff. 
@@ -384,8 +382,8 @@ double binomialPMF(ulong k, ulong n, double p) { } unittest { - assert(approxEqual(binomialPMF(0, 10, .5), cast(double) 1/1024)); - assert(approxEqual(binomialPMF(100, 1000, .11), .024856)); + assert(isClose(binomialPMF(0, 10, .5), cast(double) 1/1024, 1e-2, 1e-2)); + assert(isClose(binomialPMF(100, 1000, .11), .024856, 1e-2, 1e-2)); } // Determines what value of n we switch to normal approximation at b/c @@ -435,26 +433,26 @@ double binomialCDF(ulong k, ulong n, double p) { } unittest { - assert(approxEqual(binomialCDF(10, 100, .11), 0.4528744401)); - assert(approxEqual(binomialCDF(15, 100, .12), 0.8585510507)); - assert(approxEqual(binomialCDF(50, 1000, .04), 0.95093595)); - assert(approxEqual(binomialCDF(7600, 15000, .5), .9496193045414)); - assert(approxEqual(binomialCDF(0, 10, 0.2), 0.1073742)); + assert(isClose(binomialCDF(10, 100, .11), 0.4528744401, 1e-2, 1e-2)); + assert(isClose(binomialCDF(15, 100, .12), 0.8585510507, 1e-2, 1e-2)); + assert(isClose(binomialCDF(50, 1000, .04), 0.95093595, 1e-2, 1e-2)); + assert(isClose(binomialCDF(7600, 15000, .5), .9496193045414, 1e-2, 1e-2)); + assert(isClose(binomialCDF(0, 10, 0.2), 0.1073742, 1e-2, 1e-2)); // Absurdly huge numbers: { ulong k = (1UL << 60) - 100_000_000; ulong n = 1UL << 61; - assert(approxEqual(binomialCDF(k, n, 0.5L), 0.4476073)); + assert(isClose(binomialCDF(k, n, 0.5L), 0.4476073, 1e-2, 1e-2)); } // Test Poisson branch. double poisAns = binomialCDF(85, 1UL << 26, 1.49e-6); - assert(approxEqual(poisAns, 0.07085327)); + assert(isClose(poisAns, 0.07085327, 1e-2, 1e-2)); // Test poissonCDFR branch. poisAns = binomialCDF( (1UL << 25) - 100, 1UL << 25, 0.9999975L); - assert(approxEqual(poisAns, 0.04713316)); + assert(isClose(poisAns, 0.04713316, 1e-2, 1e-2)); // Make sure cutoff is reasonable: Just below it, we should get similar // results for normal, exact. @@ -515,31 +513,31 @@ double binomialCDFR(ulong k, ulong n, double p) { unittest { // Values from R, Maxima. - assert(approxEqual(binomialCDF(10, 100, .11), 1 - - binomialCDFR(11, 100, .11))); - assert(approxEqual(binomialCDF(15, 100, .12), 1 - - binomialCDFR(16, 100, .12))); - assert(approxEqual(binomialCDF(50, 1000, .04), 1 - - binomialCDFR(51, 1000, .04))); - assert(approxEqual(binomialCDF(7600, 15000, .5), 1 - - binomialCDFR(7601, 15000, .5))); - assert(approxEqual(binomialCDF(9, 10, 0.3), 1 - - binomialCDFR(10, 10, 0.3))); + assert(isClose(binomialCDF(10, 100, .11), 1 - + binomialCDFR(11, 100, .11), 1e-2, 1e-2)); + assert(isClose(binomialCDF(15, 100, .12), 1 - + binomialCDFR(16, 100, .12), 1e-2, 1e-2)); + assert(isClose(binomialCDF(50, 1000, .04), 1 - + binomialCDFR(51, 1000, .04), 1e-2, 1e-2)); + assert(isClose(binomialCDF(7600, 15000, .5), 1 - + binomialCDFR(7601, 15000, .5), 1e-2, 1e-2)); + assert(isClose(binomialCDF(9, 10, 0.3), 1 - + binomialCDFR(10, 10, 0.3), 1e-2, 1e-2)); // Absurdly huge numbers, test normal branch. { ulong k = (1UL << 60) - 100_000_000; ulong n = 1UL << 61; - assert(approxEqual(binomialCDFR(k, n, 0.5L), 0.5523927)); + assert(isClose(binomialCDFR(k, n, 0.5L), 0.5523927, 1e-2, 1e-2)); } // Test Poisson inversion branch. double poisRes = binomialCDFR((1UL << 25) - 70, 1UL << 25, 0.9999975L); - assert(approxEqual(poisRes, 0.06883905)); + assert(isClose(poisRes, 0.06883905, 1e-2, 1e-2)); // Test Poisson branch. 
poisRes = binomialCDFR(350, 1UL << 25, 1e-5); - assert(approxEqual(poisRes, 0.2219235)); + assert(isClose(poisRes, 0.2219235, 1e-2, 1e-2)); // Make sure cutoff is reasonable: Just below it, we should get similar // results for normal, exact. @@ -631,9 +629,9 @@ in { } unittest { - assert(approxEqual(hypergeometricPMF(5, 10, 10, 10), .3437182)); - assert(approxEqual(hypergeometricPMF(9, 12, 10, 15), .27089783)); - assert(approxEqual(hypergeometricPMF(9, 100, 100, 15), .15500003)); + assert(isClose(hypergeometricPMF(5, 10, 10, 10), .3437182, 1e-2, 1e-2)); + assert(isClose(hypergeometricPMF(9, 12, 10, 15), .27089783, 1e-2, 1e-2)); + assert(isClose(hypergeometricPMF(9, 100, 100, 15), .15500003, 1e-2, 1e-2)); } /**P(X <= x), where X is random variable. Uses either direct summation, @@ -717,21 +715,21 @@ double hypergeometricCDF(long x, long n1, long n2, long n) { unittest { // Values from R and the Maxima CAS. // Test exact branch, including reversing, complementing. - assert(approxEqual(hypergeometricCDF(5, 10, 10, 10), 0.6718591)); - assert(approxEqual(hypergeometricCDF(3, 11, 15, 10), 0.27745322)); - assert(approxEqual(hypergeometricCDF(18, 27, 31, 35), 0.88271714)); - assert(approxEqual(hypergeometricCDF(21, 29, 31, 35), 0.99229253)); + assert(isClose(hypergeometricCDF(5, 10, 10, 10), 0.6718591, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(3, 11, 15, 10), 0.27745322, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(18, 27, 31, 35), 0.88271714, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(21, 29, 31, 35), 0.99229253, 1e-2, 1e-2)); // Normal branch. - assert(approxEqual(hypergeometricCDF(501, 2000, 1000, 800), 0.002767073)); - assert(approxEqual(hypergeometricCDF(565, 2000, 1000, 800), 0.9977068)); - assert(approxEqual(hypergeometricCDF(2700, 10000, 20000, 8000), 0.825652)); + assert(isClose(hypergeometricCDF(501, 2000, 1000, 800), 0.002767073, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(565, 2000, 1000, 800), 0.9977068, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(2700, 10000, 20000, 8000), 0.825652, 1e-2, 1e-2)); // Binomial branch. One for each transformation. - assert(approxEqual(hypergeometricCDF(110, 5000, 7000, 239), 0.9255627)); - assert(approxEqual(hypergeometricCDF(19840, 2950998, 12624, 19933), 0.2020618)); - assert(approxEqual(hypergeometricCDF(130, 24195, 52354, 295), 0.9999973)); - assert(approxEqual(hypergeometricCDF(103, 901, 49014, 3522), 0.999999)); + assert(isClose(hypergeometricCDF(110, 5000, 7000, 239), 0.9255627, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(19840, 2950998, 12624, 19933), 0.2020618, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(130, 24195, 52354, 295), 0.9999973, 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(103, 901, 49014, 3522), 0.999999, 1e-2, 1e-2)); } ///P(X >= x), where X is random variable. @@ -745,14 +743,14 @@ double hypergeometricCDFR(ulong x, ulong n1, ulong n2, ulong n) { unittest { //Reverses n1, n2 and subtracts x from n to get mirror image. 
- assert(approxEqual(hypergeometricCDF(5,10,10,10), - hypergeometricCDFR(5,10,10,10))); - assert(approxEqual(hypergeometricCDF(3, 11, 15, 10), - hypergeometricCDFR(7, 15, 11, 10))); - assert(approxEqual(hypergeometricCDF(18, 27, 31, 35), - hypergeometricCDFR(17, 31, 27, 35))); - assert(approxEqual(hypergeometricCDF(21, 29, 31, 35), - hypergeometricCDFR(14, 31, 29, 35))); + assert(isClose(hypergeometricCDF(5,10,10,10), + hypergeometricCDFR(5,10,10,10), 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(3, 11, 15, 10), + hypergeometricCDFR(7, 15, 11, 10), 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(18, 27, 31, 35), + hypergeometricCDFR(17, 31, 27, 35), 1e-2, 1e-2)); + assert(isClose(hypergeometricCDF(21, 29, 31, 35), + hypergeometricCDFR(14, 31, 29, 35), 1e-2, 1e-2)); } double hyperExact(ulong x, ulong n1, ulong n2, ulong n, ulong startAt = 0) { @@ -802,8 +800,8 @@ double chiSquarePDF(double x, double v) { } unittest { - assert( approxEqual(chiSquarePDF(1, 2), 0.3032653)); - assert( approxEqual(chiSquarePDF(2, 1), 0.1037769)); + assert( isClose(chiSquarePDF(1, 2), 0.3032653, 1e-2, 1e-2)); + assert( isClose(chiSquarePDF(2, 1), 0.1037769, 1e-2, 1e-2)); } /** @@ -882,19 +880,21 @@ double invChiSquareCDFR(double v, double p) { unittest { assert(feqrel(chiSquareCDFR(invChiSquareCDFR(3.5, 0.1), 3.5), 0.1)>=double.mant_dig-3); - assert(approxEqual( - chiSquareCDF(0.4L, 19.02L) + chiSquareCDFR(0.4L, 19.02L), 1.0L)); - assert(ae( invChiSquareCDFR( 3, chiSquareCDFR(1, 3)), 1)); + assert(isClose( + chiSquareCDF(0.4L, 19.02L) + chiSquareCDFR(0.4L, 19.02L), + 1.0L, + 1e-2, 1e-2)); + assert(isClose( invChiSquareCDFR( 3, chiSquareCDFR(1, 3)), 1, 1e-2, 1e-2)); - assert(ae(chiSquareCDFR(0.2, 1), 0.6547208)); - assert(ae(chiSquareCDFR(0.2, 2), 0.9048374)); - assert(ae(chiSquareCDFR(0.8, 1), 0.3710934)); - assert(ae(chiSquareCDFR(0.8, 2), 0.67032)); + assert(isClose(chiSquareCDFR(0.2, 1), 0.6547208, 1e-2, 1e-2)); + assert(isClose(chiSquareCDFR(0.2, 2), 0.9048374, 1e-2, 1e-2)); + assert(isClose(chiSquareCDFR(0.8, 1), 0.3710934, 1e-2, 1e-2)); + assert(isClose(chiSquareCDFR(0.8, 2), 0.67032, 1e-2, 1e-2)); - assert(ae(chiSquareCDF(0.2, 1), 0.3452792)); - assert(ae(chiSquareCDF(0.2, 2), 0.09516258)); - assert(ae(chiSquareCDF(0.8, 1), 0.6289066)); - assert(ae(chiSquareCDF(0.8, 2), 0.3296800)); + assert(isClose(chiSquareCDF(0.2, 1), 0.3452792, 1e-2, 1e-2)); + assert(isClose(chiSquareCDF(0.2, 2), 0.09516258, 1e-2, 1e-2)); + assert(isClose(chiSquareCDF(0.8, 1), 0.6289066, 1e-2, 1e-2)); + assert(isClose(chiSquareCDF(0.8, 2), 0.3296800, 1e-2, 1e-2)); } /// @@ -905,7 +905,7 @@ double normalPDF(double x, double mean = 0, double sd = 1) { } unittest { - assert(approxEqual(normalPDF(3, 1, 2), 0.1209854)); + assert(isClose(normalPDF(3, 1, 2), 0.1209854, 1e-2, 1e-2)); } ///P(X < x) for normal distribution where X is random var. @@ -920,9 +920,9 @@ double normalCDF(double x, double mean = 0, double stdev = 1) { } unittest { - assert(approxEqual(normalCDF(2), .9772498)); - assert(approxEqual(normalCDF(-2), .02275013)); - assert(approxEqual(normalCDF(1.3), .90319951)); + assert(isClose(normalCDF(2), .9772498, 1e-2, 1e-2)); + assert(isClose(normalCDF(-2), .02275013, 1e-2, 1e-2)); + assert(isClose(normalCDF(1.3), .90319951, 1e-2, 1e-2)); } ///P(X > x) for normal distribution where X is random var. @@ -936,7 +936,7 @@ double normalCDFR(double x, double mean = 0, double stdev = 1) { unittest { //Should be essentially a mirror image of normalCDF. 
for(double i = -8; i < 8; i += .1) { - assert(approxEqual(normalCDF(i), normalCDFR(-i))); + assert(isClose(normalCDF(i), normalCDFR(-i), 1e-2, 1e-2)); } } @@ -985,7 +985,7 @@ unittest { double sd = uniform(1.0L, 3.0L); double inv = invNormalCDF(x, mean, sd); double rec = normalCDF(inv, mean, sd); - assert(approxEqual(x, rec)); + assert(isClose(x, rec, 1e-2, 1e-2)); } } @@ -1002,8 +1002,8 @@ double logNormalPDF(double x, double mu = 0, double sigma = 1) { unittest { // Values from R. - assert(approxEqual(logNormalPDF(1, 0, 1), 0.3989423)); - assert(approxEqual(logNormalPDF(2, 2, 3), 0.06047173)); + assert(isClose(logNormalPDF(1, 0, 1), 0.3989423, 1e-2, 1e-2)); + assert(isClose(logNormalPDF(2, 2, 3), 0.06047173, 1e-2, 1e-2)); } /// @@ -1014,8 +1014,8 @@ double logNormalCDF(double x, double mu = 0, double sigma = 1) { } unittest { - assert(approxEqual(logNormalCDF(4), 0.9171715)); - assert(approxEqual(logNormalCDF(1, -2, 3), 0.7475075)); + assert(isClose(logNormalCDF(4), 0.9171715, 1e-2, 1e-2)); + assert(isClose(logNormalCDF(1, -2, 3), 0.7475075, 1e-2, 1e-2)); } /// @@ -1026,8 +1026,8 @@ double logNormalCDFR(double x, double mu = 0, double sigma = 1) { } unittest { - assert(approxEqual(logNormalCDF(4) + logNormalCDFR(4), 1)); - assert(approxEqual(logNormalCDF(1, -2, 3) + logNormalCDFR(1, -2, 3), 1)); + assert(isClose(logNormalCDF(4) + logNormalCDFR(4), 1, 1e-2, 1e-2)); + assert(isClose(logNormalCDF(1, -2, 3) + logNormalCDFR(1, -2, 3), 1, 1e-2, 1e-2)); } /// @@ -1043,7 +1043,7 @@ double weibullPDF(double x, double shape, double scale = 1) { } unittest { - assert(approxEqual(weibullPDF(2,1,3), 0.1711390)); + assert(isClose(weibullPDF(2,1,3), 0.1711390, 1e-2, 1e-2)); } /// @@ -1056,7 +1056,7 @@ double weibullCDF(double x, double shape, double scale = 1) { } unittest { - assert(approxEqual(weibullCDF(2, 3, 4), 0.1175031)); + assert(isClose(weibullCDF(2, 3, 4), 0.1175031, 1e-2, 1e-2)); } /// @@ -1069,7 +1069,7 @@ double weibullCDFR(double x, double shape, double scale = 1) { } unittest { - assert(approxEqual(weibullCDF(2, 3, 4) + weibullCDFR(2, 3, 4), 1)); + assert(isClose(weibullCDF(2, 3, 4) + weibullCDFR(2, 3, 4), 1, 1e-2, 1e-2)); } // For K-S tests in dstats.random. Todo: Flesh out. @@ -1110,15 +1110,15 @@ double studentsTCDFR(double t, double df) { } unittest { - assert(approxEqual(studentsTPDF(1, 1), 0.1591549)); - assert(approxEqual(studentsTPDF(3, 10), 0.0114055)); - assert(approxEqual(studentsTPDF(-4, 5), 0.005123727)); + assert(isClose(studentsTPDF(1, 1), 0.1591549, 1e-2, 1e-2)); + assert(isClose(studentsTPDF(3, 10), 0.0114055, 1e-2, 1e-2)); + assert(isClose(studentsTPDF(-4, 5), 0.005123727, 1e-2, 1e-2)); - assert(approxEqual(studentsTCDF(1, 1), 0.75)); - assert(approxEqual(studentsTCDF(1.061, 2), 0.8)); - assert(approxEqual(studentsTCDF(5.959, 5), 0.9995)); - assert(approxEqual(studentsTCDF(.667, 20), 0.75)); - assert(approxEqual(studentsTCDF(2.353, 3), 0.95)); + assert(isClose(studentsTCDF(1, 1), 0.75, 1e-2, 1e-2)); + assert(isClose(studentsTCDF(1.061, 2), 0.8, 1e-2, 1e-2)); + assert(isClose(studentsTCDF(5.959, 5), 0.9995, 1e-2, 1e-2)); + assert(isClose(studentsTCDF(.667, 20), 0.75, 1e-2, 1e-2)); + assert(isClose(studentsTCDF(2.353, 3), 0.95, 1e-2, 1e-2)); } /****************************************** @@ -1169,11 +1169,11 @@ unittest { // in the last decimal places. However, they are helpful as a sanity check. 
// Microsoft Excel 2003 gives TINV(2*(1-0.995), 10) == 3.16927267160917 - assert(approxEqual(invStudentsTCDF(0.995, 10), 3.169_272_67L)); - assert(approxEqual(invStudentsTCDF(0.6, 8), 0.261_921_096_769_043L)); - assert(approxEqual(invStudentsTCDF(0.4, 18), -0.257_123_042_655_869L)); - assert(approxEqual(studentsTCDF(invStudentsTCDF(0.4L, 18), 18), .4L)); - assert(approxEqual(studentsTCDF( invStudentsTCDF(0.9L, 11), 11), 0.9L)); + assert(isClose(invStudentsTCDF(0.995, 10), 3.169_272_67L)); + assert(isClose(invStudentsTCDF(0.6, 8), 0.261_921_096_769_043L)); + assert(isClose(invStudentsTCDF(0.4, 18), -0.257_123_042_655_869L)); + assert(isClose(studentsTCDF(invStudentsTCDF(0.4L, 18), 18), .4L)); + assert(isClose(studentsTCDF( invStudentsTCDF(0.9L, 11), 11), 0.9L)); } /** @@ -1268,7 +1268,7 @@ unittest { assert(fabs(invFisherCDFR(8, 34, 0.2) - 1.48267037661408L)< 0.0000000005L); assert(fabs(invFisherCDFR(4, 16, 0.008) - 5.043_537_593_48596L)< 0.0000000005L); // This one used to fail because of a bug in the definition of MINLOG. - assert(approxEqual(fisherCDFR(invFisherCDFR(4,16, 0.008), 4, 16), 0.008)); + assert(isClose(fisherCDFR(invFisherCDFR(4,16, 0.008), 4, 16), 0.008)); } /// @@ -1281,8 +1281,8 @@ double negBinomPMF(ulong k, ulong n, double p) { unittest { // Values from R. - assert(approxEqual(negBinomPMF(1, 8, 0.7), 0.1383552)); - assert(approxEqual(negBinomPMF(3, 2, 0.5), 0.125)); + assert(isClose(negBinomPMF(1, 8, 0.7), 0.1383552, 1e-2, 1e-2)); + assert(isClose(negBinomPMF(3, 2, 0.5), 0.125, 1e-2, 1e-2)); } @@ -1318,8 +1318,8 @@ double negBinomCDF(ulong k, ulong n, double p ) { unittest { // Values from R. - assert(approxEqual(negBinomCDF(50, 50, 0.5), 0.5397946)); - assert(approxEqual(negBinomCDF(2, 1, 0.5), 0.875)); + assert(isClose(negBinomCDF(50, 50, 0.5), 0.5397946, 1e-2, 1e-2)); + assert(isClose(negBinomCDF(2, 1, 0.5), 0.875, 1e-2, 1e-2)); } /**Probability that k or more failures precede the nth success.*/ @@ -1333,7 +1333,7 @@ double negBinomCDFR(ulong k, ulong n, double p) { } unittest { - assert(approxEqual(negBinomCDFR(10, 20, 0.5), 1 - negBinomCDF(9, 20, 0.5))); + assert(isClose(negBinomCDFR(10, 20, 0.5), 1 - negBinomCDF(9, 20, 0.5), 1e-2, 1e-2)); } /// @@ -1453,12 +1453,12 @@ double invExponentialCDF(double p, double lambda) { unittest { // Values from R. 
- assert(approxEqual(exponentialPDF(0.75, 3), 0.3161977)); - assert(approxEqual(exponentialCDF(0.75, 3), 0.8946008)); - assert(approxEqual(exponentialCDFR(0.75, 3), 0.1053992)); + assert(isClose(exponentialPDF(0.75, 3), 0.3161977, 1e-2, 1e-2)); + assert(isClose(exponentialCDF(0.75, 3), 0.8946008, 1e-2, 1e-2)); + assert(isClose(exponentialCDFR(0.75, 3), 0.1053992, 1e-2, 1e-2)); - assert(approxEqual(invExponentialCDF(0.8, 2), 0.804719)); - assert(approxEqual(invExponentialCDF(0.2, 7), 0.03187765)); + assert(isClose(invExponentialCDF(0.8, 2), 0.804719, 1e-2, 1e-2)); + assert(isClose(invExponentialCDF(0.2, 7), 0.03187765, 1e-2, 1e-2)); } /// @@ -1510,17 +1510,17 @@ double invGammaCDFR(double p, double rate, double shape) { } unittest { - assert(approxEqual(gammaPDF(1, 2, 5), 0.1804470)); - assert(approxEqual(gammaPDF(0.5, 8, 4), 1.562935)); - assert(approxEqual(gammaPDF(3, 2, 7), 0.3212463)); - assert(approxEqual(gammaCDF(1, 2, 5), 0.05265302)); - assert(approxEqual(gammaCDFR(1, 2, 5), 0.947347)); + assert(isClose(gammaPDF(1, 2, 5), 0.1804470, 1e-2, 1e-2)); + assert(isClose(gammaPDF(0.5, 8, 4), 1.562935, 1e-2, 1e-2)); + assert(isClose(gammaPDF(3, 2, 7), 0.3212463, 1e-2, 1e-2)); + assert(isClose(gammaCDF(1, 2, 5), 0.05265302, 1e-2, 1e-2)); + assert(isClose(gammaCDFR(1, 2, 5), 0.947347, 1e-2, 1e-2)); double inv = invGammaCDFR(0.78, 2, 1); - assert(approxEqual(gammaCDFR(inv, 2, 1), 0.78)); + assert(isClose(gammaCDFR(inv, 2, 1), 0.78, 1e-2, 1e-2)); double inv2 = invGammaCDF(0.78, 2, 1); - assert(approxEqual(gammaCDF(inv2, 2, 1), 0.78)); + assert(isClose(gammaCDF(inv2, 2, 1), 0.78, 1e-2, 1e-2)); } /// @@ -1562,17 +1562,17 @@ double invBetaCDF(double p, double alpha, double beta) { unittest { // Values from R. - assert(approxEqual(betaPDF(0.3, 2, 3), 1.764)); - assert(approxEqual(betaPDF(0.78, 0.9, 4), 0.03518569)); + assert(isClose(betaPDF(0.3, 2, 3), 1.764, 1e-2, 1e-2)); + assert(isClose(betaPDF(0.78, 0.9, 4), 0.03518569, 1e-2, 1e-2)); - assert(approxEqual(betaCDF(0.3, 2, 3), 0.3483)); - assert(approxEqual(betaCDF(0.78, 0.9, 4), 0.9980752)); + assert(isClose(betaCDF(0.3, 2, 3), 0.3483, 1e-2, 1e-2)); + assert(isClose(betaCDF(0.78, 0.9, 4), 0.9980752, 1e-2, 1e-2)); - assert(approxEqual(betaCDFR(0.3, 2, 3), 0.6517)); - assert(approxEqual(betaCDFR(0.78, 0.9, 4), 0.001924818)); + assert(isClose(betaCDFR(0.3, 2, 3), 0.6517, 1e-2, 1e-2)); + assert(isClose(betaCDFR(0.78, 0.9, 4), 0.001924818, 1e-2, 1e-2)); - assert(approxEqual(invBetaCDF(0.3483, 2, 3), 0.3)); - assert(approxEqual(invBetaCDF(0.9980752, 0.9, 4), 0.78)); + assert(isClose(invBetaCDF(0.3483, 2, 3), 0.3, 1e-2, 1e-2)); + assert(isClose(invBetaCDF(0.9980752, 0.9, 4), 0.78, 1e-2, 1e-2)); } /** @@ -1615,11 +1615,11 @@ is(ElementType!A : double)) { unittest { // Test against beta - assert(approxEqual(dirichletPDF([0.1, 0.9], [2, 3]), betaPDF(0.1, 2, 3))); + assert(isClose(dirichletPDF([0.1, 0.9], [2, 3]), betaPDF(0.1, 2, 3))); // A few values from R's gregmisc package - assert(approxEqual(dirichletPDF([0.1, 0.2, 0.7], [4, 5, 6]), 1.356672)); - assert(approxEqual(dirichletPDF([0.8, 0.05, 0.15], [8, 5, 6]), 0.04390199)); + assert(isClose(dirichletPDF([0.1, 0.2, 0.7], [4, 5, 6]), 1.356672, 1e-2, 1e-2)); + assert(isClose(dirichletPDF([0.8, 0.05, 0.15], [8, 5, 6]), 0.04390199, 1e-2, 1e-2)); } /// @@ -1632,8 +1632,8 @@ double cauchyPDF(double X, double X0 = 0, double gamma = 1) { } unittest { - assert(approxEqual(cauchyPDF(5), 0.01224269)); - assert(approxEqual(cauchyPDF(2), 0.06366198)); + assert(isClose(cauchyPDF(5), 0.01224269, 1e-2, 1e-2)); + 
assert(isClose(cauchyPDF(2), 0.06366198, 1e-2, 1e-2)); } @@ -1646,8 +1646,8 @@ double cauchyCDF(double X, double X0 = 0, double gamma = 1) { unittest { // Values from R - assert(approxEqual(cauchyCDF(-10), 0.03172552)); - assert(approxEqual(cauchyCDF(1), 0.75)); + assert(isClose(cauchyCDF(-10), 0.03172552, 1e-2, 1e-2)); + assert(isClose(cauchyCDF(1), 0.75, 1e-2, 1e-2)); } /// @@ -1659,8 +1659,8 @@ double cauchyCDFR(double X, double X0 = 0, double gamma = 1) { unittest { // Values from R - assert(approxEqual(1 - cauchyCDFR(-10), 0.03172552)); - assert(approxEqual(1 - cauchyCDFR(1), 0.75)); + assert(isClose(1 - cauchyCDFR(-10), 0.03172552, 1e-2, 1e-2)); + assert(isClose(1 - cauchyCDFR(1), 0.75, 1e-2, 1e-2)); } /// @@ -1673,9 +1673,9 @@ double invCauchyCDF(double p, double X0 = 0, double gamma = 1) { unittest { // cauchyCDF already tested. Just make sure this is the inverse. - assert(approxEqual(invCauchyCDF(cauchyCDF(.5)), .5)); - assert(approxEqual(invCauchyCDF(cauchyCDF(.99)), .99)); - assert(approxEqual(invCauchyCDF(cauchyCDF(.03)), .03)); + assert(isClose(invCauchyCDF(cauchyCDF(.5)), .5, 1e-2, 1e-2)); + assert(isClose(invCauchyCDF(cauchyCDF(.99)), .99, 1e-2, 1e-2)); + assert(isClose(invCauchyCDF(cauchyCDF(.03)), .03, 1e-2, 1e-2)); } // For K-S tests in dstats.random. To be fleshed out later. Intentionally @@ -1693,8 +1693,8 @@ double laplacePDF(double x, double mu = 0, double b = 1) { unittest { // Values from Maxima. - assert(approxEqual(laplacePDF(3, 2, 1), 0.18393972058572)); - assert(approxEqual(laplacePDF(-8, 6, 7), 0.0096668059454723)); + assert(isClose(laplacePDF(3, 2, 1), 0.18393972058572, 1e-2, 1e-2)); + assert(isClose(laplacePDF(-8, 6, 7), 0.0096668059454723, 1e-2, 1e-2)); } /// @@ -1708,9 +1708,9 @@ double laplaceCDF(double X, double mu = 0, double b = 1) { unittest { // Values from Octave. - assert(approxEqual(laplaceCDF(5), 0.9963)); - assert(approxEqual(laplaceCDF(-3.14), .021641)); - assert(approxEqual(laplaceCDF(0.012), 0.50596)); + assert(isClose(laplaceCDF(5), 0.9963, 1e-2, 1e-2)); + assert(isClose(laplaceCDF(-3.14), .021641, 1e-2, 1e-2)); + assert(isClose(laplaceCDF(0.012), 0.50596, 1e-2, 1e-2)); } /// @@ -1724,9 +1724,9 @@ double laplaceCDFR(double X, double mu = 0, double b = 1) { unittest { // Values from Octave. 
- assert(approxEqual(1 - laplaceCDFR(5), 0.9963)); - assert(approxEqual(1 - laplaceCDFR(-3.14), .021641)); - assert(approxEqual(1 - laplaceCDFR(0.012), 0.50596)); + assert(isClose(1 - laplaceCDFR(5), 0.9963, 1e-2, 1e-2)); + assert(isClose(1 - laplaceCDFR(-3.14), .021641, 1e-2, 1e-2)); + assert(isClose(1 - laplaceCDFR(0.012), 0.50596, 1e-2, 1e-2)); } /// @@ -1740,8 +1740,8 @@ double invLaplaceCDF(double p, double mu = 0, double b = 1) { } unittest { - assert(approxEqual(invLaplaceCDF(0.012), -3.7297)); - assert(approxEqual(invLaplaceCDF(0.82), 1.0217)); + assert(isClose(invLaplaceCDF(0.012), -3.7297, 1e-2, 1e-2)); + assert(isClose(invLaplaceCDF(0.82), 1.0217, 1e-2, 1e-2)); } double kolmDist()(double x) { @@ -1781,8 +1781,8 @@ double kolmogorovDistrib(immutable double x) { } unittest { - assert(approxEqual(1 - kolmogorovDistrib(.75), 0.627167)); - assert(approxEqual(1 - kolmogorovDistrib(.5), 0.9639452436)); - assert(approxEqual(1 - kolmogorovDistrib(.9), 0.39273070)); - assert(approxEqual(1 - kolmogorovDistrib(1.2), 0.112249666)); + assert(isClose(1 - kolmogorovDistrib(.75), 0.627167, 1e-2, 1e-2)); + assert(isClose(1 - kolmogorovDistrib(.5), 0.9639452436, 1e-2, 1e-2)); + assert(isClose(1 - kolmogorovDistrib(.9), 0.39273070, 1e-2, 1e-2)); + assert(isClose(1 - kolmogorovDistrib(1.2), 0.112249666, 1e-2, 1e-2)); } diff --git a/source/dstats/infotheory.d b/source/dstats/infotheory.d index 7b37726..e2eb5c6 100644 --- a/source/dstats/infotheory.d +++ b/source/dstats/infotheory.d @@ -51,9 +51,9 @@ version(unittest) { * Examples: * --- * double uniform3 = entropyCounts([4, 4, 4]); - * assert(approxEqual(uniform3, log2(3))); + * assert(isClose(uniform3, log2(3), 1e-2, 1e-2)); * double uniform4 = entropyCounts([5, 5, 5, 5]); - * assert(approxEqual(uniform4, 2)); + * assert(isClose(uniform4, 2, 1e-2, 1e-2)); * --- */ double entropyCounts(T)(T data) @@ -77,12 +77,12 @@ if(isIterable!(T)) { unittest { double uniform3 = entropyCounts([4, 4, 4].dup); - assert(approxEqual(uniform3, log2(3))); + assert(isClose(uniform3, log2(3), 1e-2, 1e-2)); double uniform4 = entropyCounts([5, 5, 5, 5].dup); - assert(approxEqual(uniform4, 2)); + assert(isClose(uniform4, 2, 1e-2, 1e-2)); assert(entropyCounts([2,2].dup)==1); assert(entropyCounts([5.1,5.1,5.1,5.1].dup)==2); - assert(approxEqual(entropyCounts([1,2,3,4,5].dup), 2.1492553971685)); + assert(isClose(entropyCounts([1,2,3,4,5].dup), 2.1492553971685, 1e-2, 1e-2)); } template FlattenType(T...) { @@ -293,10 +293,10 @@ unittest { * --- * int[] foo = [1, 1, 1, 2, 2, 2, 3, 3, 3]; * double entropyFoo = entropy(foo); // Plain old entropy of foo. - * assert(approxEqual(entropyFoo, log2(3))); + * assert(isClose(entropyFoo, log2(3), 1e-2, 1e-2)); * int[] bar = [1, 2, 3, 1, 2, 3, 1, 2, 3]; * double HFooBar = entropy(joint(foo, bar)); // Joint entropy of foo and bar. - * assert(approxEqual(HFooBar, log2(9))); + * assert(isClose(HFooBar, log2(9), 1e-2, 1e-2)); * --- */ double entropy(T)(T data) @@ -381,23 +381,23 @@ unittest { { // Generic version. 
int[] foo = [1, 1, 1, 2, 2, 2, 3, 3, 3]; double entropyFoo = entropy(foo); - assert(approxEqual(entropyFoo, log2(3))); + assert(isClose(entropyFoo, log2(3), 1e-2, 1e-2)); int[] bar = [1, 2, 3, 1, 2, 3, 1, 2, 3]; auto stuff = joint(foo, bar); double jointEntropyFooBar = entropy(joint(foo, bar)); - assert(approxEqual(jointEntropyFooBar, log2(9))); + assert(isClose(jointEntropyFooBar, log2(9), 1e-2, 1e-2)); } { // byte specialization byte[] foo = [-1, -1, -1, 2, 2, 2, 3, 3, 3]; double entropyFoo = entropy(foo); - assert(approxEqual(entropyFoo, log2(3))); + assert(isClose(entropyFoo, log2(3), 1e-2, 1e-2)); string bar = "ACTGGCTA"; assert(entropy(bar) == 2); } { // NeedsHeap version. string[] arr = ["1", "1", "1", "2", "2", "2", "3", "3", "3"]; auto m = map!("a")(arr); - assert(approxEqual(entropy(m), log2(3))); + assert(isClose(entropy(m), log2(3), 1e-2, 1e-2)); } } @@ -418,8 +418,8 @@ unittest { // This shouldn't be easy to screw up. Just really basic. int[] foo = [1,2,2,1,1]; int[] bar = [1,2,3,1,2]; - assert(approxEqual(entropy(foo) - condEntropy(foo, bar), - mutualInfo(foo, bar))); + assert(isClose(entropy(foo) - condEntropy(foo, bar), + mutualInfo(foo, bar), 1e-2, 1e-2)); } private double miContingency(double observed, double expected) { @@ -457,12 +457,12 @@ if(isInputRange!(T) && isInputRange!(U)) { unittest { // Values from R, but converted from base e to base 2. - assert(approxEqual(mutualInfo(bin([1,2,3,3,8].dup, 10), - bin([8,6,7,5,3].dup, 10)), 1.921928)); - assert(approxEqual(mutualInfo(bin([1,2,1,1,3,4,3,6].dup, 2), - bin([2,7,9,6,3,1,7,40].dup, 2)), .2935645)); - assert(approxEqual(mutualInfo(bin([1,2,1,1,3,4,3,6].dup, 4), - bin([2,7,9,6,3,1,7,40].dup, 4)), .5435671)); + assert(isClose(mutualInfo(bin([1,2,3,3,8].dup, 10), + bin([8,6,7,5,3].dup, 10)), 1.921928, 1e-2, 1e-2)); + assert(isClose(mutualInfo(bin([1,2,1,1,3,4,3,6].dup, 2), + bin([2,7,9,6,3,1,7,40].dup, 2)), .2935645, 1e-2, 1e-2)); + assert(isClose(mutualInfo(bin([1,2,1,1,3,4,3,6].dup, 4), + bin([2,7,9,6,3,1,7,40].dup, 4)), .5435671, 1e-2, 1e-2)); } @@ -492,10 +492,10 @@ unittest { // Values from Matlab mi package by Hanchuan Peng. 
auto res = condMutualInfo([1,2,1,2,1,2,1,2].dup, [3,1,2,3,4,2,1,2].dup, [1,2,3,1,2,3,1,2].dup); - assert(approxEqual(res, 0.4387)); + assert(isClose(res, 0.4387, 1e-2, 1e-2)); res = condMutualInfo([1,2,3,1,2].dup, [2,1,3,2,1].dup, joint([1,1,1,2,2].dup, [2,2,2,1,1].dup)); - assert(approxEqual(res, 1.3510)); + assert(isClose(res, 1.3510, 1e-2, 1e-2)); } /**Calculates the entropy of any old input range of observations more quickly @@ -535,7 +535,7 @@ unittest { uint[] foo = [1U,2,3,1,3,2,6,3,1,6,3,2,2,1,3,5,2,1].dup; auto sorted = foo.dup; sort(sorted); - assert(approxEqual(entropySorted(sorted), entropy(foo))); + assert(isClose(entropySorted(sorted), entropy(foo))); } /** @@ -762,36 +762,35 @@ struct DenseInfoTheory { } unittest { - alias ae = approxEqual; auto dense = DenseInfoTheory(3); auto a = [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]; auto b = [1, 2, 2, 2, 0, 0, 1, 1, 1, 1, 0, 0]; auto c = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0]; - // need some approxEquals in here because some methods do floating + // need some isCloses in here because some methods do floating // point ops in hash-dependent orders assert(entropy(a) == dense.entropy(a)); assert(entropy(b) == dense.entropy(b)); assert(entropy(c) == dense.entropy(c)); - assert(ae(entropy(joint(a, c)), dense.entropy(joint(c, a)))); - assert(ae(entropy(joint(a, b)), dense.entropy(joint(a, b)))); + assert(isClose(entropy(joint(a, c)), dense.entropy(joint(c, a)), 1e-2, 1e-2)); + assert(isClose(entropy(joint(a, b)), dense.entropy(joint(a, b)), 1e-2, 1e-2)); assert(entropy(joint(c, b)) == dense.entropy(joint(c, b))); assert(condEntropy(a, c) == dense.condEntropy(a, c)); - assert(ae(condEntropy(a, b), dense.condEntropy(a, b))); + assert(isClose(condEntropy(a, b), dense.condEntropy(a, b), 1e-2, 1e-2)); assert(condEntropy(c, b) == dense.condEntropy(c, b)); - assert(ae(mutualInfo(a, c), dense.mutualInfo(c, a))); - assert(ae(mutualInfo(a, b), dense.mutualInfo(a, b))); - assert(ae(mutualInfo(c, b), dense.mutualInfo(c, b))); + assert(isClose(mutualInfo(a, c), dense.mutualInfo(c, a), 1e-2, 1e-2)); + assert(isClose(mutualInfo(a, b), dense.mutualInfo(a, b), 1e-2, 1e-2)); + assert(isClose(mutualInfo(c, b), dense.mutualInfo(c, b), 1e-2, 1e-2)); - assert(ae(condMutualInfo(a, b, c), dense.condMutualInfo(a, b, c))); - assert(ae(condMutualInfo(a, c, b), dense.condMutualInfo(a, c, b))); - assert(ae(condMutualInfo(b, c, a), dense.condMutualInfo(b, c, a))); + assert(isClose(condMutualInfo(a, b, c), dense.condMutualInfo(a, b, c), 1e-2, 1e-2)); + assert(isClose(condMutualInfo(a, c, b), dense.condMutualInfo(a, c, b), 1e-2, 1e-2)); + assert(isClose(condMutualInfo(b, c, a), dense.condMutualInfo(b, c, a), 1e-2, 1e-2)); // Test P-value stuff. immutable pDense = dense.mutualInfoPval(dense.mutualInfo(a, b), a.length); immutable pNotDense = gTestObs(a, b).p; - assert(approxEqual(pDense, pNotDense)); + assert(isClose(pDense, pNotDense)); } diff --git a/source/dstats/kerneldensity.d b/source/dstats/kerneldensity.d index 0539b2b..4b97f86 100644 --- a/source/dstats/kerneldensity.d +++ b/source/dstats/kerneldensity.d @@ -306,9 +306,9 @@ public: unittest { auto kde = KernelDensity1D.fromCallable(parametrize!normalPDF(0, 1), [0]); - assert(approxEqual(kde(1), normalPDF(1))); - assert(approxEqual(kde.cdf(1), normalCDF(1))); - assert(approxEqual(kde.cdfr(1), normalCDFR(1))); + assert(isClose(kde(1), normalPDF(1), 1e-2, 1e-2)); + assert(isClose(kde.cdf(1), normalCDF(1), 1e-2, 1e-2)); + assert(isClose(kde.cdfr(1), normalCDFR(1), 1e-2, 1e-2)); // This is purely to see if fromAlias works. 
auto cosKde = KernelDensity1D.fromAlias!cos([0], 1); @@ -344,8 +344,8 @@ if(isForwardRange!R && is(ElementType!R : double)) { unittest { // Values from R. - assert(approxEqual(scottBandwidth([1,2,3,4,5]), 1.14666)); - assert(approxEqual(scottBandwidth([1,2,2,2,2,8,8,8,8]), 2.242446)); + assert(isClose(scottBandwidth([1,2,3,4,5]), 1.14666, 1e-2, 1e-2)); + assert(isClose(scottBandwidth([1,2,2,2,2,8,8,8,8]), 2.242446, 1e-2, 1e-2)); } /**Construct an N-dimensional kernel density estimator. This is done using diff --git a/source/dstats/pca.d b/source/dstats/pca.d index eba8041..78b14da 100644 --- a/source/dstats/pca.d +++ b/source/dstats/pca.d @@ -217,7 +217,7 @@ private PrincipalComponent firstComponentImpl(Ror)( foreach(i; 0..a.length) { if(!isFinite(a[i]) || !isFinite(b[i])) { return true; - } else if(!approxEqual(a[i], b[i], opts.relError, opts.absError)) { + } else if(!isClose(a[i], b[i], opts.relError, opts.absError)) { return false; } } @@ -492,7 +492,8 @@ version(unittest) { // There are two equally valid answers for PCA that differ only by sign. // This tests whether one of them matches the test value. bool plusMinusAe(T, U)(T lhs, U rhs) { - return approxEqual(lhs, rhs) || approxEqual(lhs, map!"-a"(rhs)); + return isClose(lhs, rhs, 1e-2, 1e-2) || + isClose(lhs, map!"-a"(rhs), 1e-2, 1e-2); } } diff --git a/source/dstats/regress.d b/source/dstats/regress.d index f17f889..f815afc 100644 --- a/source/dstats/regress.d +++ b/source/dstats/regress.d @@ -760,40 +760,40 @@ unittest { // Values from R. auto res1 = polyFit(diseaseSev, temperature, 1); - assert(approxEqual(res1.betas[0], 2.6623)); - assert(approxEqual(res1.betas[1], 0.2417)); - assert(approxEqual(res1.stdErr[0], 1.1008)); - assert(approxEqual(res1.stdErr[1], 0.0635)); - assert(approxEqual(res1.p[0], 0.0419)); - assert(approxEqual(res1.p[1], 0.0052)); - assert(approxEqual(res1.R2, 0.644)); - assert(approxEqual(res1.adjustedR2, 0.6001)); - assert(approxEqual(res1.residualError, 2.03)); - assert(approxEqual(res1.overallP, 0.00518)); + assert(isClose(res1.betas[0], 2.6623, 1e-2, 1e-2)); + assert(isClose(res1.betas[1], 0.2417, 1e-2, 1e-2)); + assert(isClose(res1.stdErr[0], 1.1008, 1e-2, 1e-2)); + assert(isClose(res1.stdErr[1], 0.0635, 1e-2, 1e-2)); + assert(isClose(res1.p[0], 0.0419, 1e-2, 1e-2)); + assert(isClose(res1.p[1], 0.0052, 1e-2, 1e-2)); + assert(isClose(res1.R2, 0.644, 1e-2, 1e-2)); + assert(isClose(res1.adjustedR2, 0.6001, 1e-2, 1e-2)); + assert(isClose(res1.residualError, 2.03, 1e-2, 1e-2)); + assert(isClose(res1.overallP, 0.00518, 1e-2, 1e-2)); auto res2 = polyFit(weights, heights, 2); - assert(approxEqual(res2.betas[0], 128.813)); - assert(approxEqual(res2.betas[1], -143.162)); - assert(approxEqual(res2.betas[2], 61.960)); + assert(isClose(res2.betas[0], 128.813, 1e-2, 1e-2)); + assert(isClose(res2.betas[1], -143.162, 1e-2, 1e-2)); + assert(isClose(res2.betas[2], 61.960, 1e-2, 1e-2)); - assert(approxEqual(res2.stdErr[0], 16.308)); - assert(approxEqual(res2.stdErr[1], 19.833)); - assert(approxEqual(res2.stdErr[2], 6.008)); + assert(isClose(res2.stdErr[0], 16.308, 1e-2, 1e-2)); + assert(isClose(res2.stdErr[1], 19.833, 1e-2, 1e-2)); + assert(isClose(res2.stdErr[2], 6.008, 1e-2, 1e-2)); - assert(approxEqual(res2.p[0], 4.28e-6)); - assert(approxEqual(res2.p[1], 1.06e-5)); - assert(approxEqual(res2.p[2], 2.57e-7)); + assert(isClose(res2.p[0], 4.28e-6, 1e-2, 1e-2)); + assert(isClose(res2.p[1], 1.06e-5, 1e-2, 1e-2)); + assert(isClose(res2.p[2], 2.57e-7, 1e-2, 1e-2)); - assert(approxEqual(res2.R2, 0.9989, 0.0001)); - 
assert(approxEqual(res2.adjustedR2, 0.9987, 0.0001)); + assert(isClose(res2.R2, 0.9989, 0.0001, 1e-2)); + assert(isClose(res2.adjustedR2, 0.9987, 0.0001, 1e-2)); - assert(approxEqual(res2.lowerBound[0], 92.9, 0.01)); - assert(approxEqual(res2.lowerBound[1], -186.8, 0.01)); - assert(approxEqual(res2.lowerBound[2], 48.7, 0.01)); - assert(approxEqual(res2.upperBound[0], 164.7, 0.01)); - assert(approxEqual(res2.upperBound[1], -99.5, 0.01)); - assert(approxEqual(res2.upperBound[2], 75.2, 0.01)); + assert(isClose(res2.lowerBound[0], 92.9, 0.01, 1e-2)); + assert(isClose(res2.lowerBound[1], -186.8, 0.01, 1e-2)); + assert(isClose(res2.lowerBound[2], 48.7, 0.01, 1e-2)); + assert(isClose(res2.upperBound[0], 164.7, 0.01, 1e-2)); + assert(isClose(res2.upperBound[1], -99.5, 0.01, 1e-2)); + assert(isClose(res2.upperBound[2], 75.2, 0.01, 1e-2)); auto res3 = linearRegress(weights, repeat(1), heights, map!"a * a"(heights)); assert(res2.betas == res3.betas); @@ -803,25 +803,26 @@ unittest { (beta1Buf[], diseaseSev, repeat(1), temperature); assert(beta1Buf.ptr == beta1.ptr); assert(beta1Buf[] == beta1[]); - assert(approxEqual(beta1, res1.betas)); + assert(isClose(beta1, res1.betas)); auto beta2 = polyFitBeta(weights, heights, 2); - assert(approxEqual(beta2, res2.betas)); + assert(isClose(beta2, res2.betas)); auto res4 = linearRegress(weights, repeat(1), heights); - assert(approxEqual(res4.p, 3.604e-14)); - assert(approxEqual(res4.betas, [-39.062, 61.272])); - assert(approxEqual(res4.p, [6.05e-9, 3.60e-14])); - assert(approxEqual(res4.R2, 0.9892)); - assert(approxEqual(res4.adjustedR2, 0.9884)); - assert(approxEqual(res4.residualError, 0.7591)); - assert(approxEqual(res4.lowerBound, [-45.40912, 57.43554])); - assert(approxEqual(res4.upperBound, [-32.71479, 65.10883])); + assert(isClose(res4.p, 3.604e-14, 1e-2, 1e-2)); + assert(isClose(res4.betas, [-39.062, 61.272], 1e-2, 1e-2)); + assert(isClose(res4.p, [6.05e-9, 3.60e-14], 1e-2, 1e-2)); + assert(isClose(res4.R2, 0.9892, 1e-2, 1e-2)); + assert(isClose(res4.adjustedR2, 0.9884, 1e-2, 1e-2)); + assert(isClose(res4.residualError, 0.7591, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound, [-45.40912, 57.43554], 1e-2, 1e-2)); + assert(isClose(res4.upperBound, [-32.71479, 65.10883], 1e-2, 1e-2)); // Test residuals. - assert(approxEqual(residuals(res4.betas, weights, repeat(1), heights), + assert(isClose(residuals(res4.betas, weights, repeat(1), heights), [1.20184170, 0.27367611, 0.40823237, -0.06993322, 0.06462305, -0.40354255, -0.88170814, -0.74715188, -0.76531747, -0.63076120, - -0.65892680, -0.06437053, -0.08253613, 0.96202014, 1.39385455])); + -0.65892680, -0.06437053, -0.08253613, 0.96202014, 1.39385455], + 1e-2, 1e-2)); // Test nonzero ridge parameters. // Values from R's MASS package. @@ -831,16 +832,16 @@ unittest { // With a ridge param. of zero, ridge regression reduces to regular // OLS regression. - assert(approxEqual(linearRegressBeta(a, repeat(1), b, c, 0), + assert(isClose(linearRegressBeta(a, repeat(1), b, c, 0), linearRegressBeta(a, repeat(1), b, c))); // Test the ridge regression. Values from R MASS package. 
auto ridge1 = linearRegressBeta(a, repeat(1), b, c, 1); auto ridge2 = linearRegressBeta(a, repeat(1), b, c, 2); auto ridge3 = linearRegressBeta(c, [[1,1,1,1,1,1,1], a, b], 10); - assert(approxEqual(ridge1, [6.0357757, -0.2729671, -0.1337131])); - assert(approxEqual(ridge2, [5.62367784, -0.22449854, -0.09775174])); - assert(approxEqual(ridge3, [5.82653624, -0.05197246, -0.27185592 ])); + assert(isClose(ridge1, [6.0357757, -0.2729671, -0.1337131], 1e-2, 1e-2)); + assert(isClose(ridge2, [5.62367784, -0.22449854, -0.09775174], 1e-2, 1e-2)); + assert(isClose(ridge3, [5.82653624, -0.05197246, -0.27185592 ], 1e-2, 1e-2)); } private MeanSD[] calculateSummaries(X...)(X xIn, RegionAllocator alloc) { @@ -1396,11 +1397,11 @@ unittest { // the wide tolerance. However, if the direct normal equations // and linalg trick don't agree extremely closely, then something's // fundamentally wrong. - assert(approxEqual(normalEq, coordDescent, 0.02, 1e-4), text( + assert(isClose(normalEq, coordDescent, 0.02, 1e-4), text( normalEq, coordDescent)); - assert(approxEqual(linalgTrick, coordDescent, 0.02, 1e-4), text( + assert(isClose(linalgTrick, coordDescent, 0.02, 1e-4), text( linalgTrick, coordDescent)); - assert(approxEqual(normalEq, linalgTrick, 1e-6, 1e-8), text( + assert(isClose(normalEq, linalgTrick, 1e-6, 1e-8), text( normalEq, linalgTrick)); } @@ -1412,14 +1413,18 @@ unittest { [3.0, 1, 4, 1, 5, 9, 2], [2.0, 7, 1, 8, 2, 8, 1]]; - assert(approxEqual(linearRegressPenalized(y, x, 1, 0), - [4.16316, -0.3603197, 0.6308278, 0, -0.2633263])); - assert(approxEqual(linearRegressPenalized(y, x, 1, 3), - [2.519590, -0.09116883, 0.38067757, 0.13122413, -0.05637939])); - assert(approxEqual(linearRegressPenalized(y, x, 2, 0.1), - [1.247235, 0, 0.4440735, 0.2023602, 0])); - assert(approxEqual(linearRegressPenalized(y, x, 5, 7), - [3.453787, 0, 0.10968736, 0.01253992, 0])); + assert(isClose(linearRegressPenalized(y, x, 1, 0), + [4.16316, -0.3603197, 0.6308278, 0, -0.2633263], + 1e-2, 1e-2)); + assert(isClose(linearRegressPenalized(y, x, 1, 3), + [2.519590, -0.09116883, 0.38067757, 0.13122413, -0.05637939], + 1e-2, 1e-2)); + assert(isClose(linearRegressPenalized(y, x, 2, 0.1), + [1.247235, 0, 0.4440735, 0.2023602, 0], + 1e-2, 1e-2)); + assert(isClose(linearRegressPenalized(y, x, 5, 7), + [3.453787, 0, 0.10968736, 0.01253992, 0], + 1e-2, 1e-2)); } /** @@ -1573,56 +1578,55 @@ unittest { // R doesn't automatically calculate likelihood ratio P-value, and reports // deviations instead of log likelihoods. Deviations are just // -2 * likelihood. - alias approxEqual ae; // Save typing. // Start with the basics, with X as a ror. 
auto y1 = [1, 0, 0, 0, 1, 0, 0]; auto x1 = [[1.0, 1 ,1 ,1 ,1 ,1 ,1], [8.0, 6, 7, 5, 3, 0, 9]]; auto res1 = logisticRegress(y1, x1); - assert(ae(res1.betas[0], -0.98273)); - assert(ae(res1.betas[1], 0.01219)); - assert(ae(res1.stdErr[0], 1.80803)); - assert(ae(res1.stdErr[1], 0.29291)); - assert(ae(res1.p[0], 0.587)); - assert(ae(res1.p[1], 0.967)); - assert(ae(res1.aic, 12.374)); - assert(ae(res1.logLikelihood, -0.5 * 8.3758)); - assert(ae(res1.nullLogLikelihood, -0.5 * 8.3740)); - assert(ae(res1.lowerBound[0], -4.5264052)); - assert(ae(res1.lowerBound[1], -0.5618933)); - assert(ae(res1.upperBound[0], 2.560939)); - assert(ae(res1.upperBound[1], 0.586275)); + assert(isClose(res1.betas[0], -0.98273, 1e-2, 1e-2)); + assert(isClose(res1.betas[1], 0.01219, 1e-2, 1e-2)); + assert(isClose(res1.stdErr[0], 1.80803, 1e-2, 1e-2)); + assert(isClose(res1.stdErr[1], 0.29291, 1e-2, 1e-2)); + assert(isClose(res1.p[0], 0.587, 1e-2, 1e-2)); + assert(isClose(res1.p[1], 0.967, 1e-2, 1e-2)); + assert(isClose(res1.aic, 12.374, 1e-2, 1e-2)); + assert(isClose(res1.logLikelihood, -0.5 * 8.3758, 1e-2, 1e-2)); + assert(isClose(res1.nullLogLikelihood, -0.5 * 8.3740, 1e-2, 1e-2)); + assert(isClose(res1.lowerBound[0], -4.5264052, 1e-2, 1e-2)); + assert(isClose(res1.lowerBound[1], -0.5618933, 1e-2, 1e-2)); + assert(isClose(res1.upperBound[0], 2.560939, 1e-2, 1e-2)); + assert(isClose(res1.upperBound[1], 0.586275, 1e-2, 1e-2)); // Use tuple. auto y2 = [1,0,1,1,0,1,0,0,0,1,0,1]; auto x2_1 = [3,1,4,1,5,9,2,6,5,3,5,8]; auto x2_2 = [2,7,1,8,2,8,1,8,2,8,4,5]; auto res2 = logisticRegress(y2, repeat(1), x2_1, x2_2); - assert(ae(res2.betas[0], -1.1875)); - assert(ae(res2.betas[1], 0.1021)); - assert(ae(res2.betas[2], 0.1603)); - assert(ae(res2.stdErr[0], 1.5430)); - assert(ae(res2.stdErr[1], 0.2507)); - assert(ae(res2.stdErr[2], 0.2108)); - assert(ae(res2.p[0], 0.442)); - assert(ae(res2.p[1], 0.684)); - assert(ae(res2.p[2], 0.447)); - assert(ae(res2.aic, 21.81)); - assert(ae(res2.nullLogLikelihood, -0.5 * 16.636)); - assert(ae(res2.logLikelihood, -0.5 * 15.810)); - assert(ae(res2.lowerBound[0], -4.2116584)); - assert(ae(res2.lowerBound[1], -0.3892603)); - assert(ae(res2.lowerBound[2], -0.2528110)); - assert(ae(res2.upperBound[0], 1.8366823)); - assert(ae(res2.upperBound[1], 0.5934631)); - assert(ae(res2.upperBound[2], 0.5733693)); + assert(isClose(res2.betas[0], -1.1875, 1e-2, 1e-2)); + assert(isClose(res2.betas[1], 0.1021, 1e-2, 1e-2)); + assert(isClose(res2.betas[2], 0.1603, 1e-2, 1e-2)); + assert(isClose(res2.stdErr[0], 1.5430, 1e-2, 1e-2)); + assert(isClose(res2.stdErr[1], 0.2507, 1e-2, 1e-2)); + assert(isClose(res2.stdErr[2], 0.2108, 1e-2, 1e-2)); + assert(isClose(res2.p[0], 0.442, 1e-2, 1e-2)); + assert(isClose(res2.p[1], 0.684, 1e-2, 1e-2)); + assert(isClose(res2.p[2], 0.447, 1e-2, 1e-2)); + assert(isClose(res2.aic, 21.81, 1e-2, 1e-2)); + assert(isClose(res2.nullLogLikelihood, -0.5 * 16.636, 1e-2, 1e-2)); + assert(isClose(res2.logLikelihood, -0.5 * 15.810, 1e-2, 1e-2)); + assert(isClose(res2.lowerBound[0], -4.2116584, 1e-2, 1e-2)); + assert(isClose(res2.lowerBound[1], -0.3892603, 1e-2, 1e-2)); + assert(isClose(res2.lowerBound[2], -0.2528110, 1e-2, 1e-2)); + assert(isClose(res2.upperBound[0], 1.8366823, 1e-2, 1e-2)); + assert(isClose(res2.upperBound[1], 0.5934631, 1e-2, 1e-2)); + assert(isClose(res2.upperBound[2], 0.5733693, 1e-2, 1e-2)); auto x2Intercept = [1,1,1,1,1,1,1,1,1,1,1,1]; auto res2a = logisticRegress(y2, filter!"a.length"([x2Intercept, x2_1, x2_2])); foreach(ti, elem; res2a.tupleof) { - assert(ae(elem, 
res2.tupleof[ti])); + assert(isClose(elem, res2.tupleof[ti], 1e-2, 1e-2)); } // Use a huge range of values to test numerical stability. @@ -1633,21 +1637,21 @@ unittest { auto x3_2 = [1e8, 1e6, 1e7, 1e5, 1e3, 1e0, 1e9, 1e11]; auto x3_3 = [-5e12, 5e2, 6e5, 4e3, -999999, -666, -3e10, -2e10]; auto res3 = logisticRegress(y3, repeat(1), x3_1, x3_2, x3_3, 0.99); - assert(ae(res3.betas[0], 1.115e0)); - assert(ae(res3.betas[1], -4.674e-15)); - assert(ae(res3.betas[2], -7.026e-9)); - assert(ae(res3.betas[3], -2.109e-12)); - assert(ae(res3.stdErr[0], 1.158)); - assert(ae(res3.stdErr[1], 2.098e-13)); - assert(ae(res3.stdErr[2], 1.878e-8)); - assert(ae(res3.stdErr[3], 4.789e-11)); - assert(ae(res3.p[0], 0.336)); - assert(ae(res3.p[1], 0.982)); - assert(ae(res3.p[2], 0.708)); - assert(ae(res3.p[3], 0.965)); - assert(ae(res3.aic, 12.544)); - assert(ae(res3.nullLogLikelihood, -0.5 * 11.0904)); - assert(ae(res3.logLikelihood, -0.5 * 4.5442)); + assert(isClose(res3.betas[0], 1.115e0, 1e-2, 1e-2)); + assert(isClose(res3.betas[1], -4.674e-15, 1e-2, 1e-2)); + assert(isClose(res3.betas[2], -7.026e-9, 1e-2, 1e-2)); + assert(isClose(res3.betas[3], -2.109e-12, 1e-2, 1e-2)); + assert(isClose(res3.stdErr[0], 1.158, 1e-2, 1e-2)); + assert(isClose(res3.stdErr[1], 2.098e-13, 1e-2, 1e-2)); + assert(isClose(res3.stdErr[2], 1.878e-8, 1e-2, 1e-2)); + assert(isClose(res3.stdErr[3], 4.789e-11, 1e-2, 1e-2)); + assert(isClose(res3.p[0], 0.336, 1e-2, 1e-2)); + assert(isClose(res3.p[1], 0.982, 1e-2, 1e-2)); + assert(isClose(res3.p[2], 0.708, 1e-2, 1e-2)); + assert(isClose(res3.p[3], 0.965, 1e-2, 1e-2)); + assert(isClose(res3.aic, 12.544, 1e-2, 1e-2)); + assert(isClose(res3.nullLogLikelihood, -0.5 * 11.0904, 1e-2, 1e-2)); + assert(isClose(res3.logLikelihood, -0.5 * 4.5442, 1e-2, 1e-2)); // Not testing confidence intervals b/c they'd be so buried in numerical // fuzz. 
@@ -1663,45 +1667,45 @@ unittest { auto x4_3 = take(cycle([1,2,3,4,5]), 1_000_000); auto x4_4 = take(cycle([8,6,7,5,3,0,9]), 1_000_000); auto res4 = logisticRegress(y4, repeat(1), x4_1, x4_2, x4_3, x4_4, 0.99); - assert(ae(res4.betas[0], -1.574)); - assert(ae(res4.betas[1], 5.625e-6)); - assert(ae(res4.betas[2], -7.282e-1)); - assert(ae(res4.betas[3], -4.381e-6)); - assert(ae(res4.betas[4], -8.343e-6)); - assert(ae(res4.stdErr[0], 3.693e-2)); - assert(ae(res4.stdErr[1], 7.188e-8)); - assert(ae(res4.stdErr[2], 4.208e-2)); - assert(ae(res4.stdErr[3], 1.658e-3)); - assert(ae(res4.stdErr[4], 8.164e-4)); - assert(ae(res4.p[0], 0)); - assert(ae(res4.p[1], 0)); - assert(ae(res4.p[2], 0)); - assert(ae(res4.p[3], 0.998)); - assert(ae(res4.p[4], 0.992)); - assert(ae(res4.aic, 1089339)); - assert(ae(res4.nullLogLikelihood, -0.5 * 1386294)); - assert(ae(res4.logLikelihood, -0.5 * 1089329)); - assert(ae(res4.lowerBound[0], -1.668899)); - assert(ae(res4.lowerBound[1], 5.439787e-6)); - assert(ae(res4.lowerBound[2], -0.8366273)); - assert(ae(res4.lowerBound[3], -4.27406e-3)); - assert(ae(res4.lowerBound[4], -2.111240e-3)); - assert(ae(res4.upperBound[0], -1.478623)); - assert(ae(res4.upperBound[1], 5.810089e-6)); - assert(ae(res4.upperBound[2], -6.198418e-1)); - assert(ae(res4.upperBound[3], 4.265302e-3)); - assert(ae(res4.upperBound[4], 2.084554e-3)); + assert(isClose(res4.betas[0], -1.574, 1e-2, 1e-2)); + assert(isClose(res4.betas[1], 5.625e-6, 1e-2, 1e-2)); + assert(isClose(res4.betas[2], -7.282e-1, 1e-2, 1e-2)); + assert(isClose(res4.betas[3], -4.381e-6, 1e-2, 1e-2)); + assert(isClose(res4.betas[4], -8.343e-6, 1e-2, 1e-2)); + assert(isClose(res4.stdErr[0], 3.693e-2, 1e-2, 1e-2)); + assert(isClose(res4.stdErr[1], 7.188e-8, 1e-2, 1e-2)); + assert(isClose(res4.stdErr[2], 4.208e-2, 1e-2, 1e-2)); + assert(isClose(res4.stdErr[3], 1.658e-3, 1e-2, 1e-2)); + assert(isClose(res4.stdErr[4], 8.164e-4, 1e-2, 1e-2)); + assert(isClose(res4.p[0], 0)); + assert(isClose(res4.p[1], 0)); + assert(isClose(res4.p[2], 0, 1e-2, 1e-2)); + assert(isClose(res4.p[3], 0.998, 1e-2, 1e-2)); + assert(isClose(res4.p[4], 0.992, 1e-2, 1e-2)); + assert(isClose(res4.aic, 1089339, 1e-2, 1e-2)); + assert(isClose(res4.nullLogLikelihood, -0.5 * 1386294, 1e-2, 1e-2)); + assert(isClose(res4.logLikelihood, -0.5 * 1089329, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound[0], -1.668899, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound[1], 5.439787e-6, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound[2], -0.8366273, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound[3], -4.27406e-3, 1e-2, 1e-2)); + assert(isClose(res4.lowerBound[4], -2.111240e-3, 1e-2, 1e-2)); + assert(isClose(res4.upperBound[0], -1.478623, 1e-2, 1e-2)); + assert(isClose(res4.upperBound[1], 5.810089e-6, 1e-2, 1e-2)); + assert(isClose(res4.upperBound[2], -6.198418e-1, 1e-2, 1e-2)); + assert(isClose(res4.upperBound[3], 4.265302e-3, 1e-2, 1e-2)); + assert(isClose(res4.upperBound[4], 2.084554e-3, 1e-2, 1e-2)); // Test ridge stuff. 
auto ridge2 = logisticRegressBeta(y2, repeat(1), x2_1, x2_2, 3); - assert(ae(ridge2[0], -0.40279319)); - assert(ae(ridge2[1], 0.03575638)); - assert(ae(ridge2[2], 0.05313875)); + assert(isClose(ridge2[0], -0.40279319, 1e-2, 1e-2)); + assert(isClose(ridge2[1], 0.03575638, 1e-2, 1e-2)); + assert(isClose(ridge2[2], 0.05313875, 1e-2, 1e-2)); auto ridge2_2 = logisticRegressBeta(y2, repeat(1), x2_1, x2_2, 2); - assert(ae(ridge2_2[0], -0.51411490)); - assert(ae(ridge2_2[1], 0.04536590)); - assert(ae(ridge2_2[2], 0.06809964)); + assert(isClose(ridge2_2[0], -0.51411490, 1e-2, 1e-2)); + assert(isClose(ridge2_2[1], 0.04536590, 1e-2, 1e-2)); + assert(isClose(ridge2_2[2], 0.06809964, 1e-2, 1e-2)); } /// The logistic function used in logistic regression. @@ -1828,21 +1832,22 @@ unittest { // the wide tolerance. However, if the direct normal equations // and linalg trick don't agree extremely closely, then something's // fundamentally wrong. - assert(approxEqual(normalEq, coordDescent, 0.02, 1e-4), text( + assert(isClose(normalEq, coordDescent, 0.02, 1e-4), text( normalEq, coordDescent)); - assert(approxEqual(linalgTrick, coordDescent, 0.02, 1e-4), text( + assert(isClose(linalgTrick, coordDescent, 0.02, 1e-4), text( linalgTrick, coordDescent)); - assert(approxEqual(normalEq, linalgTrick, 1e-6, 1e-8), text( + assert(isClose(normalEq, linalgTrick, 1e-6, 1e-8), text( normalEq, linalgTrick)); } - assert(approxEqual(logisticRegressBeta(y, x[0], x[1], x[2]), + assert(isClose(logisticRegressBeta(y, x[0], x[1], x[2]), logisticRegressPenalized(y, x[1], x[2], 0, 0))); - assert(approxEqual(logisticRegressBeta(y, [x[0], x[1], x[2]]), + assert(isClose(logisticRegressBeta(y, [x[0], x[1], x[2]]), logisticRegressPenalized(y, [x[1], x[2]], 0, 0))); - assert(approxEqual(logisticRegressBeta(y, [x[0], x[1], x[2]]), + assert(isClose(logisticRegressBeta(y, [x[0], x[1], x[2]]), logisticRegressPenalized(y, - [to!(float[])(x[1]), to!(float[])(x[2])], 0, 0))); + [to!(float[])(x[1]), to!(float[])(x[2])], 0, 0), + 1e-2, 1e-2)); // Make sure the adding intercept stuff is right for the Newton path. //assert(logisticRegressBeta(x[0], x[1], x[2]) == @@ -1857,14 +1862,18 @@ unittest { // Values from R's Penalized package. Note that it uses a convention for // the ridge parameter such that Penalized ridge = 2 * dstats ridge. - assert(approxEqual(logisticRegressPenalized(y, x, 1, 0), - [1.642080, -0.22086515, -0.02587546, 0.00000000, 0.00000000 ])); - assert(approxEqual(logisticRegressPenalized(y, x, 1, 3), - [0.5153373, -0.04278257, -0.00888014, 0.01316831, 0.00000000])); - assert(approxEqual(logisticRegressPenalized(y, x, 2, 0.1), - [0.2876821, 0, 0., 0., 0])); - assert(approxEqual(logisticRegressPenalized(y, x, 1.2, 7), - [0.367613 , -0.017227631, 0.000000000, 0.003875104, 0.000000000])); + assert(isClose(logisticRegressPenalized(y, x, 1, 0), + [1.642080, -0.22086515, -0.02587546, 0.00000000, 0.00000000 ], + 1e-2, 1e-2)); + assert(isClose(logisticRegressPenalized(y, x, 1, 3), + [0.5153373, -0.04278257, -0.00888014, 0.01316831, 0.00000000], + 1e-2, 1e-2)); + assert(isClose(logisticRegressPenalized(y, x, 2, 0.1), + [0.2876821, 0, 0., 0., 0], + 1e-2, 1e-2)); + assert(isClose(logisticRegressPenalized(y, x, 1.2, 7), + [0.367613 , -0.017227631, 0.000000000, 0.003875104, 0.000000000], + 1e-2, 1e-2)); } /** @@ -1961,28 +1970,32 @@ unittest { // Values from R's lowess() function. This gets slightly different // results than loess(), probably due to disagreements bout windowing // details. 
- assert(approxEqual(loess1.predictions(0), + assert(isClose(loess1.predictions(0), [2.9193046, 3.6620295, 4.2229953, 5.2642335, 5.3433985, 4.4225636, - 2.7719778, 0.6643268] + 2.7719778, 0.6643268], + 1e-2, 1e-2 )); loess1 = loess1D(y, x, 0.5, 1); - assert(approxEqual(loess1.predictions(0), + assert(isClose(loess1.predictions(0), [2.1615941, 4.0041736, 4.5642738, 4.8631052, 5.7136895, 5.5642738, - 2.8631052, -0.1977227] + 2.8631052, -0.1977227], + 1e-2, 1e-2 )); - assert(approxEqual(loess1.predictions(2), + assert(isClose(loess1.predictions(2), [2.2079526, 3.9809030, 4.4752888, 4.8849727, 5.7260333, 5.4465225, - 2.8769120, -0.1116018] + 2.8769120, -0.1116018], + 1e-2, 1e-2 )); // Test 0th and 2nd order using R's loess() function since lowess() doesn't // support anything besides first degree. auto loess0 = loess1D(y, x, 0.5, 0); - assert(approxEqual(loess0.predictions(0), + assert(isClose(loess0.predictions(0), [3.378961, 4.004174, 4.564274, 4.863105, 5.713689, 5.564274, 2.863105, - 1.845369] + 1.845369], + 1e-2, 1e-2 )); // Not testing the last point. R's loess() consistently gets slightly @@ -1991,9 +2004,10 @@ unittest { // when this happens.) It's not clear which is right but the differences // are small and not practically important. auto loess2 = loess1D(y, x, 0.75, 2); - assert(approxEqual(loess2.predictions(0)[0..$ - 1], + assert(isClose(loess2.predictions(0)[0..$ - 1], [2.4029984, 4.1021339, 4.8288941, 4.5523535, 6.0000000, 6.4476465, - 3.7669741] + 3.7669741], + 1e-2, 1e-2 )); } @@ -2863,5 +2877,5 @@ unittest { ans2 += a[i] * b[i] * c[i]; } - assert(approxEqual(ans1, ans2)); + assert(isClose(ans1, ans2)); } diff --git a/source/dstats/summary.d b/source/dstats/summary.d index 0692aea..f7ab5fd 100644 --- a/source/dstats/summary.d +++ b/source/dstats/summary.d @@ -138,7 +138,7 @@ unittest { // Off by some tiny fraction in even N case because of division. // No idea why, but it's too small a rounding error to care about. - assert(approxEqual(quickRes, accurateRes)); + assert(isClose(quickRes, accurateRes)); } // Make sure everything works with lowest common denominator range type. @@ -158,7 +158,7 @@ unittest { Count a; a.upTo = 100; - assert(approxEqual(median(a), 49.5)); + assert(isClose(median(a), 49.5)); } /**Plain old data holder struct for median, median absolute deviation. @@ -200,8 +200,8 @@ if(doubleInput!(T)) { } unittest { - assert(approxEqual(medianAbsDev([7,1,8,2,8,1,9,2,8,4,5,9].dup).medianAbsDev, 2.5L)); - assert(approxEqual(medianAbsDev([8,6,7,5,3,0,999].dup).medianAbsDev, 2.0L)); + assert(isClose(medianAbsDev([7,1,8,2,8,1,9,2,8,4,5,9].dup).medianAbsDev, 2.5L)); + assert(isClose(medianAbsDev([8,6,7,5,3,0,999].dup).medianAbsDev, 2.0L)); } /**Computes the interquantile range of data at the given quantile value in O(N) @@ -278,10 +278,10 @@ if(doubleInput!R) { unittest { // 0 3 5 6 7 8 9 - assert(approxEqual(interquantileRange([1,2,3,4,5,6,7,8]), 3.5)); - assert(approxEqual(interquantileRange([1,2,3,4,5,6,7,8,9]), 4)); + assert(isClose(interquantileRange([1,2,3,4,5,6,7,8]), 3.5)); + assert(isClose(interquantileRange([1,2,3,4,5,6,7,8,9]), 4)); assert(interquantileRange([1,9,2,4,3,6,8], 0) == 8); - assert(approxEqual(interquantileRange([8,6,7,5,3,0,9], 0.2), 4.4)); + assert(isClose(interquantileRange([8,6,7,5,3,0,9], 0.2), 4.4)); } /**Output range to calculate the mean online. 
Getter for mean costs a branch to @@ -333,7 +333,7 @@ public: * combined.put(i); * } * - * assert(approxEqual(combined.mean, mean1.mean)); + * assert(isClose(combined.mean, mean1.mean, 1e-2, 1e-2)); * --- */ void put(typeof(this) rhs) pure nothrow @safe { @@ -474,7 +474,7 @@ unittest { auto foo = map!(to!(uint))(data); auto result = geometricMean(map!(to!(uint))(data)); - assert(approxEqual(result, 2.60517)); + assert(isClose(result, 2.60517, 1e-2, 1e-2)); Mean mean1, mean2, combined; foreach(i; 0..5) { @@ -491,7 +491,7 @@ unittest { combined.put(i); } - assert(approxEqual(combined.mean, mean1.mean), + assert(isClose(combined.mean, mean1.mean), text(combined.mean, " ", mean1.mean)); assert(combined.N == mean1.N); } @@ -540,12 +540,12 @@ unittest { assert(sum([1,2,3,4,5,6,7,8,9,10][]) == 55); assert(sum(filter!"true"([1,2,3,4,5,6,7,8,9,10][])) == 55); assert(sum(cast(int[]) [1,2,3,4,5])==15); - assert(approxEqual( sum(cast(int[]) [40.0, 40.1, 5.2]), 85.3)); + assert(isClose( sum(cast(int[]) [40.0, 40.1, 5.2]), 85.3, 1e-2, 1e-2)); assert(mean(cast(int[]) [1,2,3]).mean == 2); assert(mean(cast(int[]) [1.0, 2.0, 3.0]).mean == 2.0); assert(mean([1, 2, 5, 10, 17][]).mean == 7); assert(mean([1, 2, 5, 10, 17][]).sum == 35); - assert(approxEqual(mean([8,6,7,5,3,0,9,3,6,2,4,3,6][]).mean, 4.769231)); + assert(isClose(mean([8,6,7,5,3,0,9,3,6,2,4,3,6][]).mean, 4.769231, 1e-2, 1e-2)); // Test the OO struct a little, since we're using the new ILP algorithm. Mean m; @@ -566,7 +566,7 @@ unittest { } foreach(ti, elem; res1.tupleof) { - assert(approxEqual(elem, res2.tupleof[ti])); + assert(isClose(elem, res2.tupleof[ti], 1e-2, 1e-2)); } } } @@ -755,12 +755,12 @@ if(doubleIterable!(T)) { unittest { auto res = meanStdev(cast(int[]) [3, 1, 4, 5]); - assert(approxEqual(res.stdev, 1.7078)); - assert(approxEqual(res.mean, 3.25)); + assert(isClose(res.stdev, 1.7078, 1e-2, 1e-2)); + assert(isClose(res.mean, 3.25, 1e-2, 1e-2)); res = meanStdev(cast(double[]) [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]); - assert(approxEqual(res.stdev, 2.160247)); - assert(approxEqual(res.mean, 4)); - assert(approxEqual(res.sum, 28)); + assert(isClose(res.stdev, 2.160247, 1e-2, 1e-2)); + assert(isClose(res.mean, 4, 1e-2, 1e-2)); + assert(isClose(res.sum, 28, 1e-2, 1e-2)); MeanSD mean1, mean2, combined; foreach(i; 0..5) { @@ -777,11 +777,11 @@ unittest { combined.put(i); } - assert(approxEqual(combined.mean, mean1.mean)); - assert(approxEqual(combined.stdev, mean1.stdev)); + assert(isClose(combined.mean, mean1.mean)); + assert(isClose(combined.stdev, mean1.stdev)); assert(combined.N == mean1.N); - assert(approxEqual(combined.mean, 4.5)); - assert(approxEqual(combined.stdev, 3.027650)); + assert(isClose(combined.mean, 4.5)); + assert(isClose(combined.stdev, 3.027650, 1e-2, 1e-2)); foreach(i; 0..100) { // Monte carlo test the unrolled version. @@ -793,7 +793,7 @@ unittest { } foreach(ti, elem; res1.tupleof) { - assert(approxEqual(elem, res2.tupleof[ti])); + assert(isClose(elem, res2.tupleof[ti], 1e-2, 1e-2)); } MeanSD resCornerCase; // Test corner cases where one of the N's is 0. 
@@ -831,7 +831,7 @@ unittest { * assert(summ.mean == 3); * assert(summ.stdev == sqrt(2.5)); * assert(summ.var == 2.5); - * assert(approxEqual(summ.kurtosis, -1.9120)); + * assert(isClose(summ.kurtosis, -1.9120, 1e-2, 1e-2)); * assert(summ.min == 1); * assert(summ.max == 5); * assert(summ.sum == 15); @@ -1001,7 +1001,7 @@ unittest { } foreach(ti, elem; mean1.tupleof) { - assert(approxEqual(elem, combined.tupleof[ti])); + assert(isClose(elem, combined.tupleof[ti])); } Summary summCornerCase; // Case where one N is zero. @@ -1031,9 +1031,9 @@ if(doubleIterable!(T)) { unittest { // Values from Matlab. - assert(approxEqual(kurtosis([1, 1, 1, 1, 10].dup), 0.25)); - assert(approxEqual(kurtosis([2.5, 3.5, 4.5, 5.5].dup), -1.36)); - assert(approxEqual(kurtosis([1,2,2,2,2,2,100].dup), 2.1657)); + assert(isClose(kurtosis([1, 1, 1, 1, 10].dup), 0.25)); + assert(isClose(kurtosis([2.5, 3.5, 4.5, 5.5].dup), -1.36)); + assert(isClose(kurtosis([1,2,2,2,2,2,100].dup), 2.1657, 1e-2, 1e-2)); } /**Skewness is a measure of symmetry of a distribution. Positive skewness @@ -1054,14 +1054,14 @@ if(doubleIterable!(T)) { unittest { // Values from Octave. - assert(approxEqual(skewness([1,2,3,4,5].dup), 0)); - assert(approxEqual(skewness([3,1,4,1,5,9,2,6,5].dup), 0.5443)); - assert(approxEqual(skewness([2,7,1,8,2,8,1,8,2,8,4,5,9].dup), -0.0866)); + assert(isClose(skewness([1,2,3,4,5].dup), 0, 1e-2, 1e-2)); + assert(isClose(skewness([3,1,4,1,5,9,2,6,5].dup), 0.5443, 1e-2, 1e-2)); + assert(isClose(skewness([2,7,1,8,2,8,1,8,2,8,4,5,9].dup), -0.0866, 1e-2, 1e-2)); // Test handling of ranges that are not arrays. string[] stringy = ["3", "1", "4", "1", "5", "9", "2", "6", "5"]; auto intified = map!(to!(int))(stringy); - assert(approxEqual(skewness(intified), 0.5443)); + assert(isClose(skewness(intified), 0.5443, 1e-2, 1e-2)); } /**Convenience function. 
Puts all elements of data into a Summary struct, @@ -1196,11 +1196,11 @@ unittest { size_t pos = 0; foreach(elem; z) { - assert(approxEqual(elem, (arr[pos++] - m) / sd)); + assert(isClose(elem, (arr[pos++] - m) / sd)); } assert(z.length == 5); foreach(i; 0..z.length) { - assert(approxEqual(z[i], (arr[i] - m) / sd)); + assert(isClose(z[i], (arr[i] - m) / sd)); } } diff --git a/source/dstats/tests.d b/source/dstats/tests.d index c61a7ee..ab7e2b6 100644 --- a/source/dstats/tests.d +++ b/source/dstats/tests.d @@ -167,22 +167,22 @@ ConfInt studentsTTest(T)( unittest { auto t1 = studentsTTest([1, 2, 3, 4, 5].dup, 2); - assert(approxEqual(t1.testStat, 1.4142)); - assert(approxEqual(t1.p, 0.2302)); - assert(approxEqual(t1.lowerBound, 1.036757)); - assert(approxEqual(t1.upperBound, 4.963243)); + assert(isClose(t1.testStat, 1.4142, 1e-2, 1e-2)); + assert(isClose(t1.p, 0.2302, 1e-2, 1e-2)); + assert(isClose(t1.lowerBound, 1.036757, 1e-2, 1e-2)); + assert(isClose(t1.upperBound, 4.963243, 1e-2, 1e-2)); assert(t1 == studentsTTest( meanStdev([1,2,3,4,5].dup), 2)); auto t2 = studentsTTest([1, 2, 3, 4, 5].dup, 2, Alt.less); - assert(approxEqual(t2.p, .8849)); - assert(approxEqual(t2.testStat, 1.4142)); + assert(isClose(t2.p, .8849, 1e-2, 1e-2)); + assert(isClose(t2.testStat, 1.4142, 1e-2, 1e-2)); assert(t2.lowerBound == -double.infinity); - assert(approxEqual(t2.upperBound, 4.507443)); + assert(isClose(t2.upperBound, 4.507443, 1e-2, 1e-2)); auto t3 = studentsTTest( summary([1, 2, 3, 4, 5].dup), 2, Alt.greater); - assert(approxEqual(t3.p, .1151)); - assert(approxEqual(t3.testStat, 1.4142)); - assert(approxEqual(t3.lowerBound, 1.492557)); + assert(isClose(t3.p, .1151, 1e-2, 1e-2)); + assert(isClose(t3.testStat, 1.4142, 1e-2, 1e-2)); + assert(isClose(t3.lowerBound, 1.492557, 1e-2, 1e-2)); assert(t3.upperBound == double.infinity); } @@ -280,40 +280,40 @@ ConfInt studentsTTest(T, U)( unittest { // Values from R. 
auto t1 = studentsTTest([1,2,3,4,5], [1,3,4,5,7,9]);
- assert(approxEqual(t1.p, 0.2346));
- assert(approxEqual(t1.testStat, -1.274));
- assert(approxEqual(t1.lowerBound, -5.088787));
- assert(approxEqual(t1.upperBound, 1.422120));
+ assert(isClose(t1.p, 0.2346, 1e-2, 1e-2));
+ assert(isClose(t1.testStat, -1.274, 1e-2, 1e-2));
+ assert(isClose(t1.lowerBound, -5.088787, 1e-2, 1e-2));
+ assert(isClose(t1.upperBound, 1.422120, 1e-2, 1e-2));
- assert(approxEqual(studentsTTest([1,2,3,4,5], [1,3,4,5,7,9], 0, Alt.less),
- 0.1173));
- assert(approxEqual(studentsTTest([1,2,3,4,5], [1,3,4,5,7,9], 0, Alt.greater),
- 0.8827));
+ assert(isClose(studentsTTest([1,2,3,4,5], [1,3,4,5,7,9], 0, Alt.less),
+ 0.1173, 1e-2, 1e-2));
+ assert(isClose(studentsTTest([1,2,3,4,5], [1,3,4,5,7,9], 0, Alt.greater),
+ 0.8827, 1e-2, 1e-2));
auto t2 = studentsTTest([1,3,5,7,9,11], [2,2,1,3,4], 5);
- assert(approxEqual(t2.p, 0.44444));
- assert(approxEqual(t2.testStat, -0.7998));
- assert(approxEqual(t2.lowerBound, -0.3595529));
- assert(approxEqual(t2.upperBound, 7.5595529));
+ assert(isClose(t2.p, 0.44444, 1e-2, 1e-2));
+ assert(isClose(t2.testStat, -0.7998, 1e-2, 1e-2));
+ assert(isClose(t2.lowerBound, -0.3595529, 1e-2, 1e-2));
+ assert(isClose(t2.upperBound, 7.5595529, 1e-2, 1e-2));
auto t5 = studentsTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, Alt.less);
- assert(approxEqual(t5.p, 0.965));
- assert(approxEqual(t5.testStat, 2.0567));
- assert(approxEqual(t5.upperBound, 6.80857));
+ assert(isClose(t5.p, 0.965, 1e-2, 1e-2));
+ assert(isClose(t5.testStat, 2.0567, 1e-2, 1e-2));
+ assert(isClose(t5.upperBound, 6.80857, 1e-2, 1e-2));
assert(t5.lowerBound == -double.infinity);
auto t6 = studentsTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, Alt.greater);
- assert(approxEqual(t6.p, 0.03492));
- assert(approxEqual(t6.testStat, 2.0567));
- assert(approxEqual(t6.lowerBound, 0.391422));
+ assert(isClose(t6.p, 0.03492, 1e-2, 1e-2));
+ assert(isClose(t6.testStat, 2.0567, 1e-2, 1e-2));
+ assert(isClose(t6.lowerBound, 0.391422, 1e-2, 1e-2));
assert(t6.upperBound == double.infinity);
auto t7 = studentsTTest([1, 2, 4], [3]);
- assert(approxEqual(t7.p, 0.7418));
- assert(approxEqual(t7.testStat, 0.-.378));
- assert(approxEqual(t7.lowerBound, -8.255833));
- assert(approxEqual(t7.upperBound, 6.922499));
+ assert(isClose(t7.p, 0.7418, 1e-2, 1e-2));
+ assert(isClose(t7.testStat, -0.378, 1e-2, 1e-2));
+ assert(isClose(t7.lowerBound, -8.255833, 1e-2, 1e-2));
+ assert(isClose(t7.upperBound, 6.922499, 1e-2, 1e-2));
}
@@ -411,28 +411,28 @@ if( (isSummary!T || doubleIterable!T) && (isSummary!U || doubleIterable!U)) {
unittest {
// Values from R.
auto t1 = welchTTest( meanStdev([1,2,3,4,5]), [1,3,4,5,7,9], 2); - assert(approxEqual(t1.p, 0.02285)); - assert(approxEqual(t1.testStat, -2.8099)); - assert(approxEqual(t1.lowerBound, -4.979316)); - assert(approxEqual(t1.upperBound, 1.312649)); + assert(isClose(t1.p, 0.02285, 1e-2, 1e-2)); + assert(isClose(t1.testStat, -2.8099, 1e-2, 1e-2)); + assert(isClose(t1.lowerBound, -4.979316, 1e-2, 1e-2)); + assert(isClose(t1.upperBound, 1.312649, 1e-2, 1e-2)); auto t2 = welchTTest([1,2,3,4,5], summary([1,3,4,5,7,9]), -1, Alt.less); - assert(approxEqual(t2.p, 0.2791)); - assert(approxEqual(t2.testStat, -0.6108)); + assert(isClose(t2.p, 0.2791, 1e-2, 1e-2)); + assert(isClose(t2.testStat, -0.6108, 1e-2, 1e-2)); assert(t2.lowerBound == -double.infinity); - assert(approxEqual(t2.upperBound, 0.7035534)); + assert(isClose(t2.upperBound, 0.7035534, 1e-2, 1e-2)); auto t3 = welchTTest([1,2,3,4,5], [1,3,4,5,7,9], 0.5, Alt.greater); - assert(approxEqual(t3.p, 0.9372)); - assert(approxEqual(t3.testStat, -1.7104)); - assert(approxEqual(t3.lowerBound, -4.37022)); + assert(isClose(t3.p, 0.9372, 1e-2, 1e-2)); + assert(isClose(t3.testStat, -1.7104, 1e-2, 1e-2)); + assert(isClose(t3.lowerBound, -4.37022, 1e-2, 1e-2)); assert(t3.upperBound == double.infinity); - assert(approxEqual(welchTTest([1,3,5,7,9,11], [2,2,1,3,4]).p, 0.06616)); - assert(approxEqual(welchTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, - Alt.less).p, 0.967)); - assert(approxEqual(welchTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, - Alt.greater).p, 0.03308)); + assert(isClose(welchTTest([1,3,5,7,9,11], [2,2,1,3,4]).p, 0.06616, 1e-2, 1e-2)); + assert(isClose(welchTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, + Alt.less).p, 0.967, 1e-2, 1e-2)); + assert(isClose(welchTTest([1,3,5,7,9,11], [2,2,1,3,4], 0, + Alt.greater).p, 0.03308, 1e-2, 1e-2)); } /** @@ -570,16 +570,16 @@ ConfInt pairedTTest(T)( unittest { // Values from R. auto t1 = pairedTTest([3,2,3,4,5], [2,3,5,5,6], 1); - assert(approxEqual(t1.p, 0.02131)); - assert(approxEqual(t1.testStat, -3.6742)); - assert(approxEqual(t1.lowerBound, -2.1601748)); - assert(approxEqual(t1.upperBound, 0.561748)); - - assert(approxEqual(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.less).p, 0.0889)); - assert(approxEqual(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.greater).p, 0.9111)); - assert(approxEqual(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.twoSided).p, 0.1778)); - assert(approxEqual(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 1, Alt.less).p, 0.01066)); - assert(approxEqual(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 1, Alt.greater).p, 0.9893)); + assert(isClose(t1.p, 0.02131, 1e-2, 1e-2)); + assert(isClose(t1.testStat, -3.6742, 1e-2, 1e-2)); + assert(isClose(t1.lowerBound, -2.1601748, 1e-2, 1e-2)); + assert(isClose(t1.upperBound, 0.561748, 1e-2, 1e-2)); + + assert(isClose(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.less).p, 0.0889, 1e-2, 1e-2)); + assert(isClose(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.greater).p, 0.9111, 1e-2, 1e-2)); + assert(isClose(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 0, Alt.twoSided).p, 0.1778, 1e-2, 1e-2)); + assert(isClose(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 1, Alt.less).p, 0.01066, 1e-2, 1e-2)); + assert(isClose(pairedTTest([3,2,3,4,5], [2,3,5,5,6], 1, Alt.greater).p, 0.9893, 1e-2, 1e-2)); } /** @@ -603,8 +603,8 @@ int[] sample2 = [100,200,300,400,500]; auto result = levenesTest(sample1, sample2); // Clearly the variances are different between these two samples. 
-assert( approxEqual(result.testStat, 10.08)); -assert( approxEqual(result.p, 0.01310)); +assert( isClose(result.testStat, 10.08, 1e-2, 1e-2)); +assert( isClose(result.p, 0.01310, 1e-2, 1e-2)); --- */ TestRes levenesTest(alias central = median, T...)(T data) { @@ -615,16 +615,16 @@ unittest { // Values from R's car package, which uses the median definition // exclusively. auto res1 = levenesTest([1,2,3,4,5][], [2,4,8,16,32][]); - assert(approxEqual(res1.testStat, 3.0316)); - assert(approxEqual(res1.p, 0.1198), res1.toString()); + assert(isClose(res1.testStat, 3.0316, 1e-2, 1e-2)); + assert(isClose(res1.p, 0.1198, 1e-2, 1e-2), res1.toString()); auto res2 = levenesTest([[1,2,3,4,5][], [100,200,300,400,500,600][]][]); - assert(approxEqual(res2.testStat, 13.586)); - assert(approxEqual(res2.p, 0.005029)); + assert(isClose(res2.testStat, 13.586, 1e-2, 1e-2)); + assert(isClose(res2.p, 0.005029, 1e-2, 1e-2)); auto res3 = levenesTest([8,6,7,5,3,0,9][], [3,6,2,4,3,6][]); - assert(approxEqual(res3.testStat, 1.1406)); - assert(approxEqual(res3.p, 0.3084)); + assert(isClose(res3.testStat, 1.1406, 1e-2, 1e-2)); + assert(isClose(res3.p, 0.3084, 1e-2, 1e-2)); } /** @@ -644,8 +644,8 @@ uint[] thing1 = [3,1,4,1], thing2 = [5,9,2,6,5,3], thing3 = [5,8,9,7,9,3]; auto result = fTest(thing1, meanStdev(thing2), summary(thing3)); -assert(approxEqual(result.testStat, 4.9968)); -assert(approxEqual(result.p, 0.02456)); +assert(isClose(result.testStat, 4.9968, 1e-2, 1e-2)); +assert(isClose(result.p, 0.02456, 1e-2, 1e-2)); --- References: http://en.wikipedia.org/wiki/F-test @@ -679,36 +679,36 @@ unittest { thing2 = [5,9,2,6,5,3], thing3 = [5,8,9,7,9,3]; auto result = fTest(thing1, meanStdev(thing2), summary(thing3)); - assert(approxEqual(result.testStat, 4.9968)); - assert(approxEqual(result.p, 0.02456)); + assert(isClose(result.testStat, 4.9968, 1e-2, 1e-2)); + assert(isClose(result.p, 0.02456, 1e-2, 1e-2)); auto welchRes1 = welchAnova(thing1, thing2, thing3); - assert( approxEqual(welchRes1.testStat, 6.7813)); - assert( approxEqual(welchRes1.p, 0.01706)); + assert( isClose(welchRes1.testStat, 6.7813, 1e-2, 1e-2)); + assert( isClose(welchRes1.p, 0.01706, 1e-2, 1e-2)); // Test array case. 
auto res2 = fTest([thing1, thing2, thing3].dup); - assert(approxEqual(result.testStat, res2.testStat)); - assert(approxEqual(result.p, res2.p)); + assert(isClose(result.testStat, res2.testStat, 1e-2, 1e-2)); + assert(isClose(result.p, res2.p, 1e-2, 1e-2)); thing1 = [2,7,1,8,2]; thing2 = [8,1,8]; thing3 = [2,8,4,5,9]; auto res3 = fTest(thing1, thing2, thing3); - assert(approxEqual(res3.testStat, 0.377)); - assert(approxEqual(res3.p, 0.6953)); + assert(isClose(res3.testStat, 0.377, 1e-2, 1e-2)); + assert(isClose(res3.p, 0.6953, 1e-2, 1e-2)); auto res4 = fTest([summary(thing1), summary(thing2), summary(thing3)][]); - assert(approxEqual(res4.testStat, res3.testStat)); - assert(approxEqual(res4.testStat, res3.testStat)); + assert(isClose(res4.testStat, res3.testStat, 1e-2, 1e-2)); + assert(isClose(res4.testStat, res3.testStat, 1e-2, 1e-2)); auto welchRes2 = welchAnova(summary(thing1), thing2, thing3); - assert( approxEqual(welchRes2.testStat, 0.342)); - assert( approxEqual(welchRes2.p, 0.7257)); + assert( isClose(welchRes2.testStat, 0.342, 1e-2, 1e-2)); + assert( isClose(welchRes2.p, 0.7257, 1e-2, 1e-2)); auto res5 = fTest([1, 2, 4], [3]); - assert(approxEqual(res5.testStat, 0.1429)); - assert(approxEqual(res5.p, 0.7418)); + assert(isClose(res5.testStat, 0.1429, 1e-2, 1e-2)); + assert(isClose(res5.p, 0.7418, 1e-2, 1e-2)); } // Levene's Test, Welch ANOVA and F test have massive overlap at the @@ -935,7 +935,7 @@ if(allSatisfy!(isInputRange, T)) { unittest { // Values from VassarStats utility at // http://faculty.vassar.edu/lowry/VassarStats.html, but they like to - // round a lot, so the approxEqual tolerances are fairly wide. I + // round a lot, so the isClose tolerances are fairly wide. I // think it's adequate to demonstrate the correctness of this function, // though. uint[] alcohol = [8,6,7,5,3,0,9]; @@ -943,14 +943,14 @@ unittest { uint[] noSleep = [3,1,4,1,5,9,2]; uint[] loudMusic = [2,7,1,8,2,8,1]; auto result = correlatedAnova(alcohol, caffeine, noSleep, loudMusic); - assert(approxEqual(result.testStat, 0.43, 0.0, 0.01)); - assert(approxEqual(result.p, 0.734, 0.0, 0.01)); + assert(isClose(result.testStat, 0.43, 0.0, 0.01)); + assert(isClose(result.p, 0.734, 0.0, 0.01)); uint[] stuff1 = [3,4,2,6]; uint[] stuff2 = [4,1,9,8]; auto result2 = correlatedAnova([stuff1, stuff2].dup); - assert(approxEqual(result2.testStat, 0.72, 0.0, 0.01)); - assert(approxEqual(result2.p, 0.4584, 0.0, 0.01)); + assert(isClose(result2.testStat, 0.72, 0.0, 0.01)); + assert(isClose(result2.p, 0.4584, 0.0, 0.01)); } /**The Kruskal-Wallis rank sum test. Tests the null hypothesis that data in @@ -1083,8 +1083,8 @@ unittest { // R is actually wrong here because it apparently doesn't use a correction // for ties. auto res1 = kruskalWallis([3,1,4,1].idup, [5,9,2,6].dup, [5,3,5].dup); - assert(approxEqual(res1.testStat, 4.15)); - assert(approxEqual(res1.p, 0.1256)); + assert(isClose(res1.testStat, 4.15, 1e-2, 1e-2)); + assert(isClose(res1.p, 0.1256, 1e-2, 1e-2)); // Test for other input types. auto res2 = kruskalWallis([[3,1,4,1].idup, [5,9,2,6].idup, [5,3,5].idup].dup); @@ -1099,8 +1099,8 @@ unittest { // Test w/ one more case, just with one input type. auto res5 = kruskalWallis([2,7,1,8,2].dup, [8,1,8,2].dup, [8,4,5,9,2].dup, [7,1,8,2,8,1,8].dup); - assert(approxEqual(res5.testStat, 1.06)); - assert(approxEqual(res5.p, 0.7867)); + assert(isClose(res5.testStat, 1.06, 1e-2, 1e-2)); + assert(isClose(res5.p, 0.7867, 1e-2, 1e-2)); } /**The Friedman test is a non-parametric within-subject ANOVA. 
It's useful @@ -1180,14 +1180,14 @@ unittest { uint[] noSleep = [3,1,4,1,5,9,2]; uint[] loudMusic = [2,7,1,8,2,8,1]; auto result = friedmanTest(alcohol, caffeine, noSleep, loudMusic); - assert(approxEqual(result.testStat, 1.7463)); - assert(approxEqual(result.p, 0.6267)); + assert(isClose(result.testStat, 1.7463, 1e-2, 1e-2)); + assert(isClose(result.p, 0.6267, 1e-2, 1e-2)); uint[] stuff1 = [3,4,2,6]; uint[] stuff2 = [4,1,9,8]; auto result2 = friedmanTest([stuff1, stuff2].dup); - assert(approxEqual(result2.testStat, 1)); - assert(approxEqual(result2.p, 0.3173)); + assert(isClose(result2.testStat, 1, 1e-2, 1e-2)); + assert(isClose(result2.p, 0.3173, 1e-2, 1e-2)); } /**Computes Wilcoxon rank sum test statistic and P-value for @@ -1299,58 +1299,58 @@ is(CommonType!(ElementType!T, ElementType!U))) { // Simple stuff (no ties) first. Testing approximate // calculation first. - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.twoSided, 0), 0.9273)); - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.less, 0), 0.6079)); - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.greater, 0).p, 0.4636)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, - Alt.twoSided, 0).p, 0.4113)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, - Alt.less, 0).p, 0.2057)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, - map!"a"([3,5,7,8,13,15].dup), Alt.greater, 0).p, 0.8423)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.twoSided, 0), .6745)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.less, 0), .3372)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.greater, 0), .7346)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.twoSided, 0), 0.9273, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.less, 0), 0.6079, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.greater, 0).p, 0.4636, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, + Alt.twoSided, 0).p, 0.4113, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, + Alt.less, 0).p, 0.2057, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, + map!"a"([3,5,7,8,13,15].dup), Alt.greater, 0).p, 0.8423, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.twoSided, 0), .6745, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.less, 0), .3372, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.greater, 0), .7346, 1e-2, 1e-2)); // Now, lots of ties. 
- assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, - Alt.twoSided, 0), 0.3976)); - assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, - Alt.less, 0), 0.1988)); - assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, - Alt.greater, 0), 0.8548)); - assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, - Alt.twoSided, 0), 0.9049)); - assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, - Alt.less, 0), 0.4524)); - assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, - Alt.greater, 0), 0.64)); + assert(isClose(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, + Alt.twoSided, 0), 0.3976, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, + Alt.less, 0), 0.1988, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup, + Alt.greater, 0), 0.8548, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, + Alt.twoSided, 0), 0.9049, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, + Alt.less, 0), 0.4524, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup, + Alt.greater, 0), 0.64, 1e-2, 1e-2)); // Now, testing the exact calculation on the same data. - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.twoSided), 0.9307)); - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.less), 0.6039)); - assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, - Alt.greater), 0.4654)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, - Alt.twoSided), 0.4286)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, - Alt.less), 0.2143)); - assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, - Alt.greater), 0.8355)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.twoSided), .6905)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.less), .3452)); - assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, - Alt.greater), .7262)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.twoSided), 0.9307, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.less), 0.6039, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup, + Alt.greater), 0.4654, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, + Alt.twoSided), 0.4286, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, + Alt.less), 0.2143, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup, + Alt.greater), 0.8355, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.twoSided), .6905, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.less), .3452, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup, + Alt.greater), .7262, 1e-2, 1e-2)); } private @@ -1397,13 +1397,13 @@ double wilcoxonRankSumPval(double w, ulong n1, ulong n2, Alt alt = Alt.twoSided, unittest { /* Values from R. I could only get good values for Alt.less directly. 
* Using W-values to test Alt.twoSided, Alt.greater indirectly.*/ - assert(approxEqual(wilcoxonRankSumPval(1200, 50, 50, Alt.less), .3670)); - assert(approxEqual(wilcoxonRankSumPval(1500, 50, 50, Alt.less), .957903)); - assert(approxEqual(wilcoxonRankSumPval(8500, 100, 200, Alt.less), .01704)); + assert(isClose(wilcoxonRankSumPval(1200, 50, 50, Alt.less), .3670, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSumPval(1500, 50, 50, Alt.less), .957903, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSumPval(8500, 100, 200, Alt.less), .01704, 1e-2, 1e-2)); auto w = wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup).testStat; - assert(approxEqual(wilcoxonRankSumPval(w, 5, 6), 0.9273)); - assert(approxEqual(wilcoxonRankSumPval(w, 5, 6, Alt.greater), 0.4636)); - assert(approxEqual(wilcoxonRankSumPval(w, 5, 6, Alt.less), 0.6079)); + assert(isClose(wilcoxonRankSumPval(w, 5, 6), 0.9273, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSumPval(w, 5, 6, Alt.greater), 0.4636, 1e-2, 1e-2)); + assert(isClose(wilcoxonRankSumPval(w, 5, 6, Alt.less), 0.6079, 1e-2, 1e-2)); // Monte carlo unit testing: Make sure that the exact and asymptotic // versions agree within a small epsilon; @@ -1523,18 +1523,18 @@ private double wilcoxRSPExact(uint W, uint n1, uint n2, Alt alt = Alt.twoSided) unittest { // Values from R. - assert(approxEqual(wilcoxRSPExact(14, 5, 6), 0.9307)); - assert(approxEqual(wilcoxRSPExact(14, 5, 6, Alt.less), 0.4654)); - assert(approxEqual(wilcoxRSPExact(14, 5, 6, Alt.greater), 0.6039)); - assert(approxEqual(wilcoxRSPExact(16, 6, 5), 0.9307)); - assert(approxEqual(wilcoxRSPExact(16, 6, 5, Alt.less), 0.6039)); - assert(approxEqual(wilcoxRSPExact(16, 6, 5, Alt.greater), 0.4654)); - assert(approxEqual(wilcoxRSPExact(66, 10, 35, Alt.less), 0.001053)); - assert(approxEqual(wilcoxRSPExact(78, 13, 6, Alt.less), 1)); + assert(isClose(wilcoxRSPExact(14, 5, 6), 0.9307, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(14, 5, 6, Alt.less), 0.4654, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(14, 5, 6, Alt.greater), 0.6039, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(16, 6, 5), 0.9307, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(16, 6, 5, Alt.less), 0.6039, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(16, 6, 5, Alt.greater), 0.4654, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(66, 10, 35, Alt.less), 0.001053, 1e-2, 1e-2)); + assert(isClose(wilcoxRSPExact(78, 13, 6, Alt.less), 1, 1e-2, 1e-2)); // Mostly to make sure that underflow doesn't happen until // the N's are truly unreasonable: - //assert(approxEqual(wilcoxRSPExact(6_000, 120, 120, Alt.less), 0.01276508)); + //assert(isClose(wilcoxRSPExact(6_000, 120, 120, Alt.less), 0.01276508, 1e-2, 1e-2)); } /**Computes a test statistic and P-value for a Wilcoxon signed rank test against @@ -1668,31 +1668,30 @@ is(typeof(before.front - after.front) : double)) { unittest { // Values from R. - alias approxEqual ae; assert(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup).testStat == 7.5); assert(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup).testStat == 6); assert(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup).testStat == 5); // With ties, normal approx. 
- assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup), 1)); - assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, map!"a"([2,7,1,8,2].dup)), 0.7865)); - assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup), 0.5879)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.less), 0.5562)); - assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.less), 0.3932)); - assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.less), 0.2940)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.greater), 0.5562)); - assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.greater), 0.706)); - assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.greater), 0.7918)); - assert(ae(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]).testStat, 6)); - assert(ae(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]), 0.1814)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup), 1, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([3,1,4,1,5].dup, map!"a"([2,7,1,8,2].dup)), 0.7865, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup), 0.5879, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.less), 0.5562, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.less), 0.3932, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.less), 0.2940, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.greater), 0.5562, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.greater), 0.706, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.greater), 0.7918, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]).testStat, 6, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]), 0.1814, 1e-2, 1e-2)); // Exact. - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup), 0.625)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.less), 0.3125)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.greater), 0.7812)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup), 0.8125)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.less), 0.6875)); - assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.greater), 0.4062)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup), 0.625, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.less), 0.3125, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.greater), 0.7812, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup), 0.8125, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.less), 0.6875, 1e-2, 1e-2)); + assert(isClose(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.greater), 0.4062, 1e-2, 1e-2)); // Monte carlo unit testing. Make sure exact, approx are really, // really close to each other. 
@@ -1721,8 +1720,8 @@ if(doubleInput!(T) && is(typeof(data.front - mu) : double)) { unittest { auto res = wilcoxonSignedRank([-8,-6,2,4,7].dup, 0); - assert(approxEqual(res.testStat, 7)); - assert(approxEqual(res.p, 1)); + assert(isClose(res.testStat, 7)); + assert(isClose(res.p, 1)); } private double wilcoxonSignedRankPval(double W, ulong N, Alt alt = Alt.twoSided, @@ -1842,12 +1841,12 @@ private double wilcoxSRPExact(uint W, uint N, Alt alt = Alt.twoSided) { unittest { // Values from R. - assert(approxEqual(wilcoxSRPExact(25, 10, Alt.less), 0.4229)); - assert(approxEqual(wilcoxSRPExact(25, 10, Alt.greater), 0.6152)); - assert(approxEqual(wilcoxSRPExact(25, 10, Alt.twoSided), 0.8457)); - assert(approxEqual(wilcoxSRPExact(31, 10, Alt.less), 0.6523)); - assert(approxEqual(wilcoxSRPExact(31, 10, Alt.greater), 0.3848)); - assert(approxEqual(wilcoxSRPExact(31, 10, Alt.twoSided), 0.7695)); + assert(isClose(wilcoxSRPExact(25, 10, Alt.less), 0.4229, 1e-2, 1e-2)); + assert(isClose(wilcoxSRPExact(25, 10, Alt.greater), 0.6152, 1e-2, 1e-2)); + assert(isClose(wilcoxSRPExact(25, 10, Alt.twoSided), 0.8457, 1e-2, 1e-2)); + assert(isClose(wilcoxSRPExact(31, 10, Alt.less), 0.6523, 1e-2, 1e-2)); + assert(isClose(wilcoxSRPExact(31, 10, Alt.greater), 0.3848, 1e-2, 1e-2)); + assert(isClose(wilcoxSRPExact(31, 10, Alt.twoSided), 0.7695, 1e-2, 1e-2)); } /**Sign test for differences between paired values. This is a very robust @@ -1902,16 +1901,15 @@ is(typeof(before.front < after.front) == bool)) { } unittest { - alias approxEqual ae; - assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup), 1)); - assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.less), 0.5)); - assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.greater), 0.875)); - assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.greater), 0.03125)); - assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.less), 1)); - assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup), 0.0625)); - - assert(approxEqual(signTest([1,2,6,7,9].dup, 2), 0.625)); - assert(ae(signTest([1,2,6,7,9].dup, 2).testStat, 0.75)); + assert(isClose(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup), 1, 1e-2, 1e-2)); + assert(isClose(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.less), 0.5, 1e-2, 1e-2)); + assert(isClose(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.greater), 0.875, 1e-2, 1e-2)); + assert(isClose(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.greater), 0.03125, 1e-2, 1e-2)); + assert(isClose(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.less), 1, 1e-2, 1e-2)); + assert(isClose(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup), 0.0625, 1e-2, 1e-2)); + + assert(isClose(signTest([1,2,6,7,9].dup, 2), 0.625, 1e-2, 1e-2)); + assert(isClose(signTest([1,2,6,7,9].dup, 2).testStat, 0.75, 1e-2, 1e-2)); } /**Similar to the overload, but allows testing for a difference between a @@ -1941,7 +1939,7 @@ double binomialTest(ulong k, ulong n, double p) { immutable mode = cast(long) ((n + 1) * p); if(k == mode || - approxEqual(binomialPMF(k, n, p), binomialPMF(mode, n, p), 1 - epsilon)) { + isClose(binomialPMF(k, n, p), binomialPMF(mode, n, p), 1 - epsilon, 1e-5)) { return 1; } else if(k > mode) { immutable double upperPart = binomialCDFR(k, n, p); @@ -2010,25 +2008,25 @@ double binomialTest(ulong k, ulong n, double p) { unittest { // Values from R. 
- assert(approxEqual(binomialTest(46, 96, 0.5), 0.759649)); - assert(approxEqual(binomialTest(44, 56, 0.5), 2.088e-5)); - assert(approxEqual(binomialTest(12, 56, 0.5), 2.088e-5)); - assert(approxEqual(binomialTest(0, 40, 0.25), 2.236e-5)); - assert(approxEqual(binomialTest(5, 16, 0.5), 0.2101)); - assert(approxEqual(binomialTest(0, 20, 0.4), 4.16e-5)); - assert(approxEqual(binomialTest(20, 20, 0.6), 4.16e-5)); - assert(approxEqual(binomialTest(6, 88, 0.1), 0.3784)); - assert(approxEqual(binomialTest(3, 4, 0.5), 0.625)); - assert(approxEqual(binomialTest(4, 7, 0.8), 0.1480)); - assert(approxEqual(binomialTest(3, 9, 0.8), 0.003066)); - assert(approxEqual(binomialTest(9, 9, 0.7), 0.06565)); - assert(approxEqual(binomialTest(2, 11, 0.1), 0.3026)); - assert(approxEqual(binomialTest(1, 11, 0.1), 1)); - assert(approxEqual(binomialTest(5, 11, 0.1), 0.002751)); - assert(approxEqual(binomialTest(5, 12, 0.5), 0.7744)); - assert(approxEqual(binomialTest(12, 12, 0.5), 0.0004883)); - assert(approxEqual(binomialTest(12, 13, 0.6), 0.02042)); - assert(approxEqual(binomialTest(0, 9, 0.1), 1)); + assert(isClose(binomialTest(46, 96, 0.5), 0.759649, 1e-2, 1e-2)); + assert(isClose(binomialTest(44, 56, 0.5), 2.088e-5, 1e-2, 1e-2)); + assert(isClose(binomialTest(12, 56, 0.5), 2.088e-5, 1e-2, 1e-2)); + assert(isClose(binomialTest(0, 40, 0.25), 2.236e-5, 1e-2, 1e-2)); + assert(isClose(binomialTest(5, 16, 0.5), 0.2101, 1e-2, 1e-2)); + assert(isClose(binomialTest(0, 20, 0.4), 4.16e-5, 1e-2, 1e-2)); + assert(isClose(binomialTest(20, 20, 0.6), 4.16e-5, 1e-2, 1e-2)); + assert(isClose(binomialTest(6, 88, 0.1), 0.3784, 1e-2, 1e-2)); + assert(isClose(binomialTest(3, 4, 0.5), 0.625, 1e-2, 1e-2)); + assert(isClose(binomialTest(4, 7, 0.8), 0.1480, 1e-2, 1e-2)); + assert(isClose(binomialTest(3, 9, 0.8), 0.003066, 1e-2, 1e-2)); + assert(isClose(binomialTest(9, 9, 0.7), 0.06565, 1e-2, 1e-2)); + assert(isClose(binomialTest(2, 11, 0.1), 0.3026, 1e-2, 1e-2)); + assert(isClose(binomialTest(1, 11, 0.1), 1, 1e-2, 1e-2)); + assert(isClose(binomialTest(5, 11, 0.1), 0.002751, 1e-2, 1e-2)); + assert(isClose(binomialTest(5, 12, 0.5), 0.7744, 1e-2, 1e-2)); + assert(isClose(binomialTest(12, 12, 0.5), 0.0004883, 1e-2, 1e-2)); + assert(isClose(binomialTest(12, 13, 0.6), 0.02042, 1e-2, 1e-2)); + assert(isClose(binomialTest(0, 9, 0.1), 1, 1e-2, 1e-2)); } ///For chiSquareFit and gTestFit, is expected value range counts or proportions? @@ -2068,8 +2066,8 @@ Examples: uint[] observed = [980, 1028, 1001, 964, 1102]; auto expected = repeat(1.0); auto res2 = chiSquareFit(observed, expected); -assert(approxEqual(res2, 0.0207)); -assert(approxEqual(res2.testStat, 11.59)); +assert(isClose(res2, 0.0207, 1e-2, 1e-2)); +assert(isClose(res2.testStat, 11.59, 1e-2, 1e-2)); --- * References: http://en.wikipedia.org/wiki/Pearson%27s_chi-square_test @@ -2088,14 +2086,14 @@ unittest { uint[] observed = [980, 1028, 1001, 964, 1102]; auto expected = repeat(cast(double) sum(observed) / observed.length); auto res = chiSquareFit(observed, expected, Expected.count); - assert(approxEqual(res, 0.0207)); - assert(approxEqual(res.testStat, 11.59)); + assert(isClose(res, 0.0207, 1e-2, 1e-2)); + assert(isClose(res.testStat, 11.59, 1e-2, 1e-2)); auto expected2 = [5.0, 5, 5, 5, 5, 0]; observed ~= 0; auto res2 = chiSquareFit(observed, expected2); - assert(approxEqual(res2, 0.0207)); - assert(approxEqual(res2.testStat, 11.59)); + assert(isClose(res2, 0.0207, 1e-2, 1e-2)); + assert(isClose(res2.testStat, 11.59, 1e-2, 1e-2)); } // Alias for old name, for backwards compatibility. 
Don't document it @@ -2280,7 +2278,7 @@ unittest { uint[] counts = [k, n - k]; double multino = multinomialTest(counts, ps); //writeln(k, "\t", n, "\t", p, "\t", bino, "\t", multino); - assert(approxEqual(bino, multino), + assert(isClose(bino, multino, 1e-2, 1e-2), text(bino, '\t', multino, '\t', k, '\t', n, '\t', p)); } } @@ -2344,25 +2342,25 @@ unittest { uint[][] table2 = [[60, 20, 10], [80, 50, 15], [70, 40, 11]]; - assert(approxEqual(chiSquareContingency(table1), 0.3449)); - assert(approxEqual(chiSquareContingency(table2), 0.3449)); - assert(approxEqual(chiSquareContingency(table1).testStat, 4.48)); + assert(isClose(chiSquareContingency(table1), 0.3449, 1e-2, 1e-2)); + assert(isClose(chiSquareContingency(table2), 0.3449, 1e-2, 1e-2)); + assert(isClose(chiSquareContingency(table1).testStat, 4.48, 1e-2, 1e-2)); // Test tuple version. auto p1 = chiSquareContingency(cast(uint[]) [31, 41, 59], cast(uint[]) [26, 53, 58], cast(uint[]) [97, 93, 93]); - assert(approxEqual(p1, 0.0059)); + assert(isClose(p1, 0.0059, 1e-2, 1e-2)); auto p2 = chiSquareContingency(cast(uint[]) [31, 26, 97], cast(uint[]) [41, 53, 93], cast(uint[]) [59, 58, 93]); - assert(approxEqual(p2, 0.0059)); + assert(isClose(p2, 0.0059, 1e-2, 1e-2)); uint[] drug1 = [1000, 2000, 1500]; uint[] drug2 = [1500, 3000, 2300]; uint[] placebo = [500, 1100, 750]; - assert(approxEqual(chiSquareContingency(drug1, drug2, placebo), 0.2397)); + assert(isClose(chiSquareContingency(drug1, drug2, placebo), 0.2397, 1e-2, 1e-2)); } // Alias for old name, for backwards compatibility. Don't document it @@ -2417,17 +2415,17 @@ unittest { uint[] withoutCHD = [268, 199, 42]; uint[] withCHD = [807, 759, 184]; auto res = gTestContingency(withoutCHD, withCHD); - assert(approxEqual(res.testStat, 7.3)); - assert(approxEqual(res.p, 0.026)); - assert(approxEqual(res.mutualInfo, 0.0023313)); + assert(isClose(res.testStat, 7.3, 1e-2, 1e-2)); + assert(isClose(res.p, 0.026, 1e-2, 1e-2)); + assert(isClose(res.mutualInfo, 0.0023313, 1e-2, 1e-2)); uint[] moringa = [127, 99, 264]; uint[] vicinus = [116, 67, 161]; auto res2 = gTestContingency(moringa, vicinus); - assert(approxEqual(res2.testStat, 6.23)); - assert(approxEqual(res2.p, 0.044)); - assert(approxEqual(res2.mutualInfo, 0.00538613)); + assert(isClose(res2.testStat, 6.23, 1e-2, 1e-2)); + assert(isClose(res2.p, 0.044, 1e-2, 1e-2)); + assert(isClose(res2.mutualInfo, 0.00538613, 1e-2, 1e-2)); } // Pearson and likelihood ratio code are pretty much the same. 
Factor out @@ -2603,14 +2601,14 @@ unittest { auto gRes = chiSquareContingency(cTable); auto miRes = chiSquareObs(obs1, obs2); foreach(ti, elem; miRes.tupleof) { - assert(approxEqual(elem, gRes.tupleof[ti])); + assert(isClose(elem, gRes.tupleof[ti], 1e-2, 1e-2)); } auto x = ["foo", "bar", "bar", "foo", "foo"]; auto y = ["xxx", "baz", "baz", "xxx", "baz"]; auto result = chiSquareObs(x, y); - assert(approxEqual(result.testStat, 2.22222222)); - assert(approxEqual(result.p, 0.136037)); + assert(isClose(result.testStat, 2.22222222, 1e-2, 1e-2)); + assert(isClose(result.p, 0.136037, 1e-2, 1e-2)); auto contingency = new uint[][](2, 2); foreach(i; 0..x.length) { @@ -2620,9 +2618,9 @@ unittest { } auto result2 = chiSquareContingency(contingency); - assert(approxEqual(result.testStat, result2.testStat), + assert(isClose(result.testStat, result2.testStat, 1e-2, 1e-2), text(result.testStat, ' ', result2.testStat)); - assert(approxEqual(result.p, result2.p)); + assert(isClose(result.p, result2.p, 1e-2, 1e-2)); } /** @@ -2677,15 +2675,15 @@ unittest { auto gRes = gTestContingency(cTable); auto miRes = gTestObs(obs1, obs2); foreach(ti, elem; miRes.tupleof) { - assert(approxEqual(elem, gRes.tupleof[ti])); + assert(isClose(elem, gRes.tupleof[ti], 1e-2, 1e-2)); } auto x = ["foo", "bar", "bar", "foo", "foo"]; auto y = ["xxx", "baz", "baz", "xxx", "baz"]; auto result = gTestObs(x, y); - assert(approxEqual(result.testStat, 2.91103)); - assert(approxEqual(result.p, 0.0879755)); - assert(approxEqual(result.mutualInfo, 0.419973)); + assert(isClose(result.testStat, 2.91103, 1e-2, 1e-2)); + assert(isClose(result.p, 0.0879755, 1e-2, 1e-2)); + assert(isClose(result.mutualInfo, 0.419973, 1e-2, 1e-2)); } package double toContingencyScore(T, U, Uint) @@ -2768,8 +2766,8 @@ alternative. Examples: --- double res = fisherExact([[2u, 7], [8, 2]], Alt.less); -assert(approxEqual(res.p, 0.01852)); // Odds ratio is very small in this case. -assert(approxEqual(res.testStat, 4.0 / 56.0)); +assert(isClose(res.p, 0.01852, 1e-2, 1e-2)); // Odds ratio is very small in this case. 
+assert(isClose(res.testStat, 4.0 / 56.0, 1e-2, 1e-2));
 ---

 References: http://en.wikipedia.org/wiki/Fisher%27s_Exact_Test
@@ -2817,7 +2815,7 @@ if(isIntegral!(T)) {
         immutable double pMode = hypergeometricPMF(mode, n1, n2, n);

         enum epsilon = 1 - 1e-5;
-        if(approxEqual(pExact, pMode, 1 - epsilon)) {
+        if (isClose(pExact, pMode, 1 - epsilon)) {
             return TestRes(oddsRatio, 1);
         } else if(c[0][0] < mode) {
             immutable double pLower = hypergeometricCDF(c[0][0], n1, n2, n);
@@ -2930,7 +2928,7 @@ unittest {
             cast(uint) ((cast(double) (n + 1) * (n1 + 1)) / (n1 + n2 + 2));
         immutable double pExact = hypergeometricPMF(c[0][0], n1, n2, n);
         immutable double pMode = hypergeometricPMF(mode, n1, n2, n);
-        if(approxEqual(pExact, pMode, 1e-7))
+        if (isClose(pExact, pMode, 1e-7))
             return 1;
         double sum = 0;
         foreach(i; 0..n + 1) {
@@ -2950,50 +2948,50 @@ unittest {
         c[1][1] = uniform(0U, 51U);
         double naiveAns = naive(c);
         double fastAns = fisherExact(c);
-        assert(approxEqual(naiveAns, fastAns), text(c, " - ", naiveAns, " - ", fastAns));
+        assert(isClose(naiveAns, fastAns), text(c, " - ", naiveAns, " - ", fastAns));
     }

     auto res = fisherExact([[19000, 80000], [20000, 90000]]);
-    assert(approxEqual(res.testStat, 1.068731));
-    assert(approxEqual(res, 3.319e-9));
+    assert(isClose(res.testStat, 1.068731, 1e-2, 1e-2));
+    assert(isClose(res, 3.319e-9, 1e-2, 1e-2));
     res = fisherExact([[18000, 80000], [20000, 90000]]);
-    assert(approxEqual(res, 0.2751));
+    assert(isClose(res, 0.2751, 1e-2, 1e-2));
     res = fisherExact([[14500, 20000], [30000, 40000]]);
-    assert(approxEqual(res, 0.01106));
+    assert(isClose(res, 0.01106, 1e-2, 1e-2));
     res = fisherExact([[100, 2], [1000, 5]]);
-    assert(approxEqual(res, 0.1301));
+    assert(isClose(res, 0.1301, 1e-2, 1e-2));
     res = fisherExact([[2, 7], [8, 2]]);
-    assert(approxEqual(res, 0.0230141));
+    assert(isClose(res, 0.0230141, 1e-2, 1e-2));
     res = fisherExact([[5, 1], [10, 10]]);
-    assert(approxEqual(res, 0.1973244));
+    assert(isClose(res, 0.1973244, 1e-2, 1e-2));
     res = fisherExact([[5, 15], [20, 20]]);
-    assert(approxEqual(res, 0.0958044));
+    assert(isClose(res, 0.0958044, 1e-2, 1e-2));
     res = fisherExact([[5, 16], [20, 25]]);
-    assert(approxEqual(res, 0.1725862));
+    assert(isClose(res, 0.1725862, 1e-2, 1e-2));
     res = fisherExact([[10, 5], [10, 1]]);
-    assert(approxEqual(res, 0.1973244));
+    assert(isClose(res, 0.1973244, 1e-2, 1e-2));
     res = fisherExact([[5, 0], [1, 4]]);
-    assert(approxEqual(res.p, 0.04761904));
+    assert(isClose(res.p, 0.04761904, 1e-2, 1e-2));
     res = fisherExact([[0, 1], [3, 2]]);
-    assert(approxEqual(res.p, 1.0));
+    assert(isClose(res.p, 1.0, 1e-2, 1e-2));
     res = fisherExact([[0, 2], [6, 4]]);
-    assert(approxEqual(res.p, 0.4545454545));
+    assert(isClose(res.p, 0.4545454545, 1e-2, 1e-2));
     res = fisherExact([[2, 7], [8, 2]], Alt.less);
-    assert(approxEqual(res, 0.01852));
+    assert(isClose(res, 0.01852, 1e-2, 1e-2));
     res = fisherExact([[5, 1], [10, 10]], Alt.less);
-    assert(approxEqual(res, 0.9783));
+    assert(isClose(res, 0.9783, 1e-2, 1e-2));
     res = fisherExact([[5, 15], [20, 20]], Alt.less);
-    assert(approxEqual(res, 0.05626));
+    assert(isClose(res, 0.05626, 1e-2, 1e-2));
     res = fisherExact([[5, 16], [20, 25]], Alt.less);
-    assert(approxEqual(res, 0.08914));
+    assert(isClose(res, 0.08914, 1e-2, 1e-2));
     res = fisherExact([[2, 7], [8, 2]], Alt.greater);
-    assert(approxEqual(res, 0.999));
+    assert(isClose(res, 0.999, 1e-2, 1e-2));
     res = fisherExact([[5, 1], [10, 10]], Alt.greater);
-    assert(approxEqual(res, 0.1652));
+    assert(isClose(res, 0.1652, 1e-2, 1e-2));
     res = fisherExact([[5, 15], [20, 20]], Alt.greater);
-    assert(approxEqual(res, 0.985));
+    assert(isClose(res, 0.985, 1e-2, 1e-2));
     res = fisherExact([[5, 16], [20, 25]], Alt.greater);
-    assert(approxEqual(res, 0.9723));
+    assert(isClose(res, 0.9723, 1e-2, 1e-2));
 }

 /**
@@ -3018,17 +3016,17 @@ if(doubleInput!(T) && doubleInput!(U)) {
 }

 unittest {
-    assert(approxEqual(ksTest([1,2,3,4,5], [1,2,3,4,5]).testStat, 0));
-    assert(approxEqual(ksTestDestructive([1,2,3,4,5], [1,2,2,3,5]).testStat, -.2));
-    assert(approxEqual(ksTest([-1,0,2,8, 6], [1,2,2,3,5]).testStat, .4));
-    assert(approxEqual(ksTest([1,2,3,4,5], [1,2,2,3,5,7,8]).testStat, .2857));
-    assert(approxEqual(ksTestDestructive([1, 2, 3, 4, 4, 4, 5],
-        [1, 2, 3, 4, 5, 5, 5]).testStat, .2857));
-
-    assert(approxEqual(ksTest([1, 2, 3, 4, 4, 4, 5], [1, 2, 3, 4, 5, 5, 5]),
-        .9375));
-    assert(approxEqual(ksTestDestructive([1, 2, 3, 4, 4, 4, 5],
-        [1, 2, 3, 4, 5, 5, 5]), .9375));
+    assert(isClose(ksTest([1,2,3,4,5], [1,2,3,4,5]).testStat, 0));
+    assert(isClose(ksTestDestructive([1,2,3,4,5], [1,2,2,3,5]).testStat, -.2));
+    assert(isClose(ksTest([-1,0,2,8, 6], [1,2,2,3,5]).testStat, .4));
+    assert(isClose(ksTest([1,2,3,4,5], [1,2,2,3,5,7,8]).testStat, .2857, 1e-2, 1e-2));
+    assert(isClose(ksTestDestructive([1, 2, 3, 4, 4, 4, 5],
+        [1, 2, 3, 4, 5, 5, 5]).testStat, .2857, 1e-2, 1e-2));
+
+    assert(isClose(ksTest([1, 2, 3, 4, 4, 4, 5], [1, 2, 3, 4, 5, 5, 5]),
+        .9375, 1e-2, 1e-2));
+    assert(isClose(ksTestDestructive([1, 2, 3, 4, 4, 4, 5],
+        [1, 2, 3, 4, 5, 5, 5]), .9375, 1e-2, 1e-2));
 }

 template isArrayLike(T) {
@@ -3065,15 +3063,15 @@ if(doubleInput!(T) && is(ReturnType!(Func) : double)) {

 unittest {
     auto stdNormal = paramFunctor!(normalCDF)(0.0, 1.0);
-    assert(approxEqual(ksTest([1,2,3,4,5].dup, stdNormal).testStat, -.8413));
-    assert(approxEqual(ksTestDestructive([-1,0,2,8, 6].dup, stdNormal).testStat, -.5772));
+    assert(isClose(ksTest([1,2,3,4,5].dup, stdNormal).testStat, -.8413, 1e-2, 1e-2));
+    assert(isClose(ksTestDestructive([-1,0,2,8, 6].dup, stdNormal).testStat, -.5772, 1e-2, 1e-2));

     auto lotsOfTies = [5,1,2,2,2,2,2,2,3,4].dup;
-    assert(approxEqual(ksTest(lotsOfTies, stdNormal).testStat, -0.8772));
+    assert(isClose(ksTest(lotsOfTies, stdNormal).testStat, -0.8772, 1e-2, 1e-2));

-    assert(approxEqual(ksTest([0,1,2,3,4].dup, stdNormal), .03271));
+    assert(isClose(ksTest([0,1,2,3,4].dup, stdNormal), .03271, 1e-2, 1e-2));

     auto uniform01 = parametrize!(uniformCDF)(0, 1);
-    assert(approxEqual(ksTestDestructive([0.1, 0.3, 0.5, 0.9, 1].dup, uniform01), 0.7591));
+    assert(isClose(ksTestDestructive([0.1, 0.3, 0.5, 0.9, 1].dup, uniform01), 0.7591, 1e-2, 1e-2));
 }
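One subtlety from the fisherExact hunks further up: a call such as approxEqual(pExact, pMode, 1e-7) still fell back on approxEqual's default absolute tolerance of 1e-5, whereas isClose(pExact, pMode, 1e-7) leaves the absolute tolerance at zero, so the converted mode comparison is slightly stricter for very small PMF values. A minimal sketch of that difference (illustrative only, not part of the diff):

---
import std.math : isClose;

void main()
{
    // With only a relative tolerance supplied, isClose has no absolute
    // slack, so two tiny probabilities that differ by more than 1e-7
    // relatively compare unequal ...
    assert(!isClose(2e-6, 1e-6, 1e-7));
    // ... unless the old absolute tolerance is passed explicitly as well.
    assert(isClose(2e-6, 1e-6, 1e-7, 1e-5));
}
---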
@@ -3195,9 +3193,9 @@ unittest {
     // hard-coded as the equivalent to positive(). The median of this data
     // is 0.5, so everything works.
     immutable int[] data = [1,0,0,0,1,1,0,0,1,0,1,0,1,0,1,1,1,0,0,1].idup;
-    assert(approxEqual(runsTest(data), 0.3581));
-    assert(approxEqual(runsTest(data, Alt.less), 0.821));
-    assert(approxEqual(runsTest(data, Alt.greater), 0.1791));
+    assert(isClose(runsTest(data), 0.3581, 1e-2, 1e-2));
+    assert(isClose(runsTest(data, Alt.less), 0.821, 1e-2, 1e-2));
+    assert(isClose(runsTest(data, Alt.greater), 0.1791, 1e-2, 1e-2));
 }

 /**
@@ -3309,9 +3307,9 @@ ConfInt pearsonCorTest()(
     double confLevel = 0.95
 ) {
     dstatsEnforce(N >= 0, "N must be >= 0 for pearsonCorTest.");
-    dstatsEnforce(cor > -1.0 || approxEqual(cor, -1.0),
+    dstatsEnforce(cor > -1.0 || isClose(cor, -1.0, 1e-2, 1e-2),
         "Correlation must be between 0, 1.");
-    dstatsEnforce(cor < 1.0 || approxEqual(cor, 1.0),
+    dstatsEnforce(cor < 1.0 || isClose(cor, 1.0, 1e-2, 1e-2),
         "Correlation must be between 0, 1.");
     enforceConfidence(confLevel);
@@ -3391,21 +3389,21 @@ unittest {
     auto t2 = pearsonCorTest([1,2,3,4,5].dup, [2,1,4,3,5].dup, Alt.less);
     auto t3 = pearsonCorTest([1,2,3,4,5].dup, [2,1,4,3,5].dup, Alt.greater);

-    assert(approxEqual(t1.testStat, 0.8));
-    assert(approxEqual(t2.testStat, 0.8));
-    assert(approxEqual(t3.testStat, 0.8));
+    assert(isClose(t1.testStat, 0.8));
+    assert(isClose(t2.testStat, 0.8));
+    assert(isClose(t3.testStat, 0.8));

-    assert(approxEqual(t1.p, 0.1041));
-    assert(approxEqual(t2.p, 0.948));
-    assert(approxEqual(t3.p, 0.05204));
+    assert(isClose(t1.p, 0.1041, 1e-2, 1e-2));
+    assert(isClose(t2.p, 0.948, 1e-2, 1e-2));
+    assert(isClose(t3.p, 0.05204, 1e-2, 1e-2));

-    assert(approxEqual(t1.lowerBound, -0.2796400));
-    assert(approxEqual(t3.lowerBound, -0.06438567));
-    assert(approxEqual(t2.lowerBound, -1));
+    assert(isClose(t1.lowerBound, -0.2796400, 1e-2, 1e-2));
+    assert(isClose(t3.lowerBound, -0.06438567, 1e-2, 1e-2));
+    assert(isClose(t2.lowerBound, -1, 1e-2, 1e-2));

-    assert(approxEqual(t1.upperBound, 0.9861962));
-    assert(approxEqual(t2.upperBound, 0.9785289));
-    assert(approxEqual(t3.upperBound, 1));
+    assert(isClose(t1.upperBound, 0.9861962, 1e-2, 1e-2));
+    assert(isClose(t2.upperBound, 0.9785289, 1e-2, 1e-2));
+    assert(isClose(t3.upperBound, 1, 1e-2, 1e-2));

     // Test special case hack for cor = +- 1.
     uint[] myArr = [1,2,3,4,5];
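In the hunk below, the comparisons against the literals 0 and 1 intentionally keep isClose's defaults: the cor = +- 1 special case returns those p-values exactly, and with no absolute tolerance a comparison against a literal 0 only succeeds on exact equality. A standalone illustration using plain doubles (illustrative only, not part of the diff):

---
import std.math : isClose;

void main()
{
    // Exact equality short-circuits, so comparing 0.0 against 0.0 is fine ...
    assert(isClose(0.0, 0.0));
    // ... but a value that is merely "almost zero" is not close to 0.0 under
    // the defaults, because the relative test scales with the magnitudes.
    assert(!isClose(double.epsilon, 0.0));
    // An explicit absolute tolerance is what accepts "practically zero".
    assert(isClose(double.epsilon, 0.0, 1e-9, 1e-9));
}
---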
@@ -3415,18 +3413,18 @@ unittest {
     auto t4 = pearsonCorTest(myArr, myArr, Alt.twoSided);
     auto t5 = pearsonCorTest(myArr, myArr, Alt.less);
     auto t6 = pearsonCorTest(myArr, myArr, Alt.greater);
-    assert(approxEqual(t4.testStat, 1));
-    assert(approxEqual(t4.p, 0));
-    assert(approxEqual(t5.p, 1));
-    assert(approxEqual(t6.p, 0));
+    assert(isClose(t4.testStat, 1));
+    assert(isClose(t4.p, 0));
+    assert(isClose(t5.p, 1));
+    assert(isClose(t6.p, 0));

     auto t7 = pearsonCorTest(myArr, myArrReverse, Alt.twoSided);
     auto t8 = pearsonCorTest(myArr, myArrReverse, Alt.less);
     auto t9 = pearsonCorTest(myArr, myArrReverse, Alt.greater);
-    assert(approxEqual(t7.testStat, -1));
-    assert(approxEqual(t7.p, 0));
-    assert(approxEqual(t8.p, 0));
-    assert(approxEqual(t9.p, 1));
+    assert(isClose(t7.testStat, -1));
+    assert(isClose(t7.p, 0));
+    assert(isClose(t8.p, 0));
+    assert(isClose(t9.p, 1));
 }

 /**
@@ -3467,13 +3465,13 @@ unittest {
     auto t2 = spearmanCorTest(arr1, arr2, Alt.less);
     auto t3 = spearmanCorTest(arr1, arr2, Alt.greater);

-    assert(approxEqual(t1.testStat, -0.1769406));
-    assert(approxEqual(t2.testStat, -0.1769406));
-    assert(approxEqual(t3.testStat, -0.1769406));
+    assert(isClose(t1.testStat, -0.1769406, 1e-2, 1e-2));
+    assert(isClose(t2.testStat, -0.1769406, 1e-2, 1e-2));
+    assert(isClose(t3.testStat, -0.1769406, 1e-2, 1e-2));

-    assert(approxEqual(t1.p, 0.4429));
-    assert(approxEqual(t3.p, 0.7785));
-    assert(approxEqual(t2.p, 0.2215));
+    assert(isClose(t1.p, 0.4429, 1e-2, 1e-2));
+    assert(isClose(t3.p, 0.7785, 1e-2, 1e-2));
+    assert(isClose(t2.p, 0.2215, 1e-2, 1e-2));
 }

 /**
@@ -3672,46 +3670,46 @@ unittest {
     auto t2 = kendallCorTest(arr1, arr2, Alt.less);
     auto t3 = kendallCorTest(arr1, arr2, Alt.greater);

-    assert(approxEqual(t1.testStat, -.1448010));
-    assert(approxEqual(t2.testStat, -.1448010));
-    assert(approxEqual(t3.testStat, -.1448010));
+    assert(isClose(t1.testStat, -.1448010, 1e-2, 1e-2));
+    assert(isClose(t2.testStat, -.1448010, 1e-2, 1e-2));
+    assert(isClose(t3.testStat, -.1448010, 1e-2, 1e-2));

-    assert(approxEqual(t1.p, 0.3923745));
-    //assert(approxEqual(t3.p, 0.8038127));
-    assert(approxEqual(t2.p, 0.1961873));
+    assert(isClose(t1.p, 0.3923745, 1e-2, 1e-2));
+    //assert(isClose(t3.p, 0.8038127, 1e-2, 1e-2));
+    assert(isClose(t2.p, 0.1961873, 1e-2, 1e-2));

     // Now, test the case of ties in both arrays.
     arr1 = [1,1,1,2,2,3,4,5,5,6];
     arr2 = [1,1,2,3,4,5,5,5,5,6];
-    assert(approxEqual(kendallCorTest(arr1, arr2, Alt.twoSided).p, 0.001216776));
-    //assert(approxEqual(kendallCorTest(arr1, arr2, Alt.less).p, 0.9993916));
-    assert(approxEqual(kendallCorTest(arr1, arr2, Alt.greater).p, 0.0006083881));
+    assert(isClose(kendallCorTest(arr1, arr2, Alt.twoSided).p, 0.001216776, 1e-2, 1e-2));
+    //assert(isClose(kendallCorTest(arr1, arr2, Alt.less).p, 0.9993916, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(arr1, arr2, Alt.greater).p, 0.0006083881, 1e-2, 1e-2));

     arr1 = [1,1,1,2,2,2,3,3,3,4,4,4,5,5,5];
     arr2 = [1,1,1,3,3,3,2,2,2,5,5,5,4,4,4];
-    assert(approxEqual(kendallCorTest(arr1, arr2).p, 0.006123));
-    assert(approxEqual(kendallCorTest(assumeSorted(arr1), arr2).p, 0.006123));
+    assert(isClose(kendallCorTest(arr1, arr2).p, 0.006123, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(assumeSorted(arr1), arr2).p, 0.006123, 1e-2, 1e-2));

     // Test the exact stuff. Still using values from R.
     uint[] foo = [1,2,3,4,5];
     uint[] bar = [1,2,3,5,4];
     uint[] baz = [5,3,1,2,4];

-    assert(approxEqual(kendallCorTest(foo, foo).p, 0.01666666));
-    assert(approxEqual(kendallCorTest(foo, foo, Alt.greater).p, 0.008333333));
-    assert(approxEqual(kendallCorTest(foo, foo, Alt.less).p, 1));
+    assert(isClose(kendallCorTest(foo, foo).p, 0.01666666, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, foo, Alt.greater).p, 0.008333333, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, foo, Alt.less).p, 1, 1e-2, 1e-2));

-    assert(approxEqual(kendallCorTest(foo, bar).p, 0.083333333));
-    assert(approxEqual(kendallCorTest(foo, bar, Alt.greater).p, 0.041666667));
-    assert(approxEqual(kendallCorTest(foo, bar, Alt.less).p, 0.9917));
+    assert(isClose(kendallCorTest(foo, bar).p, 0.083333333, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, bar, Alt.greater).p, 0.041666667, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, bar, Alt.less).p, 0.9917, 1e-2, 1e-2));

-    assert(approxEqual(kendallCorTest(foo, baz).p, 0.8167));
-    assert(approxEqual(kendallCorTest(foo, baz, Alt.greater).p, 0.7583));
-    assert(approxEqual(kendallCorTest(foo, baz, Alt.less).p, .4083));
+    assert(isClose(kendallCorTest(foo, baz).p, 0.8167, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, baz, Alt.greater).p, 0.7583, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(foo, baz, Alt.less).p, .4083, 1e-2, 1e-2));

-    assert(approxEqual(kendallCorTest(bar, baz).p, 0.4833));
-    assert(approxEqual(kendallCorTest(bar, baz, Alt.greater).p, 0.8833));
-    assert(approxEqual(kendallCorTest(bar, baz, Alt.less).p, 0.2417));
+    assert(isClose(kendallCorTest(bar, baz).p, 0.4833, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(bar, baz, Alt.greater).p, 0.8833, 1e-2, 1e-2));
+    assert(isClose(kendallCorTest(bar, baz, Alt.less).p, 0.2417, 1e-2, 1e-2));

     // A little monte carlo unittesting. For large ranges, the deviation
     // between the exact and approximate version should be extremely small.
@@ -3821,11 +3819,11 @@ unittest {
     auto r1 = dAgostinoK(arr1);
     auto r2 = dAgostinoK(arr2);

-    assert(approxEqual(r1.testStat, 3.1368));
-    assert(approxEqual(r1.p, 0.2084));
+    assert(isClose(r1.testStat, 3.1368, 1e-2, 1e-2));
+    assert(isClose(r1.p, 0.2084, 1e-2, 1e-2));

-    assert(approxEqual(r2.testStat, 1.1816));
-    assert(approxEqual(r2.p, 0.5539));
+    assert(isClose(r2.testStat, 1.1816, 1e-2, 1e-2));
+    assert(isClose(r2.p, 0.5539, 1e-2, 1e-2));
 }

 /**Fisher's method of meta-analyzing a set of P-values to determine whether
@@ -3859,11 +3857,11 @@ unittest {
     // First, basic sanity check. Make sure w/ one P-value, we get back that
     // P-value.
     for(double p = 0.01; p < 1; p += 0.01) {
-        assert(approxEqual(fishersMethod([p].dup).p, p));
+        assert(isClose(fishersMethod([p].dup).p, p));
     }

     float[] ps = [0.739, 0.0717, 0.01932, 0.03809];
     auto res = fishersMethod(ps);
-    assert(approxEqual(res.testStat, 20.31));
+    assert(isClose(res.testStat, 20.31, 1e-2, 1e-2));
     assert(res.p < 0.01);
 }
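The next hunk also drops the local "alias approxEqual ae;" shorthand and writes every call out in full. If the longer lines ever become a nuisance, the same aliasing trick works for the replacement as well; a hypothetical sketch, not something this patch does:

---
import std.math : isClose;

// Hypothetical local shorthand, mirroring the old "alias approxEqual ae;".
alias ic = isClose;

unittest {
    // Same semantics as calling isClose directly.
    assert(ic(0.035, 0.0349, 1e-2, 1e-2));
}
---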
@@ -3941,39 +3939,38 @@ unittest {
     // Comparing results to R.
     auto pVals = [.90, .01, .03, .03, .70, .60, .01].dup;
     auto qVals = falseDiscoveryRate(pVals);
-    alias approxEqual ae;
-    assert(ae(qVals[0], .9));
-    assert(ae(qVals[1], .035));
-    assert(ae(qVals[2], .052));
-    assert(ae(qVals[3], .052));
-    assert(ae(qVals[4], .816666666667));
-    assert(ae(qVals[5], .816666666667));
-    assert(ae(qVals[6], .035));
+    assert(isClose(qVals[0], .9, 1e-2, 1e-2));
+    assert(isClose(qVals[1], .035, 1e-2, 1e-2));
+    assert(isClose(qVals[2], .052, 1e-2, 1e-2));
+    assert(isClose(qVals[3], .052, 1e-2, 1e-2));
+    assert(isClose(qVals[4], .816666666667, 1e-2, 1e-2));
+    assert(isClose(qVals[5], .816666666667, 1e-2, 1e-2));
+    assert(isClose(qVals[6], .035));

     auto p2 = [.1, .02, .6, .43, .001].dup;
     auto q2 = falseDiscoveryRate(p2);
-    assert(ae(q2[0], .16666666));
-    assert(ae(q2[1], .05));
-    assert(ae(q2[2], .6));
-    assert(ae(q2[3], .5375));
-    assert(ae(q2[4], .005));
+    assert(isClose(q2[0], .16666666, 1e-2, 1e-2));
+    assert(isClose(q2[1], .05, 1e-2, 1e-2));
+    assert(isClose(q2[2], .6, 1e-2, 1e-2));
+    assert(isClose(q2[3], .5375, 1e-2, 1e-2));
+    assert(isClose(q2[4], .005, 1e-2, 1e-2));

     // Dependent case.
     qVals = falseDiscoveryRate(pVals, Dependency.yes);
-    assert(ae(qVals[0], 1));
-    assert(ae(qVals[1], .09075));
-    assert(ae(qVals[2], .136125));
-    assert(ae(qVals[3], .136125));
-    assert(ae(qVals[4], 1));
-    assert(ae(qVals[5], 1));
-    assert(ae(qVals[6], .09075));
+    assert(isClose(qVals[0], 1, 1e-2, 1e-2));
+    assert(isClose(qVals[1], .09075, 1e-2, 1e-2));
+    assert(isClose(qVals[2], .136125, 1e-2, 1e-2));
+    assert(isClose(qVals[3], .136125, 1e-2, 1e-2));
+    assert(isClose(qVals[4], 1, 1e-2, 1e-2));
+    assert(isClose(qVals[5], 1, 1e-2, 1e-2));
+    assert(isClose(qVals[6], .09075, 1e-2, 1e-2));

     q2 = falseDiscoveryRate(p2, Dependency.yes);
-    assert(ae(q2[0], .38055555));
-    assert(ae(q2[1], .1141667));
-    assert(ae(q2[2], 1));
-    assert(ae(q2[3], 1));
-    assert(ae(q2[4], .01141667));
+    assert(isClose(q2[0], .38055555));
+    assert(isClose(q2[1], .1141667));
+    assert(isClose(q2[2], 1));
+    assert(isClose(q2[3], 1));
+    assert(isClose(q2[4], .01141667));
 }

 /**Uses the Hochberg procedure to control the familywise error rate assuming
@@ -4019,20 +4016,19 @@ if(doubleInput!(T)) {
 }

 unittest {
-    alias approxEqual ae;
     auto q = hochberg([0.01, 0.02, 0.025, 0.9].dup);
-    assert(ae(q[0], 0.04));
-    assert(ae(q[1], 0.05));
-    assert(ae(q[2], 0.05));
-    assert(ae(q[3], 0.9));
+    assert(isClose(q[0], 0.04));
+    assert(isClose(q[1], 0.05));
+    assert(isClose(q[2], 0.05));
+    assert(isClose(q[3], 0.9));

     auto p2 = [.1, .02, .6, .43, .001].dup;
     auto q2 = hochberg(p2);
-    assert(ae(q2[0], .3));
-    assert(ae(q2[1], .08));
-    assert(ae(q2[2], .6));
-    assert(ae(q2[3], .6));
-    assert(ae(q2[4], .005));
+    assert(isClose(q2[0], .3));
+    assert(isClose(q2[1], .08));
+    assert(isClose(q2[2], .6));
+    assert(isClose(q2[3], .6));
+    assert(isClose(q2[4], .005));
 }

 /**Uses the Holm-Bonferroni method to adjust a set of P-values in a way that
@@ -4082,12 +4078,11 @@ if(doubleInput!(T)) {
 unittest {
     // Values from R.
     auto ps = holmBonferroni([0.001, 0.2, 0.3, 0.4, 0.7].dup);
-    alias approxEqual ae;
-    assert(ae(ps[0], 0.005));
-    assert(ae(ps[1], 0.8));
-    assert(ae(ps[2], 0.9));
-    assert(ae(ps[3], 0.9));
-    assert(ae(ps[4], 0.9));
+    assert(isClose(ps[0], 0.005));
+    assert(isClose(ps[1], 0.8));
+    assert(isClose(ps[2], 0.9));
+    assert(isClose(ps[3], 0.9));
+    assert(isClose(ps[4], 0.9));

     ps = holmBonferroni([0.3, 0.1, 0.4, 0.1, 0.5, 0.9].dup);
     assert(ps == [1f, 0.6f, 1f, 0.6f, 1f, 1f]);