
Commit 7ef60ee

Merge branch 'main' of https://github.com/DHI/fmskill into main
2 parents 5730a0b + 1fd1c30 commit 7ef60ee

5 files changed: +115 -81 lines changed

README.md

+1-1
@@ -39,7 +39,7 @@ Or the development version:
 * [NetCDF_ModelResult.ipynb](https://nbviewer.jupyter.org/github/DHI/fmskill/blob/main/notebooks/NetCDF_ModelResult.ipynb)
 * [Combine_comparers.ipynb](https://nbviewer.jupyter.org/github/DHI/fmskill/blob/main/notebooks/Combine_comparers.ipynb)
 * [DMI_observations.ipynb](https://nbviewer.jupyter.org/github/DHI/fmskill/blob/main/notebooks/DMI_observations.ipynb) (download data from REST api)
-* [Altimetry_data.ipynb](https://nbviewer.jupyter.org/github/DHI/fmskill/blob/main/notebooks/Altimetry_data.ipynb) (download data from altimetry api)
+* [Altimetry_data.ipynb](https://nbviewer.jupyter.org/github/DHI/fmskill/blob/main/notebooks/Altimetry_data.ipynb) (download data from altimetry api)
 
 ## Workflow
 
docs/api.rst

+1
@@ -111,6 +111,7 @@ Metrics
    fmskill.metrics.rho
    fmskill.metrics.r2
    fmskill.metrics.lin_slope
+   fmskill.metrics.willmott
    fmskill.metrics.hit_ratio
 
 .. automodule:: fmskill.metrics

fmskill/metrics.py

+68-18
@@ -45,6 +45,8 @@
 0.637783218973691
 >>> lin_slope(obs, mod)
 0.4724896836313617
+>>> willmott(obs, mod)
+0.7484604452865941
 >>> hit_ratio(obs, mod, a=0.5)
 0.6666666666666666
 """
@@ -59,13 +61,26 @@ def bias(obs, model) -> float:
     .. math::
         bias=\\frac{1}{n}\\sum_{i=1}^n (model_i - obs_i)
 
-    Range: -infinity to infinity; Best: 0.0
+    Range: :math:`(-\\infty, \\infty)`; Best: 0
     """
 
     assert obs.size == model.size
     return np.mean(model.ravel() - obs.ravel())
 
 
+def max_error(obs, model) -> float:
+    """Max (absolute) error
+
+    .. math::
+        max_error = max(|model_i - obs_i|)
+
+    Range: :math:`[0, \\infty)`; Best: 0
+    """
+
+    assert obs.size == model.size
+    return np.max(np.abs(model.ravel() - obs.ravel()))
+
+
 def mae(obs: np.ndarray, model: np.ndarray, weights: np.ndarray = None) -> float:
     """alias for mean_absolute_error"""
     assert obs.size == model.size
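A minimal usage sketch of the metric added in this hunk, assuming the branch above is installed; the obs/mod arrays are made up for illustration:

    import numpy as np
    from fmskill.metrics import bias, max_error

    obs = np.array([1.0, 1.1, 1.2, 1.3])      # hypothetical observations
    mod = np.array([1.05, 1.12, 1.15, 1.40])  # hypothetical model values

    print(bias(obs, mod))       # mean of (model - obs), here ~0.03
    print(max_error(obs, mod))  # largest absolute deviation, here ~0.10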
@@ -80,7 +95,7 @@ def mean_absolute_error(
     .. math::
         MAE=\\frac{1}{n}\\sum_{i=1}^n|model_i - obs_i|
 
-    Range: 0.0 to infinity; Best: 0.0
+    Range: :math:`[0, \\infty)`; Best: 0
     """
     assert obs.size == model.size
 
@@ -100,7 +115,7 @@ def mean_absolute_percentage_error(obs: np.ndarray, model: np.ndarray) -> float:
     .. math::
         MAPE=\\frac{1}{n}\\sum_{i=1}^n\\frac{|model_i - obs_i|}{obs_i}*100
 
-    Range: 0.0 to infinity; Best: 0.0
+    Range: :math:`[0, \\infty)`; Best: 0
     """
 
     assert obs.size == model.size
@@ -125,7 +140,7 @@ def urmse(obs: np.ndarray, model: np.ndarray, weights: np.ndarray = None) -> flo
 
         uRMSE = \\sqrt{\\frac{1}{n} \\sum_{i=1}^n res_{u,i}^2}
 
-    Range: 0.0 to infinity; Best: 0.0
+    Range: :math:`[0, \\infty)`; Best: 0
 
     See Also
     --------
@@ -165,7 +180,7 @@ def root_mean_squared_error(
 
         uRMSE=\\sqrt{\\frac{1}{n} \\sum_{i=1}^n res_{u,i}^2}
 
-    Range: 0.0 to infinity; Best: 0.0
+    Range: :math:`[0, \\infty)`; Best: 0
 
     """
     assert obs.size == model.size
@@ -191,7 +206,7 @@ def nash_sutcliffe_efficiency(obs: np.ndarray, model: np.ndarray) -> float:
         NSE = 1 - \\frac {\\sum _{i=1}^{n}\\left(model_{i} - obs_{i}\\right)^{2}}
                          {\\sum_{i=1}^{n}\\left(obs_{i} - {\\overline{obs}}\\right)^{2}}
 
-    Range: -infinity to 1.0; Best: 1.0
+    Range: :math:`(-\\infty, 1]`; Best: 1
 
     Note
     ----
@@ -214,16 +229,16 @@ def nash_sutcliffe_efficiency(obs: np.ndarray, model: np.ndarray) -> float:
 
 
 def r2(obs: np.ndarray, model: np.ndarray) -> float:
-    """Coefficient of determination (R2) - pronounced 'R-squared'
+    """Coefficient of determination (R2)
 
-    The proportion of the variation in the dependent variable that is predictable from the independent variable(s), e.g. the proportion of explained variance.
+    Pronounced 'R-squared'; the proportion of the variation in the dependent variable that is predictable from the independent variable(s), i.e. the proportion of explained variance.
 
     .. math::
 
         R^2 = 1 - \\frac{\\sum_{i=1}^n (model_i - obs_i)^2}
                         {\\sum_{i=1}^n (obs_i - \\overline {obs})^2}
 
-    Range: -infinity to 1.0; Best: 1.0
+    Range: :math:`(-\\infty, 1]`; Best: 1
 
     Note
     ----
@@ -262,7 +277,7 @@ def model_efficiency_factor(obs: np.ndarray, model: np.ndarray) -> float:
         MEF = \\frac{RMSE}{STDEV}=\\frac{\\sqrt{\\frac{1}{n} \\sum_{i=1}^n(model_i - obs_i)^2}}
                                         {\\sqrt{\\frac{1}{n} \\sum_{i=1}^n(obs_i - \\overline{obs})^2}}=\\sqrt{1-NSE}
 
-    Range: 0.0 to infinity; Best: 0.0
+    Range: :math:`[0, \\infty)`; Best: 0
 
     See Also
     --------
@@ -288,11 +303,11 @@ def corrcoef(obs, model, weights=None) -> float:
             {\\sqrt{\\sum_{i=1}^n (model_i - \\overline{model})^2}
              \\sqrt{\\sum_{i=1}^n (obs_i - \\overline{obs})^2} }
 
-    Range: -1.0 to 1.0; Best: 1.0
+    Range: [-1, 1]; Best: 1
 
     See Also
     --------
-    numpy.corrcoef
+    np.corrcoef
     """
     assert obs.size == model.size
     if len(obs) <= 1:
@@ -321,7 +336,7 @@ def spearmanr(obs: np.ndarray, model: np.ndarray) -> float:
             {\\sqrt{\\sum_{i=1}^n (rmodel_i - \\overline{rmodel})^2}
              \\sqrt{\\sum_{i=1}^n (robs_i - \\overline{robs})^2} }
 
-    Range: -1.0 to 1.0; Best: 1.0
+    Range: [-1, 1]; Best: 1
 
     Examples
     --------
@@ -353,7 +368,7 @@ def scatter_index(obs: np.ndarray, model: np.ndarray) -> float:
         \\sqrt {\\frac{\\sum_{i=1}^n \\left( (model_i - \\overline {model}) - (obs_i - \\overline {obs}) \\right)^2}
                       {\\sum_{i=1}^n obs_i^2}}
 
-    Range: 0.0 to 100.0; Best: 0.0
+    Range: [0, 100]; Best: 0
     """
     assert obs.size == model.size
     if len(obs) == 0:
@@ -365,19 +380,54 @@ def scatter_index(obs: np.ndarray, model: np.ndarray) -> float:
     )
 
 
+def willmott(obs: np.ndarray, model: np.ndarray) -> float:
+    """Willmott's Index of Agreement
+
+    A scaled representation of the predictive accuracy of the model against observations. A value of 1 indicates a perfect match, and 0 indicates no agreement at all.
+
+    .. math::
+
+        willmott = 1 - \\frac{\\frac{1}{n} \\sum_{i=1}^n(model_i - obs_i)^2}
+                             {\\frac{1}{n} \\sum_{i=1}^n(|model_i - \\overline{obs}| + |obs_i - \\overline{obs}|)^2}
+
+    Range: [0, 1]; Best: 1
+
+    Examples
+    --------
+    >>> obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
+    >>> model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])
+    >>> willmott(obs, model)
+    0.9501403174479723
+
+    References
+    ----------
+    Willmott, C. J. 1981. "On the validation of models". Physical Geography, 2, 184–194.
+    """
+
+    assert obs.size == model.size
+    if len(obs) == 0:
+        return np.nan
+
+    residual = model.ravel() - obs.ravel()
+    nominator = np.sum(residual ** 2)
+    denominator = np.sum((np.abs(model - obs.mean()) + np.abs(obs - obs.mean())) ** 2)
+
+    return 1 - nominator / denominator
+
+
 def hit_ratio(obs: np.ndarray, model: np.ndarray, a=0.1) -> float:
     """Fraction within obs ± acceptable deviation
 
     .. math::
 
         HR = \\frac{1}{n}\\sum_{i=1}^n I_{|(model_i - obs_i)|} < a
 
-    Range: 0.0 to 1.0; Best: 1.0
+    Range: [0, 1]; Best: 1
 
     Examples
     --------
-    >>> obs = np.array([1.0,1.1,1.2,1.3,1.4, 1.4, 1.3])
-    >>> model = np.array([1.02, 1.16, 1.3 , 1.38, 1.49, 1.45, 1.32])
+    >>> obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
+    >>> model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])
     >>> hit_ratio(obs, model, a=0.05)
     0.2857142857142857
     >>> hit_ratio(obs, model, a=0.1)
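For reference, a minimal sketch that reproduces the willmott docstring example with plain NumPy, following the same numerator/denominator as the function body added above:

    import numpy as np

    obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
    model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])

    num = np.sum((model - obs) ** 2)  # sum of squared residuals
    den = np.sum((np.abs(model - obs.mean()) + np.abs(obs - obs.mean())) ** 2)
    print(1 - num / den)  # ~0.9501, matching the docstring value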
@@ -397,7 +447,7 @@ def lin_slope(obs: np.ndarray, model: np.ndarray, reg_method="ols") -> float:
         slope = \\frac{\\sum_{i=1}^n (model_i - \\overline {model})(obs_i - \\overline {obs})}
                       {\\sum_{i=1}^n (obs_i - \\overline {obs})^2}
 
-    Range: -infinity to infinity; Best: 1.0
+    Range: :math:`(-\\infty, \\infty )`; Best: 1
     """
     return _linear_regression(obs, model, reg_method)[0]
 
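The lin_slope docstring formula is the ordinary least-squares slope of model regressed on obs; a minimal NumPy sketch of that formula (not fmskill's internal _linear_regression helper) could be:

    import numpy as np

    obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
    model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])

    # slope = sum((model_i - mean(model)) * (obs_i - mean(obs))) / sum((obs_i - mean(obs))^2)
    slope = np.sum((model - model.mean()) * (obs - obs.mean())) / np.sum((obs - obs.mean()) ** 2)
    print(slope)  # 1 would indicate a perfect 1:1 relationship (Best: 1 in the docstring)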

notebooks/Metrics_widget.ipynb

+14-35
@@ -3,15 +3,15 @@
  {
   "cell_type": "markdown",
   "source": [
-   "# Metrics widget\r\n",
-   "\r\n",
+   "# Metrics widget\n",
+   "\n",
    "Execute this notebook to test out the different metrics. "
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
-  "execution_count": 1,
+  "execution_count": null,
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
@@ -23,16 +23,17 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
+  "execution_count": null,
   "source": [
-   "metrics = [mtr.bias, mtr.rmse, mtr.urmse, mtr.mae, mtr.mape, mtr.mef, mtr.si, mtr.cc, mtr.spearmanr, mtr.r2, mtr.lin_slope]"
+   "metrics = [mtr.bias, mtr.max_error, mtr.rmse, mtr.urmse, mtr.mae, mtr.mape, \n",
+   "           mtr.mef, mtr.si, mtr.cc, mtr.spearmanr, mtr.r2, mtr.willmott, mtr.lin_slope]"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
+  "execution_count": null,
   "source": [
    "n = 50\n",
    "x = np.linspace(0.0, 6.0, num=n)\n",
@@ -45,9 +46,9 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 23,
+  "execution_count": null,
   "source": [
-   "def plot_metrics(bias, noise_level, fixed_y_axis=False):\n",
+   "def plot_metrics(bias, noise_level, fixed_y_axis=True):\n",
    "    y_mod = y_obs + bias + noise_level*noise_vec\n",
    "    plt.plot(x, y_obs, 'r.-', label=\"obs\")\n",
    "    plt.plot(x, y_mod, 'o-', label=\"model\")\n",
@@ -59,7 +60,7 @@
    "    ymax = 8\n",
    "    ymin = 1\n",
    "    ystep = 1.2*(ymax - ymin)/len(metrics)\n",
-   "    ypos = ymax\n",
+   "    ypos = ymax + 0.5\n",
    "    for m in metrics:\n",
    "        plt.text(6.5, ypos, f\"{m.__name__}:\")\n",
    "        plt.text(8.0, ypos, f\"{m(y_obs,y_mod):.4f}\")\n",
@@ -75,32 +76,10 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 24,
+  "execution_count": null,
   "source": [
    "interact(plot_metrics, bias = (-1,3,0.1), noise_level=(0,2,0.05));"
   ],
-  "outputs": [
-   {
-    "output_type": "display_data",
-    "data": {
-     "application/vnd.jupyter.widget-view+json": {
-      "version_major": 2,
-      "version_minor": 0,
-      "model_id": "0666c7c25f1e449f9198d321ab95a84d"
-     },
-     "text/plain": [
-      "interactive(children=(FloatSlider(value=1.0, description='bias', max=3.0, min=-1.0), FloatSlider(value=1.0, de…"
-     ]
-    },
-    "metadata": {}
-   }
-  ],
-  "metadata": {}
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "source": [],
   "outputs": [],
   "metadata": {}
  }
@@ -109,7 +88,7 @@
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
-  "version": "3.9.6",
+  "version": "3.8.10",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
@@ -121,10 +100,10 @@
  },
  "kernelspec": {
   "name": "python3",
-  "display_name": "Python 3.9.6 64-bit"
+  "display_name": "Python 3.8.10 64-bit ('base': conda)"
  },
  "interpreter": {
-  "hash": "f4041ee05ab07c15354d6207e763f17a216c3f5ccf08906343c2b4fd3fa7a6fb"
+  "hash": "fa576ebcd40e010bdc0ae86b06ce09151f3424f9e9aed6893ff04f39a9299d89"
  }
 },
 "nbformat": 4,
