API Reference
hesseflux provides functions used in the processing and post-processing of Eddy covariance flux data. It was developed for the ICOS ecosystem site FR-Hes.
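The functions changed in this commit form the usual hesseflux post-processing chain: spike flagging (madspikes), u*-threshold filtering (ustarfilter), flux partitioning (nee2gpp), and gap filling (gapfill). A minimal end-to-end sketch of that chain on synthetic data follows; the column names, the synthetic values, and the way the u* flag is merged into the NEE flag are illustrative assumptions, while the call signatures and return values follow the sources in the diff below.

    import numpy as np
    import pandas as pd
    import hesseflux as hf

    undef = -9999.
    # one synthetic year of half-hourly data, invented only for illustration
    dates = pd.date_range('2020-01-01 00:00', '2020-12-31 23:30', freq='30min')
    hour = dates.hour + dates.minute / 60.
    rng = np.random.default_rng(42)
    sw = np.clip(800. * np.sin(np.pi * (hour - 6.) / 12.), 0., None)
    df = pd.DataFrame(
        {'SW_IN': sw,                                             # W m-2
         'TA': 283.15 + 8. * np.sin(np.pi * (hour - 9.) / 12.),   # K
         'VPD': np.clip(900. * np.sin(np.pi * (hour - 9.) / 12.),
                        0., None),                                # Pa
         'NEE': -12. * sw / 800. + 3. + rng.standard_normal(dates.size),
         'USTAR': 0.05 + 0.5 * rng.random(dates.size)},
        index=dates)
    ff = pd.DataFrame(0, index=df.index, columns=df.columns)  # flag: 0 = good

    # 1. spike detection (median absolute deviation on second differences)
    ff = hf.madspikes(df, flag=ff, undef=undef)
    # 2. u* filtering: flag nighttime data below the bootstrapped threshold
    ustars, off = hf.ustarfilter(df, flag=ff, undef=undef)
    ff['NEE'] = np.maximum(ff['NEE'], off)
    # 3. partition NEE into GPP and RECO (Reichstein et al., 2005)
    dfpart = hf.nee2gpp(df, flag=ff, undef=undef, method='reichstein')
    # 4. gap-fill fluxes with marginal distribution sampling
    df_f, ff_f = hf.gapfill(df, flag=ff, undef=undef)

Flags follow the convention visible throughout the sources: 0 is good data, non-zero marks input- or output-flagged values.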
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index fab04ed..ac9b9f2 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,6 +4,9 @@ Changelog
All notable changes after its initial release in May 2020 (v2.0) are
documented in this file.
+v5.1 (??? 2024)
+   * Removed all warnings with new pandas and numpy versions.
+
v5.0 (Jan 2023)
   * Dropped support for Python 3.6 because cannot test it anymore.
   * Add timecolumns and ftimeformat to config and post-processing file in
diff --git a/docs/html/.buildinfo b/docs/html/.buildinfo
index ed2c7d4..de007f8 100644
--- a/docs/html/.buildinfo
+++ b/docs/html/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: a229c28f73769ec9b4c577ab465ca4f4
+config: 52be7eb0a3df7d9001d8779717d404cb
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/html/_modules/hesseflux/gapfill.html b/docs/html/_modules/hesseflux/gapfill.html
index e7700b6..2023604 100644
--- a/docs/html/_modules/hesseflux/gapfill.html
+++ b/docs/html/_modules/hesseflux/gapfill.html
@@ -1,24 +1,24 @@
- hesseflux.gapfill — hesseflux 5.1.dev0 documentation
+ hesseflux.gapfill — hesseflux 5.1.dev2 documentation
__all__ = ['gapfill']
-[docs]def gapfill(dfin, flag=None, date=None, timeformat='%Y-%m-%d %H:%M:%S',
+
+[docs]
+def gapfill(dfin, flag=None, date=None, timeformat='%Y-%m-%d %H:%M:%S',
colhead=None,
sw_dev=50., ta_dev=2.5, vpd_dev=5.,
longgap=60, fullday=False, undef=-9999, ddof=1,
@@ -353,10 +355,10 @@ Source code for hesseflux.gapfill
break
astr = 'Global radiation with name SW or starting with SW_'
astr = astr + ' must be in input.'
- assert sw_id, astr
+ assert sw_id, astr
astr = 'Air temperature with name TA or starting with TA_'
astr = astr + ' must be in input.'
- assert ta_id, astr
+ assert ta_id, astr
astr = 'Vapour pressure deficit with name VPD or starting'
astr = astr + ' with VPD_ must be in input.'
assert vpd_id, astr
@@ -421,11 +423,11 @@ Source code for hesseflux.gapfill
if firstvalid > nn:
if verbose > 1:
print(' Large margin at beginning: ', firstvalid)
- largegap[0:(firstvalid-nn)] = True
- if lastvalid < (ndata-nn):
+ largegap[0:(firstvalid - nn)] = True
+ if lastvalid < (ndata - nn):
if verbose > 1:
- print(' Large margin at end: ', lastvalid-nn)
- largegap[(lastvalid+nn):] = True
+ print(' Large margin at end: ', lastvalid - nn)
+ largegap[(lastvalid + nn):] = True
# Large gaps
@@ -439,12 +441,12 @@ Source code for hesseflux.gapfill
index += [i]
count = 1
if i > 0:
- if (dflag[i] != 0) and (dflag[i-1] == 0):
+ if (dflag[i] != 0) and (dflag[i - 1] == 0):
index += [i]
count = 1
elif dflag[i] != 0:
count += 1
- elif (dflag[i] == 0) and (dflag[i-1] != 0):
+ elif (dflag[i] == 0) and (dflag[i - 1] != 0):
length += [count]
count = 0
else:
@@ -456,17 +458,18 @@ Source code for hesseflux.gapfill
for i in range(len(index)):
if length[i] > nn:
if verbose > 1:
- print(' Large gap: ', index[i], ':', index[i]+length[i])
- largegap[index[i]:index[i]+length[i]] = True
+ print(' Large gap: ', index[i], ':',
+ index[i] + length[i])
+ largegap[index[i]:index[i] + length[i]] = True
# set or unset rest of days in large gaps
if fullday:
- for i in range(ndata-1):
+ for i in range(ndata - 1):
# end of large margin
- if largegap[i] and not largegap[i+1]:
+ if largegap[i] and not largegap[i + 1]:
largegap[np.where(day == day[i])[0]] = False
# beginning of large margin
- elif not largegap[i] and largegap[i+1]:
+ elif not largegap[i] and largegap[i + 1]:
largegap[np.where(day == day[i])[0]] = False
else:
continue
@@ -499,14 +502,14 @@ Source code for hesseflux.gapfill
# search for values around the met-conditions
# in a window of time
# (one week in the first iteration and odd weeks in the next)
- j1 = j - np.arange(1, week+1, dtype=int) + 1
+ j1 = j - np.arange(1, week + 1, dtype=int) + 1
j2 = j + np.arange(1, week, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
# get boolean array where meteo-conditions are in a given width
- conditions = ( (np.abs(sw[win]-sw[j]) < sw_devmax) &
- (np.abs(ta[win]-ta[j]) < ta_dev) &
- (np.abs(vpd[win]-vpd[j]) < vpd_dev) &
+ conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
+ (np.abs(ta[win] - ta[j]) < ta_dev) &
+ (np.abs(vpd[win] - vpd[j]) < vpd_dev) &
total_flg[win] )
num4avg = np.sum(conditions)
# we need at least two samples with similar conditions
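The j1/j2/win lines in this hunk build the look-up window shared by all gap-filling methods: a symmetric block of indices around sample j, clipped to the valid range. A standalone sketch of that construction, with names as in the source (the helper name and the half-width argument are hypothetical):

    import numpy as np

    def lookup_window(j, half, ndata):
        # indices j-half+1 ... j+half-1, duplicates removed after
        # clipping at the series boundaries -- the same j1/j2/win
        # construction as in gapfill
        j1 = j - np.arange(1, half + 1, dtype=int) + 1
        j2 = j + np.arange(1, half, dtype=int)
        return np.unique(np.sort(np.clip(np.append(j1, j2), 0, ndata - 1)))

    # one week of half-hourly indices around sample 100
    print(lookup_window(100, 7 * 48, 17520))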
@@ -523,10 +526,10 @@ Source code for hesseflux.gapfill
dflag_f[j] = 1
continue
else: # --> extend time window to two weeks
- j1 = j - np.arange(1, 2*week+1, dtype=int) + 1
- j2 = j + np.arange(1, 2*week, dtype=int)
+ j1 = j - np.arange(1, 2 * week + 1, dtype=int) + 1
+ j2 = j + np.arange(1, 2 * week, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
(np.abs(ta[win] - ta[j]) < ta_dev) &
(np.abs(vpd[win] - vpd[j]) < vpd_dev) &
@@ -554,12 +557,12 @@ Source code for hesseflux.gapfill
# Method 2: just global radiation available
if sw_flg[j] == 0:
- j1 = j - np.arange(1, week+1, dtype=int) + 1
+ j1 = j - np.arange(1, week + 1, dtype=int) + 1
j2 = j + np.arange(1, week, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
# get boolean array where meteo-conditions are in a given width
- conditions = ( (np.abs(sw[win]-sw[j]) < sw_devmax) &
+ conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
total_flg[win] )
num4avg = np.sum(conditions)
# we need at least two samples with similar conditions
@@ -578,12 +581,12 @@ Source code for hesseflux.gapfill
# Method 3: same hour
enough = False
for i in range(2):
- t_win = (nperday * (2*i+1))//2
- j1 = j - np.arange(1, t_win+1, dtype=int) + 1
+ t_win = (nperday * (2 * i + 1)) // 2
+ j1 = j - np.arange(1, t_win + 1, dtype=int) + 1
j2 = j + np.arange(1, t_win, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
- conditions = ( (np.abs(hour[win]-hour[j]) < 1.1)
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
+ conditions = ( (np.abs(hour[win] - hour[j]) < 1.1)
& (dflag[win] == 0) )
num4avg = np.sum(conditions)
if num4avg >= 2:
@@ -607,10 +610,10 @@ Source code for hesseflux.gapfill
# Method 4: same as 1 but for 3-12 weeks
if meteo_flg[j]:
for multi in range(3, 12):
- j1 = j - np.arange(1, multi*week+1, dtype=int) + 1
- j2 = j + np.arange(1, multi*week, dtype=int)
+ j1 = j - np.arange(1, multi * week + 1, dtype=int) + 1
+ j2 = j + np.arange(1, multi * week, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
(np.abs(ta[win] - ta[j]) < ta_dev) &
(np.abs(vpd[win] - vpd[j]) < vpd_dev) &
@@ -640,10 +643,10 @@ Source code for hesseflux.gapfill
# Method 5: same as 2 but for 2-12 weeks
if sw_flg[j] == 0:
for multi in range(2, 12):
- j1 = j - np.arange(1, multi*week+1, dtype=int) + 1
- j2 = j + np.arange(1, multi*week, dtype=int)
+ j1 = j - np.arange(1, multi * week + 1, dtype=int) + 1
+ j2 = j + np.arange(1, multi * week, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
# get boolean array where meteo-conditions are
# in a given width
conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
@@ -669,12 +672,12 @@ Source code for hesseflux.gapfill
# Method 6: same as 3 but for 3-120 days
for i in range(3, 120):
- t_win = nperday * (2*i+1)/2
- j1 = j - np.arange(1, t_win+1, dtype=int) + 1
+ t_win = nperday * (2 * i + 1) / 2
+ j1 = j - np.arange(1, t_win + 1, dtype=int) + 1
j2 = j + np.arange(1, t_win, dtype=int)
jj = np.append(j1, j2)
- win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
- conditions = ( (np.abs(hour[win]-hour[j]) < 1.1)
+ win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
+ conditions = ( (np.abs(hour[win] - hour[j]) < 1.1)
& (dflag[win] == 0) )
num4avg = np.sum(conditions)
if num4avg >= 2:
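The same similarity test recurs through methods 1 to 6 above: a candidate half-hour qualifies when its meteorological drivers are close to those at the gap. A toy version of that test; thresholds follow the gapfill signature defaults (sw_dev=50., ta_dev=2.5, vpd_dev=5.), the data are invented, and the adaptive sw_devmax of the source is replaced by the fixed sw_dev for simplicity:

    import numpy as np

    sw_dev, ta_dev, vpd_dev = 50., 2.5, 5.
    rng = np.random.default_rng(1)
    sw = 800. * rng.random(48)           # drivers in one look-up window
    ta = 283. + 5. * rng.random(48)
    vpd = 10. * rng.random(48)
    nee = -10. + 2. * rng.standard_normal(48)
    j = 24                               # the gap to fill
    win = np.arange(48)
    conditions = ((np.abs(sw[win] - sw[j]) < sw_dev) &
                  (np.abs(ta[win] - ta[j]) < ta_dev) &
                  (np.abs(vpd[win] - vpd[j]) < vpd_dev))
    if conditions.sum() >= 2:            # at least two similar neighbours
        print(nee[win][conditions].mean())   # the fill value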
@@ -716,6 +719,7 @@ Source code for hesseflux.gapfill
return dfout, ffout
+
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
diff --git a/docs/html/_modules/hesseflux/madspikes.html b/docs/html/_modules/hesseflux/madspikes.html
index 0522cf1..e688fc3 100644
--- a/docs/html/_modules/hesseflux/madspikes.html
+++ b/docs/html/_modules/hesseflux/madspikes.html
@@ -1,24 +1,24 @@
- hesseflux.madspikes — hesseflux 5.1.dev0 documentation
+ hesseflux.madspikes — hesseflux 5.1.dev2 documentation
@@ -59,6 +59,7 @@ Source code for hesseflux.madspikes
* Removed iteration, Apr 2020, Matthias Cuntz
* Using numpy docstring format, May 2020, Matthias Cuntz
* Improved flake8 and numpy docstring, Oct 2021, Matthias Cuntz
+ * Removed np.float and np.bool, Jun 2024, Matthias Cuntz
"""
from __future__ import division, absolute_import, print_function
@@ -70,9 +71,11 @@ Source code for hesseflux.madspikes
__all__ = ['madspikes']
-[docs]def madspikes(dfin, flag=None, isday=None,
+
+[docs]
+def madspikes(dfin, flag=None, isday=None,
colhead=None, undef=-9999,
- nscan=15*48, nfill=1*48,
+ nscan=15 * 48, nfill=1 * 48,
z=7, deriv=2, swthr=10.,
plot=False):
"""
@@ -146,7 +149,7 @@ Source code for hesseflux.madspikes
estr = ('Length of colhead must be number of columns in input'
'array. len(colhead)=' + str(len(colhead)) +
' shape(input)=(' + str(dfin.shape[0]) + ',' +
- str(dfin.shape[1])+').')
+ str(dfin.shape[1]) + ').')
raise ValueError(estr)
else:
isnumpy = False
@@ -206,15 +209,15 @@ Source code for hesseflux.madspikes
# parameters
nrow, ncol = df.shape
- half_scan_win = nscan//2
- half_fill_win = nfill//2
+ half_scan_win = nscan // 2
+ half_fill_win = nfill // 2
# calculate dusk and dawn times and separate in day and night
- isdawn = np.zeros(nrow, dtype=np.bool)
- isdusk = np.zeros(nrow, dtype=np.bool)
- dis = isday.astype(int) - np.roll(isday,-1).astype(int)
+ isdawn = np.zeros(nrow, dtype=bool)
+ isdusk = np.zeros(nrow, dtype=bool)
+ dis = isday.astype(int) - np.roll(isday, -1).astype(int)
isdawn[:-1] = np.where(dis[:-1] == -1, True, False)
- isdusk[:-1] = np.where(dis[:-1] == 1, True, False)
+ isdusk[:-1] = np.where(dis[:-1] == 1, True, False)
isddday = isdawn
tmp = np.roll(isdusk, 1)
isddday[1:] += tmp[1:] # start and end of day
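The dis/isdawn/isdusk lines above detect day-night transitions by differencing the day mask against itself shifted one step. A minimal reproduction with an invented mask:

    import numpy as np

    isday = np.array([False, False, True, True, True, False, False])
    dis = isday.astype(int) - np.roll(isday, -1).astype(int)
    isdawn = np.zeros(isday.size, dtype=bool)
    isdusk = np.zeros(isday.size, dtype=bool)
    isdawn[:-1] = dis[:-1] == -1   # night -> day at the next step
    isdusk[:-1] = dis[:-1] == 1    # day -> night at the next step
    print(np.where(isdawn)[0], np.where(isdusk)[0])   # [1] [4]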
@@ -246,23 +249,25 @@ Source code for hesseflux.madspikes
np.nan)
# iterate over fill window
- for j in range(half_fill_win, nrow-1, 2*half_fill_win):
+ for j in range(half_fill_win, nrow - 1, 2 * half_fill_win):
j1 = max(j - half_scan_win - 1, 0)
j2 = min(j + half_scan_win + 1, nrow)
fill_start = max(j - half_fill_win, 1)
- fill_end = min(j + half_fill_win, nrow-1)
+ fill_end = min(j + half_fill_win, nrow - 1)
dd = data_day[j1:j2].to_numpy()
day_flag = mad(np.ma.masked_array(data=dd, mask=np.isnan(dd)),
z=z, deriv=deriv)
ff.iloc[fill_start:fill_end, cols.index(hcol)] += (
- np.where(day_flag[fill_start-j1-1:fill_end-j1-1], 2, 0))
+ np.where(day_flag[fill_start - j1 - 1:fill_end - j1 - 1],
+ 2, 0))
nn = data_night[j1:j2]
night_flag = mad(np.ma.masked_array(data=nn, mask=np.isnan(nn)),
z=z, deriv=deriv)
ff.iloc[fill_start:fill_end, cols.index(hcol)] += (
- np.where(night_flag[fill_start-j1-1:fill_end-j1-1], 2, 0))
+ np.where(night_flag[fill_start - j1 - 1:fill_end - j1 - 1],
+ 2, 0))
if plot:
fig = plt.figure(1)
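mad() in the loop above applies a median-absolute-deviation outlier test within each scan window. A generic sketch of that test; whether the library's mad() applies the 1.4826 normal-consistency factor is not visible in this diff and is assumed here, and the library routine additionally screens derivatives (deriv=2) and handles masked arrays:

    import numpy as np

    def mad_outlier(y, z=7.):
        # flag points farther than z robust standard deviations from
        # the median; MAD * 1.4826 estimates sigma for normal data
        med = np.median(y)
        madev = np.median(np.abs(y - med))
        return np.abs(y - med) > z * 1.4826 * madev

    y = np.array([1., 1.1, 0.9, 25., 1.05, 0.95])
    print(mad_outlier(y))   # only the 25. spike is flagged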
@@ -288,6 +293,7 @@ Source code for hesseflux.madspikes
return ff
+
if __name__ == '__main__':
import doctest
doctest.testmod()
diff --git a/docs/html/_modules/hesseflux/nee2gpp.html b/docs/html/_modules/hesseflux/nee2gpp.html
index adfe081..b910adf 100644
--- a/docs/html/_modules/hesseflux/nee2gpp.html
+++ b/docs/html/_modules/hesseflux/nee2gpp.html
@@ -1,24 +1,24 @@
- hesseflux.nee2gpp — hesseflux 5.1.dev0 documentation
+ hesseflux.nee2gpp — hesseflux 5.1.dev2 documentation
@@ -49,13 +49,15 @@ Source code for hesseflux.nee2gpp
* Set default undef to NaN, Mar 2012, Arndt Piayda
* Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
* Ported to Python 3, Feb 2013, Matthias Cuntz
-* Use generel cost function cost_abs from functions module, May 2013, Matthias Cuntz
+* Use general cost function cost_abs from functions module,
+ May 2013, Matthias Cuntz
* Use fmin_tnc to allow params < 0, Aug 2014, Arndt Piayda
* Keyword nogppnight, Aug 2014, Arndt Piayda
* Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
* Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
* Input can be pandas Dataframe or numpy array(s), Apr 2020, Matthias Cuntz
* Using numpy docstring format, May 2020, Matthias Cuntz
+* Removed np.float and np.int, Jun 2024, Matthias Cuntz
.. moduleauthor:: Matthias Cuntz, Arndt Piayda
@@ -79,8 +81,11 @@ Source code for hesseflux.nee2gpp
# ----------------------------------------------------------------------
-[docs]def nee2gpp(dfin, flag=None, isday=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None,
- undef=-9999, method='reichstein', nogppnight=False, swthr=10.):
+
+[docs]
+def nee2gpp(dfin, flag=None, isday=None, date=None,
+ timeformat='%Y-%m-%d %H:%M:%S', colhead=None, undef=-9999,
+ method='reichstein', nogppnight=False, swthr=10.):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (RECO)
from Eddy covariance CO2 flux data.
@@ -104,26 +109,29 @@ Source code for hesseflux.nee2gpp
incoming shortwave radiation and air vapour pressure deficit.
`dfin` can be a pandas.Dataframe with the columns
- 'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
- 'TA' (or starting with 'TA\_') for air temperature [K]
+ 'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+ CO2 flux [umol(CO2) m-2 s-1]
+ 'TA' (or starting with 'TA\\_') for air temperature [K]
`method='lasslop'` or `method='day'` needs also
- 'SW_IN' (or starting with 'SW_IN') for incoming short-wave radiation [W m-2]
+ 'SW_IN' (or starting with 'SW_IN') for incoming short-wave
+ radiation [W m-2]
'VPD' (or starting with 'VPD') for air vapour deficit [Pa]
The index is taken as date variable.
`dfin` can also me a numpy array with the same columns. In this case
`colhead`, `date`, and possibly `dateformat` must be given.
flag : pandas.Dataframe or numpy.array, optional
- flag Dataframe or array has the same shape as `dfin`. Non-zero values in
- `flag` will be treated as missing values in `dfin`.
+ flag Dataframe or array has the same shape as `dfin`.
+ Non-zero values in `flag` will be treated as missing values in `dfin`.
`flag` must follow the same rules as `dfin` if pandas.Dataframe.
- If `flag` is numpy array, `df.columns.values` will be used as column heads
- and the index of `dfin` will be copied to `flag`.
+ If `flag` is numpy array, `df.columns.values` will be used as column
+ heads and the index of `dfin` will be copied to `flag`.
isday : array_like of bool, optional
- True when it is day, False when night. Must have the same length as `dfin.shape[0]`.
+ True when it is day, False when night. Must have the same length
+ as `dfin.shape[0]`.
If `isday` is not given, `dfin` must have a column with head 'SW_IN' or
starting with 'SW_IN'. `isday` will then be `dfin['SW_IN'] > swthr`.
@@ -136,22 +144,29 @@ Source code for hesseflux.nee2gpp
See strftime documentation of Python's datetime module:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
colhed : array_like of str, optional
- column names if `dfin` is numpy array. See `dfin` for mandatory column names.
+ column names if `dfin` is numpy array. See `dfin` for mandatory
+ column names.
undef : float, optional
- values having `undef` value are treated as missing values in `dfin` (default: -9999)
+ values having `undef` value are treated as missing values in `dfin`
+ (default: -9999)
method : str, optional
method to use for partitioning. Possible values are:
- 'global' or 'falge': fit of Reco vs. temperature to all nighttime data
+ 'global' or 'falge': fit of Reco vs. temperature to all nighttime
+ data
- 'local' of 'reichstein': several fits over the season of Reco vs. temperature
- as in Reichstein et al. (2005) (default)
+ 'local' or 'reichstein': several fits over the season of Reco vs.
+ temperature as in Reichstein et al. (2005)
+ (default)
- 'day' or 'lasslop': method of Lasslop et al. (2010) fitting a light-response curve
+ 'day' or 'lasslop': method of Lasslop et al. (2010) fitting a
+ light-response curve
nogppnight : float, optional
- GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+ GPP will be set to zero at night. RECO will then equal NEE at night
+ (default: False)
swthr : float, optional
- Threshold to determine daytime from incoming shortwave radiation if `isday` not given (default: 10).
+ Threshold to determine daytime from incoming shortwave radiation
+ if `isday` not given (default: 10).
Returns
-------
@@ -162,22 +177,25 @@ Source code for hesseflux.nee2gpp
Notes
-----
- Negative respiration possible at night if GPP is forced to 0 with `nogppnight=True`.
+ Negative respiration possible at night if GPP is forced to 0 with
+ `nogppnight=True`.
References
----------
.. [1] Falge et al. (2001)
- Gap filling strategies for defensible annual sums of net ecosystem exchange,
+ Gap filling strategies for defensible annual sums of
+ net ecosystem exchange,
Acricultural and Forest Meteorology 107, 43-69
.. [2] Reichstein et al. (2005)
- On the separation of net ecosystem exchange into assimilation and ecosystem
- respiration: review and improved algorithm,
+ On the separation of net ecosystem exchange into assimilation
+ and ecosystem respiration: review and improved algorithm,
Global Change Biology 11, 1424-1439
.. [3] Lasslop et al. (2010)
- Separation of net ecosystem exchange into assimilation and respiration using
- a light response curve approach: critical issues and global evaluation,
+ Separation of net ecosystem exchange into assimilation and respiration
+ using a light response curve approach: critical issues and global
+ evaluation,
Global Change Biology 16, 187-208
Examples
@@ -206,7 +224,8 @@ Source code for hesseflux.nee2gpp
>>> # flag
>>> flag = np.where(dfin == undef, 2, 0)
>>> # partition
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
@@ -214,27 +233,32 @@ Source code for hesseflux.nee2gpp
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='global')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='Reichstein')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='Reichstein')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='reichstein')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='reichstein')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='day')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
@@ -245,56 +269,66 @@ Source code for hesseflux.nee2gpp
History
-------
Written Matthias Cuntz, Mar 2012
- Modified Arndt Piayda, Mar 2012 - undef=np.nan
- Matthias Cuntz, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
- Matthias Cuntz, Feb 2013 - ported to Python 3
- Matthias Cuntz, May 2013 - replaced cost functions by generel cost function cost_abs if possible
- Arndt Piayda, Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
- permit gpp<0 at any time if nogppnight=True
+ Modified Arndt Piayda, Mar 2012
+ - undef=np.nan
+ Matthias Cuntz, Nov 2012
+ - wrapper for individual routines nee2gpp_reichstein etc.
+ Matthias Cuntz, Feb 2013
+ - ported to Python 3
+ Matthias Cuntz, May 2013
+ - replaced cost functions by generel cost function cost_abs
+ if possible
+ Arndt Piayda, Aug 2014
+ - replaced fmin with fmin_tnc to permit params<0,
+ permit gpp<0 at any time if nogppnight=True
+ Matthias Cuntz, Jun 2024
+ - removed np.int and np.float
+
"""
# Check input
# numpy or panda
if isinstance(dfin, (np.ndarray, np.ma.MaskedArray)):
isnumpy = True
- istrans = False
- assert colhead is not None, 'colhead must be given if input is numpy.ndarray.'
+ assert colhead is not None, (
+ 'colhead must be given if input is numpy.ndarray.')
if dfin.shape[0] == len(colhead):
- istrans = True
df = pd.DataFrame(dfin.T, columns=colhead)
elif dfin.shape[1] == len(colhead):
df = pd.DataFrame(dfin, columns=colhead)
else:
- raise ValueError('Length of colhead must be number of columns in input array. len(colhead)='+str(len(colhead))+' shape(input)=('+str(dfin.shape[0])+','+str(dfin.shape[1])+').')
+ raise ValueError('Length of colhead must be number of columns'
+ ' in input array. len(colhead)=' +
+ str(len(colhead)) + ' shape(input)=(' +
+ str(dfin.shape[0]) + ',' +
+ str(dfin.shape[1]) + ').')
assert date is not None, 'Date must be given if input is numpy arrary.'
df['Datetime'] = pd.to_datetime(date, format=timeformat)
df.set_index('Datetime', drop=True, inplace=True)
else:
isnumpy = False
- istrans = False
- assert isinstance(dfin, pd.core.frame.DataFrame), 'Input must be either numpy.ndarray or pandas.DataFrame.'
+ assert isinstance(dfin, pd.core.frame.DataFrame), (
+ 'Input must be either numpy.ndarray or pandas.DataFrame.')
df = dfin.copy(deep=True)
# Incoming flags
if flag is not None:
if isinstance(flag, (np.ndarray, np.ma.MaskedArray)):
- fisnumpy = True
- fistrans = False
if flag.shape[0] == len(df):
ff = pd.DataFrame(flag, columns=df.columns.values)
elif flag.shape[1] == len(df):
- fistrans = True
ff = pd.DataFrame(flag.T, columns=df.columns.values)
else:
- raise ValueError('flag must have same shape as data array. data: ({:d},{:d}); flag: ({:d},{:d})'.format(dfin.shape[0], dfin.shape[1], flag.shape[0], flag.shape[1]))
+ raise ValueError(
+ 'flag must have same shape as data array.'
+ ' data: ({:d},{:d}); flag: ({:d},{:d})'.format(
+ dfin.shape[0], dfin.shape[1], flag.shape[0],
+ flag.shape[1]))
ff = ff.set_index(df.index)
else:
- fisnumpy = False
- fistrans = False
- assert isinstance(flag, pd.core.frame.DataFrame), 'Flag must be either numpy.ndarray or pandas.DataFrame.'
+ assert isinstance(flag, pd.core.frame.DataFrame), (
+ 'Flag must be either numpy.ndarray or pandas.DataFrame.')
ff = flag.copy(deep=True)
else:
- fisnumpy = isnumpy
- fistrans = istrans
# flags: 0: good; 1: input flagged; 2: output flagged
ff = df.copy(deep=True).astype(int)
ff[:] = 0
@@ -308,9 +342,11 @@ Source code for hesseflux.nee2gpp
if cc.startswith('SW_IN'):
sw_id = cc
break
- assert sw_id, 'Global radiation with name SW or starting with SW_ must be in input if isday not given.'
- isday = df[sw_id] > swthr # Papale et al. (Biogeosciences, 2006): 20; REddyProc: 10
- if isinstance(isday, (pd.core.series.Series,pd.core.frame.DataFrame)):
+ assert sw_id, ('Global radiation with name SW or starting with'
+ ' SW_ must be in input if isday not given.')
+ # Papale et al. (Biogeosciences, 2006): 20; REddyProc: 10
+ isday = df[sw_id] > swthr
+ if isinstance(isday, (pd.core.series.Series, pd.core.frame.DataFrame)):
isday = isday.to_numpy()
isday[isday == undef] = np.nan
ff[np.isnan(isday)] = 1
@@ -320,10 +356,12 @@ Source code for hesseflux.nee2gpp
dfout = _nee2gpp_falge(df, ff, isday, undef=undef)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
- dfout = _nee2gpp_reichstein(df, ff, isday, undef=undef, nogppnight=nogppnight)
+ dfout = _nee2gpp_reichstein(df, ff, isday, undef=undef,
+ nogppnight=nogppnight)
# Lasslop et al. (2010) daytime method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
- dfout = _nee2gpp_lasslop(df, ff, isday, undef=undef, nogppnight=nogppnight)
+ dfout = _nee2gpp_lasslop(df, ff, isday, undef=undef,
+ nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
@@ -334,6 +372,7 @@ Source code for hesseflux.nee2gpp
return dfout
+
# ----------------------------------------------------------------------
def _nee2gpp_falge(df, ff, isday, undef=-9999):
"""
@@ -347,8 +386,9 @@ Source code for hesseflux.nee2gpp
time series of CO2 fluxes and air temperature.
pandas.Dataframe with the columns
- 'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
- 'TA' (or starting with 'TA\_') for air temperature [K]
+ 'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+ CO2 flux [umol(CO2) m-2 s-1]
+ 'TA' (or starting with 'TA\\_') for air temperature [K]
The index is taken as date variable.
ff : pandas.Dataframe
flag Dataframe or array has the same shape as `df`. Non-zero values in
@@ -356,9 +396,11 @@ Source code for hesseflux.nee2gpp
`ff` must follow the same rules as `df`.
isday : array_like of bool
- True when it is day, False when night. Must have the same length as `df.shape[0].`
+ True when it is day, False when night. Must have the same length
+ as `df.shape[0].`
undef : float, optional
- values having `undef` value are treated as missing values in `df` (default: -9999)
+ values having `undef` value are treated as missing values in `df`
+ (default: -9999)
Returns
-------
@@ -369,7 +411,8 @@ Source code for hesseflux.nee2gpp
References
----------
.. [1] Falge et al. (2001)
- Gap filling strategies for defensible annual sums of net ecosystem exchange,
+ Gap filling strategies for defensible annual sums of
+ net ecosystem exchange,
Acricultural and Forest Meteorology 107, 43-69
Examples
@@ -384,7 +427,8 @@ Source code for hesseflux.nee2gpp
>>> head = fread(ifile, skip=2, header=True)
>>> head1 = head[0]
>>> # date
- >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+ >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:],
+ ... hr=dat[3,:], mi=dat[4,:])
>>> adate = dec2date(jdate, eng=True)
>>> # colhead
>>> idx = []
@@ -398,7 +442,8 @@ Source code for hesseflux.nee2gpp
>>> # flag
>>> flag = np.where(dfin == undef, 2, 0)
>>> # partition
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='global')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
@@ -409,11 +454,14 @@ Source code for hesseflux.nee2gpp
Modified Arndt Piayda, Mar 2012 - undef=np.nan
Matthias Cuntz, Nov 2012 - individual routine
Matthias Cuntz, Feb 2013 - ported to Python 3
+ Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
"""
# Variables
fc_id = ''
for cc in df.columns:
- if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+ if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+ (cc == 'NEE') ):
fc_id = cc
break
ta_id = ''
@@ -421,12 +469,15 @@ Source code for hesseflux.nee2gpp
if cc.startswith('TA_') or (cc == 'TA'):
ta_id = cc
break
- assert fc_id, 'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
- assert ta_id, 'Air temperature with name TA or starting with TA_ must be in input.'
+ assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+ ' or NEE_ must be in input.')
+ assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+ ' be in input.')
nee = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
t = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
- misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+ misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+ (isday == undef)))
# Partition - Global relationship as in Falge et al. (2001)
@@ -439,16 +490,16 @@ Source code for hesseflux.nee2gpp
p = opt.fmin(cost_abs, [2., 200.],
args=(lloyd_fix_p, tt, net), disp=False)
- Reco = np.ones(ndata)*undef
+ Reco = np.ones(ndata) * undef
ii = np.where(~t.mask)[0]
Reco[ii] = lloyd_fix(t[ii], p[0], p[1])
# GPP
- GPP = np.ones(ndata)*undef
+ GPP = np.ones(ndata) * undef
ii = np.where(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
- dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+ dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
return dfout
@@ -467,8 +518,9 @@ Source code for hesseflux.nee2gpp
time series of CO2 fluxes and air temperature.
pandas.Dataframe with the columns
- 'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
- 'TA' (or starting with 'TA\_') for air temperature [K]
+ 'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for
+ observed CO2 flux [umol(CO2) m-2 s-1]
+ 'TA' (or starting with 'TA\\_') for air temperature [K]
The index is taken as date variable.
ff : pandas.Dataframe
flag Dataframe or array has the same shape as `df`. Non-zero values in
@@ -476,11 +528,14 @@ Source code for hesseflux.nee2gpp
`ff` must follow the same rules as `df`.
isday : array_like of bool
- True when it is day, False when night. Must have the same length as `df.shape[0].`
+ True when it is day, False when night. Must have the same length
+ as `df.shape[0].`
undef : float, optional
- values having `undef` value are treated as missing values in `df` (default: -9999)
+ values having `undef` value are treated as missing values in `df`
+ (default: -9999)
nogppnight : float, optional
- GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+ GPP will be set to zero at night. RECO will then equal NEE at night
+ (default: False)
Returns
-------
@@ -491,8 +546,8 @@ Source code for hesseflux.nee2gpp
References
----------
.. [2] Reichstein et al. (2005)
- On the separation of net ecosystem exchange into assimilation and ecosystem
- respiration: review and improved algorithm,
+ On the separation of net ecosystem exchange into assimilation and
+ ecosystem respiration: review and improved algorithm,
Global Change Biology 11, 1424-1439
Examples
@@ -507,7 +562,8 @@ Source code for hesseflux.nee2gpp
>>> head = fread(ifile, skip=2, header=True)
>>> head1 = head[0]
>>> # date
- >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+ >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:],
+ ... mi=dat[4,:])
>>> adate = dec2date(jdate, eng=True)
>>> # colhead
>>> idx = []
@@ -521,7 +577,8 @@ Source code for hesseflux.nee2gpp
>>> # flag
>>> flag = np.where(dfin == undef, 2, 0)
>>> # partition
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
@@ -529,17 +586,20 @@ Source code for hesseflux.nee2gpp
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='Reichstein')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='Reichstein')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='reichstein')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='reichstein')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
@@ -550,11 +610,14 @@ Source code for hesseflux.nee2gpp
Modified Arndt Piayda, Mar 2012 - undef=np.nan
Matthias Cuntz, Nov 2012 - individual routine
Matthias Cuntz, Feb 2013 - ported to Python 3
+ Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
"""
# Variables
fc_id = ''
for cc in df.columns:
- if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+ if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+ (cc == 'NEE') ):
fc_id = cc
break
ta_id = ''
@@ -562,118 +625,133 @@ Source code for hesseflux.nee2gpp
if cc.startswith('TA_') or (cc == 'TA'):
ta_id = cc
break
- assert fc_id, 'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
- assert ta_id, 'Air temperature with name TA or starting with TA_ must be in input.'
+ assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+ ' or NEE_ must be in input.')
+ assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+ ' be in input.')
nee = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
t = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
- misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+ misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+ (isday == undef)))
dates = df.index.to_julian_date()
# Partition - Local relationship = Reichstein et al. (2005)
ndata = nee.size
- GPP = np.ones(ndata)*undef
- Reco = np.ones(ndata)*undef
- dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+ GPP = np.ones(ndata) * undef
+ Reco = np.ones(ndata) * undef
+ dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
# Select valid nighttime
mask = misday | nee.mask | t.mask | misday.mask
ii = np.where(~mask)[0]
- if (ii.size==0):
- # raise ValueError('Error _nee2gpp_reichstein: no valid nighttime data.')
+ if (ii.size == 0):
+ # raise ValueError('Error _nee2gpp_reichstein:'
+ # ' no valid nighttime data.')
print('Warning _nee2gpp_reichstein: no valid nighttime data.')
return dfout
jul = dates[ii]
tt = np.ma.compressed(t[ii])
net = np.ma.compressed(nee[ii])
# 1. each 5 days, in 15 day period, fit if range of T > 5
- locp = [] # local param
- locs = [] # local err
- dmin = np.floor(np.amin(jul)).astype(np.int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
- dmax = np.ceil(np.amax(jul)).astype(np.int) # so the search will be from noon to noon and thus includes all nights
- for i in range(dmin,dmax,5):
- iii = np.where((jul>=i) & (jul<(i+14)))[0]
+ locp = [] # local param
+ locs = [] # local err
+ # be aware that julian days starts at noon, i.e. 1.0 is 12h
+ # so the search will be from noon to noon and thus includes all nights
+ dmin = np.floor(np.amin(jul)).astype(int)
+ dmax = np.ceil(np.amax(jul)).astype(int)
+ for i in range(dmin, dmax, 5):
+ iii = np.where((jul >= i) & (jul < (i + 14)))[0]
niii = iii.size
if niii > 6:
tt1 = tt[iii]
net1 = net[iii]
- mm = ~mad(net1, z=4.5) # make fit more robust by removing outliers
+ # make fit more robust by removing outliers
+ mm = ~mad(net1, z=4.5)
if (np.ptp(tt[iii]) >= 5.) & (np.sum(mm) > 6):
p, temp1, temp2 = opt.fmin_tnc(cost_lloyd_fix, [2., 200.],
- bounds=[[0.,None], [0.,None]],
+ bounds=[[0., None], [0., None]],
args=(tt1[mm], net1[mm]),
approx_grad=True, disp=False)
try:
- p1, c = opt.curve_fit(lloyd_fix, tt1[mm], net1[mm], p0=p, maxfev=10000) # params, covariance
- if np.all(np.isfinite(c)): # possible return of curvefit: c=inf
+ # params, covariance
+ p1, c = opt.curve_fit(lloyd_fix, tt1[mm], net1[mm],
+ p0=p, maxfev=10000)
+ # possible return of curvefit: c=inf
+ if np.all(np.isfinite(c)):
s = np.sqrt(np.diag(c))
else:
- s = 10.*np.abs(p)
+ s = 10. * np.abs(p)
except:
- s = 10.*np.abs(p)
+ s = 10. * np.abs(p)
locp += [p]
locs += [s]
# if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
if len(locp) == 0:
- # raise ValueError('Error _nee2gpp_reichstein: No local relationship found.')
+ # raise ValueError('Error _nee2gpp_reichstein:'
+ # ' No local relationship found.')
print('Warning _nee2gpp_reichstein: No local relationship found.')
return dfout
- locp = np.squeeze(np.array(locp).astype(np.float))
- locs = np.squeeze(np.array(locs).astype(np.float))
+ locp = np.squeeze(np.array(locp).astype(float))
+ locs = np.squeeze(np.array(locs).astype(float))
# 2. E0 = avg of best 3
# Reichstein et al. (2005), p. 1430, 1st paragraph.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
- iii = np.where((locp[:,1] > 0.) & (locp[:,1] < 450.) & (np.abs(locs[:,1]/locp[:,1]) < 0.5))[0]
+ iii = np.where((locp[:, 1] > 0.) & (locp[:, 1] < 450.) &
+ (np.abs(locs[:, 1] / locp[:, 1]) < 0.5))[0]
niii = iii.size
- if niii==0:
- # raise ValueError('Error _nee2gpp_reichstein: No good local relationship found.')
+ if niii == 0:
+ # raise ValueError('Error _nee2gpp_reichstein:'
+ # ' No good local relationship found.')
# loosen the criteria: take the best three estimates anyway
- iii = np.where((locp[:,1] > 0.))[0]
+ iii = np.where((locp[:, 1] > 0.))[0]
niii = iii.size
- if niii<1:
+ if niii < 1:
# raise ValueError('Error _nee2gpp_reichstein: No E0>0 found.')
print('Warning _nee2gpp_reichstein: No E0>0 found.')
return dfout
- lp = locp[iii,:]
- ls = locs[iii,:]
- iis = np.argsort(ls[:,1])
- bestp = np.mean(lp[iis[0:np.minimum(3,niii)],:],axis=0)
- bests = np.mean(ls[iis[0:np.minimum(3,niii)],:],axis=0)
- elif niii==1:
- bestp = np.squeeze(locp[iii,:])
- bests = np.squeeze(locs[iii,:])
- elif niii==2:
- bestp = np.mean(locp[iii,:],axis=0)
- bests = np.mean(locs[iii,:],axis=0)
+ lp = locp[iii, :]
+ ls = locs[iii, :]
+ iis = np.argsort(ls[:, 1])
+ bestp = np.mean(lp[iis[0:np.minimum(3, niii)], :], axis=0)
+ bests = np.mean(ls[iis[0:np.minimum(3, niii)], :], axis=0)
+ elif niii == 1:
+ bestp = np.squeeze(locp[iii, :])
+ bests = np.squeeze(locs[iii, :])
+ elif niii == 2:
+ bestp = np.mean(locp[iii, :], axis=0)
+ bests = np.mean(locs[iii, :], axis=0)
# ls = locs[iii,:]
# iis = np.argsort(ls[:,1])
else:
- lp = locp[iii,:]
- ls = locs[iii,:]
- iis = np.argsort(ls[:,1])
- bestp = np.mean(lp[iis[0:3],:],axis=0)
- bests = np.mean(ls[iis[0:3],:],axis=0)
+ lp = locp[iii, :]
+ ls = locs[iii, :]
+ iis = np.argsort(ls[:, 1])
+ bestp = np.mean(lp[iis[0:3], :], axis=0)
+ bests = np.mean(ls[iis[0:3], :], axis=0)
# 3. Refit Rref with fixed E0, each 4 days
- refp = [] # Rref param
- refii = [] # mean index of data points
+ refp = [] # Rref param
+ refii = [] # mean index of data points
E0 = bestp[1]
et = lloyd_fix(tt, 1., E0)
- for i in range(dmin,dmax,4):
- iii = np.where((jul>=i) & (jul<(i+4)))[0]
+ for i in range(dmin, dmax, 4):
+ iii = np.where((jul >= i) & (jul < (i + 4)))[0]
niii = iii.size
if niii > 3:
# Calc directly minisation of (nee-p*et)**2
p, temp1, temp2 = opt.fmin_tnc(cost_abs, [2.],
- bounds=[[0.,None]],
- args=(lloyd_only_rref_p, et[iii], net[iii]),
+ bounds=[[0., None]],
+ args=(lloyd_only_rref_p, et[iii],
+ net[iii]),
approx_grad=True, disp=False)
refp += [p]
- refii += [np.int((iii[0]+iii[-1])//2)]
+ refii += [int((iii[0] + iii[-1]) // 2)]
if len(refp) == 0:
- # raise ValueError('Error _nee2gpp_reichstein: No ref relationship found.')
+ # raise ValueError('Error _nee2gpp_reichstein:'
+ # ' No ref relationship found.')
print('Warning _nee2gpp_reichstein: No ref relationship found.')
return dfout
refp = np.squeeze(np.array(refp))
@@ -683,28 +761,28 @@ Source code for hesseflux.nee2gpp
Rref = np.interp(dates, jul[refii], refp)
# 5. Calc Reco
- Reco = np.ones(ndata)*undef
+ Reco = np.ones(ndata) * undef
ii = np.where(~t.mask)[0]
Reco[ii] = lloyd_fix(t[ii], Rref[ii], E0)
# 6. Calc GPP
- GPP = np.ones(ndata)*undef
+ GPP = np.ones(ndata) * undef
ii = np.where(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# 7. Set GPP=0 at night, if wanted
if nogppnight:
- mask = misday | nee.mask | t.mask | misday.mask # night
+ mask = misday | nee.mask | t.mask | misday.mask # night
ii = np.where(~mask)[0]
Reco[ii] = nee[ii]
GPP[ii] = 0.
# and prohibit negative gpp at any time
- mask = nee.mask | t.mask | (GPP>0.)
+ mask = nee.mask | t.mask | (GPP > 0.)
ii = np.where(~mask)[0]
Reco[ii] -= GPP[ii]
GPP[ii] = 0.
- dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+ dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
return dfout
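lloyd_fix(t, Rref, E0) above is the Lloyd & Taylor (1994) respiration model on which the Reichstein partitioning rests. A sketch, assuming the conventional Lloyd & Taylor constants Tref = 283.15 K and T0 = 227.13 K, which are not visible in this diff:

    import numpy as np

    def lloyd_fix(t, rref, e0):
        # respiration at air temperature t (K): rref is respiration at
        # 10 degC, e0 a temperature-sensitivity parameter (K)
        return rref * np.exp(e0 * (1. / (283.15 - 227.13) -
                                   1. / (t - 227.13)))

    # respiration roughly doubles between 5 and 15 degC for e0 = 200 K
    print(lloyd_fix(np.array([278.15, 288.15]), 2., 200.))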
@@ -723,9 +801,11 @@ Source code for hesseflux.nee2gpp
incoming shortwave radiation, and air vapour pressure deficit.
`df` can be a pandas.Dataframe with the columns
- 'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
- 'TA' (or starting with 'TA\_') for air temperature [K]
- 'SW_IN' (or starting with 'SW_IN') for incoming short-wave radiation [W m-2]
+ 'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+ CO2 flux [umol(CO2) m-2 s-1]
+ 'TA' (or starting with 'TA\\_') for air temperature [K]
+ 'SW_IN' (or starting with 'SW_IN') for incoming short-wave
+ radiation [W m-2]
'VPD' (or starting with 'VPD') for air vapour deficit [Pa]
The index is taken as date variable.
ff : pandas.Dataframe or numpy.array, optional
@@ -734,11 +814,14 @@ Source code for hesseflux.nee2gpp
`ff` must follow the same rules as `df` if pandas.Dataframe.
isday : array_like of bool, optional
- True when it is day, False when night. Must have the same length as `df.shape[0]`.
+ True when it is day, False when night. Must have the same length
+ as `df.shape[0]`.
undef : float, optional
- values having `undef` value are treated as missing values in `df` (default: -9999)
+ values having `undef` value are treated as missing values in `df`
+ (default: -9999)
nogppnight : float, optional
- GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+ GPP will be set to zero at night. RECO will then equal NEE at night
+ (default: False)
Returns
-------
@@ -749,8 +832,9 @@ Source code for hesseflux.nee2gpp
References
----------
.. [3] Lasslop et al. (2010)
- Separation of net ecosystem exchange into assimilation and respiration using
- a light response curve approach: critical issues and global evaluation,
+ Separation of net ecosystem exchange into assimilation and respiration
+ using a light response curve approach: critical issues and global
+ evaluation,
Global Change Biology 16, 187-208
Examples
@@ -765,7 +849,8 @@ Source code for hesseflux.nee2gpp
>>> head = fread(ifile, skip=2, header=True)
>>> head1 = head[0]
>>> # date
- >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+ >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:],
+ ... mi=dat[4,:])
>>> adate = dec2date(jdate, eng=True)
>>> # colhead
>>> idx = []
@@ -779,7 +864,8 @@ Source code for hesseflux.nee2gpp
>>> # flag
>>> flag = np.where(dfin == undef, 2, 0)
>>> # partition
- >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='day')
+ >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+ ... undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
@@ -793,11 +879,14 @@ Source code for hesseflux.nee2gpp
Modified Arndt Piayda, Mar 2012 - undef=np.nan
Matthias Cuntz, Nov 2012 - individual routine
Matthias Cuntz, Feb 2013 - ported to Python 3
+ Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
"""
# Variables
fc_id = ''
for cc in df.columns:
- if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+ if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+ (cc == 'NEE') ):
fc_id = cc
break
ta_id = ''
@@ -815,24 +904,29 @@ Source code for hesseflux.nee2gpp
if cc.startswith('VPD_') or (cc == 'VPD'):
vpd_id = cc
break
- assert fc_id, 'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
- assert ta_id, 'Air temperature with name TA or starting with TA_ must be in input.'
- assert sw_id, 'Global radiation with name SW or starting with SW_ must be in input.'
- assert vpd_id, 'Vapour pressure deficit with name VPD or starting with VPD_ must be in input.'
-
- nee = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
- t = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
- sw = np.ma.array(df[sw_id], mask=(ff[sw_id] > 0))
+ assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+ ' or NEE_ must be in input.')
+ assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+ ' be in input.')
+ assert sw_id, ('Global radiation with name SW or starting with SW_ must'
+ ' be in input.')
+ assert vpd_id, ('Vapour pressure deficit with name VPD or starting with'
+ ' VPD_ must be in input.')
+
+ nee = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
+ t = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
+ sw = np.ma.array(df[sw_id], mask=(ff[sw_id] > 0))
vpd = np.ma.array(df[vpd_id], mask=(ff[vpd_id] > 0))
- misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+ misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+ (isday == undef)))
dates = df.index.to_julian_date()
# Partition - Lasslop et al. (2010) method
ndata = nee.size
- GPP = np.ones(ndata)*undef
- Reco = np.ones(ndata)*undef
- dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+ GPP = np.ones(ndata) * undef
+ Reco = np.ones(ndata) * undef
+ dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
do_lgpp = False
mask = nee.mask | t.mask | misday.mask | sw.mask | vpd.mask
@@ -855,7 +949,8 @@ Source code for hesseflux.nee2gpp
aalpha = 0.01
qnet = np.sort(dnet)
nqnet = qnet.size
- abeta0 = np.abs(qnet[np.floor(0.97*nqnet).astype(np.int)]-qnet[np.ceil(0.03*nqnet).astype(np.int)])
+ abeta0 = np.abs(qnet[np.floor(0.97 * nqnet).astype(int)] -
+ qnet[np.ceil(0.03 * nqnet).astype(int)])
ak = 0.
# out
lE0 = []
@@ -865,18 +960,19 @@ Source code for hesseflux.nee2gpp
lk = []
lRref = []
lii = []
- dmin = np.floor(np.amin(dates)).astype(np.int)
- dmax = np.ceil(np.amax(dates)).astype(np.int)
+ dmin = np.floor(np.amin(dates)).astype(int)
+ dmax = np.ceil(np.amax(dates)).astype(int)
zaehl = -1
- for i in range(dmin,dmax,2):
+ for i in range(dmin, dmax, 2):
good = True
# 1. Estimate E0 from nighttime data
- iii = np.squeeze(np.where((njul>=i) & (njul<(i+12))))
+ iii = np.squeeze(np.where((njul >= i) & (njul < (i + 12))))
niii = iii.size
if niii > 3:
p, temp1, temp2 = opt.fmin_tnc(cost_abs, [aRref, 100.],
- bounds=[[0.,None], [0.,None]],
- args=(lloyd_fix_p, ntt[iii], nnet[iii]),
+ bounds=[[0., None], [0., None]],
+ args=(lloyd_fix_p, ntt[iii],
+ nnet[iii]),
approx_grad=True, disp=False)
E0 = np.maximum(p[1], 50.)
else:
@@ -887,7 +983,7 @@ Source code for hesseflux.nee2gpp
good = False
continue
# 2. Estimate alpha, k, beta0, Rref from daytime data
- iii = np.squeeze(np.where((djul>=i) & (djul<(i+4))))
+ iii = np.squeeze(np.where((djul >= i) & (djul < (i + 4))))
niii = iii.size
if niii > 3:
et = lloyd_fix(dtt[iii], 1., E0)
@@ -896,34 +992,37 @@ Source code for hesseflux.nee2gpp
ibeta0 = abeta0
ik = ak
iRref = aRref
- bounds = [[None,None], [None,None], [None,None], [None,None]]
+ bounds = [[None, None], [None, None], [None, None], [None, None]]
while again:
again = False
- p, nfeval, rc = opt.fmin_tnc(cost_lasslop, [ialpha, ibeta0, ik, iRref],
+ p, nfeval, rc = opt.fmin_tnc(cost_lasslop,
+ [ialpha, ibeta0, ik, iRref],
bounds=bounds,
- args=(dsw[iii], et, dvpd[iii], dnet[iii]),
+ args=(dsw[iii], et, dvpd[iii],
+ dnet[iii]),
approx_grad=True, disp=False)
- # if parameters beyond some bounds, set params and redo the optim or skip
- if ((p[0] < 0.) | (p[0] > 0.22)): # alpha
+ # if parameters beyond some bounds, set params and redo
+ # the optim or skip
+ if ((p[0] < 0.) | (p[0] > 0.22)): # alpha
again = True
if zaehl >= 0:
- bounds[0] = [lalpha[zaehl],lalpha[zaehl]]
+ bounds[0] = [lalpha[zaehl], lalpha[zaehl]]
ialpha = lalpha[zaehl]
else:
- bounds[0] = [0.,0.]
+ bounds[0] = [0., 0.]
ialpha = 0.
- if p[1] < 0.: # beta0
- bounds[1] = [0.,0.]
+ if p[1] < 0.: # beta0
+ bounds[1] = [0., 0.]
ibeta0 = 0.
again = True
if p[1] > 250.:
good = False
continue
- if p[2] < 0.: # k
- bounds[2] = [0.,0.]
+ if p[2] < 0.: # k
+ bounds[2] = [0., 0.]
ik = 0.
again = True
- if p[3] < 0: # Rref
+ if p[3] < 0: # Rref
good = False
continue
if good:
@@ -932,7 +1031,7 @@ Source code for hesseflux.nee2gpp
lbeta0 = lbeta0 + [p[1]]
lk = lk + [p[2]]
lRref = lRref + [p[3]]
- lii = lii + [np.int((iii[0]+iii[-1])/2)]
+ lii = lii + [int((iii[0] + iii[-1]) / 2)]
else:
continue
else:
@@ -940,7 +1039,8 @@ Source code for hesseflux.nee2gpp
lE0 = lE0 + [E0]
zaehl += 1
if len(lE0) == 0:
- # raise ValueError('Error _nee2gpp_lasslop: No day relationship found.')
+ # raise ValueError('Error _nee2gpp_lasslop:'
+ # ' No day relationship found.')
print('Warning _nee2gpp_lasslop: No day relationship found.')
return dfout
lE0 = np.squeeze(np.array(lE0))
@@ -956,7 +1056,7 @@ Source code for hesseflux.nee2gpp
Rref = np.interp(dates, djul[lii], lRref)
# 4. Calc Reco
- Reco = np.ones(ndata)*undef
+ Reco = np.ones(ndata) * undef
ii = np.squeeze(np.where(~t.mask))
Reco[ii] = lloyd_fix(t[ii], Rref[ii], E0[ii])
@@ -969,26 +1069,27 @@ Source code for hesseflux.nee2gpp
lmask = t.mask | misday.mask | sw.mask | vpd.mask
ii = np.squeeze(np.where(~lmask))
lgpp = np.zeros(ndata)
- lgpp[ii] = lasslop(sw[ii], et[ii], vpd[ii], alpha[ii], beta0[ii], k[ii], Rref[ii]) - Reco[ii]
+ lgpp[ii] = lasslop(sw[ii], et[ii], vpd[ii], alpha[ii],
+ beta0[ii], k[ii], Rref[ii]) - Reco[ii]
# 6. GPP
- GPP = np.ones(ndata)*undef
+ GPP = np.ones(ndata) * undef
ii = np.squeeze(np.where(~(t.mask | nee.mask)))
GPP[ii] = Reco[ii] - nee[ii]
# 7. Set GPP=0 at night, if wanted
if nogppnight:
- mask = misday | nee.mask | t.mask | misday.mask # night
+ mask = misday | nee.mask | t.mask | misday.mask # night
ii = np.where(~mask)[0]
Reco[ii] = nee[ii]
GPP[ii] = 0.
# and prohibit negative gpp at any time
- mask = nee.mask | t.mask | (GPP>0.)
+ mask = nee.mask | t.mask | (GPP > 0.)
ii = np.where(~mask)[0]
Reco[ii] -= GPP[ii]
GPP[ii] = 0.
- dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+ dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
return dfout
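_nee2gpp_lasslop fits the four parameters alpha, beta0, k and Rref of the Lasslop et al. (2010) daytime model. A sketch of that light-response curve; the VPD threshold vpd0 = 1000 Pa is an assumed textbook value, not read from this diff:

    import numpy as np

    def lasslop_nee(sw, vpd, alpha, beta0, k, reco, vpd0=1000.):
        # rectangular hyperbola in light (sw), with the maximum uptake
        # beta reduced exponentially at high vapour pressure deficit;
        # negative return values mean net uptake
        beta = np.where(vpd > vpd0, beta0 * np.exp(-k * (vpd - vpd0)), beta0)
        return -alpha * beta * sw / (alpha * sw + beta) + reco

    print(lasslop_nee(sw=500., vpd=1500., alpha=0.05, beta0=20.,
                      k=2e-3, reco=3.))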
diff --git a/docs/html/_modules/hesseflux/ustarfilter.html b/docs/html/_modules/hesseflux/ustarfilter.html
index a0705a6..182c5bb 100644
--- a/docs/html/_modules/hesseflux/ustarfilter.html
+++ b/docs/html/_modules/hesseflux/ustarfilter.html
@@ -1,24 +1,24 @@
- hesseflux.ustarfilter — hesseflux 5.1.dev0 documentation
+ hesseflux.ustarfilter — hesseflux 5.1.dev2 documentation
@@ -67,6 +67,8 @@ Source code for hesseflux.ustarfilter
only ustar data when NEE and Ta are valid, Jan 2023, Matthias Cuntz
* Use 90% of ustar if no threshold found also for seasonout,
Jan 2023, Matthias Cuntz
+ * Removed np.float and np.bool, Jun 2024, Matthias Cuntz
+ * do not register pandas plotting backend, Jun 2024, Matthias Cuntz
"""
import numpy as np
@@ -76,7 +78,9 @@ Source code for hesseflux.ustarfilter
__all__ = ['ustarfilter']
-[docs]def ustarfilter(dfin, flag=None, isday=None, date=None,
+
+[docs]
+def ustarfilter(dfin, flag=None, isday=None, date=None,
timeformat='%Y-%m-%d %H:%M:%S', colhead=None, ustarmin=0.01,
nboot=1, undef=-9999, plot=False, seasonout=False, nmon=3,
ntaclasses=7, corrcheck=0.5, nustarclasses=20,
@@ -189,7 +193,7 @@ Source code for hesseflux.ustarfilter
estr = ('Length of colhead must be number of columns in input'
' array. len(colhead)=' + str(len(colhead)) +
' shape(input)=(' + str(dfin.shape[0]) + ',' +
- str(dfin.shape[1])+').')
+ str(dfin.shape[1]) + ').')
raise ValueError(estr)
assert date is not None, 'Date must be given if input is numpy arrary.'
df['Datetime'] = pd.to_datetime(date, format=timeformat)
@@ -313,10 +317,10 @@ Source code for hesseflux.ustarfilter
yrmin = df.index.min().year
nyears = yrmax - yrmin + 1
ndays = (df.index.max() - df.index.min()).days + 1
- assert ndays//nyears > 360, 'Full years must be given.'
+ assert ndays // nyears > 360, 'Full years must be given.'
# calculate thresholds
- nperiod = 12//nmon # number of nmon periods per year
+ nperiod = 12 // nmon # number of nmon periods per year
if seasonout:
bustars = np.ones((nboot, nyears, nperiod)) * undef
else:
@@ -349,8 +353,8 @@ Source code for hesseflux.ustarfilter
flag_p = ( (~isday_b) &
(ff_b[fc_id] == 0) & (ff_b[ustar_id] == 0) &
(ff_b[ta_id] == 0) &
- (df_b.index.month > p*nmon) &
- (df_b.index.month <= (p+1)*nmon) )
+ (df_b.index.month > p * nmon) &
+ (df_b.index.month <= (p + 1) * nmon) )
fc_p = df_b.loc[flag_p, fc_id]
ustar_p = df_b.loc[flag_p, ustar_id]
ta_p = df_b.loc[flag_p, ta_id]
@@ -361,11 +365,11 @@ Source code for hesseflux.ustarfilter
continue
ta_q = np.quantile(
ta_p,
- np.arange(ntaclasses + 1, dtype=np.float) /
- np.float(ntaclasses))
+ np.arange(ntaclasses + 1, dtype=float) /
+ float(ntaclasses))
ta_q[0] -= 0.1 # 1st include min
for t in range(ntaclasses):
- iita = (ta_p > ta_q[t]) & (ta_p <= ta_q[t+1])
+ iita = (ta_p > ta_q[t]) & (ta_p <= ta_q[t + 1])
fc_t = fc_p[iita]
ustar_t = ustar_p[iita]
ta_t = ta_p[iita]
@@ -378,17 +382,17 @@ Source code for hesseflux.ustarfilter
# ustar classes
ustar_q = np.quantile(
ustar_t,
- np.arange(nustarclasses + 1, dtype=np.float) /
- np.float(nustarclasses))
+ np.arange(nustarclasses + 1, dtype=float) /
+ float(nustarclasses))
ustar_q[0] -= 0.01 # 1st include min
- for u in range(nustarclasses-1):
+ for u in range(nustarclasses - 1):
iiustar = ((ustar_t > ustar_q[u]) &
- (ustar_t <= ustar_q[u+1]))
+ (ustar_t <= ustar_q[u + 1]))
fc_u = fc_t[iiustar]
- fc_a = fc_t[ustar_t > ustar_q[u+1]]
+ fc_a = fc_t[ustar_t > ustar_q[u + 1]]
- if abs(fc_u.mean()) >= abs(plateaucrit*fc_a.mean()):
- custars.append(ustar_q[u+1])
+ if abs(fc_u.mean()) >= abs(plateaucrit * fc_a.mean()):
+ custars.append(ustar_q[u + 1])
break
# median of thresholds of all temperature classes =
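The loop above implements the plateau criterion of Papale et al. (Biogeosciences, 2006): within one temperature class, the u* threshold is the upper bound of the first u* class whose mean flux reaches plateaucrit times the mean flux of all higher classes. A self-contained sketch on toy data; the saturating flux, the helper name, and plateaucrit=0.95 (the value in Papale et al.) are illustrative assumptions:

    import numpy as np

    def plateau_threshold(ustar, fc, nclasses=20, plateaucrit=0.95):
        # quantile-based u* classes, first class widened to include
        # the minimum, as in the hunk above
        q = np.quantile(ustar, np.arange(nclasses + 1, dtype=float) / nclasses)
        q[0] -= 0.01
        for u in range(nclasses - 1):
            in_class = (ustar > q[u]) & (ustar <= q[u + 1])
            if abs(fc[in_class].mean()) >= abs(plateaucrit *
                                               fc[ustar > q[u + 1]].mean()):
                return q[u + 1]
        return None   # no plateau: the source falls back to the 90 % quantile

    rng = np.random.default_rng(0)
    u = rng.uniform(0.01, 0.8, 500)
    fc = -5. * np.minimum(u / 0.3, 1.) + 0.2 * rng.standard_normal(500)
    print(plateau_threshold(u, fc))   # near 0.3 for this toy flux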
@@ -423,7 +427,8 @@ Source code for hesseflux.ustarfilter
else:
flag_b = ( (~isday_b) &
(ff_b[ustar_id] == 0) )
- bustars[b, y] = np.quantile(df_b.loc[flag_b, ustar_id], 0.9)
+ bustars[b, y] = np.quantile(df_b.loc[flag_b, ustar_id],
+ 0.9)
# set minimum ustar threshold
bustars = np.maximum(bustars, ustarmin)
@@ -434,14 +439,14 @@ Source code for hesseflux.ustarfilter
# flag out with original DatetimeIndex
off = ustar_in.astype(int)
off[:] = 0
- ii = np.zeros(len(off), dtype=np.bool)
+ ii = np.zeros(len(off), dtype=bool)
if seasonout:
for y in range(nyears):
yy = yrmin + y
for p in range(nperiod):
iiyr = ( (df.index.year == yy) & # df DatetimeIndex
- (df.index.month > p*nmon) &
- (df.index.month <= (p+1)*nmon) )
+ (df.index.month > p * nmon) &
+ (df.index.month <= (p + 1) * nmon) )
ii[iiyr] = ustar_in[iiyr] < oustars[1, y, p]
else:
for y in range(nyears):
@@ -451,11 +456,14 @@ Source code for hesseflux.ustarfilter
off[ii] = 2 # original DatetimeIndex
if plot:
+ import matplotlib as mpl
+ mpl.use('PDF') # set directly after import matplotlib
+ from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
- import matplotlib.backends.backend_pdf as pdf
- pd.plotting.register_matplotlib_converters()
+ # import matplotlib.backends.backend_pdf as PdfPages
+ # pd.plotting.register_matplotlib_converters()
- pp = pdf.PdfPages('ustarfilter.pdf')
+ pp = PdfPages('ustarfilter.pdf')
if seasonout:
for y in range(nyears):
yy = yrmin + y
@@ -470,8 +478,8 @@ Source code for hesseflux.ustarfilter
flag_p = ( (~isday_f) &
(ff_f[fc_id] == 0) & (ff_f[ustar_id] == 0) &
(ff_f[ta_id] == 0) &
- (df_f.index.month > p*nmon) &
- (df_f.index.month <= (p+1)*nmon) )
+ (df_f.index.month > p * nmon) &
+ (df_f.index.month <= (p + 1) * nmon) )
fc_p = df_f.loc[flag_p, fc_id]
ustar_p = df_f.loc[flag_p, ustar_id]
@@ -522,6 +530,7 @@ Source code for hesseflux.ustarfilter
return oustars, off
+
if __name__ == '__main__':
import doctest
doctest.testmod()
diff --git a/docs/html/_modules/index.html b/docs/html/_modules/index.html
index 37ef691..89c1611 100644
--- a/docs/html/_modules/index.html
+++ b/docs/html/_modules/index.html
@@ -1,24 +1,24 @@
- Overview: module code — hesseflux 5.1.dev0 documentation
+ Overview: module code — hesseflux 5.1.dev2 documentation
diff --git a/docs/html/_sources/contents.rst.txt b/docs/html/_sources/contents.rst.txt
index 6a7737a..cd4301c 100644
--- a/docs/html/_sources/contents.rst.txt
+++ b/docs/html/_sources/contents.rst.txt
@@ -11,3 +11,5 @@ Contents
api
changelog
authors
+..
+ reddyproc
diff --git a/docs/html/_sources/userguide.rst.txt b/docs/html/_sources/userguide.rst.txt
index d328c0f..1033a16 100644
--- a/docs/html/_sources/userguide.rst.txt
+++ b/docs/html/_sources/userguide.rst.txt
@@ -2,34 +2,37 @@
User Guide
----------
-``hesseflux`` collects functions used for processing Eddy covariance data of the
-ICOS_ ecosystem site FR-Hes_.
+``hesseflux`` collects functions used for processing Eddy covariance
+data of the ICOS_ ecosystem site FR-Hes_.
-The post-processing functionality for Eddy flux data is similar to the R-package
-REddyProc_ and includes basically the steps described in `Papale et al.
-(Biogeosciences, 2006)`_ plus some extensions such as the daytime method of flux
-partitioning (`Lasslop et al., Global Change Biology 2010`_) and the estimation
-of uncertainties on the fluxes as in `Lasslop et al. (Biogeosci, 2008)`_.
+The post-processing functionality for Eddy flux data is similar to the
+R-package REddyProc_ and basically includes the steps described in
+`Papale et al. (Biogeosciences, 2006)`_ plus some extensions such as
+the daytime method of flux partitioning (`Lasslop et al., Global
+Change Biology 2010`_) and the estimation of uncertainties on the
+fluxes as in `Lasslop et al. (Biogeosci, 2008)`_.
-Only the post-processing steps are described here. We are happy to discuss any
-processing or post-processing directly. Contact us at mc (at) macu (dot) de.
+Only the post-processing steps are described here. We are happy to
+discuss any processing or post-processing directly. Contact us at mc
+(at) macu (dot) de.
europe-fluxdata.eu file format
==============================
-The first processing steps at the ICOS ecosystem site FR-Hes (not shown) brings
-the data in a format that can be submitted to the database
-`europe-fluxdata.eu`_. The database predates ICOS and is somewhat a precursor of
-the ICOS data processing.
+The first processing steps at the ICOS ecosystem site FR-Hes (not
+shown) bring the data into a format that can be submitted to the
+database `europe-fluxdata.eu`_. The database predates ICOS and is
+somewhat a precursor of the ICOS data processing.
-The file format of `europe-fluxdata.eu` is hence very similar to the ICOS
-format. The only known difference to us is the unit of atmospheric pressure,
-which is in hPa in `europe-fluxdata.eu`_ and in kPa in `ICOS ecosystems`_. The
-file format has notably one header line with variable names. There are no units
-in the file. ``hesseflux`` provides a little helper script
-`europe-fluxdata_units.py` in the `bin` directory that adds a second header line
-with units. The script can be run on the output as:
+The file format of `europe-fluxdata.eu`_ is hence very similar to the
+ICOS format. The only difference known to us is the unit of
+atmospheric pressure, which is in hPa in `europe-fluxdata.eu`_ and in
+kPa in `ICOS ecosystems`_. The file format has notably one header line
+with variable names. There are no units in the file. ``hesseflux``
+provides a little helper script `europe-fluxdata_units.py` in the
+`bin` directory that adds a second header line with units. The script
+can be run on the output as:
.. code-block:: bash
@@ -39,20 +42,21 @@ with units. The script can be run on the output as:
Post-processing Eddy covariance data
====================================
-The script `postproc_europe-fluxdata.py` in the `example` directory provides a
-template for post-processing data that is in the `europe-fluxdata.eu`_ file
-format. It basically makes all steps described in `Papale et al.
-(Biogeosciences, 2006)`_. The script is governed by a configuration file in
-Python's standard :mod:`configparser` format. The example configuration file
-`hesseflux_example.cfg` in the `example` directory is highly commented and
-should be (almost) self-explanatory. The script is called like:
+The script `postproc_europe-fluxdata.py` in the `example` directory
+provides a template for post-processing data that is in the
+`europe-fluxdata.eu`_ file format. It basically performs all steps
+described in `Papale et al. (Biogeosciences, 2006)`_. The script is
+governed by a configuration file in Python's standard
+:mod:`configparser` format. The example configuration file
+`hesseflux_example.cfg` in the `example` directory is highly commented
+and should be (almost) self-explanatory. The script is called like:
.. code-block:: bash
python postproc_europe-fluxdata.py hesseflux_example.cfg
-This script should be taken as a template for one's own post-processing but
-includes most standard post-processing steps.
+This script should be taken as a template for one's own
+post-processing; it already includes most standard post-processing steps.
Here we describe the main parts of the post-processing script.
@@ -60,8 +64,8 @@ Here we describe the main parts of the post-processing script.
Reading the configuration file
------------------------------
-The script `postproc_europe-fluxdata.py` starts by reading the configuration
-file `hesseflux_example.cfg`:
+The script `postproc_europe-fluxdata.py` starts by reading the
+configuration file `hesseflux_example.cfg`:
.. code-block:: python
@@ -74,8 +78,9 @@ file `hesseflux_example.cfg`:
config.read(configfile)
It then analyses the configuration options. The first section in the
-configuration file are the options controlling which steps shall be performed by
-the script. The section in the `hesseflux_example.cfg` looks like:
+configuration file contains the options controlling which steps shall
+be performed by the script. The section in `hesseflux_example.cfg`
+looks like:
.. code-block:: python
@@ -107,15 +112,17 @@ And the code in `postproc_europe-fluxdata.py` is:
fill = config['POSTSWITCH'].getboolean('fill', True)
fluxerr = config['POSTSWITCH'].getboolean('fluxerr', True)
-All options are boolean and set to `True` by default if they are not given in
-the configuration file. All post-processing steps except uncertainty estimation
-of flux data would be performed in the given example.
+All options are boolean and set to `True` by default if they are not
+given in the configuration file. All post-processing steps except
+uncertainty estimation of flux data would be performed in the given
+example.
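
For reference, a minimal standalone sketch of this option handling
(outside the script, file name `hesseflux_example.cfg` assumed) could
be:

.. code-block:: python

    import configparser

    config = configparser.ConfigParser()
    config.read('hesseflux_example.cfg')
    # missing options fall back to True, as in the script
    outlier = config['POSTSWITCH'].getboolean('outlier', True)
    ustar = config['POSTSWITCH'].getboolean('ustar', True)
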
Read the data
-------------
-The script would then read in the data. The section in the configuration file is:
+The script would then read in the data. The section in the
+configuration file is:
.. code-block:: python
@@ -159,9 +166,10 @@ The analysis of the options in `postproc_europe-fluxdata.py` is:
Note that strings are given without quotes in the configuration file.
-`inputfile` can be a single filename or a comma-separated list of filenames. If
-it is missing or empty, the script will try to open a GUI, where one can choose
-input files. The data will be appended if several input files are given.
+`inputfile` can be a single filename or a comma-separated list of
+filenames. If it is missing or empty, the script will try to open a
+GUI, where one can choose input files. The data will be appended if
+several input files are given.
The (first) input file is read as:
@@ -169,36 +177,37 @@ The (first) input file is read as:
import pandas as pd
- parser = lambda date: pd.to_datetime(date, format=timeformat)
infile = inputfile[0]
df = pd.read_csv(infile, sep, skiprows=skiprows, parse_dates=[0],
- date_parser=parser, index_col=0, header=0)
-
-:mod:`pandas` will use the first column as index (`index_col=0`), assuming that
-these are dates (`parse_dates=[0]`) in the format `timeformat`, where columns
-are separated by `sep`. The defaults follow the `europe-fluxdata.eu`_ format but
-similar formats may be used, and script and/or configuration file can be adapted
-easily. Only variable names (still) have to follow `europe-fluxdata.eu`_,
-`ICOS`_ or `Ameriflux`_ format at the moment. If the input file has a second
+ date_format=timeformat, index_col=0, header=0)
+
+:mod:`pandas` will use the first column as index (`index_col=0`),
+assuming that these are dates (`parse_dates=[0]`) in the format
+`timeformat`, where columns are separated by `sep`. The defaults
+follow the `europe-fluxdata.eu`_ format but similar formats may be
+used, and script and/or configuration file can be adapted easily. Only
+variable names (still) have to follow `europe-fluxdata.eu`_, `ICOS`_
+or `Ameriflux`_ format at the moment. If the input file has a second
header line with units, one can skip it giving `skiprows=[1]` (not
`skiprows=1`).
-All input files are supposed to be in the same format if `inputfile` is a
-comma-separated list of filenames, and they will be read with the same command
-above. The :mod:`pandas` dataframes (`df`) will simply be appended.
+All input files are supposed to be in the same format if `inputfile`
+is a comma-separated list of filenames, and they will be read with the
+same command above. The :mod:`pandas` dataframes (`df`) will simply be
+appended.
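
A hedged sketch of reading and appending several input files (the
variable names `inputfile`, `sep`, `skiprows` and `timeformat` are
assumed to come from the configuration parsing above):

.. code-block:: python

    import pandas as pd

    # read each file with the same options and append the dataframes
    dfs = [pd.read_csv(infile, sep=sep, skiprows=skiprows,
                       parse_dates=[0], date_format=timeformat,
                       index_col=0, header=0)
           for infile in inputfile]
    df = pd.concat(dfs, axis=0)
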
The flag dataframe
------------------
-All Not-a-Number (NaN) values will be set to `undef` and will be ignored in the
-following.
+All Not-a-Number (NaN) values will be set to `undef` and will be
+ignored in the following.
-This happens via a second dataframe (`dff`), having the same columns and index
-as the input dataframe `df`, representing quality flags. All cells that have a
-value other than `0` in the flag dataframe `dff` will be ignored in the
-dataframe `df`. This means all cells of `df` with `undef` will be set to `2` in
-`dff` immediately:
+This happens via a second dataframe (`dff`), having the same columns
+and index as the input dataframe `df`, representing quality flags. All
+cells that have a value other than `0` in the flag dataframe `dff`
+will be ignored in the dataframe `df`. This means all cells of `df`
+with `undef` will be set to `2` in `dff` immediately:
.. code-block:: python
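
    # illustrative sketch (the script's actual lines are elided in this
    # hunk): a flag dataframe like df, initialised to 0, with flag 2
    # where df equals undef
    dff = pd.DataFrame(0, index=df.index, columns=df.columns)
    dff[df == undef] = 2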
@@ -214,12 +223,13 @@ dataframe `df`. This means all cells of `df` with `undef` will be set to `2` in
Day / night
-----------
-Most post-processing routines differentiate between daytime and nighttime data.
-`Papale et al. (Biogeosciences, 2006)`_ use a threshold of 20 W m\ :sup:`-2` of
-global radiation to distinguish between day and night. `REddyProc`_ uses
-incoming shortwave radiation greater than 10 W m\ :sup:`2` as daytime. The
-shortwave radiation threshold `swthr` (same name as in ReddyProc) can be used to
-define the appropriate threshold. The default is 10 W m\ :sup:`2`. The column
+Most post-processing routines differentiate between daytime and
+nighttime data. `Papale et al. (Biogeosciences, 2006)`_ use a
+threshold of 20 W m\ :sup:`-2` of global radiation to distinguish
+between day and night. `REddyProc`_ uses incoming shortwave radiation
+greater than 10 W m\ :sup:`-2` as daytime. The shortwave radiation
+threshold `swthr` (same name as in REddyProc) can be used to define
+the appropriate threshold. The default is 10 W m\ :sup:`-2`. The column
`SW_IN_1_1_1` has to exist in the input data.
.. code-block:: python
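
    # illustrative sketch, not the script's exact lines: daytime mask
    # from incoming shortwave radiation; swthr from the configuration
    isday = df['SW_IN_1_1_1'] >= swthr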
@@ -231,8 +241,8 @@ define the appropriate threshold. The default is 10 W m\ :sup:`2`. The column
Data check
----------
-`postproc_europe-fluxdata.py` checks the units of air temperature (i.e. the
-first column starting with `TA_`).
+`postproc_europe-fluxdata.py` checks the units of air temperature
+(i.e. the first column starting with `TA_`).
.. code-block:: python
@@ -245,15 +255,17 @@ first column starting with `TA_`).
tkelvin = 0.
df.loc[dff[hout[0]]==0, hout[0]] += tkelvin
-:func:`_findfirststart(starts, names)` is a helper function that finds the first
-occurrence in `names` that starts with the string `starts`. This helper function
-is used for the moment until ``hesseflux`` has the functionality that the user
-can give individual variable names.
+:func:`_findfirststart(starts, names)` is a helper function that finds
+the first occurrence in `names` that starts with the string
+`starts`. This helper function is used for the moment, until
+``hesseflux`` allows the user to give individual variable names.
-The script calculates air vapour pressure deficit `VPD_PI_1_1_1` from air
-temperature and relative humidity (i.e. the first column starting with `RH_`) if
-not given in input data using the function :func:`esat` of `pyjams`_ for
-saturation vapour pressure:
+If not given in the input data, the script calculates air vapour
+pressure deficit `VPD_PI_1_1_1` from air temperature and relative
+humidity (i.e. the first column starting with `RH_`), using the
+function :func:`esat` of `pyjams`_ for saturation vapour pressure:
.. code-block:: python
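
    # illustrative sketch, not the script's exact lines: VPD (Pa) from
    # air temperature ta (K) and relative humidity rh (%); ta and rh
    # are hypothetical names here
    import pyjams as pj
    vpd = (1. - rh / 100.) * pj.esat(ta)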
@@ -303,8 +315,8 @@ It further assures that VPD is in Pa for further calculations.
vpdpa = 1. # Pa
df.loc[dff[hout[0]] == 0, hout[0]] *= vpdpa
-And finally determines the time intervals of the input data `dtsec` (s) and the
-number of time steps per day `ntday`.
+It finally determines the time interval of the input data
+`dtsec` (s) and the number of time steps per day `ntday`.
.. code-block:: python
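
    # illustrative sketch: time step in seconds and steps per day from
    # the DatetimeIndex
    dtsec = (df.index[1] - df.index[0]).seconds
    ntday = int(86400 // dtsec)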
@@ -316,10 +328,11 @@ number of time steps per day `ntday`.
Spike / outlier flagging
------------------------
-If `outlier=True` is set in the configuration file, spikes will be detected with
-the method given in `Papale et al. (Biogeosciences, 2006)`_. A median absolute
-deviation (MAD) filter will be used on the second derivatives of the time series
-in two-week chunks. The section in `hesseflux_example.cfg` looks like:
+If `outlier=True` is set in the configuration file, spikes will be
+detected with the method given in `Papale et al. (Biogeosciences,
+2006)`_. A median absolute deviation (MAD) filter will be used on the
+second derivatives of the time series in two-week chunks. The section
+in `hesseflux_example.cfg` looks like:
.. code-block:: python
@@ -338,29 +351,31 @@ in two-week chunks. The section in `hesseflux_example.cfg` looks like:
# int
deriv = 2
-`nfill` is the number of days that are treated at once. `nfill=1` means that the
-time series will be stepped through day by day. `nscan` are the days to be
-considered when calculating the mean absolute deviations. `nscan=15` means that
-7 days before the fill day, the fill day itself, and 7 days after the fill day
-will be used for the robust statistic. However, only spikes detected within the
-inner `nfill` days will be flagged in the `nscan` days. Spikes will be detected
-if they deviate more than `z` mean absolute deviations from the median.
-
-For example, `nfill=3`, `nscan=15`, and `z=7` means that the time series will be
-treated in steps of 3 days. Each 3 days, MAD statistics will be calculated using
-15 days around the middle of the 3 days. Then all values within the 3 days that
-deviate more 7 mean absolute deviations from the median of the 15 days will be
-flagged.
-
-`deriv=2` applies the MAD filter to the second derivatives. A spike has
-normally a strong curvature and hence a large second derivative. `deriv=1` is
-currently not implemented. `deriv=0` applies the filter to the raw time series.
-This might be useful to find outliers in smooth time series such as soil
-moisture. `deriv=0` is also used on the 20 Hz Eddy raw data in the quality and
-uncertainty strategy of `Mauder et al. (Agric Forest Meteo, 2013)`_.
-
-The default values, if options are not given in the configuration file, are
-`nscan=15`, `nfill=1`, `z=7`, and `deriv=2`.
+`nfill` is the number of days that are treated at once. `nfill=1`
+means that the time series will be stepped through day by day.
+`nscan` is the number of days considered when calculating the median
+absolute deviations. `nscan=15` means that 7 days before the fill
+day, the fill day itself, and 7 days after the fill day will be used
+for the robust statistic. However, only spikes detected within the
+inner `nfill` days will be flagged in the `nscan` days. Spikes will
+be detected if they deviate more than `z` median absolute deviations
+from the median.
+
+For example, `nfill=3`, `nscan=15`, and `z=7` means that the time
+series will be treated in steps of 3 days. Every 3 days, MAD
+statistics will be calculated using 15 days around the middle of the
+3 days. Then all values within the 3 days that deviate more than 7
+median absolute deviations from the median of the 15 days will be
+flagged.
+
+`deriv=2` applies the MAD filter to the second derivatives. A spike
+normally has a strong curvature and hence a large second
+derivative. `deriv=1` is currently not implemented. `deriv=0` applies
+the filter to the raw time series. This might be useful to find
+outliers in smooth time series such as soil moisture. `deriv=0` is
+also used on the 20 Hz Eddy raw data in the quality and uncertainty
+strategy of `Mauder et al. (Agric Forest Meteo, 2013)`_.
+
+The default values, if options are not given in the configuration
+file, are `nscan=15`, `nfill=1`, `z=7`, and `deriv=2`.
`postproc_europe-fluxdata.py` calls the spike detection like this:
@@ -378,17 +393,17 @@ The default values, if options are not given in the configuration file, are
for ii, hh in enumerate(hout):
dff.loc[sflag[hh] == 2, hh] = 3
-The function :func:`madspikes` returns flag columns for the input variables
-where spiked data is flagged as 2. The scripts sets the corresponding columns in
-the flag dataframe `dff` to 3 (3 is used just to keep track where the flag was
-set).
+The function :func:`~hesseflux.madspikes.madspikes` returns flag
+columns for the input variables where spiked data is flagged as 2. The
+script sets the corresponding columns in the flag dataframe `dff` to
+3 (3 is used just to keep track where the flag was set).
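
To make the MAD criterion concrete, here is a minimal sketch on a
plain array (`deriv=0`, i.e. no derivatives, with the customary
1/0.6745 scaling of `Papale et al. (Biogeosciences, 2006)`_; this is
an illustration, not the exact hesseflux implementation):

.. code-block:: python

    import numpy as np

    x = np.array([1., 1.2, 0.9, 15., 1.1, 0.8])  # made-up series
    med = np.median(x)
    mad = np.median(np.abs(x - med))
    z = 7.
    isspike = np.abs(x - med) > (z * mad / 0.6745)  # flags the 15.
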
u* filtering
------------
-If `ustar=True` is set in the configuration file, a u*-filter will be applied
-following `Papale et al. (Biogeosciences, 2006)`_.
+If `ustar=True` is set in the configuration file, a u*-filter will be
+applied following `Papale et al. (Biogeosciences, 2006)`_.
The section in `hesseflux_example.cfg` looks like:
@@ -416,24 +431,27 @@ The section in `hesseflux_example.cfg` looks like:
# bool
applyustarflag = True
-A minimum threshold `ustarmin` is defined under which data is flagged by
-default. `Papale et al. (Biogeosciences, 2006)`_ suggest 0.1 for forests and
-0.01 for other land cover types. `postproc_europe-fluxdata.py` sets 0.01 as its
-default value. Uncertainty of the u* threshold is calculated via bootstrapping
-in Papale et al. `nboot` gives the number of bootstrapping for the uncertainty
-estimate of the u* threshold. The algorithm divides the input data in 6
-temperature classes and 20 u* classes within each temperature class per season.
-It then determines the threshold for each season as the average u* of the u*
-class where the average CO2 flux is less than `plateaucrit` times the average of
-all CO2 fluxes with u* greater than the u* class. `Papale et al.
-(Biogeosciences, 2006)`_ took 6 temperature classes and `plateaucrit=0.99`,
-while `REddyProc`_ takes 7 temperature classes and `plateaucrit=0.95`, which are
-also the defaults in ``hesseflux``. `Papale et al. (Biogeosciences, 2006)`_ also
-used the maximum of the four seasonal u* thresholds as the threshold applied to
-all the year. If `seasonout=True`, the seasonal u* thresholds will be applied
-instead of the maximum of four seasonal u* thresholds. One can also set
-`applyustarflag=False` to just calculate the u* thresholds without applying them
-to experiment with different parameter values.
+A minimum threshold `ustarmin` is defined below which data is flagged
+by default. `Papale et al. (Biogeosciences, 2006)`_ suggest 0.1 for
+forests and 0.01 for other land cover
+types. `postproc_europe-fluxdata.py` sets 0.01 as its default
+value. Uncertainty of the u* threshold is calculated via
+bootstrapping as in Papale et al. `nboot` gives the number of
+bootstrap samples for the uncertainty estimate of the u*
+threshold. The algorithm divides the input data into 6 temperature
+classes and 20 u* classes within each temperature class per
+season. It then determines the threshold for each season as the
+average u* of the u* class where the average CO2 flux first reaches
+`plateaucrit` times the average of all CO2 fluxes with u* greater
+than the u* class. `Papale et al. (Biogeosciences, 2006)`_ took 6
+temperature classes and `plateaucrit=0.99`, while `REddyProc`_ takes
+7 temperature classes and `plateaucrit=0.95`, which are also the
+defaults in ``hesseflux``. `Papale et al. (Biogeosciences, 2006)`_
+also used the maximum of the four seasonal u* thresholds as the
+threshold applied to the whole year. If `seasonout=True`, the
+seasonal u* thresholds will be applied instead of the maximum of the
+four seasonal u* thresholds. One can also set `applyustarflag=False`
+to just calculate the u* thresholds without applying them, in order
+to experiment with different parameter values.
The u*-filtering is then performed as:
@@ -469,16 +487,17 @@ The u*-filtering is then performed as:
for ii, hh in enumerate(hout):
dff.loc[flag == 2, hh] = 5
-The function :func:`ustarfilter` returns the u* 5, 50 and 95 percentiles of the
-bootstrapped u* thresholds as well as flag columns, which is 0 except where u*
-is smaller than the median u*-threshold. The scripts sets the columns of the
-Eddy fluxes in the flag dataframe `dff` to 5 (5 to keep track where the flag was
-set).
+The function :func:`~hesseflux.ustarfilter.ustarfilter` returns the
+u* 5, 50 and 95 percentiles of the bootstrapped u* thresholds as well
+as flag columns, which are 0 except where u* is smaller than the
+median u*-threshold. The script sets the columns of the Eddy fluxes
+in the flag dataframe `dff` to 5 (5 to keep track where the flag was
+set).
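
A hedged usage sketch (the return values follow the description above;
the call shape and keyword names are assumptions and should be checked
against the function's docstring):

.. code-block:: python

    import hesseflux as hf

    # assumed call shape: returns bootstrapped threshold percentiles
    # and a flag column
    ustars, flag = hf.ustarfilter(df, flag=dff, ustarmin=0.01,
                                  nboot=1, plateaucrit=0.95,
                                  seasonout=False, undef=undef)
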
-One might not want to do u* filtering, but use for example Integral Turbulence
-Characteristics (ITC) that were calculated, for example, with
-`EddyPro`_\ :sup:`(R)`. These should be set right at the start after reading the input
-data into the dataframe `df` and producing the flag dataframe `dff` like:
+One might not want to do u* filtering, but instead use Integral
+Turbulence Characteristics (ITC) that were calculated, for example,
+with `EddyPro`_\ :sup:`(R)`. These should be set right at the start
+after reading the input data into the dataframe `df` and producing the
+flag dataframe `dff` like:
.. code-block:: python
@@ -488,12 +507,13 @@ data into the dataframe `df` and producing the flag dataframe `dff` like:
Partitioning of Net Ecosystem Exchange
--------------------------------------
-If `partition=True` is set in the configuration file, two estimates of Gross
-Primary Productivity (GPP) and Ecosystem Respiration (RECO) are calculated:
-firstly with the method of `Reichstein et al. (Glob Change Biolo, 2005)`_ using
-nighttime data only, and secondly with the method of `Lasslop et al. (Glob
-Change Biolo, 2010)`_ using a light-response curve on 'daytime' data. The
-configuration `hesseflux_example.cfg` gives only one option in this section:
+If `partition=True` is set in the configuration file, two estimates of
+Gross Primary Productivity (GPP) and Ecosystem Respiration (RECO) are
+calculated: firstly with the method of `Reichstein et al. (Glob Change
+Biolo, 2005)`_ using nighttime data only, and secondly with the method
+of `Lasslop et al. (Glob Change Biolo, 2010)`_ using a light-response
+curve on 'daytime' data. The configuration `hesseflux_example.cfg`
+gives only one option in this section:
.. code-block:: python
@@ -503,11 +523,11 @@ configuration `hesseflux_example.cfg` gives only one option in this section:
# bool
nogppnight = False
-Many people find it unaesthetic that the 'daytime' method gives negative GPP at
-night. We esteem this the correct behaviour, reflecting the uncertainty in the
-gross flux estimates. However, one can set `nogppnight=True` to set GPP=0 at
-night and RECO=NEE in this case, the latter having then all variability of the
-net fluxes.
+Many people find it unaesthetic that the 'daytime' method gives
+negative GPP at night. We esteem this the correct behaviour,
+reflecting the uncertainty in the gross flux estimates. However, one
+can set `nogppnight=True` to set GPP=0 at night and RECO=NEE in this
+case, the latter then carrying all the variability of the net fluxes.
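
For orientation, a hedged sketch of the two partitioning flavours (the
function name `nee2gpp` and its `method` keyword are assumptions about
the hesseflux API; exact arguments may differ):

.. code-block:: python

    import hesseflux as hf

    # nighttime method of Reichstein et al. (2005)
    dfpartn = hf.nee2gpp(df, flag=dff, isday=isday, undef=undef,
                         method='reichstein', nogppnight=nogppnight)
    # 'daytime' method of Lasslop et al. (2010)
    dfpartd = hf.nee2gpp(df, flag=dff, isday=isday, undef=undef,
                         method='lasslop', nogppnight=nogppnight)
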
The partitioning is calculated as:
@@ -550,10 +570,11 @@ The partitioning is calculated as:
Gap-filling / Imputation
------------------------
-Marginal Distribution Sampling (MDS) of `Reichstein et al. (Glob Change Biolo,
-2005)`_ is implemented as imputation or so-called gap-filling algorithm. The
-algorithm looks for similar conditions in the vicinity of a missing data point,
-if option `fill=True`. The configuration file is:
+Marginal Distribution Sampling (MDS) of `Reichstein et al. (Glob
+Change Biolo, 2005)`_ is implemented as an imputation or so-called
+gap-filling algorithm. If option `fill=True`, the algorithm looks for
+similar conditions in the vicinity of a missing data point. The
+configuration file is:
.. code-block:: python
@@ -571,12 +592,13 @@ if option `fill=True`. The configuration file is:
# avoid extrapolation in gaps longer than longgap days
longgap = 60
-If a flux data point is missing, times with incoming shortwave radiation in the
-range of `sw_dev` around the actual shortwave radiation will be looked for, as
-well as air temperatures within `ta_dev` and air vapour pressure deficit within
-`vpd_dev`. The mean of flux values at the similar conditions is then taken as
-fill value. The function does not fill long gaps longer than `longgap` days. A
-good summary is given in Fig. A1 of `Reichstein et al. (Glob Change Biolo,
+If a flux data point is missing, the algorithm looks for times with
+incoming shortwave radiation within `sw_dev` of the actual shortwave
+radiation, air temperature within `ta_dev`, and air vapour pressure
+deficit within `vpd_dev`. The mean of the flux values under these
+similar conditions is then taken as fill value. The function does not
+fill gaps longer than `longgap` days. A good summary is given in
+Fig. A1 of `Reichstein et al. (Glob Change Biolo,
2005)`_.
The script invokes MDS as:
@@ -613,22 +635,25 @@ The script invokes MDS as:
df = pd.concat([df, df_f], axis=1)
dff = pd.concat([dff, dff_f], axis=1)
-The function :func:`gapfill` returns the filled columns `df_f` as well as flag
-columns `dff_f` indicating fill quality. Fill quality A-C of `Reichstein et al.
-(Glob Change Biolo, 2005)`_ are translated to quality flags 1-3.
+The function :func:`~hesseflux.gapfill.gapfill` returns the filled
+columns `df_f` as well as flag columns `dff_f` indicating fill
+quality. Fill quality A-C of `Reichstein et al. (Glob Change Biolo,
+2005)`_ are translated to quality flags 1-3.
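
A hedged sketch of the call (the threshold keywords follow the
configuration section above; check the docstring for the exact
signature):

.. code-block:: python

    import hesseflux as hf

    df_f, dff_f = hf.gapfill(df, flag=dff, sw_dev=sw_dev,
                             ta_dev=ta_dev, vpd_dev=vpd_dev,
                             longgap=longgap, undef=undef, err=False)
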
Uncertainty estimates of flux data
----------------------------------
`Lasslop et al. (Biogeosci, 2008)`_ presented an algorithm to estimate
-uncertainties of Eddy covariance fluxes using Marginal Distribution Sampling
-(MDS). The gap-filling function :func:`gapfill` can be used for uncertainty
-estimation giving the keyword `err=True`. The same thresholds as for gap-filling
-are used.
+uncertainties of Eddy covariance fluxes using Marginal Distribution
+Sampling (MDS). The gap-filling function
+:func:`~hesseflux.gapfill.gapfill` can be used for uncertainty
+estimation by giving the keyword `err=True`. The same thresholds as
+for gap-filling are used.
-The script `postproc_europe-fluxdata.py` uses the function :func:`gapfill` to
-calculate flux uncertainties like:
+The script `postproc_europe-fluxdata.py` uses the function
+:func:`~hesseflux.gapfill.gapfill` to calculate flux uncertainties
+like:
.. code-block:: python
@@ -667,17 +692,17 @@ calculate flux uncertainties like:
for cc in range(len(colin)):
dff[colout[cc]] = dff[colin[cc]]
-We recommend, however, to calculate flux uncertainties with the Eddy covariance
-raw data as described in `Mauder et al. (Agric Forest Meteo, 2013)`_. This is
-for example implemented in the processing softwares `EddyPro`_\ :sup:`(R)` or
-`TK3`_.
+We recommend, however, to calculate flux uncertainties with the Eddy
+covariance raw data as described in `Mauder et al. (Agric Forest
+Meteo, 2013)`_. This is, for example, implemented in the processing
+software `EddyPro`_\ :sup:`(R)` or `TK3`_.
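
A hedged sketch of the uncertainty call, mirroring the gap-filling
call above but with `err=True`:

.. code-block:: python

    df_e, dff_e = hf.gapfill(df, flag=dff, sw_dev=sw_dev,
                             ta_dev=ta_dev, vpd_dev=vpd_dev,
                             longgap=longgap, undef=undef, err=True)
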
Writing the output file
-----------------------
The dataframe is written to the output file with :mod:`pandas`
-:func:`pandas.Dataframe.to_csv`:
+:func:`~pandas.DataFrame.to_csv` method:
.. code-block:: python
@@ -702,13 +727,15 @@ The configuration for output is:
# bool
outflagcols = False
-If `outputfile` is missing or empty, the script will try to open a GUI, where
-one can choose an output directory and the filename will then be name of the
-configuration file with the suffix '.csv'. If `outundef=True` then all values in
-`df` with a flag value in `dff` greater than zero will be set to `undef`. The
-script can also add flag columns, prefixed with `flag_`, for each column in
-`df`, if `outflagcols=True`. The script will always output the columns with the
-flags for fill quality if gap-filling was performed: option `fill=True`.
+If `outputfile` is missing or empty, the script will try to open a
+GUI, where one can choose an output directory; the filename will
+then be the name of the configuration file with the suffix '.csv'.
+If `outundef=True` then all values in `df` with a flag value in
+`dff` greater than zero will be set to `undef`. The script can also
+add flag columns, prefixed with `flag_`, for each column in `df`, if
+`outflagcols=True`. The script will always output the columns with
+the flags for fill quality if gap-filling was performed: option
+`fill=True`.
The whole code to write the output file is:
@@ -724,7 +751,7 @@ The whole code to write the output file is:
if outundef:
for cc in df.columns:
if cc.split('_')[-4] != 'f': # exclude gap-filled columns
- df[cc].where(dff[cc] == 0, other=undef, inplace=True)
+ df[cc] = df[cc].where(dff[cc] == 0, other=undef)
if outflagcols:
def _add_flag(c):
return 'flag_' + c
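
A hedged sketch of the final write with :func:`~pandas.DataFrame.to_csv`
(the keyword values are assumed from the configuration section above):

.. code-block:: python

    df.to_csv(outputfile, sep=sep, na_rep=str(undef), index=True,
              date_format=timeformat)
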
diff --git a/docs/html/_static/alabaster.css b/docs/html/_static/alabaster.css
index 517d0b2..e3174bf 100644
--- a/docs/html/_static/alabaster.css
+++ b/docs/html/_static/alabaster.css
@@ -69,6 +69,11 @@ div.relations {
}
+div.sphinxsidebar {
+ max-height: 100%;
+ overflow-y: auto;
+}
+
div.sphinxsidebar a {
color: #444;
text-decoration: none;
@@ -155,6 +160,14 @@ div.sphinxsidebar input {
font-size: 1em;
}
+div.sphinxsidebar #searchbox input[type="text"] {
+ width: 160px;
+}
+
+div.sphinxsidebar .search > div {
+ display: table-cell;
+}
+
div.sphinxsidebar hr {
border: none;
height: 1px;
@@ -638,15 +651,7 @@ a:hover tt, a:hover code {
display: none!important;
}
-/* Make nested-list/multi-paragraph items look better in Releases changelog
- * pages. Without this, docutils' magical list fuckery causes inconsistent
- * formatting between different release sub-lists.
- */
-div#changelog > div.section > ul > li > p:only-child {
- margin-bottom: 0;
-}
-
-/* Hide fugly table cell borders in ..bibliography:: directive output */
+/* Hide ugly table cell borders in ..bibliography:: directive output */
table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
border: none;
/* Below needed in some edge cases; if not applied, bottom shadows appear */
diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css
index 7577acb..e5179b7 100644
--- a/docs/html/_static/basic.css
+++ b/docs/html/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -222,7 +222,7 @@ table.modindextable td {
/* -- general body styles --------------------------------------------------- */
div.body {
- min-width: 360px;
+ min-width: inherit;
max-width: 800px;
}
@@ -237,6 +237,10 @@ a.headerlink {
visibility: hidden;
}
+a:visited {
+ color: #551A8B;
+}
+
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
@@ -670,6 +674,16 @@ dd {
margin-left: 30px;
}
+.sig dd {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.sig dl {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
dl > dd:last-child,
dl > dd:last-child > :last-child {
margin-bottom: 0;
@@ -738,6 +752,14 @@ abbr, acronym {
cursor: help;
}
+.translated {
+ background-color: rgba(207, 255, 207, 0.2)
+}
+
+.untranslated {
+ background-color: rgba(255, 207, 207, 0.2)
+}
+
/* -- code displays --------------------------------------------------------- */
pre {
diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js
index d06a71d..4d67807 100644
--- a/docs/html/_static/doctools.js
+++ b/docs/html/_static/doctools.js
@@ -4,7 +4,7 @@
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js
index 09a35b7..5aed7fa 100644
--- a/docs/html/_static/documentation_options.js
+++ b/docs/html/_static/documentation_options.js
@@ -1,6 +1,5 @@
-var DOCUMENTATION_OPTIONS = {
- URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '5.1.dev0',
+const DOCUMENTATION_OPTIONS = {
+ VERSION: '5.1.dev2',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
diff --git a/docs/html/_static/language_data.js b/docs/html/_static/language_data.js
index 250f566..367b8ed 100644
--- a/docs/html/_static/language_data.js
+++ b/docs/html/_static/language_data.js
@@ -5,7 +5,7 @@
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -13,7 +13,7 @@
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
-/* Non-minified version is copied as a separate JS file, is available */
+/* Non-minified version is copied as a separate JS file, if available */
/**
* Porter Stemmer
diff --git a/docs/html/_static/pygments.css b/docs/html/_static/pygments.css
index 691aeb8..0d49244 100644
--- a/docs/html/_static/pygments.css
+++ b/docs/html/_static/pygments.css
@@ -17,6 +17,7 @@ span.linenos.special { color: #000000; background-color: #ffffc0; padding-left:
.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #A00000 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
.highlight .gr { color: #FF0000 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.highlight .gi { color: #00A000 } /* Generic.Inserted */
diff --git a/docs/html/_static/searchtools.js b/docs/html/_static/searchtools.js
index 97d56a7..92da3f8 100644
--- a/docs/html/_static/searchtools.js
+++ b/docs/html/_static/searchtools.js
@@ -4,7 +4,7 @@
*
* Sphinx JavaScript utilities for the full-text search.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -57,12 +57,12 @@ const _removeChildren = (element) => {
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
-const _displayItem = (item, searchTerms) => {
+const _displayItem = (item, searchTerms, highlightTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
- const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+ const contentRoot = document.documentElement.dataset.content_root;
const [docName, title, anchor, descr, score, _filename] = item;
@@ -75,28 +75,35 @@ const _displayItem = (item, searchTerms) => {
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
- requestUrl = docUrlRoot + dirname;
+ requestUrl = contentRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
- requestUrl = docUrlRoot + docName + docFileSuffix;
+ requestUrl = contentRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
- if (descr)
+ if (descr) {
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
+ // highlight search terms in the description
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ }
else if (showSearchSummary)
fetch(requestUrl)
.then((responseData) => responseData.text())
.then((data) => {
if (data)
listItem.appendChild(
- Search.makeSearchSummary(data, searchTerms)
+ Search.makeSearchSummary(data, searchTerms, anchor)
);
+ // highlight search terms in the summary
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
});
Search.output.appendChild(listItem);
};
@@ -109,26 +116,43 @@ const _finishSearch = (resultCount) => {
);
else
Search.status.innerText = _(
- `Search finished, found ${resultCount} page(s) matching the search query.`
- );
+ "Search finished, found ${resultCount} page(s) matching the search query."
+ ).replace('${resultCount}', resultCount);
};
const _displayNextItem = (
results,
resultCount,
- searchTerms
+ searchTerms,
+ highlightTerms,
) => {
// results left, load the summary and display it
// this is intended to be dynamic (don't sub resultsCount)
if (results.length) {
- _displayItem(results.pop(), searchTerms);
+ _displayItem(results.pop(), searchTerms, highlightTerms);
setTimeout(
- () => _displayNextItem(results, resultCount, searchTerms),
+ () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
@@ -152,13 +176,26 @@ const Search = {
_queued_query: null,
_pulse_status: -1,
- htmlToText: (htmlString) => {
+ htmlToText: (htmlString, anchor) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
- htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
+ for (const removalQuery of [".headerlinks", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
const docContent = htmlElement.querySelector('[role="main"]');
- if (docContent !== undefined) return docContent.textContent;
+ if (docContent) return docContent.textContent;
+
console.warn(
- "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
);
return "";
},
@@ -231,16 +268,7 @@ const Search = {
else Search.deferQuery(query);
},
- /**
- * execute search (requires search index to be loaded)
- */
- query: (query) => {
- const filenames = Search._index.filenames;
- const docNames = Search._index.docnames;
- const titles = Search._index.titles;
- const allTitles = Search._index.alltitles;
- const indexEntries = Search._index.indexentries;
-
+ _parseQuery: (query) => {
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
@@ -276,16 +304,32 @@ const Search = {
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
- // array of [docname, title, anchor, descr, score, filename]
- let results = [];
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
_removeChildren(document.getElementById("search-progress"));
- const queryLower = query.toLowerCase();
+ const queryLower = query.toLowerCase().trim();
for (const [title, foundTitles] of Object.entries(allTitles)) {
- if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
- results.push([
+ normalResults.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
@@ -300,46 +344,47 @@ const Search = {
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
- for (const [file, id] of foundEntries) {
- let score = Math.round(100 * queryLower.length / entry.length)
- results.push([
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
- ]);
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
}
}
}
// lookup as object
objectTerms.forEach((term) =>
- results.push(...Search.performObjectSearch(term, objectTerms))
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
- results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
- if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
-
- // now sort the results by score (in opposite order of appearance, since the
- // display function below uses pop() to retrieve items) and then
- // alphabetically
- results.sort((a, b) => {
- const leftScore = a[4];
- const rightScore = b[4];
- if (leftScore === rightScore) {
- // same score: sort alphabetically
- const leftTitle = a[1].toLowerCase();
- const rightTitle = b[1].toLowerCase();
- if (leftTitle === rightTitle) return 0;
- return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
- }
- return leftScore > rightScore ? 1 : -1;
- });
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
@@ -353,14 +398,19 @@ const Search = {
return acc;
}, []);
- results = results.reverse();
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
- _displayNextItem(results, results.length, searchTerms);
+ _displayNextItem(results, results.length, searchTerms, highlightTerms);
},
/**
@@ -458,14 +508,18 @@ const Search = {
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
- Object.keys(terms).forEach((term) => {
- if (term.match(escapedWord) && !terms[word])
- arr.push({ files: terms[term], score: Scorer.partialTerm });
- });
- Object.keys(titleTerms).forEach((term) => {
- if (term.match(escapedWord) && !titleTerms[word])
- arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
- });
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
}
// no match but word was a required one
@@ -488,9 +542,8 @@ const Search = {
// create the mapping
files.forEach((file) => {
- if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
- fileMap.get(file).push(word);
- else fileMap.set(file, [word]);
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
});
});
@@ -541,8 +594,8 @@ const Search = {
* search summary for a given text. keywords is a list
* of stemmed words.
*/
- makeSearchSummary: (htmlText, keywords) => {
- const text = Search.htmlToText(htmlText);
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
if (text === "") return null;
const textLower = text.toLowerCase();
diff --git a/docs/html/_static/sphinx_highlight.js b/docs/html/_static/sphinx_highlight.js
index aae669d..8a96c69 100644
--- a/docs/html/_static/sphinx_highlight.js
+++ b/docs/html/_static/sphinx_highlight.js
@@ -29,14 +29,19 @@ const _highlight = (node, addItems, text, className) => {
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ const rest = document.createTextNode(val.substr(pos + text.length));
parent.insertBefore(
span,
parent.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
+ rest,
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
+ /* There may be more occurrences of search term in this node. So call this
+ * function recursively on the remaining fragment.
+ */
+ _highlight(rest, addItems, text, className);
if (isInSVG) {
const rect = document.createElementNS(
@@ -140,5 +145,10 @@ const SphinxHighlight = {
},
};
-_ready(SphinxHighlight.highlightSearchWords);
-_ready(SphinxHighlight.initEscapeListener);
+_ready(() => {
+ /* Do not call highlightSearchWords() when we are on the search page.
+ * It will highlight words from the *previous* search query.
+ */
+ if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+ SphinxHighlight.initEscapeListener();
+});
diff --git a/docs/html/api.html b/docs/html/api.html
index ee404fc..41f0d59 100644
--- a/docs/html/api.html
+++ b/docs/html/api.html
@@ -1,18 +1,17 @@
- API Reference — hesseflux 5.1.dev0 documentation
+ API Reference — hesseflux 5.1.dev2 documentation
@@ -34,7 +34,7 @@
-API Reference¶
+API Reference¶
hesseflux provides functions used in the processing and
post-processing of the Eddy covariance flux data
It was developed for the ICOS ecosystem site FR-Hes.
@@ -52,7 +52,7 @@
-Subpackages¶
+Subpackages¶