From 92eb5fc1a611b8f44c138d1198fe3e901956ed66 Mon Sep 17 00:00:00 2001 From: Matthias Cuntz Date: Sun, 16 Jun 2024 19:21:42 +0200 Subject: [PATCH] Removed all warning with new pandas and numpy versions --- CHANGELOG.rst | 3 + docs/html/.buildinfo | 2 +- docs/html/_modules/hesseflux/gapfill.html | 112 ++-- docs/html/_modules/hesseflux/madspikes.html | 62 ++- docs/html/_modules/hesseflux/nee2gpp.html | 515 +++++++++++------- docs/html/_modules/hesseflux/ustarfilter.html | 89 +-- docs/html/_modules/index.html | 30 +- docs/html/_sources/contents.rst.txt | 2 + docs/html/_sources/userguide.rst.txt | 387 +++++++------ docs/html/_static/alabaster.css | 23 +- docs/html/_static/basic.css | 26 +- docs/html/_static/doctools.js | 2 +- docs/html/_static/documentation_options.js | 5 +- docs/html/_static/language_data.js | 4 +- docs/html/_static/pygments.css | 1 + docs/html/_static/searchtools.js | 191 ++++--- docs/html/_static/sphinx_highlight.js | 16 +- docs/html/api.html | 38 +- docs/html/authors.html | 36 +- docs/html/changelog.html | 36 +- docs/html/contents.html | 36 +- docs/html/gapfill.html | 62 +-- docs/html/genindex.html | 30 +- docs/html/index.html | 54 +- docs/html/madspikes.html | 57 +- docs/html/nee2gpp.html | 115 ++-- docs/html/objects.inv | Bin 514 -> 543 bytes docs/html/py-modindex.html | 30 +- docs/html/search.html | 36 +- docs/html/searchindex.js | 2 +- docs/html/userguide.html | 441 ++++++++------- docs/html/ustarfilter.html | 70 +-- docs/source/conf.py | 4 +- docs/source/contents.rst | 2 + docs/source/userguide.rst | 387 +++++++------ example/hesseflux_example.cfg | 2 +- example/postproc_europe-fluxdata.py | 17 +- src/hesseflux/gapfill.py | 77 +-- src/hesseflux/madspikes.py | 27 +- src/hesseflux/nee2gpp.py | 482 +++++++++------- src/hesseflux/ustarfilter.py | 54 +- 41 files changed, 1998 insertions(+), 1567 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fab04ed..ac9b9f2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,9 @@ Changelog All 
notable changes after its initial release in May 2020 (v2.0) are documented in this file. +v5.1 (??? 2024) + * Removed all warning with new pandas and numpy versions. + v5.0 (Jan 2023) * Dropped support for Python 3.6 because cannot test it anymore. * Add timecolumns and ftimeformat to config and post-processing file in diff --git a/docs/html/.buildinfo b/docs/html/.buildinfo index ed2c7d4..de007f8 100644 --- a/docs/html/.buildinfo +++ b/docs/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: a229c28f73769ec9b4c577ab465ca4f4 +config: 52be7eb0a3df7d9001d8779717d404cb tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/html/_modules/hesseflux/gapfill.html b/docs/html/_modules/hesseflux/gapfill.html index e7700b6..2023604 100644 --- a/docs/html/_modules/hesseflux/gapfill.html +++ b/docs/html/_modules/hesseflux/gapfill.html @@ -1,24 +1,24 @@ - - + - hesseflux.gapfill — hesseflux 5.1.dev0 documentation - - - - - - + hesseflux.gapfill — hesseflux 5.1.dev2 documentation + + + + + + + + - @@ -74,7 +74,9 @@

Source code for hesseflux.gapfill

 __all__ = ['gapfill']
 
 
-
[docs]def gapfill(dfin, flag=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', +
+[docs] +def gapfill(dfin, flag=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None, sw_dev=50., ta_dev=2.5, vpd_dev=5., longgap=60, fullday=False, undef=-9999, ddof=1, @@ -353,10 +355,10 @@

Source code for hesseflux.gapfill

             break
     astr = 'Global radiation with name SW or starting with SW_'
     astr = astr + ' must be in input.'
-    assert sw_id,  astr
+    assert sw_id, astr
     astr = 'Air temperature with name TA or starting with TA_'
     astr = astr + ' must be in input.'
-    assert ta_id,  astr
+    assert ta_id, astr
     astr = 'Vapour pressure deficit with name VPD or starting'
     astr = astr + ' with VPD_ must be in input.'
     assert vpd_id, astr
@@ -421,11 +423,11 @@ 

Source code for hesseflux.gapfill

         if firstvalid > nn:
             if verbose > 1:
                 print('    Large margin at beginning: ', firstvalid)
-            largegap[0:(firstvalid-nn)] = True
-        if lastvalid < (ndata-nn):
+            largegap[0:(firstvalid - nn)] = True
+        if lastvalid < (ndata - nn):
             if verbose > 1:
-                print('    Large margin at end: ', lastvalid-nn)
-            largegap[(lastvalid+nn):] = True
+                print('    Large margin at end: ', lastvalid - nn)
+            largegap[(lastvalid + nn):] = True
 
         # Large gaps
 
@@ -439,12 +441,12 @@ 

Source code for hesseflux.gapfill

                     index += [i]
                     count  = 1
             if i > 0:
-                if (dflag[i] != 0) and (dflag[i-1] == 0):
+                if (dflag[i] != 0) and (dflag[i - 1] == 0):
                     index += [i]
                     count  = 1
                 elif dflag[i] != 0:
                     count += 1
-                elif (dflag[i] == 0) and (dflag[i-1] != 0):
+                elif (dflag[i] == 0) and (dflag[i - 1] != 0):
                     length += [count]
                     count = 0
                 else:
@@ -456,17 +458,18 @@ 

Source code for hesseflux.gapfill

         for i in range(len(index)):
             if length[i] > nn:
                 if verbose > 1:
-                    print('    Large gap: ', index[i], ':', index[i]+length[i])
-                largegap[index[i]:index[i]+length[i]] = True
+                    print('    Large gap: ', index[i], ':',
+                          index[i] + length[i])
+                largegap[index[i]:index[i] + length[i]] = True
 
         # set or unset rest of days in large gaps
         if fullday:
-            for i in range(ndata-1):
+            for i in range(ndata - 1):
                 # end of large margin
-                if largegap[i] and not largegap[i+1]:
+                if largegap[i] and not largegap[i + 1]:
                     largegap[np.where(day == day[i])[0]] = False
                 # beginning of large margin
-                elif not largegap[i] and largegap[i+1]:
+                elif not largegap[i] and largegap[i + 1]:
                     largegap[np.where(day == day[i])[0]] = False
                 else:
                     continue
@@ -499,14 +502,14 @@ 

Source code for hesseflux.gapfill

                 # search for values around the met-conditions
                 # in a window of time
                 # (one week in the first iteration and odd weeks in the next)
-                j1  = j - np.arange(1, week+1, dtype=int) + 1
+                j1  = j - np.arange(1, week + 1, dtype=int) + 1
                 j2  = j + np.arange(1, week, dtype=int)
                 jj  = np.append(j1, j2)
-                win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+                win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
                 # get boolean array where meteo-conditions are in a given width
-                conditions = ( (np.abs(sw[win]-sw[j])   < sw_devmax) &
-                               (np.abs(ta[win]-ta[j])   < ta_dev) &
-                               (np.abs(vpd[win]-vpd[j]) < vpd_dev) &
+                conditions = ( (np.abs(sw[win] - sw[j])   < sw_devmax) &
+                               (np.abs(ta[win] - ta[j])   < ta_dev) &
+                               (np.abs(vpd[win] - vpd[j]) < vpd_dev) &
                                total_flg[win] )
                 num4avg = np.sum(conditions)
                 # we need at least two samples with similar conditions
@@ -523,10 +526,10 @@ 

Source code for hesseflux.gapfill

                         dflag_f[j] = 1
                     continue
                 else:  # --> extend time window to two weeks
-                    j1  = j - np.arange(1, 2*week+1, dtype=int) + 1
-                    j2  = j + np.arange(1, 2*week, dtype=int)
+                    j1  = j - np.arange(1, 2 * week + 1, dtype=int) + 1
+                    j2  = j + np.arange(1, 2 * week, dtype=int)
                     jj  = np.append(j1, j2)
-                    win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+                    win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
                     conditions = ( (np.abs(sw[win]  - sw[j])  < sw_devmax) &
                                    (np.abs(ta[win]  - ta[j])  < ta_dev) &
                                    (np.abs(vpd[win] - vpd[j]) < vpd_dev) &
@@ -554,12 +557,12 @@ 

Source code for hesseflux.gapfill

 
             # Method 2: just global radiation available
             if sw_flg[j] == 0:
-                j1  = j - np.arange(1, week+1, dtype=int) + 1
+                j1  = j - np.arange(1, week + 1, dtype=int) + 1
                 j2  = j + np.arange(1, week, dtype=int)
                 jj  = np.append(j1, j2)
-                win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+                win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
                 # get boolean array where meteo-conditions are in a given width
-                conditions = ( (np.abs(sw[win]-sw[j]) < sw_devmax) &
+                conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
                                total_flg[win] )
                 num4avg = np.sum(conditions)
                 # we need at least two samples with similar conditions
@@ -578,12 +581,12 @@ 

Source code for hesseflux.gapfill

             # Method 3: same hour
             enough = False
             for i in range(2):
-                t_win = (nperday * (2*i+1))//2
-                j1  = j - np.arange(1, t_win+1, dtype=int) + 1
+                t_win = (nperday * (2 * i + 1)) // 2
+                j1  = j - np.arange(1, t_win + 1, dtype=int) + 1
                 j2  = j + np.arange(1, t_win, dtype=int)
                 jj  = np.append(j1, j2)
-                win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
-                conditions = ( (np.abs(hour[win]-hour[j]) < 1.1)
+                win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
+                conditions = ( (np.abs(hour[win] - hour[j]) < 1.1)
                                & (dflag[win] == 0) )
                 num4avg = np.sum(conditions)
                 if num4avg >= 2:
@@ -607,10 +610,10 @@ 

Source code for hesseflux.gapfill

             # Method 4: same as 1 but for 3-12 weeks
             if meteo_flg[j]:
                 for multi in range(3, 12):
-                    j1  = j - np.arange(1, multi*week+1, dtype=int) + 1
-                    j2  = j + np.arange(1, multi*week, dtype=int)
+                    j1  = j - np.arange(1, multi * week + 1, dtype=int) + 1
+                    j2  = j + np.arange(1, multi * week, dtype=int)
                     jj  = np.append(j1, j2)
-                    win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+                    win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
                     conditions = ( (np.abs(sw[win]  - sw[j])  < sw_devmax) &
                                    (np.abs(ta[win]  - ta[j])  < ta_dev) &
                                    (np.abs(vpd[win] - vpd[j]) < vpd_dev) &
@@ -640,10 +643,10 @@ 

Source code for hesseflux.gapfill

             # Method 5: same as 2 but for 2-12 weeks
             if sw_flg[j] == 0:
                 for multi in range(2, 12):
-                    j1  = j - np.arange(1, multi*week+1, dtype=int) + 1
-                    j2  = j + np.arange(1, multi*week, dtype=int)
+                    j1  = j - np.arange(1, multi * week + 1, dtype=int) + 1
+                    j2  = j + np.arange(1, multi * week, dtype=int)
                     jj  = np.append(j1, j2)
-                    win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
+                    win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
                     # get boolean array where meteo-conditions are
                     # in a given width
                     conditions = ( (np.abs(sw[win] - sw[j]) < sw_devmax) &
@@ -669,12 +672,12 @@ 

Source code for hesseflux.gapfill

 
             # Method 6: same as 3 but for 3-120 days
             for i in range(3, 120):
-                t_win = nperday * (2*i+1)/2
-                j1  = j - np.arange(1, t_win+1, dtype=int) + 1
+                t_win = nperday * (2 * i + 1) / 2
+                j1  = j - np.arange(1, t_win + 1, dtype=int) + 1
                 j2  = j + np.arange(1, t_win, dtype=int)
                 jj  = np.append(j1, j2)
-                win = np.unique(np.sort(np.clip(jj, 0, ndata-1)))
-                conditions = ( (np.abs(hour[win]-hour[j]) < 1.1)
+                win = np.unique(np.sort(np.clip(jj, 0, ndata - 1)))
+                conditions = ( (np.abs(hour[win] - hour[j]) < 1.1)
                                & (dflag[win] == 0) )
                 num4avg = np.sum(conditions)
                 if num4avg >= 2:
@@ -716,6 +719,7 @@ 

Source code for hesseflux.gapfill

         return dfout, ffout
+ if __name__ == '__main__': import doctest doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) @@ -823,7 +827,7 @@

Related Topics

- +
diff --git a/docs/html/_modules/hesseflux/madspikes.html b/docs/html/_modules/hesseflux/madspikes.html index 0522cf1..e688fc3 100644 --- a/docs/html/_modules/hesseflux/madspikes.html +++ b/docs/html/_modules/hesseflux/madspikes.html @@ -1,24 +1,24 @@ - - + - hesseflux.madspikes — hesseflux 5.1.dev0 documentation - - - - - - + hesseflux.madspikes — hesseflux 5.1.dev2 documentation + + + + + + + + - @@ -59,6 +59,7 @@

Source code for hesseflux.madspikes

     * Removed iteration, Apr 2020, Matthias Cuntz
     * Using numpy docstring format, May 2020, Matthias Cuntz
     * Improved flake8 and numpy docstring, Oct 2021, Matthias Cuntz
+    * Removed np.float and np.bool, Jun 2024, Matthias Cuntz
 
 """
 from __future__ import division, absolute_import, print_function
@@ -70,9 +71,11 @@ 

Source code for hesseflux.madspikes

 __all__ = ['madspikes']
 
 
-
[docs]def madspikes(dfin, flag=None, isday=None, +
+[docs] +def madspikes(dfin, flag=None, isday=None, colhead=None, undef=-9999, - nscan=15*48, nfill=1*48, + nscan=15 * 48, nfill=1 * 48, z=7, deriv=2, swthr=10., plot=False): """ @@ -146,7 +149,7 @@

Source code for hesseflux.madspikes

             estr = ('Length of colhead must be number of columns in input'
                     'array. len(colhead)=' + str(len(colhead)) +
                     ' shape(input)=(' + str(dfin.shape[0]) + ',' +
-                    str(dfin.shape[1])+').')
+                    str(dfin.shape[1]) + ').')
             raise ValueError(estr)
     else:
         isnumpy = False
@@ -206,15 +209,15 @@ 

Source code for hesseflux.madspikes

 
     # parameters
     nrow, ncol = df.shape
-    half_scan_win = nscan//2
-    half_fill_win = nfill//2
+    half_scan_win = nscan // 2
+    half_fill_win = nfill // 2
 
     # calculate dusk and dawn times and separate in day and night
-    isdawn = np.zeros(nrow, dtype=np.bool)
-    isdusk = np.zeros(nrow, dtype=np.bool)
-    dis    = isday.astype(int) - np.roll(isday,-1).astype(int)
+    isdawn = np.zeros(nrow, dtype=bool)
+    isdusk = np.zeros(nrow, dtype=bool)
+    dis    = isday.astype(int) - np.roll(isday, -1).astype(int)
     isdawn[:-1]    = np.where(dis[:-1] == -1, True, False)
-    isdusk[:-1]    = np.where(dis[:-1] ==  1, True, False)
+    isdusk[:-1]    = np.where(dis[:-1] == 1, True, False)
     isddday        = isdawn
     tmp            = np.roll(isdusk, 1)
     isddday[1:]   += tmp[1:]  # start and end of day
@@ -246,23 +249,25 @@ 

Source code for hesseflux.madspikes

             np.nan)
 
         # iterate over fill window
-        for j in range(half_fill_win, nrow-1, 2*half_fill_win):
+        for j in range(half_fill_win, nrow - 1, 2 * half_fill_win):
             j1 = max(j - half_scan_win - 1, 0)
             j2 = min(j + half_scan_win + 1, nrow)
             fill_start = max(j - half_fill_win, 1)
-            fill_end   = min(j + half_fill_win, nrow-1)
+            fill_end   = min(j + half_fill_win, nrow - 1)
 
             dd = data_day[j1:j2].to_numpy()
             day_flag = mad(np.ma.masked_array(data=dd, mask=np.isnan(dd)),
                            z=z, deriv=deriv)
             ff.iloc[fill_start:fill_end, cols.index(hcol)] += (
-                np.where(day_flag[fill_start-j1-1:fill_end-j1-1], 2, 0))
+                np.where(day_flag[fill_start - j1 - 1:fill_end - j1 - 1],
+                         2, 0))
 
             nn = data_night[j1:j2]
             night_flag = mad(np.ma.masked_array(data=nn, mask=np.isnan(nn)),
                              z=z, deriv=deriv)
             ff.iloc[fill_start:fill_end, cols.index(hcol)] += (
-                np.where(night_flag[fill_start-j1-1:fill_end-j1-1], 2, 0))
+                np.where(night_flag[fill_start - j1 - 1:fill_end - j1 - 1],
+                         2, 0))
 
         if plot:
             fig = plt.figure(1)
@@ -288,6 +293,7 @@ 

Source code for hesseflux.madspikes

         return ff
+ if __name__ == '__main__': import doctest doctest.testmod() @@ -339,7 +345,7 @@

Related Topics

- +
diff --git a/docs/html/_modules/hesseflux/nee2gpp.html b/docs/html/_modules/hesseflux/nee2gpp.html index adfe081..b910adf 100644 --- a/docs/html/_modules/hesseflux/nee2gpp.html +++ b/docs/html/_modules/hesseflux/nee2gpp.html @@ -1,24 +1,24 @@ - - + - hesseflux.nee2gpp — hesseflux 5.1.dev0 documentation - - - - - - + hesseflux.nee2gpp — hesseflux 5.1.dev2 documentation + + + + + + + + - @@ -49,13 +49,15 @@

Source code for hesseflux.nee2gpp

 * Set default undef to NaN, Mar 2012, Arndt Piayda
 * Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
 * Ported to Python 3, Feb 2013, Matthias Cuntz
-* Use generel cost function cost_abs from functions module, May 2013, Matthias Cuntz
+* Use generel cost function cost_abs from functions module,
+  May 2013, Matthias Cuntz
 * Use fmin_tnc to allow params < 0, Aug 2014, Arndt Piayda
 * Keyword nogppnight, Aug 2014, Arndt Piayda
 * Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
 * Add wrapper nee2gpp for individual routines, Nov 2012, Matthias Cuntz
 * Input can be pandas Dataframe or numpy array(s), Apr 2020, Matthias Cuntz
 * Using numpy docstring format, May 2020, Matthias Cuntz
+* Removed np.float and np.int, Jun 2024, Matthias Cuntz
 
 .. moduleauthor:: Matthias Cuntz, Arndt Piayda
 
@@ -79,8 +81,11 @@ 

Source code for hesseflux.nee2gpp

 
 
 # ----------------------------------------------------------------------
-
[docs]def nee2gpp(dfin, flag=None, isday=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None, - undef=-9999, method='reichstein', nogppnight=False, swthr=10.): +
+[docs] +def nee2gpp(dfin, flag=None, isday=None, date=None, + timeformat='%Y-%m-%d %H:%M:%S', colhead=None, undef=-9999, + method='reichstein', nogppnight=False, swthr=10.): """ Calculate photosynthesis (GPP) and ecosystem respiration (RECO) from Eddy covariance CO2 flux data. @@ -104,26 +109,29 @@

Source code for hesseflux.nee2gpp

         incoming shortwave radiation and air vapour pressure deficit.
 
         `dfin` can be a pandas.Dataframe with the columns
-        'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
-        'TA'    (or starting with 'TA\_') for air temperature [K]
+        'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+        CO2 flux [umol(CO2) m-2 s-1]
+        'TA'    (or starting with 'TA\\_') for air temperature [K]
 
         `method='lasslop'` or `method='day'` needs also
-        'SW_IN' (or starting with 'SW_IN') for incoming short-wave radiation [W m-2]
+        'SW_IN' (or starting with 'SW_IN') for incoming short-wave
+        radiation [W m-2]
         'VPD'   (or starting with 'VPD') for air vapour deficit [Pa]
         The index is taken as date variable.
 
         `dfin` can also me a numpy array with the same columns. In this case
         `colhead`, `date`, and possibly `dateformat` must be given.
     flag : pandas.Dataframe or numpy.array, optional
-        flag Dataframe or array has the same shape as `dfin`. Non-zero values in
-        `flag` will be treated as missing values in `dfin`.
+        flag Dataframe or array has the same shape as `dfin`.
+        Non-zero values in `flag` will be treated as missing values in `dfin`.
 
         `flag` must follow the same rules as `dfin` if pandas.Dataframe.
 
-        If `flag` is numpy array, `df.columns.values` will be used as column heads
-        and the index of `dfin` will be copied to `flag`.
+        If `flag` is numpy array, `df.columns.values` will be used as column
+        heads and the index of `dfin` will be copied to `flag`.
     isday : array_like of bool, optional
-        True when it is day, False when night. Must have the same length as `dfin.shape[0]`.
+        True when it is day, False when night. Must have the same length
+        as `dfin.shape[0]`.
 
         If `isday` is not given, `dfin` must have a column with head 'SW_IN' or
         starting with 'SW_IN'. `isday` will then be `dfin['SW_IN'] > swthr`.
@@ -136,22 +144,29 @@ 

Source code for hesseflux.nee2gpp

         See strftime documentation of Python's datetime module:
         https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
     colhed : array_like of str, optional
-        column names if `dfin` is numpy array. See `dfin` for mandatory column names.
+        column names if `dfin` is numpy array. See `dfin` for mandatory
+        column names.
     undef : float, optional
-        values having `undef` value are treated as missing values in `dfin` (default: -9999)
+        values having `undef` value are treated as missing values in `dfin`
+        (default: -9999)
     method : str, optional
         method to use for partitioning. Possible values are:
 
-        'global' or 'falge':     fit of Reco vs. temperature to all nighttime data
+        'global' or 'falge':     fit of Reco vs. temperature to all nighttime
+                                 data
 
-        'local' of 'reichstein': several fits over the season of Reco vs. temperature
-                                 as in Reichstein et al. (2005) (default)
+        'local' of 'reichstein': several fits over the season of Reco vs.
+                                 temperature as in Reichstein et al. (2005)
+                                 (default)
 
-        'day' or 'lasslop':      method of Lasslop et al. (2010) fitting a light-response curve
+        'day' or 'lasslop':      method of Lasslop et al. (2010) fitting a
+                                 light-response curve
     nogppnight : float, optional
-        GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+        GPP will be set to zero at night. RECO will then equal NEE at night
+        (default: False)
     swthr : float, optional
-        Threshold to determine daytime from incoming shortwave radiation if `isday` not given (default: 10).
+        Threshold to determine daytime from incoming shortwave radiation
+        if `isday` not given (default: 10).
 
     Returns
     -------
@@ -162,22 +177,25 @@ 

Source code for hesseflux.nee2gpp

 
     Notes
     -----
-    Negative respiration possible at night if GPP is forced to 0 with `nogppnight=True`.
+    Negative respiration possible at night if GPP is forced to 0 with
+    `nogppnight=True`.
 
     References
     ----------
     .. [1] Falge et al. (2001)
-       Gap filling strategies for defensible annual sums of net ecosystem exchange,
+       Gap filling strategies for defensible annual sums of
+       net ecosystem exchange,
        Acricultural and Forest Meteorology 107, 43-69
 
     .. [2] Reichstein et al. (2005)
-       On the separation of net ecosystem exchange into assimilation and ecosystem
-       respiration: review and improved algorithm,
+       On the separation of net ecosystem exchange into assimilation
+       and ecosystem respiration: review and improved algorithm,
        Global Change Biology 11, 1424-1439
 
     .. [3] Lasslop et al. (2010)
-       Separation of net ecosystem exchange into assimilation and respiration using
-       a light response curve approach: critical issues and global evaluation,
+       Separation of net ecosystem exchange into assimilation and respiration
+       using a light response curve approach: critical issues and global
+       evaluation,
        Global Change Biology 16, 187-208
 
     Examples
@@ -206,7 +224,8 @@ 

Source code for hesseflux.nee2gpp

     >>> # flag
     >>> flag = np.where(dfin == undef, 2, 0)
     >>> # partition
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='local')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
@@ -214,27 +233,32 @@ 

Source code for hesseflux.nee2gpp

     [1.68311981 1.81012431 1.9874173  2.17108871 2.38759152 2.64372415
      2.90076664 3.18592735]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='local')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='global')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='global')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.33166157e+00
       8.18228013e+00  1.04092252e+01  8.19395317e+00  1.08427448e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='Reichstein')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='Reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='reichstein')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='day')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='day')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  2.78457540e+00
       6.63212545e+00  8.88902165e+00  6.74243873e+00  9.51364527e+00]
@@ -245,56 +269,66 @@ 

Source code for hesseflux.nee2gpp

     History
     -------
     Written  Matthias Cuntz, Mar 2012
-    Modified Arndt Piayda,   Mar 2012 - undef=np.nan
-             Matthias Cuntz, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
-             Matthias Cuntz, Feb 2013 - ported to Python 3
-             Matthias Cuntz, May 2013 - replaced cost functions by generel cost function cost_abs if possible
-             Arndt Piayda,   Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
-                                        permit gpp<0 at any time if nogppnight=True 
+    Modified Arndt Piayda,   Mar 2012
+                 - undef=np.nan
+             Matthias Cuntz, Nov 2012
+                 - wrapper for individual routines nee2gpp_reichstein etc.
+             Matthias Cuntz, Feb 2013
+                 - ported to Python 3
+             Matthias Cuntz, May 2013
+                 - replaced cost functions by generel cost function cost_abs
+                   if possible
+             Arndt Piayda,   Aug 2014
+                 - replaced fmin with fmin_tnc to permit params<0,
+                   permit gpp<0 at any time if nogppnight=True
+             Matthias Cuntz, Jun 2024
+                 - removed np.int and np.float
+
     """
     # Check input
     # numpy or panda
     if isinstance(dfin, (np.ndarray, np.ma.MaskedArray)):
         isnumpy = True
-        istrans = False
-        assert colhead is not None, 'colhead must be given if input is numpy.ndarray.'
+        assert colhead is not None, (
+            'colhead must be given if input is numpy.ndarray.')
         if dfin.shape[0] == len(colhead):
-            istrans = True
             df = pd.DataFrame(dfin.T, columns=colhead)
         elif dfin.shape[1] == len(colhead):
             df = pd.DataFrame(dfin, columns=colhead)
         else:
-            raise ValueError('Length of colhead must be number of columns in input array. len(colhead)='+str(len(colhead))+' shape(input)=('+str(dfin.shape[0])+','+str(dfin.shape[1])+').')
+            raise ValueError('Length of colhead must be number of columns'
+                             ' in input array. len(colhead)=' +
+                             str(len(colhead)) + ' shape(input)=(' +
+                             str(dfin.shape[0]) + ',' +
+                             str(dfin.shape[1]) + ').')
         assert date is not None, 'Date must be given if input is numpy arrary.'
         df['Datetime'] = pd.to_datetime(date, format=timeformat)
         df.set_index('Datetime', drop=True, inplace=True)
     else:
         isnumpy = False
-        istrans = False
-        assert isinstance(dfin, pd.core.frame.DataFrame), 'Input must be either numpy.ndarray or pandas.DataFrame.'
+        assert isinstance(dfin, pd.core.frame.DataFrame), (
+            'Input must be either numpy.ndarray or pandas.DataFrame.')
         df = dfin.copy(deep=True)
 
     # Incoming flags
     if flag is not None:
         if isinstance(flag, (np.ndarray, np.ma.MaskedArray)):
-            fisnumpy = True
-            fistrans = False
             if flag.shape[0] == len(df):
                 ff = pd.DataFrame(flag, columns=df.columns.values)
             elif flag.shape[1] == len(df):
-                fistrans = True
                 ff = pd.DataFrame(flag.T, columns=df.columns.values)
             else:
-                raise ValueError('flag must have same shape as data array. data: ({:d},{:d}); flag: ({:d},{:d})'.format(dfin.shape[0], dfin.shape[1], flag.shape[0], flag.shape[1]))
+                raise ValueError(
+                    'flag must have same shape as data array.'
+                    ' data: ({:d},{:d}); flag: ({:d},{:d})'.format(
+                        dfin.shape[0], dfin.shape[1], flag.shape[0],
+                        flag.shape[1]))
             ff = ff.set_index(df.index)
         else:
-            fisnumpy = False
-            fistrans = False
-            assert isinstance(flag, pd.core.frame.DataFrame), 'Flag must be either numpy.ndarray or pandas.DataFrame.'
+            assert isinstance(flag, pd.core.frame.DataFrame), (
+                'Flag must be either numpy.ndarray or pandas.DataFrame.')
             ff = flag.copy(deep=True)
     else:
-        fisnumpy = isnumpy
-        fistrans = istrans
         # flags: 0: good; 1: input flagged; 2: output flagged
         ff              = df.copy(deep=True).astype(int)
         ff[:]           = 0
@@ -308,9 +342,11 @@ 

Source code for hesseflux.nee2gpp

             if cc.startswith('SW_IN'):
                 sw_id = cc
                 break
-        assert sw_id, 'Global radiation with name SW or starting with SW_ must be in input if isday not given.'
-        isday = df[sw_id] > swthr # Papale et al. (Biogeosciences, 2006): 20; REddyProc: 10
-    if isinstance(isday, (pd.core.series.Series,pd.core.frame.DataFrame)):
+        assert sw_id, ('Global radiation with name SW or starting with'
+                       ' SW_ must be in input if isday not given.')
+        # Papale et al. (Biogeosciences, 2006): 20; REddyProc: 10
+        isday = df[sw_id] > swthr
+    if isinstance(isday, (pd.core.series.Series, pd.core.frame.DataFrame)):
         isday = isday.to_numpy()
     isday[isday == undef] = np.nan
     ff[np.isnan(isday)]   = 1
@@ -320,10 +356,12 @@ 

Source code for hesseflux.nee2gpp

         dfout = _nee2gpp_falge(df, ff, isday, undef=undef)
     # Local relationship = Reichstein et al. (2005)
     elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
-        dfout = _nee2gpp_reichstein(df, ff, isday, undef=undef, nogppnight=nogppnight)
+        dfout = _nee2gpp_reichstein(df, ff, isday, undef=undef,
+                                    nogppnight=nogppnight)
     # Lasslop et al. (2010) daytime method
     elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
-        dfout = _nee2gpp_lasslop(df, ff, isday, undef=undef, nogppnight=nogppnight)
+        dfout = _nee2gpp_lasslop(df, ff, isday, undef=undef,
+                                 nogppnight=nogppnight)
     # Include new methods here
     else:
         raise ValueError('Error nee2gpp: method not implemented yet.')
@@ -334,6 +372,7 @@ 

Source code for hesseflux.nee2gpp

         return dfout
+# ----------------------------------------------------------------------
 def _nee2gpp_falge(df, ff, isday, undef=-9999):
     """
@@ -347,8 +386,9 @@

Source code for hesseflux.nee2gpp

         time series of CO2 fluxes and air temperature.
 
         pandas.Dataframe with the columns
-        'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
-        'TA'    (or starting with 'TA\_') for air temperature [K]
+        'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+        CO2 flux [umol(CO2) m-2 s-1]
+        'TA'    (or starting with 'TA\\_') for air temperature [K]
         The index is taken as date variable.
     ff : pandas.Dataframe
         flag Dataframe or array has the same shape as `df`. Non-zero values in
@@ -356,9 +396,11 @@ 

Source code for hesseflux.nee2gpp

 
         `ff` must follow the same rules as `df`.
     isday : array_like of bool
-        True when it is day, False when night. Must have the same length as `df.shape[0].`
+        True when it is day, False when night. Must have the same length
+        as `df.shape[0].`
     undef : float, optional
-        values having `undef` value are treated as missing values in `df` (default: -9999)
+        values having `undef` value are treated as missing values in `df`
+        (default: -9999)
 
     Returns
     -------
@@ -369,7 +411,8 @@ 

Source code for hesseflux.nee2gpp

     References
     ----------
     .. [1] Falge et al. (2001)
-       Gap filling strategies for defensible annual sums of net ecosystem exchange,
+       Gap filling strategies for defensible annual sums of
+       net ecosystem exchange,
        Acricultural and Forest Meteorology 107, 43-69
 
     Examples
@@ -384,7 +427,8 @@ 

Source code for hesseflux.nee2gpp

     >>> head  = fread(ifile, skip=2, header=True)
     >>> head1 = head[0]
     >>> # date
-    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:],
+    ...                  hr=dat[3,:], mi=dat[4,:])
     >>> adate = dec2date(jdate, eng=True)
     >>> # colhead
     >>> idx   = []
@@ -398,7 +442,8 @@ 

Source code for hesseflux.nee2gpp

     >>> # flag
     >>> flag = np.where(dfin == undef, 2, 0)
     >>> # partition
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='global')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='global')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.33166157e+00
       8.18228013e+00  1.04092252e+01  8.19395317e+00  1.08427448e+01]
@@ -409,11 +454,14 @@ 

Source code for hesseflux.nee2gpp

     Modified Arndt Piayda,   Mar 2012 - undef=np.nan
              Matthias Cuntz, Nov 2012 - individual routine
              Matthias Cuntz, Feb 2013 - ported to Python 3
+             Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
     """
     # Variables
     fc_id = ''
     for cc in df.columns:
-        if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+        if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+             (cc == 'NEE') ):
             fc_id = cc
             break
     ta_id = ''
@@ -421,12 +469,15 @@ 

Source code for hesseflux.nee2gpp

         if cc.startswith('TA_') or (cc == 'TA'):
             ta_id = cc
             break
-    assert fc_id, 'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
-    assert ta_id, 'Air temperature with name TA or starting with TA_ must be in input.'
+    assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+                   ' or NEE_ must be in input.')
+    assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+                   ' be in input.')
 
     nee    = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
     t      = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
-    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+                                      (isday == undef)))
 
     # Partition - Global relationship as in Falge et al. (2001)
 
@@ -439,16 +490,16 @@ 

Source code for hesseflux.nee2gpp

     p        = opt.fmin(cost_abs, [2., 200.],
                         args=(lloyd_fix_p, tt, net), disp=False)
 
-    Reco     = np.ones(ndata)*undef
+    Reco     = np.ones(ndata) * undef
     ii       = np.where(~t.mask)[0]
     Reco[ii] = lloyd_fix(t[ii], p[0], p[1])
 
     # GPP
-    GPP     = np.ones(ndata)*undef
+    GPP     = np.ones(ndata) * undef
     ii      = np.where(~(t.mask | nee.mask))[0]
     GPP[ii] = Reco[ii] - nee[ii]
 
-    dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+    dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
 
     return dfout
 
@@ -467,8 +518,9 @@ 

Source code for hesseflux.nee2gpp

         time series of CO2 fluxes and air temperature.
 
         pandas.Dataframe with the columns
-        'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
-        'TA'    (or starting with 'TA\_') for air temperature [K]
+        'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for
+        observed CO2 flux [umol(CO2) m-2 s-1]
+        'TA'    (or starting with 'TA\\_') for air temperature [K]
         The index is taken as date variable.
     ff : pandas.Dataframe
         flag Dataframe or array has the same shape as `df`. Non-zero values in
@@ -476,11 +528,14 @@ 

Source code for hesseflux.nee2gpp

 
         `ff` must follow the same rules as `df`.
     isday : array_like of bool
-        True when it is day, False when night. Must have the same length as `df.shape[0].`
+        True when it is day, False when night. Must have the same length
+        as `df.shape[0].`
     undef : float, optional
-        values having `undef` value are treated as missing values in `df` (default: -9999)
+        values having `undef` value are treated as missing values in `df`
+        (default: -9999)
     nogppnight : float, optional
-        GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+        GPP will be set to zero at night. RECO will then equal NEE at night
+        (default: False)
 
     Returns
     -------
@@ -491,8 +546,8 @@ 

Source code for hesseflux.nee2gpp

     References
     ----------
     .. [2] Reichstein et al. (2005)
-       On the separation of net ecosystem exchange into assimilation and ecosystem
-       respiration: review and improved algorithm,
+       On the separation of net ecosystem exchange into assimilation and
+       ecosystem respiration: review and improved algorithm,
        Global Change Biology 11, 1424-1439
 
     Examples
@@ -507,7 +562,8 @@ 

Source code for hesseflux.nee2gpp

     >>> head  = fread(ifile, skip=2, header=True)
     >>> head1 = head[0]
     >>> # date
-    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:],
+    ...                  mi=dat[4,:])
     >>> adate = dec2date(jdate, eng=True)
     >>> # colhead
     >>> idx   = []
@@ -521,7 +577,8 @@ 

Source code for hesseflux.nee2gpp

     >>> # flag
     >>> flag = np.where(dfin == undef, 2, 0)
     >>> # partition
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='local')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
@@ -529,17 +586,20 @@ 

Source code for hesseflux.nee2gpp

     [1.68311981 1.81012431 1.9874173  2.17108871 2.38759152 2.64372415
      2.90076664 3.18592735]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='local')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='Reichstein')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='Reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
 
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='reichstein')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
@@ -550,11 +610,14 @@ 

Source code for hesseflux.nee2gpp

     Modified Arndt Piayda,   Mar 2012 - undef=np.nan
              Matthias Cuntz, Nov 2012 - individual routine
              Matthias Cuntz, Feb 2013 - ported to Python 3
+             Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
     """
     # Variables
     fc_id = ''
     for cc in df.columns:
-        if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+        if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+             (cc == 'NEE') ):
             fc_id = cc
             break
     ta_id = ''
@@ -562,118 +625,133 @@ 

Source code for hesseflux.nee2gpp

         if cc.startswith('TA_') or (cc == 'TA'):
             ta_id = cc
             break
-    assert fc_id, 'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
-    assert ta_id, 'Air temperature with name TA or starting with TA_ must be in input.'
+    assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+                   ' or NEE_ must be in input.')
+    assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+                   ' be in input.')
 
     nee    = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
     t      = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
-    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+                                      (isday == undef)))
     dates  = df.index.to_julian_date()
 
     # Partition - Local relationship = Reichstein et al. (2005)
 
     ndata = nee.size
-    GPP   = np.ones(ndata)*undef
-    Reco  = np.ones(ndata)*undef
-    dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+    GPP   = np.ones(ndata) * undef
+    Reco  = np.ones(ndata) * undef
+    dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
 
     # Select valid nighttime
     mask  = misday | nee.mask | t.mask | misday.mask
     ii    = np.where(~mask)[0]
-    if (ii.size==0):
-        # raise ValueError('Error _nee2gpp_reichstein: no valid nighttime data.')
+    if (ii.size == 0):
+        # raise ValueError('Error _nee2gpp_reichstein:'
+        #                  ' no valid nighttime data.')
         print('Warning _nee2gpp_reichstein: no valid nighttime data.')
         return dfout
     jul  = dates[ii]
     tt   = np.ma.compressed(t[ii])
     net  = np.ma.compressed(nee[ii])
     # 1. each 5 days, in 15 day period, fit if range of T > 5
-    locp = [] # local param
-    locs = [] # local err
-    dmin = np.floor(np.amin(jul)).astype(np.int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
-    dmax = np.ceil(np.amax(jul)).astype(np.int)  # so the search will be from noon to noon and thus includes all nights
-    for i in range(dmin,dmax,5):
-        iii  = np.where((jul>=i) & (jul<(i+14)))[0]
+    locp = []  # local param
+    locs = []  # local err
+    # be aware that julian days starts at noon, i.e. 1.0 is 12h
+    # so the search will be from noon to noon and thus includes all nights
+    dmin = np.floor(np.amin(jul)).astype(int)
+    dmax = np.ceil(np.amax(jul)).astype(int)
+    for i in range(dmin, dmax, 5):
+        iii  = np.where((jul >= i) & (jul < (i + 14)))[0]
         niii = iii.size
         if niii > 6:
             tt1  = tt[iii]
             net1 = net[iii]
-            mm   = ~mad(net1, z=4.5) # make fit more robust by removing outliers
+            # make fit more robust by removing outliers
+            mm   = ~mad(net1, z=4.5)
             if (np.ptp(tt[iii]) >= 5.) & (np.sum(mm) > 6):
                 p, temp1, temp2 = opt.fmin_tnc(cost_lloyd_fix, [2., 200.],
-                                               bounds=[[0.,None], [0.,None]],
+                                               bounds=[[0., None], [0., None]],
                                                args=(tt1[mm], net1[mm]),
                                                approx_grad=True, disp=False)
                 try:
-                    p1, c = opt.curve_fit(lloyd_fix, tt1[mm], net1[mm], p0=p, maxfev=10000) # params, covariance
-                    if np.all(np.isfinite(c)): # possible return of curvefit: c=inf
+                    # params, covariance
+                    p1, c = opt.curve_fit(lloyd_fix, tt1[mm], net1[mm],
+                                          p0=p, maxfev=10000)
+                    # possible return of curvefit: c=inf
+                    if np.all(np.isfinite(c)):
                         s = np.sqrt(np.diag(c))
                     else:
-                        s = 10.*np.abs(p)
+                        s = 10. * np.abs(p)
                 except:
-                    s = 10.*np.abs(p)
+                    s = 10. * np.abs(p)
                 locp += [p]
                 locs += [s]
                 # if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
     if len(locp) == 0:
-        # raise ValueError('Error _nee2gpp_reichstein: No local relationship found.')
+        # raise ValueError('Error _nee2gpp_reichstein:'
+        #                  ' No local relationship found.')
         print('Warning _nee2gpp_reichstein: No local relationship found.')
         return dfout
-    locp   = np.squeeze(np.array(locp).astype(np.float))
-    locs   = np.squeeze(np.array(locs).astype(np.float))
+    locp   = np.squeeze(np.array(locp).astype(float))
+    locs   = np.squeeze(np.array(locs).astype(float))
     # 2. E0 = avg of best 3
     # Reichstein et al. (2005), p. 1430, 1st paragraph.
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
-        iii  = np.where((locp[:,1] > 0.) & (locp[:,1] < 450.) & (np.abs(locs[:,1]/locp[:,1]) < 0.5))[0]
+        iii  = np.where((locp[:, 1] > 0.) & (locp[:, 1] < 450.) &
+                        (np.abs(locs[:, 1] / locp[:, 1]) < 0.5))[0]
     niii = iii.size
-    if niii==0:
-        # raise ValueError('Error _nee2gpp_reichstein: No good local relationship found.')
+    if niii == 0:
+        # raise ValueError('Error _nee2gpp_reichstein:'
+        #                  ' No good local relationship found.')
         # loosen the criteria: take the best three estimates anyway
-        iii   = np.where((locp[:,1] > 0.))[0]
+        iii   = np.where((locp[:, 1] > 0.))[0]
         niii = iii.size
-        if niii<1:
+        if niii < 1:
             # raise ValueError('Error _nee2gpp_reichstein: No E0>0 found.')
             print('Warning _nee2gpp_reichstein: No E0>0 found.')
             return dfout
-        lp    = locp[iii,:]
-        ls    = locs[iii,:]
-        iis   = np.argsort(ls[:,1])
-        bestp = np.mean(lp[iis[0:np.minimum(3,niii)],:],axis=0)
-        bests = np.mean(ls[iis[0:np.minimum(3,niii)],:],axis=0)
-    elif niii==1:
-        bestp = np.squeeze(locp[iii,:])
-        bests = np.squeeze(locs[iii,:])
-    elif niii==2:
-        bestp = np.mean(locp[iii,:],axis=0)
-        bests = np.mean(locs[iii,:],axis=0)
+        lp    = locp[iii, :]
+        ls    = locs[iii, :]
+        iis   = np.argsort(ls[:, 1])
+        bestp = np.mean(lp[iis[0:np.minimum(3, niii)], :], axis=0)
+        bests = np.mean(ls[iis[0:np.minimum(3, niii)], :], axis=0)
+    elif niii == 1:
+        bestp = np.squeeze(locp[iii, :])
+        bests = np.squeeze(locs[iii, :])
+    elif niii == 2:
+        bestp = np.mean(locp[iii, :], axis=0)
+        bests = np.mean(locs[iii, :], axis=0)
         # ls    = locs[iii,:]
         # iis   = np.argsort(ls[:,1])
     else:
-        lp    = locp[iii,:]
-        ls    = locs[iii,:]
-        iis   = np.argsort(ls[:,1])
-        bestp = np.mean(lp[iis[0:3],:],axis=0)
-        bests = np.mean(ls[iis[0:3],:],axis=0)
+        lp    = locp[iii, :]
+        ls    = locs[iii, :]
+        iis   = np.argsort(ls[:, 1])
+        bestp = np.mean(lp[iis[0:3], :], axis=0)
+        bests = np.mean(ls[iis[0:3], :], axis=0)
 
     # 3. Refit Rref with fixed E0, each 4 days
-    refp  = [] # Rref param
-    refii = [] # mean index of data points
+    refp  = []  # Rref param
+    refii = []  # mean index of data points
     E0    = bestp[1]
     et    = lloyd_fix(tt, 1., E0)
-    for i in range(dmin,dmax,4):
-        iii  = np.where((jul>=i) & (jul<(i+4)))[0]
+    for i in range(dmin, dmax, 4):
+        iii  = np.where((jul >= i) & (jul < (i + 4)))[0]
         niii = iii.size
         if niii > 3:
             # Calc directly minisation of (nee-p*et)**2
             p, temp1, temp2 = opt.fmin_tnc(cost_abs, [2.],
-                                           bounds=[[0.,None]],
-                                           args=(lloyd_only_rref_p, et[iii], net[iii]),
+                                           bounds=[[0., None]],
+                                           args=(lloyd_only_rref_p, et[iii],
+                                                 net[iii]),
                                            approx_grad=True, disp=False)
             refp  += [p]
-            refii += [np.int((iii[0]+iii[-1])//2)]
+            refii += [int((iii[0] + iii[-1]) // 2)]
     if len(refp) == 0:
-        # raise ValueError('Error _nee2gpp_reichstein: No ref relationship found.')
+        # raise ValueError('Error _nee2gpp_reichstein:'
+        #                  ' No ref relationship found.')
         print('Warning _nee2gpp_reichstein: No ref relationship found.')
         return dfout
     refp  = np.squeeze(np.array(refp))
@@ -683,28 +761,28 @@ 

Source code for hesseflux.nee2gpp

     Rref = np.interp(dates, jul[refii], refp)
 
     # 5. Calc Reco
-    Reco     = np.ones(ndata)*undef
+    Reco     = np.ones(ndata) * undef
     ii       = np.where(~t.mask)[0]
     Reco[ii] = lloyd_fix(t[ii], Rref[ii], E0)
 
     # 6. Calc GPP
-    GPP     = np.ones(ndata)*undef
+    GPP     = np.ones(ndata) * undef
     ii      = np.where(~(t.mask | nee.mask))[0]
     GPP[ii] = Reco[ii] - nee[ii]
 
     # 7. Set GPP=0 at night, if wanted
     if nogppnight:
-        mask = misday | nee.mask | t.mask | misday.mask # night
+        mask = misday | nee.mask | t.mask | misday.mask  # night
         ii   = np.where(~mask)[0]
         Reco[ii] = nee[ii]
         GPP[ii]  = 0.
         # and prohibit negative gpp at any time
-        mask = nee.mask | t.mask | (GPP>0.)
+        mask = nee.mask | t.mask | (GPP > 0.)
         ii   = np.where(~mask)[0]
         Reco[ii] -= GPP[ii]
         GPP[ii]  = 0.
 
-    dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+    dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
 
     return dfout
 
@@ -723,9 +801,11 @@ 

Source code for hesseflux.nee2gpp

         incoming shortwave radiation, and air vapour pressure deficit.
 
         `df` can be a pandas.Dataframe with the columns
-        'FC' or 'NEE' (or starting with 'FC\_' or 'NEE\_') for observed CO2 flux [umol(CO2) m-2 s-1]
-        'TA'    (or starting with 'TA\_') for air temperature [K]
-        'SW_IN' (or starting with 'SW_IN') for incoming short-wave radiation [W m-2]
+        'FC' or 'NEE' (or starting with 'FC\\_' or 'NEE\\_') for observed
+        CO2 flux [umol(CO2) m-2 s-1]
+        'TA'    (or starting with 'TA\\_') for air temperature [K]
+        'SW_IN' (or starting with 'SW_IN') for incoming short-wave
+        radiation [W m-2]
         'VPD'   (or starting with 'VPD') for air vapour deficit [Pa]
         The index is taken as date variable.
     ff : pandas.Dataframe or numpy.array, optional
@@ -734,11 +814,14 @@ 

Source code for hesseflux.nee2gpp

 
         `ff` must follow the same rules as `df` if pandas.Dataframe.
     isday : array_like of bool, optional
-        True when it is day, False when night. Must have the same length as `df.shape[0]`.
+        True when it is day, False when night. Must have the same length
+        as `df.shape[0]`.
     undef : float, optional
-        values having `undef` value are treated as missing values in `df` (default: -9999)
+        values having `undef` value are treated as missing values in `df`
+        (default: -9999)
     nogppnight : float, optional
-        GPP will be set to zero at night. RECO will then equal NEE at night (default: False)
+        GPP will be set to zero at night. RECO will then equal NEE at night
+        (default: False)
 
     Returns
     -------
@@ -749,8 +832,9 @@ 

Source code for hesseflux.nee2gpp

     References
     ----------
     .. [3] Lasslop et al. (2010)
-       Separation of net ecosystem exchange into assimilation and respiration using
-       a light response curve approach: critical issues and global evaluation,
+       Separation of net ecosystem exchange into assimilation and respiration
+       using a light response curve approach: critical issues and global
+       evaluation,
        Global Change Biology 16, 187-208
 
     Examples
@@ -765,7 +849,8 @@ 

Source code for hesseflux.nee2gpp

     >>> head  = fread(ifile, skip=2, header=True)
     >>> head1 = head[0]
     >>> # date
-    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
+    >>> jdate = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:],
+    ...                  mi=dat[4,:])
     >>> adate = dec2date(jdate, eng=True)
     >>> # colhead
     >>> idx   = []
@@ -779,7 +864,8 @@ 

Source code for hesseflux.nee2gpp

     >>> # flag
     >>> flag = np.where(dfin == undef, 2, 0)
     >>> # partition
-    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='day')
+    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
+    ...                     undef=undef, method='day')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  2.78457540e+00
       6.63212545e+00  8.88902165e+00  6.74243873e+00  9.51364527e+00]
@@ -793,11 +879,14 @@ 

Source code for hesseflux.nee2gpp

     Modified Arndt Piayda,   Mar 2012 - undef=np.nan
              Matthias Cuntz, Nov 2012 - individual routine
              Matthias Cuntz, Feb 2013 - ported to Python 3
+             Matthias Cuntz, Jun 2024 - removed np.int and np.float
+
     """
     # Variables
     fc_id = ''
     for cc in df.columns:
-        if cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or (cc == 'NEE'):
+        if ( cc.startswith('FC_') or (cc == 'FC') or cc.startswith('NEE_') or
+             (cc == 'NEE') ):
             fc_id = cc
             break
     ta_id = ''
@@ -815,24 +904,29 @@ 

Source code for hesseflux.nee2gpp

         if cc.startswith('VPD_') or (cc == 'VPD'):
             vpd_id = cc
             break
-    assert fc_id,  'Carbon net flux with name FC or NEE or starting with FC_ or NEE_ must be in input.'
-    assert ta_id,  'Air temperature with name TA or starting with TA_ must be in input.'
-    assert sw_id,  'Global radiation with name SW or starting with SW_ must be in input.'
-    assert vpd_id, 'Vapour pressure deficit with name VPD or starting with VPD_ must be in input.'
-
-    nee    = np.ma.array(df[fc_id],  mask=(ff[fc_id] > 0))
-    t      = np.ma.array(df[ta_id],  mask=(ff[ta_id] > 0))
-    sw     = np.ma.array(df[sw_id],  mask=(ff[sw_id] > 0))
+    assert fc_id, ('Carbon net flux with name FC or NEE or starting with FC_'
+                   ' or NEE_ must be in input.')
+    assert ta_id, ('Air temperature with name TA or starting with TA_ must'
+                   ' be in input.')
+    assert sw_id, ('Global radiation with name SW or starting with SW_ must'
+                   ' be in input.')
+    assert vpd_id, ('Vapour pressure deficit with name VPD or starting with'
+                    ' VPD_ must be in input.')
+
+    nee    = np.ma.array(df[fc_id], mask=(ff[fc_id] > 0))
+    t      = np.ma.array(df[ta_id], mask=(ff[ta_id] > 0))
+    sw     = np.ma.array(df[sw_id], mask=(ff[sw_id] > 0))
     vpd    = np.ma.array(df[vpd_id], mask=(ff[vpd_id] > 0))
-    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) | (isday == undef)))
+    misday = np.ma.array(isday, mask=((~np.isfinite(isday)) |
+                                      (isday == undef)))
     dates  = df.index.to_julian_date()
 
     # Partition - Lasslop et al. (2010) method
 
     ndata = nee.size
-    GPP   = np.ones(ndata)*undef
-    Reco  = np.ones(ndata)*undef
-    dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+    GPP   = np.ones(ndata) * undef
+    Reco  = np.ones(ndata) * undef
+    dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
 
     do_lgpp = False
     mask  = nee.mask | t.mask | misday.mask | sw.mask | vpd.mask
@@ -855,7 +949,8 @@ 

Source code for hesseflux.nee2gpp

     aalpha = 0.01
     qnet   = np.sort(dnet)
     nqnet  = qnet.size
-    abeta0 = np.abs(qnet[np.floor(0.97*nqnet).astype(np.int)]-qnet[np.ceil(0.03*nqnet).astype(np.int)])
+    abeta0 = np.abs(qnet[np.floor(0.97 * nqnet).astype(int)] -
+                    qnet[np.ceil(0.03 * nqnet).astype(int)])
     ak     = 0.
     # out
     lE0    = []
@@ -865,18 +960,19 @@ 

Source code for hesseflux.nee2gpp

         lk     = []
     lRref  = []
     lii    = []
-    dmin = np.floor(np.amin(dates)).astype(np.int)
-    dmax = np.ceil(np.amax(dates)).astype(np.int)
+    dmin = np.floor(np.amin(dates)).astype(int)
+    dmax = np.ceil(np.amax(dates)).astype(int)
     zaehl = -1
-    for i in range(dmin,dmax,2):
+    for i in range(dmin, dmax, 2):
         good = True
         # 1. Estimate E0 from nighttime data
-        iii  = np.squeeze(np.where((njul>=i) & (njul<(i+12))))
+        iii  = np.squeeze(np.where((njul >= i) & (njul < (i + 12))))
         niii = iii.size
         if niii > 3:
             p, temp1, temp2 = opt.fmin_tnc(cost_abs, [aRref, 100.],
-                                           bounds=[[0.,None], [0.,None]],
-                                           args=(lloyd_fix_p, ntt[iii], nnet[iii]),
+                                           bounds=[[0., None], [0., None]],
+                                           args=(lloyd_fix_p, ntt[iii],
+                                                 nnet[iii]),
                                            approx_grad=True, disp=False)
             E0 = np.maximum(p[1], 50.)
         else:
@@ -887,7 +983,7 @@ 

Source code for hesseflux.nee2gpp

                 good = False
                 continue
         # 2. Estimate alpha, k, beta0, Rref from daytime data
-        iii  = np.squeeze(np.where((djul>=i) & (djul<(i+4))))
+        iii  = np.squeeze(np.where((djul >= i) & (djul < (i + 4))))
         niii = iii.size
         if niii > 3:
             et     = lloyd_fix(dtt[iii], 1., E0)
@@ -896,34 +992,37 @@ 

Source code for hesseflux.nee2gpp

             ibeta0 = abeta0
             ik     = ak
             iRref  = aRref
-            bounds = [[None,None], [None,None], [None,None], [None,None]]
+            bounds = [[None, None], [None, None], [None, None], [None, None]]
             while again:
                 again = False
-                p, nfeval, rc  = opt.fmin_tnc(cost_lasslop, [ialpha, ibeta0, ik, iRref],
+                p, nfeval, rc  = opt.fmin_tnc(cost_lasslop,
+                                              [ialpha, ibeta0, ik, iRref],
                                               bounds=bounds,
-                                              args=(dsw[iii], et, dvpd[iii], dnet[iii]),
+                                              args=(dsw[iii], et, dvpd[iii],
+                                                    dnet[iii]),
                                               approx_grad=True, disp=False)
-                # if parameters beyond some bounds, set params and redo the optim or skip
-                if ((p[0] < 0.) | (p[0] > 0.22)): # alpha
+                # if parameters beyond some bounds, set params and redo
+                # the optim or skip
+                if ((p[0] < 0.) | (p[0] > 0.22)):  # alpha
                     again = True
                     if zaehl >= 0:
-                        bounds[0] = [lalpha[zaehl],lalpha[zaehl]]
+                        bounds[0] = [lalpha[zaehl], lalpha[zaehl]]
                         ialpha    = lalpha[zaehl]
                     else:
-                        bounds[0] = [0.,0.]
+                        bounds[0] = [0., 0.]
                         ialpha    = 0.
-                if p[1] < 0.:                     # beta0
-                    bounds[1] = [0.,0.]
+                if p[1] < 0.:                      # beta0
+                    bounds[1] = [0., 0.]
                     ibeta0    = 0.
                     again = True
                 if p[1] > 250.:
                     good = False
                     continue
-                if p[2] < 0.:                     # k
-                    bounds[2] = [0.,0.]
+                if p[2] < 0.:                      # k
+                    bounds[2] = [0., 0.]
                     ik        = 0.
                     again = True
-                if p[3] < 0:                      # Rref
+                if p[3] < 0:                       # Rref
                     good = False
                     continue
             if good:
@@ -932,7 +1031,7 @@ 

Source code for hesseflux.nee2gpp

                     lbeta0 = lbeta0 + [p[1]]
                     lk     = lk     + [p[2]]
                 lRref  = lRref  + [p[3]]
-                lii    = lii    + [np.int((iii[0]+iii[-1])/2)]
+                lii    = lii    + [int((iii[0] + iii[-1]) / 2)]
             else:
                 continue
         else:
@@ -940,7 +1039,8 @@ 

Source code for hesseflux.nee2gpp

         lE0    = lE0 + [E0]
         zaehl += 1
     if len(lE0) == 0:
-        # raise ValueError('Error _nee2gpp_lasslop: No day relationship found.')
+        # raise ValueError('Error _nee2gpp_lasslop:'
+        #                  ' No day relationship found.')
         print('Warning _nee2gpp_lasslop: No day relationship found.')
         return dfout
     lE0 = np.squeeze(np.array(lE0))
@@ -956,7 +1056,7 @@ 

Source code for hesseflux.nee2gpp

     Rref = np.interp(dates, djul[lii], lRref)
 
     # 4. Calc Reco
-    Reco     = np.ones(ndata)*undef
+    Reco     = np.ones(ndata) * undef
     ii       = np.squeeze(np.where(~t.mask))
     Reco[ii] = lloyd_fix(t[ii], Rref[ii], E0[ii])
 
@@ -969,26 +1069,27 @@ 

Source code for hesseflux.nee2gpp

         lmask    = t.mask | misday.mask | sw.mask | vpd.mask
         ii       = np.squeeze(np.where(~lmask))
         lgpp     = np.zeros(ndata)
-        lgpp[ii] = lasslop(sw[ii], et[ii], vpd[ii], alpha[ii], beta0[ii], k[ii], Rref[ii]) - Reco[ii]
+        lgpp[ii] = lasslop(sw[ii], et[ii], vpd[ii], alpha[ii],
+                           beta0[ii], k[ii], Rref[ii]) - Reco[ii]
 
     # 6. GPP
-    GPP     = np.ones(ndata)*undef
+    GPP     = np.ones(ndata) * undef
     ii      = np.squeeze(np.where(~(t.mask | nee.mask)))
     GPP[ii] = Reco[ii] - nee[ii]
 
     # 7. Set GPP=0 at night, if wanted
     if nogppnight:
-        mask = misday | nee.mask | t.mask | misday.mask # night
+        mask = misday | nee.mask | t.mask | misday.mask  # night
         ii   = np.where(~mask)[0]
         Reco[ii] = nee[ii]
         GPP[ii]  = 0.
         # and prohibit negative gpp at any time
-        mask = nee.mask | t.mask | (GPP>0.)
+        mask = nee.mask | t.mask | (GPP > 0.)
         ii   = np.where(~mask)[0]
         Reco[ii] -= GPP[ii]
         GPP[ii]   = 0.
 
-    dfout = pd.DataFrame({'GPP':GPP, 'RECO':Reco}, index=df.index)
+    dfout = pd.DataFrame({'GPP': GPP, 'RECO': Reco}, index=df.index)
 
     return dfout
 
@@ -1103,7 +1204,7 @@ 

Related Topics

- +
diff --git a/docs/html/_modules/hesseflux/ustarfilter.html b/docs/html/_modules/hesseflux/ustarfilter.html index a0705a6..182c5bb 100644 --- a/docs/html/_modules/hesseflux/ustarfilter.html +++ b/docs/html/_modules/hesseflux/ustarfilter.html @@ -1,24 +1,24 @@ - - + - hesseflux.ustarfilter — hesseflux 5.1.dev0 documentation - - - - - - + hesseflux.ustarfilter — hesseflux 5.1.dev2 documentation + + + + + + + + - @@ -67,6 +67,8 @@

Source code for hesseflux.ustarfilter

       only ustar data when NEE and Ta are valid, Jan 2023, Matthias Cuntz
     * Use 90% of ustar if no threshold found also for seasonout,
       Jan 2023, Matthias Cuntz
+    * Removed np.float and np.bool, Jun 2024, Matthias Cuntz
+    * do not register pandas plotting backend, Jun 2024, Matthias Cuntz
 
 """
 import numpy as np
@@ -76,7 +78,9 @@ 

Source code for hesseflux.ustarfilter

 __all__ = ['ustarfilter']
 
 
-
[docs]def ustarfilter(dfin, flag=None, isday=None, date=None, +
+[docs] +def ustarfilter(dfin, flag=None, isday=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None, ustarmin=0.01, nboot=1, undef=-9999, plot=False, seasonout=False, nmon=3, ntaclasses=7, corrcheck=0.5, nustarclasses=20, @@ -189,7 +193,7 @@

Source code for hesseflux.ustarfilter

             estr = ('Length of colhead must be number of columns in input'
                     ' array. len(colhead)=' + str(len(colhead)) +
                     ' shape(input)=(' + str(dfin.shape[0]) + ',' +
-                    str(dfin.shape[1])+').')
+                    str(dfin.shape[1]) + ').')
             raise ValueError(estr)
         assert date is not None, 'Date must be given if input is numpy arrary.'
         df['Datetime'] = pd.to_datetime(date, format=timeformat)
@@ -313,10 +317,10 @@ 

Source code for hesseflux.ustarfilter

     yrmin  = df.index.min().year
     nyears = yrmax - yrmin + 1
     ndays  = (df.index.max() - df.index.min()).days + 1
-    assert ndays//nyears > 360, 'Full years must be given.'
+    assert ndays // nyears > 360, 'Full years must be given.'
 
     # calculate thresholds
-    nperiod = 12//nmon  # number of nmon periods per year
+    nperiod = 12 // nmon  # number of nmon periods per year
     if seasonout:
         bustars = np.ones((nboot, nyears, nperiod)) * undef
     else:
@@ -349,8 +353,8 @@ 

Source code for hesseflux.ustarfilter

                 flag_p   = ( (~isday_b) &
                              (ff_b[fc_id] == 0) & (ff_b[ustar_id] == 0) &
                              (ff_b[ta_id] == 0) &
-                             (df_b.index.month > p*nmon) &
-                             (df_b.index.month <= (p+1)*nmon) )
+                             (df_b.index.month > p * nmon) &
+                             (df_b.index.month <= (p + 1) * nmon) )
                 fc_p    = df_b.loc[flag_p, fc_id]
                 ustar_p = df_b.loc[flag_p, ustar_id]
                 ta_p    = df_b.loc[flag_p, ta_id]
@@ -361,11 +365,11 @@ 

Source code for hesseflux.ustarfilter

                     continue
                 ta_q = np.quantile(
                     ta_p,
-                    np.arange(ntaclasses + 1, dtype=np.float) /
-                    np.float(ntaclasses))
+                    np.arange(ntaclasses + 1, dtype=float) /
+                    float(ntaclasses))
                 ta_q[0] -= 0.1  # 1st include min
                 for t in range(ntaclasses):
-                    iita    = (ta_p > ta_q[t]) & (ta_p <= ta_q[t+1])
+                    iita    = (ta_p > ta_q[t]) & (ta_p <= ta_q[t + 1])
                     fc_t    = fc_p[iita]
                     ustar_t = ustar_p[iita]
                     ta_t    = ta_p[iita]
@@ -378,17 +382,17 @@ 

Source code for hesseflux.ustarfilter

                     # ustar classes
                     ustar_q = np.quantile(
                         ustar_t,
-                        np.arange(nustarclasses + 1, dtype=np.float) /
-                        np.float(nustarclasses))
+                        np.arange(nustarclasses + 1, dtype=float) /
+                        float(nustarclasses))
                     ustar_q[0] -= 0.01  # 1st include min
-                    for u in range(nustarclasses-1):
+                    for u in range(nustarclasses - 1):
                         iiustar = ((ustar_t > ustar_q[u]) &
-                                   (ustar_t <= ustar_q[u+1]))
+                                   (ustar_t <= ustar_q[u + 1]))
                         fc_u    = fc_t[iiustar]
-                        fc_a    = fc_t[ustar_t > ustar_q[u+1]]
+                        fc_a    = fc_t[ustar_t > ustar_q[u + 1]]
 
-                        if abs(fc_u.mean()) >= abs(plateaucrit*fc_a.mean()):
-                            custars.append(ustar_q[u+1])
+                        if abs(fc_u.mean()) >= abs(plateaucrit * fc_a.mean()):
+                            custars.append(ustar_q[u + 1])
                             break
 
                 # median of thresholds of all temperature classes =
@@ -423,7 +427,8 @@ 

Source code for hesseflux.ustarfilter

                 else:
                     flag_b = ( (~isday_b) &
                                (ff_b[ustar_id] == 0) )
-                    bustars[b, y] = np.quantile(df_b.loc[flag_b, ustar_id], 0.9)
+                    bustars[b, y] = np.quantile(df_b.loc[flag_b, ustar_id],
+                                                0.9)
 
     # set minimum ustar threshold
     bustars = np.maximum(bustars, ustarmin)
@@ -434,14 +439,14 @@ 

Source code for hesseflux.ustarfilter

     # flag out with original DatetimeIndex
     off    = ustar_in.astype(int)
     off[:] = 0
-    ii     = np.zeros(len(off), dtype=np.bool)
+    ii     = np.zeros(len(off), dtype=bool)
     if seasonout:
         for y in range(nyears):
             yy = yrmin + y
             for p in range(nperiod):
                 iiyr = ( (df.index.year == yy) &   # df DatetimeIndex
-                         (df.index.month > p*nmon) &
-                         (df.index.month <= (p+1)*nmon) )
+                         (df.index.month > p * nmon) &
+                         (df.index.month <= (p + 1) * nmon) )
                 ii[iiyr] = ustar_in[iiyr] < oustars[1, y, p]
     else:
         for y in range(nyears):
@@ -451,11 +456,14 @@ 

Source code for hesseflux.ustarfilter

     off[ii] = 2  # original DatetimeIndex
 
     if plot:
+        import matplotlib as mpl
+        mpl.use('PDF')  # set directly after import matplotlib
+        from matplotlib.backends.backend_pdf import PdfPages
         import matplotlib.pyplot as plt
-        import matplotlib.backends.backend_pdf as pdf
-        pd.plotting.register_matplotlib_converters()
+        # import matplotlib.backends.backend_pdf as PdfPages
+        # pd.plotting.register_matplotlib_converters()
 
-        pp = pdf.PdfPages('ustarfilter.pdf')
+        pp = PdfPages('ustarfilter.pdf')
         if seasonout:
             for y in range(nyears):
                 yy = yrmin + y
@@ -470,8 +478,8 @@ 

Source code for hesseflux.ustarfilter

                     flag_p = ( (~isday_f) &
                                (ff_f[fc_id] == 0) & (ff_f[ustar_id] == 0) &
                                (ff_f[ta_id] == 0) &
-                               (df_f.index.month > p*nmon) &
-                               (df_f.index.month <= (p+1)*nmon) )
+                               (df_f.index.month > p * nmon) &
+                               (df_f.index.month <= (p + 1) * nmon) )
                     fc_p    = df_f.loc[flag_p, fc_id]
                     ustar_p = df_f.loc[flag_p, ustar_id]
 
@@ -522,6 +530,7 @@ 

Source code for hesseflux.ustarfilter

         return oustars, off
+ if __name__ == '__main__': import doctest doctest.testmod() @@ -573,7 +582,7 @@

Related Topics

- +
diff --git a/docs/html/_modules/index.html b/docs/html/_modules/index.html index 37ef691..89c1611 100644 --- a/docs/html/_modules/index.html +++ b/docs/html/_modules/index.html @@ -1,24 +1,24 @@ - - + - Overview: module code — hesseflux 5.1.dev0 documentation - - - - - - + Overview: module code — hesseflux 5.1.dev2 documentation + + + + + + + + - @@ -81,7 +81,7 @@

Related Topics

- +
diff --git a/docs/html/_sources/contents.rst.txt b/docs/html/_sources/contents.rst.txt index 6a7737a..cd4301c 100644 --- a/docs/html/_sources/contents.rst.txt +++ b/docs/html/_sources/contents.rst.txt @@ -11,3 +11,5 @@ Contents api changelog authors +.. + reddyproc diff --git a/docs/html/_sources/userguide.rst.txt b/docs/html/_sources/userguide.rst.txt index d328c0f..1033a16 100644 --- a/docs/html/_sources/userguide.rst.txt +++ b/docs/html/_sources/userguide.rst.txt @@ -2,34 +2,37 @@ User Guide ---------- -``hesseflux`` collects functions used for processing Eddy covariance data of the -ICOS_ ecosystem site FR-Hes_. +``hesseflux`` collects functions used for processing Eddy covariance +data of the ICOS_ ecosystem site FR-Hes_. -The post-processing functionality for Eddy flux data is similar to the R-package -REddyProc_ and includes basically the steps described in `Papale et al. -(Biogeosciences, 2006)`_ plus some extensions such as the daytime method of flux -partitioning (`Lasslop et al., Global Change Biology 2010`_) and the estimation -of uncertainties on the fluxes as in `Lasslop et al. (Biogeosci, 2008)`_. +The post-processing functionality for Eddy flux data is similar to the +R-package REddyProc_ and includes basically the steps described in +`Papale et al. (Biogeosciences, 2006)`_ plus some extensions such as +the daytime method of flux partitioning (`Lasslop et al., Global +Change Biology 2010`_) and the estimation of uncertainties on the +fluxes as in `Lasslop et al. (Biogeosci, 2008)`_. -Only the post-processing steps are described here. We are happy to discuss any -processing or post-processing directly. Contact us at mc (at) macu (dot) de. +Only the post-processing steps are described here. We are happy to +discuss any processing or post-processing directly. Contact us at mc +(at) macu (dot) de. 
europe-fluxdata.eu file format ============================== -The first processing steps at the ICOS ecosystem site FR-Hes (not shown) brings -the data in a format that can be submitted to the database -`europe-fluxdata.eu`_. The database predates ICOS and is somewhat a precursor of -the ICOS data processing. +The first processing steps at the ICOS ecosystem site FR-Hes (not +shown) brings the data in a format that can be submitted to the +database `europe-fluxdata.eu`_. The database predates ICOS and is +somewhat a precursor of the ICOS data processing. -The file format of `europe-fluxdata.eu` is hence very similar to the ICOS -format. The only known difference to us is the unit of atmospheric pressure, -which is in hPa in `europe-fluxdata.eu`_ and in kPa in `ICOS ecosystems`_. The -file format has notably one header line with variable names. There are no units -in the file. ``hesseflux`` provides a little helper script -`europe-fluxdata_units.py` in the `bin` directory that adds a second header line -with units. The script can be run on the output as: +The file format of `europe-fluxdata.eu` is hence very similar to the +ICOS format. The only known difference to us is the unit of +atmospheric pressure, which is in hPa in `europe-fluxdata.eu`_ and in +kPa in `ICOS ecosystems`_. The file format has notably one header line +with variable names. There are no units in the file. ``hesseflux`` +provides a little helper script `europe-fluxdata_units.py` in the +`bin` directory that adds a second header line with units. The script +can be run on the output as: .. code-block:: bash @@ -39,20 +42,21 @@ with units. The script can be run on the output as: Post-processing Eddy covariance data ==================================== -The script `postproc_europe-fluxdata.py` in the `example` directory provides a -template for post-processing data that is in the `europe-fluxdata.eu`_ file -format. It basically makes all steps described in `Papale et al. -(Biogeosciences, 2006)`_. 
The script is governed by a configuration file in -Python's standard :mod:`configparser` format. The example configuration file -`hesseflux_example.cfg` in the `example` directory is highly commented and -should be (almost) self-explanatory. The script is called like: +The script `postproc_europe-fluxdata.py` in the `example` directory +provides a template for post-processing data that is in the +`europe-fluxdata.eu`_ file format. It basically makes all steps +described in `Papale et al. (Biogeosciences, 2006)`_. The script is +governed by a configuration file in Python's standard +:mod:`configparser` format. The example configuration file +`hesseflux_example.cfg` in the `example` directory is highly commented +and should be (almost) self-explanatory. The script is called like: .. code-block:: bash python postproc_europe-fluxdata.py hesseflux_example.cfg -This script should be taken as a template for one's own post-processing but -includes most standard post-processing steps. +This script should be taken as a template for one's own +post-processing but includes most standard post-processing steps. Here we describe the main parts of the post-processing script. @@ -60,8 +64,8 @@ Here we describe the main parts of the post-processing script. Reading the configuration file ------------------------------ -The script `postproc_europe-fluxdata.py` starts by reading the configuration -file `hesseflux_example.cfg`: +The script `postproc_europe-fluxdata.py` starts by reading the +configuration file `hesseflux_example.cfg`: .. code-block:: python @@ -74,8 +78,9 @@ file `hesseflux_example.cfg`: config.read(configfile) It then analyses the configuration options. The first section in the -configuration file are the options controlling which steps shall be performed by -the script. The section in the `hesseflux_example.cfg` looks like: +configuration file are the options controlling which steps shall be +performed by the script. 
The section in the `hesseflux_example.cfg` +looks like: .. code-block:: python @@ -107,15 +112,17 @@ And the code in `postproc_europe-fluxdata.py` is: fill = config['POSTSWITCH'].getboolean('fill', True) fluxerr = config['POSTSWITCH'].getboolean('fluxerr', True) -All options are boolean and set to `True` by default if they are not given in -the configuration file. All post-processing steps except uncertainty estimation -of flux data would be performed in the given example. +All options are boolean and set to `True` by default if they are not +given in the configuration file. All post-processing steps except +uncertainty estimation of flux data would be performed in the given +example. Read the data ------------- -The script would then read in the data. The section in the configuration file is: +The script would then read in the data. The section in the +configuration file is: .. code-block:: python @@ -159,9 +166,10 @@ The analysis of the options in `postproc_europe-fluxdata.py` is: Note that strings are given without quotes in the configuration file. -`inputfile` can be a single filename or a comma-separated list of filenames. If -it is missing or empty, the script will try to open a GUI, where one can choose -input files. The data will be appended if several input files are given. +`inputfile` can be a single filename or a comma-separated list of +filenames. If it is missing or empty, the script will try to open a +GUI, where one can choose input files. The data will be appended if +several input files are given. 
The (first) input file is read as: @@ -169,36 +177,37 @@ The (first) input file is read as: import pandas as pd - parser = lambda date: pd.to_datetime(date, format=timeformat) infile = inputfile[0] df = pd.read_csv(infile, sep, skiprows=skiprows, parse_dates=[0], - date_parser=parser, index_col=0, header=0) - -:mod:`pandas` will use the first column as index (`index_col=0`), assuming that -these are dates (`parse_dates=[0]`) in the format `timeformat`, where columns -are separated by `sep`. The defaults follow the `europe-fluxdata.eu`_ format but -similar formats may be used, and script and/or configuration file can be adapted -easily. Only variable names (still) have to follow `europe-fluxdata.eu`_, -`ICOS`_ or `Ameriflux`_ format at the moment. If the input file has a second + date_format=timeformat, index_col=0, header=0) + +:mod:`pandas` will use the first column as index (`index_col=0`), +assuming that these are dates (`parse_dates=[0]`) in the format +`timeformat`, where columns are separated by `sep`. The defaults +follow the `europe-fluxdata.eu`_ format but similar formats may be +used, and script and/or configuration file can be adapted easily. Only +variable names (still) have to follow `europe-fluxdata.eu`_, `ICOS`_ +or `Ameriflux`_ format at the moment. If the input file has a second header line with units, one can skip it giving `skiprows=[1]` (not `skiprows=1`). -All input files are supposed to be in the same format if `inputfile` is a -comma-separated list of filenames, and they will be read with the same command -above. The :mod:`pandas` dataframes (`df`) will simply be appended. +All input files are supposed to be in the same format if `inputfile` +is a comma-separated list of filenames, and they will be read with the +same command above. The :mod:`pandas` dataframes (`df`) will simply be +appended. The flag dataframe ------------------ -All Not-a-Number (NaN) values will be set to `undef` and will be ignored in the -following. 
+All Not-a-Number (NaN) values will be set to `undef` and will be +ignored in the following. -This happens via a second dataframe (`dff`), having the same columns and index -as the input dataframe `df`, representing quality flags. All cells that have a -value other than `0` in the flag dataframe `dff` will be ignored in the -dataframe `df`. This means all cells of `df` with `undef` will be set to `2` in -`dff` immediately: +This happens via a second dataframe (`dff`), having the same columns +and index as the input dataframe `df`, representing quality flags. All +cells that have a value other than `0` in the flag dataframe `dff` +will be ignored in the dataframe `df`. This means all cells of `df` +with `undef` will be set to `2` in `dff` immediately: .. code-block:: python @@ -214,12 +223,13 @@ dataframe `df`. This means all cells of `df` with `undef` will be set to `2` in Day / night ----------- -Most post-processing routines differentiate between daytime and nighttime data. -`Papale et al. (Biogeosciences, 2006)`_ use a threshold of 20 W m\ :sup:`-2` of -global radiation to distinguish between day and night. `REddyProc`_ uses -incoming shortwave radiation greater than 10 W m\ :sup:`2` as daytime. The -shortwave radiation threshold `swthr` (same name as in ReddyProc) can be used to -define the appropriate threshold. The default is 10 W m\ :sup:`2`. The column +Most post-processing routines differentiate between daytime and +nighttime data. `Papale et al. (Biogeosciences, 2006)`_ use a +threshold of 20 W m\ :sup:`-2` of global radiation to distinguish +between day and night. `REddyProc`_ uses incoming shortwave radiation +greater than 10 W m\ :sup:`2` as daytime. The shortwave radiation +threshold `swthr` (same name as in ReddyProc) can be used to define +the appropriate threshold. The default is 10 W m\ :sup:`2`. The column `SW_IN_1_1_1` has to exist in the input data. .. code-block:: python @@ -231,8 +241,8 @@ define the appropriate threshold. 
The default is 10 W m\ :sup:`2`. The column Data check ---------- -`postproc_europe-fluxdata.py` checks the units of air temperature (i.e. the -first column starting with `TA_`). +`postproc_europe-fluxdata.py` checks the units of air temperature +(i.e. the first column starting with `TA_`). .. code-block:: python @@ -245,15 +255,17 @@ first column starting with `TA_`). tkelvin = 0. df.loc[dff[hout[0]]==0, hout[0]] += tkelvin -:func:`_findfirststart(starts, names)` is a helper function that finds the first -occurrence in `names` that starts with the string `starts`. This helper function -is used for the moment until ``hesseflux`` has the functionality that the user -can give individual variable names. +:func:`_findfirststart(starts, names)` is a helper function that finds +the first occurrence in `names` that starts with the string +`starts`. This helper function is used for the moment until +``hesseflux`` has the functionality that the user can give individual +variable names. -The script calculates air vapour pressure deficit `VPD_PI_1_1_1` from air -temperature and relative humidity (i.e. the first column starting with `RH_`) if -not given in input data using the function :func:`esat` of `pyjams`_ for -saturation vapour pressure: +The script calculates air vapour pressure deficit `VPD_PI_1_1_1` from +air temperature and relative humidity (i.e. the first column starting +with `RH_`) if not given in input data using the function +:func:`esat` of `pyjams`_ for saturation vapour +pressure: .. code-block:: python @@ -303,8 +315,8 @@ It further assures that VPD is in Pa for further calculations. vpdpa = 1. # Pa df.loc[dff[hout[0]] == 0, hout[0]] *= vpdpa -And finally determines the time intervals of the input data `dtsec` (s) and the -number of time steps per day `ntday`. +And finally determines the time intervals of the input data +`dtsec` (s) and the number of time steps per day `ntday`. .. 
code-block:: python @@ -316,10 +328,11 @@ number of time steps per day `ntday`. Spike / outlier flagging ------------------------ -If `outlier=True` is set in the configuration file, spikes will be detected with -the method given in `Papale et al. (Biogeosciences, 2006)`_. A median absolute -deviation (MAD) filter will be used on the second derivatives of the time series -in two-week chunks. The section in `hesseflux_example.cfg` looks like: +If `outlier=True` is set in the configuration file, spikes will be +detected with the method given in `Papale et al. (Biogeosciences, +2006)`_. A median absolute deviation (MAD) filter will be used on the +second derivatives of the time series in two-week chunks. The section +in `hesseflux_example.cfg` looks like: .. code-block:: python @@ -338,29 +351,31 @@ in two-week chunks. The section in `hesseflux_example.cfg` looks like: # int deriv = 2 -`nfill` is the number of days that are treated at once. `nfill=1` means that the -time series will be stepped through day by day. `nscan` are the days to be -considered when calculating the mean absolute deviations. `nscan=15` means that -7 days before the fill day, the fill day itself, and 7 days after the fill day -will be used for the robust statistic. However, only spikes detected within the -inner `nfill` days will be flagged in the `nscan` days. Spikes will be detected -if they deviate more than `z` mean absolute deviations from the median. - -For example, `nfill=3`, `nscan=15`, and `z=7` means that the time series will be -treated in steps of 3 days. Each 3 days, MAD statistics will be calculated using -15 days around the middle of the 3 days. Then all values within the 3 days that -deviate more 7 mean absolute deviations from the median of the 15 days will be -flagged. - -`deriv=2` applies the MAD filter to the second derivatives. A spike has -normally a strong curvature and hence a large second derivative. `deriv=1` is -currently not implemented. 
`deriv=0` applies the filter to the raw time series. -This might be useful to find outliers in smooth time series such as soil -moisture. `deriv=0` is also used on the 20 Hz Eddy raw data in the quality and -uncertainty strategy of `Mauder et al. (Agric Forest Meteo, 2013)`_. - -The default values, if options are not given in the configuration file, are -`nscan=15`, `nfill=1`, `z=7`, and `deriv=2`. +`nfill` is the number of days that are treated at once. `nfill=1` +means that the time series will be stepped through day by day. `nscan` +are the days to be considered when calculating the mean absolute +deviations. `nscan=15` means that 7 days before the fill day, the fill +day itself, and 7 days after the fill day will be used for the robust +statistic. However, only spikes detected within the inner `nfill` days +will be flagged in the `nscan` days. Spikes will be detected if they +deviate more than `z` mean absolute deviations from the median. + +For example, `nfill=3`, `nscan=15`, and `z=7` means that the time +series will be treated in steps of 3 days. Each 3 days, MAD statistics +will be calculated using 15 days around the middle of the 3 days. Then +all values within the 3 days that deviate more 7 mean absolute +deviations from the median of the 15 days will be flagged. + +`deriv=2` applies the MAD filter to the second derivatives. A spike +has normally a strong curvature and hence a large second +derivative. `deriv=1` is currently not implemented. `deriv=0` applies +the filter to the raw time series. This might be useful to find +outliers in smooth time series such as soil moisture. `deriv=0` is +also used on the 20 Hz Eddy raw data in the quality and uncertainty +strategy of `Mauder et al. (Agric Forest Meteo, 2013)`_. + +The default values, if options are not given in the configuration +file, are `nscan=15`, `nfill=1`, `z=7`, and `deriv=2`. 
`postproc_europe-fluxdata.py` calls the spike detection like this: @@ -378,17 +393,17 @@ The default values, if options are not given in the configuration file, are for ii, hh in enumerate(hout): dff.loc[sflag[hh] == 2, hh] = 3 -The function :func:`madspikes` returns flag columns for the input variables -where spiked data is flagged as 2. The scripts sets the corresponding columns in -the flag dataframe `dff` to 3 (3 is used just to keep track where the flag was -set). +The function :func:`~hesseflux.madspikes.madspikes` returns flag +columns for the input variables where spiked data is flagged as 2. The +scripts sets the corresponding columns in the flag dataframe `dff` to +3 (3 is used just to keep track where the flag was set). u* filtering ------------ -If `ustar=True` is set in the configuration file, a u*-filter will be applied -following `Papale et al. (Biogeosciences, 2006)`_. +If `ustar=True` is set in the configuration file, a u*-filter will be +applied following `Papale et al. (Biogeosciences, 2006)`_. The section in `hesseflux_example.cfg` looks like: @@ -416,24 +431,27 @@ The section in `hesseflux_example.cfg` looks like: # bool applyustarflag = True -A minimum threshold `ustarmin` is defined under which data is flagged by -default. `Papale et al. (Biogeosciences, 2006)`_ suggest 0.1 for forests and -0.01 for other land cover types. `postproc_europe-fluxdata.py` sets 0.01 as its -default value. Uncertainty of the u* threshold is calculated via bootstrapping -in Papale et al. `nboot` gives the number of bootstrapping for the uncertainty -estimate of the u* threshold. The algorithm divides the input data in 6 -temperature classes and 20 u* classes within each temperature class per season. -It then determines the threshold for each season as the average u* of the u* -class where the average CO2 flux is less than `plateaucrit` times the average of -all CO2 fluxes with u* greater than the u* class. `Papale et al. 
-(Biogeosciences, 2006)`_ took 6 temperature classes and `plateaucrit=0.99`, -while `REddyProc`_ takes 7 temperature classes and `plateaucrit=0.95`, which are -also the defaults in ``hesseflux``. `Papale et al. (Biogeosciences, 2006)`_ also -used the maximum of the four seasonal u* thresholds as the threshold applied to -all the year. If `seasonout=True`, the seasonal u* thresholds will be applied -instead of the maximum of four seasonal u* thresholds. One can also set -`applyustarflag=False` to just calculate the u* thresholds without applying them -to experiment with different parameter values. +A minimum threshold `ustarmin` is defined under which data is flagged +by default. `Papale et al. (Biogeosciences, 2006)`_ suggest 0.1 for +forests and 0.01 for other land cover +types. `postproc_europe-fluxdata.py` sets 0.01 as its default +value. Uncertainty of the u* threshold is calculated via bootstrapping +in Papale et al. `nboot` gives the number of bootstrapping for the +uncertainty estimate of the u* threshold. The algorithm divides the +input data in 6 temperature classes and 20 u* classes within each +temperature class per season. It then determines the threshold for +each season as the average u* of the u* class where the average CO2 +flux is less than `plateaucrit` times the average of all CO2 fluxes +with u* greater than the u* class. `Papale et al. (Biogeosciences, +2006)`_ took 6 temperature classes and `plateaucrit=0.99`, while +`REddyProc`_ takes 7 temperature classes and `plateaucrit=0.95`, which +are also the defaults in ``hesseflux``. `Papale et +al. (Biogeosciences, 2006)`_ also used the maximum of the four +seasonal u* thresholds as the threshold applied to all the year. If +`seasonout=True`, the seasonal u* thresholds will be applied instead +of the maximum of four seasonal u* thresholds. One can also set +`applyustarflag=False` to just calculate the u* thresholds without +applying them to experiment with different parameter values. 
The u*-filtering is then performed as: @@ -469,16 +487,17 @@ The u*-filtering is then performed as: for ii, hh in enumerate(hout): dff.loc[flag == 2, hh] = 5 -The function :func:`ustarfilter` returns the u* 5, 50 and 95 percentiles of the -bootstrapped u* thresholds as well as flag columns, which is 0 except where u* -is smaller than the median u*-threshold. The scripts sets the columns of the -Eddy fluxes in the flag dataframe `dff` to 5 (5 to keep track where the flag was -set). +The function :func:`~hesseflux.ustarfilter.ustarfilter` returns the u* +5, 50 and 95 percentiles of the bootstrapped u* thresholds as well as +flag columns, which is 0 except where u* is smaller than the median +u*-threshold. The scripts sets the columns of the Eddy fluxes in the +flag dataframe `dff` to 5 (5 to keep track where the flag was set). -One might not want to do u* filtering, but use for example Integral Turbulence -Characteristics (ITC) that were calculated, for example, with -`EddyPro`_\ :sup:`(R)`. These should be set right at the start after reading the input -data into the dataframe `df` and producing the flag dataframe `dff` like: +One might not want to do u* filtering, but use for example Integral +Turbulence Characteristics (ITC) that were calculated, for example, +with `EddyPro`_\ :sup:`(R)`. These should be set right at the start +after reading the input data into the dataframe `df` and producing the +flag dataframe `dff` like: .. code-block:: python @@ -488,12 +507,13 @@ data into the dataframe `df` and producing the flag dataframe `dff` like: Partitioning of Net Ecosystem Exchange -------------------------------------- -If `partition=True` is set in the configuration file, two estimates of Gross -Primary Productivity (GPP) and Ecosystem Respiration (RECO) are calculated: -firstly with the method of `Reichstein et al. (Glob Change Biolo, 2005)`_ using -nighttime data only, and secondly with the method of `Lasslop et al. 
(Glob -Change Biolo, 2010)`_ using a light-response curve on 'daytime' data. The -configuration `hesseflux_example.cfg` gives only one option in this section: +If `partition=True` is set in the configuration file, two estimates of +Gross Primary Productivity (GPP) and Ecosystem Respiration (RECO) are +calculated: firstly with the method of `Reichstein et al. (Glob Change +Biolo, 2005)`_ using nighttime data only, and secondly with the method +of `Lasslop et al. (Glob Change Biolo, 2010)`_ using a light-response +curve on 'daytime' data. The configuration `hesseflux_example.cfg` +gives only one option in this section: .. code-block:: python @@ -503,11 +523,11 @@ configuration `hesseflux_example.cfg` gives only one option in this section: # bool nogppnight = False -Many people find it unaesthetic that the 'daytime' method gives negative GPP at -night. We esteem this the correct behaviour, reflecting the uncertainty in the -gross flux estimates. However, one can set `nogppnight=True` to set GPP=0 at -night and RECO=NEE in this case, the latter having then all variability of the -net fluxes. +Many people find it unaesthetic that the 'daytime' method gives +negative GPP at night. We esteem this the correct behaviour, +reflecting the uncertainty in the gross flux estimates. However, one +can set `nogppnight=True` to set GPP=0 at night and RECO=NEE in this +case, the latter having then all variability of the net fluxes. The partitioning is calculated as: @@ -550,10 +570,11 @@ The partitioning is calculated as: Gap-filling / Imputation ------------------------ -Marginal Distribution Sampling (MDS) of `Reichstein et al. (Glob Change Biolo, -2005)`_ is implemented as imputation or so-called gap-filling algorithm. The -algorithm looks for similar conditions in the vicinity of a missing data point, -if option `fill=True`. The configuration file is: +Marginal Distribution Sampling (MDS) of `Reichstein et al. 
(Glob +Change Biolo, 2005)`_ is implemented as imputation or so-called +gap-filling algorithm. The algorithm looks for similar conditions in +the vicinity of a missing data point, if option `fill=True`. The +configuration file is: .. code-block:: python @@ -571,12 +592,13 @@ if option `fill=True`. The configuration file is: # avoid extrapolation in gaps longer than longgap days longgap = 60 -If a flux data point is missing, times with incoming shortwave radiation in the -range of `sw_dev` around the actual shortwave radiation will be looked for, as -well as air temperatures within `ta_dev` and air vapour pressure deficit within -`vpd_dev`. The mean of flux values at the similar conditions is then taken as -fill value. The function does not fill long gaps longer than `longgap` days. A -good summary is given in Fig. A1 of `Reichstein et al. (Glob Change Biolo, +If a flux data point is missing, times with incoming shortwave +radiation in the range of `sw_dev` around the actual shortwave +radiation will be looked for, as well as air temperatures within +`ta_dev` and air vapour pressure deficit within `vpd_dev`. The mean of +flux values at the similar conditions is then taken as fill value. The +function does not fill long gaps longer than `longgap` days. A good +summary is given in Fig. A1 of `Reichstein et al. (Glob Change Biolo, 2005)`_. The script invokes MDS as: @@ -613,22 +635,25 @@ The script invokes MDS as: df = pd.concat([df, df_f], axis=1) dff = pd.concat([dff, dff_f], axis=1) -The function :func:`gapfill` returns the filled columns `df_f` as well as flag -columns `dff_f` indicating fill quality. Fill quality A-C of `Reichstein et al. -(Glob Change Biolo, 2005)`_ are translated to quality flags 1-3. +The function :func:`~hesseflux.gapfill.gapfill` returns the filled +columns `df_f` as well as flag columns `dff_f` indicating fill +quality. Fill quality A-C of `Reichstein et al. (Glob Change Biolo, +2005)`_ are translated to quality flags 1-3. 
Uncertainty estimates of flux data ---------------------------------- `Lasslop et al. (Biogeosci, 2008)`_ presented an algorithm to estimate -uncertainties of Eddy covariance fluxes using Marginal Distribution Sampling -(MDS). The gap-filling function :func:`gapfill` can be used for uncertainty -estimation giving the keyword `err=True`. The same thresholds as for gap-filling -are used. +uncertainties of Eddy covariance fluxes using Marginal Distribution +Sampling (MDS). The gap-filling function +:func:`~hesseflux.gapfill.gapfill` can be used for uncertainty +estimation giving the keyword `err=True`. The same thresholds as for +gap-filling are used. -The script `postproc_europe-fluxdata.py` uses the function :func:`gapfill` to -calculate flux uncertainties like: +The script `postproc_europe-fluxdata.py` uses the function +:func:`~hesseflux.gapfill.gapfill` to calculate flux uncertainties +like: .. code-block:: python @@ -667,17 +692,17 @@ calculate flux uncertainties like: for cc in range(len(colin)): dff[colout[cc]] = dff[colin[cc]] -We recommend, however, to calculate flux uncertainties with the Eddy covariance -raw data as described in `Mauder et al. (Agric Forest Meteo, 2013)`_. This is -for example implemented in the processing softwares `EddyPro`_\ :sup:`(R)` or -`TK3`_. +We recommend, however, to calculate flux uncertainties with the Eddy +covariance raw data as described in `Mauder et al. (Agric Forest +Meteo, 2013)`_. This is for example implemented in the processing +softwares `EddyPro`_\ :sup:`(R)` or `TK3`_. Writing the output file ----------------------- The dataframe is written to the output file with :mod:`pandas` -:func:`pandas.Dataframe.to_csv`: +:func:`to_csv` method: .. 
code-block:: python @@ -702,13 +727,15 @@ The configuration for output is: # bool outflagcols = False -If `outputfile` is missing or empty, the script will try to open a GUI, where -one can choose an output directory and the filename will then be name of the -configuration file with the suffix '.csv'. If `outundef=True` then all values in -`df` with a flag value in `dff` greater than zero will be set to `undef`. The -script can also add flag columns, prefixed with `flag_`, for each column in -`df`, if `outflagcols=True`. The script will always output the columns with the -flags for fill quality if gap-filling was performed: option `fill=True`. +If `outputfile` is missing or empty, the script will try to open a +GUI, where one can choose an output directory and the filename will +then be name of the configuration file with the suffix '.csv'. If +`outundef=True` then all values in `df` with a flag value in `dff` +greater than zero will be set to `undef`. The script can also add flag +columns, prefixed with `flag_`, for each column in `df`, if +`outflagcols=True`. The script will always output the columns with the +flags for fill quality if gap-filling was performed: option +`fill=True`. 
The whole code to write the output file is: @@ -724,7 +751,7 @@ The whole code to write the output file is: if outundef: for cc in df.columns: if cc.split('_')[-4] != 'f': # exclude gap-filled columns - df[cc].where(dff[cc] == 0, other=undef, inplace=True) + df[cc] = df[cc].where(dff[cc] == 0, other=undef) if outflagcols: def _add_flag(c): return 'flag_' + c diff --git a/docs/html/_static/alabaster.css b/docs/html/_static/alabaster.css index 517d0b2..e3174bf 100644 --- a/docs/html/_static/alabaster.css +++ b/docs/html/_static/alabaster.css @@ -69,6 +69,11 @@ div.relations { } +div.sphinxsidebar { + max-height: 100%; + overflow-y: auto; +} + div.sphinxsidebar a { color: #444; text-decoration: none; @@ -155,6 +160,14 @@ div.sphinxsidebar input { font-size: 1em; } +div.sphinxsidebar #searchbox input[type="text"] { + width: 160px; +} + +div.sphinxsidebar .search > div { + display: table-cell; +} + div.sphinxsidebar hr { border: none; height: 1px; @@ -638,15 +651,7 @@ a:hover tt, a:hover code { display: none!important; } -/* Make nested-list/multi-paragraph items look better in Releases changelog - * pages. Without this, docutils' magical list fuckery causes inconsistent - * formatting between different release sub-lists. - */ -div#changelog > div.section > ul > li > p:only-child { - margin-bottom: 0; -} - -/* Hide fugly table cell borders in ..bibliography:: directive output */ +/* Hide ugly table cell borders in ..bibliography:: directive output */ table.docutils.citation, table.docutils.citation td, table.docutils.citation th { border: none; /* Below needed in some edge cases; if not applied, bottom shadows appear */ diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css index 7577acb..e5179b7 100644 --- a/docs/html/_static/basic.css +++ b/docs/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. 
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -222,7 +222,7 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ div.body { - min-width: 360px; + min-width: inherit; max-width: 800px; } @@ -237,6 +237,10 @@ a.headerlink { visibility: hidden; } +a:visited { + color: #551A8B; +} + h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, @@ -670,6 +674,16 @@ dd { margin-left: 30px; } +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + dl > dd:last-child, dl > dd:last-child > :last-child { margin-bottom: 0; @@ -738,6 +752,14 @@ abbr, acronym { cursor: help; } +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + /* -- code displays --------------------------------------------------------- */ pre { diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js index d06a71d..4d67807 100644 --- a/docs/html/_static/doctools.js +++ b/docs/html/_static/doctools.js @@ -4,7 +4,7 @@ * * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js index 09a35b7..5aed7fa 100644 --- a/docs/html/_static/documentation_options.js +++ b/docs/html/_static/documentation_options.js @@ -1,6 +1,5 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '5.1.dev0', +const DOCUMENTATION_OPTIONS = { + VERSION: '5.1.dev2', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/docs/html/_static/language_data.js b/docs/html/_static/language_data.js index 250f566..367b8ed 100644 --- a/docs/html/_static/language_data.js +++ b/docs/html/_static/language_data.js @@ -5,7 +5,7 @@ * This script contains the language-specific data used by searchtools.js, * namely the list of stopwords, stemmer, scorer and splitter. * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -13,7 +13,7 @@ var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; -/* Non-minified version is copied as a separate JS file, is available */ +/* Non-minified version is copied as a separate JS file, if available */ /** * Porter Stemmer diff --git a/docs/html/_static/pygments.css b/docs/html/_static/pygments.css index 691aeb8..0d49244 100644 --- a/docs/html/_static/pygments.css +++ b/docs/html/_static/pygments.css @@ -17,6 +17,7 @@ span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #A00000 } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ .highlight .gr { color: #FF0000 } /* Generic.Error */ .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .highlight .gi { color: #00A000 } /* Generic.Inserted */ diff --git a/docs/html/_static/searchtools.js b/docs/html/_static/searchtools.js index 97d56a7..92da3f8 100644 --- a/docs/html/_static/searchtools.js +++ b/docs/html/_static/searchtools.js @@ -4,7 +4,7 @@ * * Sphinx JavaScript utilities for the full-text search. * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -57,12 +57,12 @@ const _removeChildren = (element) => { const _escapeRegExp = (string) => string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string -const _displayItem = (item, searchTerms) => { +const _displayItem = (item, searchTerms, highlightTerms) => { const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; - const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; const [docName, title, anchor, descr, score, _filename] = item; @@ -75,28 +75,35 @@ const _displayItem = (item, searchTerms) => { if (dirname.match(/\/index\/$/)) dirname = dirname.substring(0, dirname.length - 6); else if (dirname === "index/") dirname = ""; - requestUrl = docUrlRoot + dirname; + requestUrl = contentRoot + dirname; linkUrl = requestUrl; } else { // normal html builders - requestUrl = docUrlRoot + docName + docFileSuffix; + requestUrl = contentRoot + docName + docFileSuffix; linkUrl = docName + docLinkSuffix; } let linkEl = listItem.appendChild(document.createElement("a")); linkEl.href = linkUrl + anchor; linkEl.dataset.score = score; linkEl.innerHTML = title; - if (descr) + if (descr) { listItem.appendChild(document.createElement("span")).innerHTML = " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } else if (showSearchSummary) fetch(requestUrl) .then((responseData) => responseData.text()) .then((data) => { if (data) listItem.appendChild( - Search.makeSearchSummary(data, searchTerms) + Search.makeSearchSummary(data, searchTerms, anchor) ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + 
highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); }); Search.output.appendChild(listItem); }; @@ -109,26 +116,43 @@ const _finishSearch = (resultCount) => { ); else Search.status.innerText = _( - `Search finished, found ${resultCount} page(s) matching the search query.` - ); + "Search finished, found ${resultCount} page(s) matching the search query." + ).replace('${resultCount}', resultCount); }; const _displayNextItem = ( results, resultCount, - searchTerms + searchTerms, + highlightTerms, ) => { // results left, load the summary and display it // this is intended to be dynamic (don't sub resultsCount) if (results.length) { - _displayItem(results.pop(), searchTerms); + _displayItem(results.pop(), searchTerms, highlightTerms); setTimeout( - () => _displayNextItem(results, resultCount, searchTerms), + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), 5 ); } // search finished, update title and status message else _finishSearch(resultCount); }; +// Helper function used by query() to order search results. +// Each input is an array of [docname, title, anchor, descr, score, filename]. +// Order the results by score (in opposite order of appearance, since the +// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically. +const _orderResultsByScoreThenName = (a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; +}; /** * Default splitQuery function. 
Can be overridden in ``sphinx.search`` with a @@ -152,13 +176,26 @@ const Search = { _queued_query: null, _pulse_status: -1, - htmlToText: (htmlString) => { + htmlToText: (htmlString, anchor) => { const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + for (const removalQuery of [".headerlinks", "script", "style"]) { + htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); + } + if (anchor) { + const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`); + if (anchorContent) return anchorContent.textContent; + + console.warn( + `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.` + ); + } + + // if anchor not specified or not found, fall back to main content const docContent = htmlElement.querySelector('[role="main"]'); - if (docContent !== undefined) return docContent.textContent; + if (docContent) return docContent.textContent; + console.warn( - "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template." 
); return ""; }, @@ -231,16 +268,7 @@ const Search = { else Search.deferQuery(query); }, - /** - * execute search (requires search index to be loaded) - */ - query: (query) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - const allTitles = Search._index.alltitles; - const indexEntries = Search._index.indexentries; - + _parseQuery: (query) => { // stem the search terms and add them to the correct list const stemmer = new Stemmer(); const searchTerms = new Set(); @@ -276,16 +304,32 @@ const Search = { // console.info("required: ", [...searchTerms]); // console.info("excluded: ", [...excludedTerms]); - // array of [docname, title, anchor, descr, score, filename] - let results = []; + return [query, searchTerms, excludedTerms, highlightTerms, objectTerms]; + }, + + /** + * execute search (requires search index to be loaded) + */ + _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // Collect multiple result groups to be sorted separately and then ordered. + // Each is an array of [docname, title, anchor, descr, score, filename]. 
+ const normalResults = []; + const nonMainIndexResults = []; + _removeChildren(document.getElementById("search-progress")); - const queryLower = query.toLowerCase(); + const queryLower = query.toLowerCase().trim(); for (const [title, foundTitles] of Object.entries(allTitles)) { - if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { for (const [file, id] of foundTitles) { let score = Math.round(100 * queryLower.length / title.length) - results.push([ + normalResults.push([ docNames[file], titles[file] !== title ? `${titles[file]} > ${title}` : title, id !== null ? "#" + id : "", @@ -300,46 +344,47 @@ const Search = { // search for explicit entries in index directives for (const [entry, foundEntries] of Object.entries(indexEntries)) { if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { - for (const [file, id] of foundEntries) { - let score = Math.round(100 * queryLower.length / entry.length) - results.push([ + for (const [file, id, isMain] of foundEntries) { + const score = Math.round(100 * queryLower.length / entry.length); + const result = [ docNames[file], titles[file], id ? 
"#" + id : "", null, score, filenames[file], - ]); + ]; + if (isMain) { + normalResults.push(result); + } else { + nonMainIndexResults.push(result); + } } } } // lookup as object objectTerms.forEach((term) => - results.push(...Search.performObjectSearch(term, objectTerms)) + normalResults.push(...Search.performObjectSearch(term, objectTerms)) ); // lookup as search terms in fulltext - results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms)); // let the scorer override scores with a custom scoring function - if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); - - // now sort the results by score (in opposite order of appearance, since the - // display function below uses pop() to retrieve items) and then - // alphabetically - results.sort((a, b) => { - const leftScore = a[4]; - const rightScore = b[4]; - if (leftScore === rightScore) { - // same score: sort alphabetically - const leftTitle = a[1].toLowerCase(); - const rightTitle = b[1].toLowerCase(); - if (leftTitle === rightTitle) return 0; - return leftTitle > rightTitle ? -1 : 1; // inverted is intentional - } - return leftScore > rightScore ? 1 : -1; - }); + if (Scorer.score) { + normalResults.forEach((item) => (item[4] = Scorer.score(item))); + nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item))); + } + + // Sort each group of results by score and then alphabetically by name. + normalResults.sort(_orderResultsByScoreThenName); + nonMainIndexResults.sort(_orderResultsByScoreThenName); + + // Combine the result groups in (reverse) order. + // Non-main index entries are typically arbitrary cross-references, + // so display them after other results. 
+ let results = [...nonMainIndexResults, ...normalResults]; // remove duplicate search results // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept @@ -353,14 +398,19 @@ const Search = { return acc; }, []); - results = results.reverse(); + return results.reverse(); + }, + + query: (query) => { + const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query); + const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms); // for debugging //Search.lastresults = results.slice(); // a copy // console.info("search results:", Search.lastresults); // print the results - _displayNextItem(results, results.length, searchTerms); + _displayNextItem(results, results.length, searchTerms, highlightTerms); }, /** @@ -458,14 +508,18 @@ const Search = { // add support for partial matches if (word.length > 2) { const escapedWord = _escapeRegExp(word); - Object.keys(terms).forEach((term) => { - if (term.match(escapedWord) && !terms[word]) - arr.push({ files: terms[term], score: Scorer.partialTerm }); - }); - Object.keys(titleTerms).forEach((term) => { - if (term.match(escapedWord) && !titleTerms[word]) - arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); - }); + if (!terms.hasOwnProperty(word)) { + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + } + if (!titleTerms.hasOwnProperty(word)) { + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } } // no match but word was a required one @@ -488,9 +542,8 @@ const Search = { // create the mapping files.forEach((file) => { - if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) - fileMap.get(file).push(word); - else fileMap.set(file, [word]); + if (!fileMap.has(file)) fileMap.set(file, 
[word]); + else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); }); }); @@ -541,8 +594,8 @@ const Search = { * search summary for a given text. keywords is a list * of stemmed words. */ - makeSearchSummary: (htmlText, keywords) => { - const text = Search.htmlToText(htmlText); + makeSearchSummary: (htmlText, keywords, anchor) => { + const text = Search.htmlToText(htmlText, anchor); if (text === "") return null; const textLower = text.toLowerCase(); diff --git a/docs/html/_static/sphinx_highlight.js b/docs/html/_static/sphinx_highlight.js index aae669d..8a96c69 100644 --- a/docs/html/_static/sphinx_highlight.js +++ b/docs/html/_static/sphinx_highlight.js @@ -29,14 +29,19 @@ const _highlight = (node, addItems, text, className) => { } span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); parent.insertBefore( span, parent.insertBefore( - document.createTextNode(val.substr(pos + text.length)), + rest, node.nextSibling ) ); node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); if (isInSVG) { const rect = document.createElementNS( @@ -140,5 +145,10 @@ const SphinxHighlight = { }, }; -_ready(SphinxHighlight.highlightSearchWords); -_ready(SphinxHighlight.initEscapeListener); +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. 
+ */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/html/api.html b/docs/html/api.html index ee404fc..41f0d59 100644 --- a/docs/html/api.html +++ b/docs/html/api.html @@ -1,18 +1,17 @@ - - + - - - API Reference — hesseflux 5.1.dev0 documentation - - - - - - + + + API Reference — hesseflux 5.1.dev2 documentation + + + + + + @@ -20,8 +19,9 @@ + + - @@ -34,7 +34,7 @@
-

API Reference¶

+

API Reference¶

hesseflux provides functions used in the processing and post-processing of the Eddy covariance flux data

It was developed for the ICOS ecosystem site FR-Hes.

@@ -52,7 +52,7 @@
-

Subpackages¶

+

Subpackages¶

@@ -174,7 +174,7 @@

Related Topics

- +
- +
diff --git a/docs/html/index.html b/docs/html/index.html index a2ec4a4..76e14c6 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -1,18 +1,17 @@ - - + - - - hesseflux — hesseflux 5.1.dev0 documentation - - - - - - + + + hesseflux — hesseflux 5.1.dev2 documentation + + + + + + @@ -20,8 +19,9 @@ + + - @@ -34,15 +34,17 @@
-

hesseflux¶

+

hesseflux¶

hesseflux provides functions used in the processing and post-processing of the Eddy covariance flux data of the ICOS ecosystem site FR-Hes.

Zenodo DOI PyPI version -License -Build Status +License + +Build Status +
-

About hesseflux¶

+

About hesseflux¶

hesseflux collects functions used for processing Eddy covariance data of the ICOS ecosystem site FR-Hes.

The post-processing functionality for Eddy flux data is similar to the R-package @@ -51,16 +53,16 @@

About hessefluxLasslop et al., Global Change Biology 2010).

-

Documentation¶

+

Documentation¶

The complete documentation for hesseflux is available at:

-

Quick usage guide¶

+

Quick usage guide¶

-

Post-processing Eddy covariance data that is in europe-fluxdata.eu format¶

+

Post-processing Eddy covariance data that is in europe-fluxdata.eu format¶

An example script that makes all the steps described in Papale et al. (Biogeosciences, 2006) is given in the example directory. It is simply called:

python postproc_europe-fluxdata.py hesseflux_example.cfg
@@ -73,7 +75,7 @@ 

Post-processing Eddy covariance data that is in europe-fluxdata.eu format

-

Installation¶

+

Installation¶

The easiest way to install is via pip:

pip install hesseflux
 
@@ -89,7 +91,7 @@

Installation -

License¶

+

License¶

hesseflux is distributed under the MIT License. See the LICENSE file for details.

Copyright (c) 2009-2022 Matthias Cuntz

@@ -155,7 +157,7 @@

Related Topics

- +

gapfill(dfin[, flag, date, timeformat, ...])

@@ -69,7 +71,7 @@
-nee2gpp(dfin, flag=None, isday=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None, undef=-9999, method='reichstein', nogppnight=False, swthr=10.0)[source]¶
+nee2gpp(dfin, flag=None, isday=None, date=None, timeformat='%Y-%m-%d %H:%M:%S', colhead=None, undef=-9999, method='reichstein', nogppnight=False, swthr=10.0)[source]¶

Calculate photosynthesis (GPP) and ecosystem respiration (RECO) from Eddy covariance CO2 flux data.

It uses either

@@ -89,43 +91,53 @@
  • dfin (pandas.Dataframe or numpy.array) –

    time series of CO2 fluxes and air temperature, and possibly incoming shortwave radiation and air vapour pressure deficit.

    dfin can be a pandas.Dataframe with the columns -‘FC’ or ‘NEE’ (or starting with ‘FC_’ or ‘NEE_’) for observed CO2 flux [umol(CO2) m-2 s-1] +‘FC’ or ‘NEE’ (or starting with ‘FC_’ or ‘NEE_’) for observed +CO2 flux [umol(CO2) m-2 s-1] ‘TA’ (or starting with ‘TA_’) for air temperature [K]

    method=’lasslop’ or method=’day’ needs also -‘SW_IN’ (or starting with ‘SW_IN’) for incoming short-wave radiation [W m-2] +‘SW_IN’ (or starting with ‘SW_IN’) for incoming short-wave +radiation [W m-2] ‘VPD’ (or starting with ‘VPD’) for air vapour deficit [Pa] The index is taken as date variable.

    dfin can also me a numpy array with the same columns. In this case colhead, date, and possibly dateformat must be given.

  • -
  • flag (pandas.Dataframe or numpy.array, optional) –

    flag Dataframe or array has the same shape as dfin. Non-zero values in -flag will be treated as missing values in dfin.

    +
  • flag (pandas.Dataframe or numpy.array, optional) –

    flag Dataframe or array has the same shape as dfin. +Non-zero values in flag will be treated as missing values in dfin.

    flag must follow the same rules as dfin if pandas.Dataframe.

    -

    If flag is numpy array, df.columns.values will be used as column heads -and the index of dfin will be copied to flag.

    +

    If flag is numpy array, df.columns.values will be used as column +heads and the index of dfin will be copied to flag.

  • -
  • isday (array_like of bool, optional) –

    True when it is day, False when night. Must have the same length as dfin.shape[0].

    +
  • isday (array_like of bool, optional) –

    True when it is day, False when night. Must have the same length +as dfin.shape[0].

    If isday is not given, dfin must have a column with head ‘SW_IN’ or starting with ‘SW_IN’. isday will then be dfin[‘SW_IN’] > swthr.

  • date (array_like of string, optional) –

    1D-array_like of calendar dates in format given in timeformat.

    date must be given if dfin is numpy array.

  • -
  • timeformat (str, optional) – Format of dates in date, if given (default: ‘%Y-%m-%d %H:%M:%S’). +

  • timeformat (str, optional) – Format of dates in date, if given (default: ‘%Y-%m-%d %H:%M:%S’). See strftime documentation of Python’s datetime module: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior

  • -
  • colhed (array_like of str, optional) – column names if dfin is numpy array. See dfin for mandatory column names.

  • -
  • undef (float, optional) – values having undef value are treated as missing values in dfin (default: -9999)

  • -
  • method (str, optional) –

    method to use for partitioning. Possible values are:

    -

    ’global’ or ‘falge’: fit of Reco vs. temperature to all nighttime data

    +
  • colhed (array_like of str, optional) – column names if dfin is numpy array. See dfin for mandatory +column names.

  • +
  • undef (float, optional) – values having undef value are treated as missing values in dfin +(default: -9999)

  • +
  • method (str, optional) –

    method to use for partitioning. Possible values are:

    -
    ’local’ of ‘reichstein’: several fits over the season of Reco vs. temperature

    as in Reichstein et al. (2005) (default)

    +
    ’global’ or ‘falge’: fit of Reco vs. temperature to all nighttime

    data

    +
    +
    ’local’ of ‘reichstein’: several fits over the season of Reco vs.

    temperature as in Reichstein et al. (2005) +(default)

    +
    +
    ’day’ or ‘lasslop’: method of Lasslop et al. (2010) fitting a

    light-response curve

    -

    ’day’ or ‘lasslop’: method of Lasslop et al. (2010) fitting a light-response curve

  • -
  • nogppnight (float, optional) – GPP will be set to zero at night. RECO will then equal NEE at night (default: False)

  • -
  • swthr (float, optional) – Threshold to determine daytime from incoming shortwave radiation if isday not given (default: 10).

  • +
  • nogppnight (float, optional) – GPP will be set to zero at night. RECO will then equal NEE at night +(default: False)

  • +
  • swthr (float, optional) – Threshold to determine daytime from incoming shortwave radiation +if isday not given (default: 10).

  • Returns:
    @@ -139,28 +151,31 @@

    Notes

    -

    Negative respiration possible at night if GPP is forced to 0 with nogppnight=True.

    +

    Negative respiration possible at night if GPP is forced to 0 with +nogppnight=True.

    References

    [1]

    Falge et al. (2001) -Gap filling strategies for defensible annual sums of net ecosystem exchange, +Gap filling strategies for defensible annual sums of +net ecosystem exchange, Acricultural and Forest Meteorology 107, 43-69

    [2]

    Reichstein et al. (2005) -On the separation of net ecosystem exchange into assimilation and ecosystem -respiration: review and improved algorithm, +On the separation of net ecosystem exchange into assimilation +and ecosystem respiration: review and improved algorithm, Global Change Biology 11, 1424-1439

    [3]

    Lasslop et al. (2010) -Separation of net ecosystem exchange into assimilation and respiration using -a light response curve approach: critical issues and global evaluation, +Separation of net ecosystem exchange into assimilation and respiration +using a light response curve approach: critical issues and global +evaluation, Global Change Biology 16, 187-208

    @@ -189,7 +204,8 @@ >>> # flag >>> flag = np.where(dfin == undef, 2, 0) >>> # partition ->>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local') +>>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, +... undef=undef, method='local') >>> print(GPP[1120:1128]) [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00 8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01] @@ -198,31 +214,36 @@ 2.90076664 3.18592735]
    -
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='local')
    +
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
    +...                     undef=undef, method='local')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
     
    -
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='global')
    +
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
    +...                     undef=undef, method='global')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.33166157e+00
       8.18228013e+00  1.04092252e+01  8.19395317e+00  1.08427448e+01]
     
    -
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='Reichstein')
    +
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
    +...                     undef=undef, method='Reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
     
    -
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='reichstein')
    +
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
    +...                     undef=undef, method='reichstein')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  4.40606871e+00
       8.31942152e+00  1.06242542e+01  8.49245664e+00  1.12381973e+01]
     
    -
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead, undef=undef, method='day')
    +
    >>> GPP, Reco = nee2gpp(dfin, flag=flag, date=adate, colhead=colhead,
    +...                     undef=undef, method='day')
     >>> print(GPP[1120:1128])
     [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03  2.78457540e+00
       6.63212545e+00  8.88902165e+00  6.74243873e+00  9.51364527e+00]
    @@ -294,7 +315,7 @@ 

    Related Topics

    - +
    - +
    diff --git a/docs/html/search.html b/docs/html/search.html index b4ad941..f4e7a46 100644 --- a/docs/html/search.html +++ b/docs/html/search.html @@ -1,29 +1,30 @@ - - + - Search — hesseflux 5.1.dev0 documentation - - - + Search — hesseflux 5.1.dev2 documentation + + + - - - + + + - - + + + + + - @@ -61,10 +62,7 @@

    Search

    - -
    - -
    +
    @@ -116,11 +114,11 @@

    Related Topics

    diff --git a/docs/html/searchindex.js b/docs/html/searchindex.js index 453802f..101ccca 100644 --- a/docs/html/searchindex.js +++ b/docs/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["api", "authors", "changelog", "contents", "gapfill", "index", "madspikes", "nee2gpp", "userguide", "ustarfilter"], "filenames": ["api.rst", "authors.rst", "changelog.rst", "contents.rst", "gapfill.rst", "index.rst", "madspikes.rst", "nee2gpp.rst", "userguide.rst", "ustarfilter.rst"], "titles": ["API Reference", "Authors", "Changelog", "Contents", "gapfill", "hesseflux", "madspikes", "nee2gpp", "User Guide", "ustarfilter"], "terms": {"hesseflux": [0, 3, 8], "provid": [0, 4, 5, 6, 7, 8, 9], "function": [0, 2, 4, 5, 6, 7, 8, 9], "us": [0, 2, 4, 5, 6, 7, 8, 9], "process": [0, 2, 3], "post": [0, 2, 3], "eddi": [0, 3, 4, 6, 7, 9], "covari": [0, 3, 4, 7, 9], "flux": [0, 2, 3, 4, 5, 7, 9], "data": [0, 2, 3, 4, 6, 7, 9], "It": [0, 1, 5, 7, 8], "wa": [0, 2, 4, 6, 7, 8, 9], "develop": 0, "ico": [0, 5, 8], "ecosystem": [0, 3, 4, 5, 7], "site": [0, 5, 8], "fr": [0, 5, 8], "he": [0, 5, 8], "The": [0, 2, 3, 4, 5, 6, 7, 9], "i": [0, 1, 3, 4, 6, 7, 8, 9], "similar": [0, 4, 5, 8], "r": [0, 5, 8], "packag": [0, 2, 5, 8], "reddyproc": [0, 5, 8], "includ": [0, 2, 5, 8], "basic": [0, 5, 8], "step": [0, 5, 6, 8], "describ": [0, 5, 8], "papal": [0, 5, 6, 8, 9], "et": [0, 4, 5, 6, 7, 8, 9], "al": [0, 4, 5, 6, 7, 8, 9], "biogeosci": [0, 4, 5, 6, 8, 9], "2006": [0, 5, 6, 8, 9], "plu": [0, 5, 8], "some": [0, 5, 8], "extens": [0, 5, 8], "daytim": [0, 5, 6, 7, 8, 9], "method": [0, 5, 7, 8, 9], "partit": [0, 2, 3, 5, 7], "lasslop": [0, 4, 5, 7, 8], "global": [0, 4, 5, 7, 8], "chang": [0, 2, 4, 5, 7, 8], "biologi": [0, 4, 5, 7, 8], "2010": [0, 5, 7, 8], "copyright": [0, 1, 4, 5, 6, 7, 9], "2009": [0, 1, 5], "2022": [0, 1, 2, 4, 5, 6, 9], "matthia": [0, 1, 4, 5, 6, 7, 9], "cuntz": [0, 1, 4, 5, 6, 7, 9], "see": [0, 1, 4, 5, 6, 7, 8, 9], "author": [0, 3, 4, 6, 9], "rst": [0, 4, 6, 9], "detail": [0, 
4, 5, 6, 7, 9], "licens": [0, 1, 3, 4, 6, 7, 9], "mit": [0, 1, 4, 5, 6, 7, 9], "histori": [0, 4, 6, 9], "written": [0, 4, 6, 7, 8, 9], "2017": 0, "mc": [0, 1, 4, 6, 7, 8, 9], "macu": [0, 1, 4, 6, 7, 8, 9], "dot": [0, 1, 4, 6, 7, 8, 9], "de": [0, 1, 4, 6, 7, 8, 9], "v2": [0, 2], "0": [0, 2, 4, 6, 7, 8, 9], "format": [0, 3, 4, 6, 7, 9], "docu": 0, "useabl": 0, "pypi": 0, "apr": [0, 4, 6, 7, 9], "2020": [0, 1, 2, 4, 6, 7, 9], "1": [0, 2, 4, 6, 7, 8, 9], "more": [0, 2, 8], "requir": [0, 2, 5], "readthedoc": [0, 2], "coveral": [0, 2], "etc": [0, 2], "mai": [0, 1, 2, 4, 6, 7, 8, 9], "2": [0, 2, 4, 6, 7, 8, 9], "finish": [0, 2], "setup": [0, 2], "all": [0, 2, 5, 7, 8, 9], "depend": [0, 2], "set": [0, 2, 6, 7, 8, 9], "password": [0, 2], "en": [0, 2], "instead": [0, 2, 4, 8, 9], "eng": [0, 2, 4, 7], "bugfix": [0, 2, 9], "excel": [0, 2], "date": [0, 2, 4, 7, 8, 9], "routin": [0, 2, 4, 7, 8], "jul": [0, 2, 9], "seasonout": [0, 2, 8, 9], "ustarfilt": [0, 2, 3, 8], "flag": [0, 2, 3, 4, 6, 7, 9], "gross": [0, 2, 8], "possibl": [0, 2, 4, 7, 8], "aug": [0, 2, 6, 7, 9], "v3": [0, 2], "const": [0, 2], "logtool": [0, 2], "py": [0, 2, 5, 8], "sep": [0, 2, 8], "support": [0, 2], "cftime": [0, 2], "v1": [0, 2], "3": [0, 2, 4, 7, 8, 9], "ad": 0, "eddy2nc": 0, "feb": [0, 2, 4, 7], "2021": [0, 2, 4, 6, 9], "return": [0, 2, 4, 6, 7, 8, 9], "also": [0, 2, 4, 6, 7, 8, 9], "mean": [0, 2, 4, 8, 9], "valu": [0, 2, 4, 6, 7, 8, 9], "error": [0, 2, 4, 8], "estim": [0, 2, 3, 4, 7, 9], "jun": [0, 2, 4], "code": [0, 8], "refactor": 0, "v4": [0, 2], "move": [0, 2, 4, 6], "pyproject": [0, 2], "toml": [0, 2], "structur": [0, 2, 5], "github": [0, 2, 5], "page": 0, "document": [0, 2, 3, 4, 7, 8, 9], "5": [0, 2, 4, 8, 9], "pyjam": [0, 2, 5, 8], "remov": [0, 2, 6], "old": 0, "modul": [0, 4, 6, 7, 8, 9], "v5": [0, 2], "fgui": [0, 2], "numpi": [0, 4, 5, 6, 7, 8, 9], "docstr": [0, 4, 6, 7, 9], "updat": [0, 2], "jan": [0, 2, 9], "2023": [0, 2, 9], "gapfil": [0, 2, 3, 8], "madspik": [0, 3, 8], "nee2gpp": [0, 3, 
8], "creat": 1, "distribut": [1, 4, 5, 8], "under": [1, 5, 7, 8], "file": [1, 2, 3, 4, 5, 7], "c": [1, 4, 5, 7, 8, 9], "main": [1, 8], "e": [1, 4, 6, 8, 9], "mail": 1, "contributor": 1, "arndt": [1, 6, 7, 9], "piayda": [1, 6, 7, 9], "thuenen": 1, "tino": [1, 6, 9], "rau": [1, 6, 9], "julian": 1, "uwaterloo": 1, "ca": 1, "sebastian": [1, 5], "m\u00fcller": [1, 5], "mueller": 1, "ufz": [1, 4, 6, 7, 9], "notabl": [2, 8], "after": [2, 4, 8], "its": [2, 4, 8], "initi": 2, "releas": [2, 7], "ar": [2, 4, 6, 7, 8, 9], "thi": [2, 4, 6, 7, 8, 9], "drop": [2, 8], "python": [2, 4, 5, 7, 8, 9], "6": [2, 4, 7, 8], "becaus": 2, "cannot": [2, 8], "test": 2, "anymor": 2, "add": [2, 7, 8], "timecolumn": 2, "ftimeformat": 2, "config": [2, 8], "exampl": [2, 4, 5, 7, 8], "90": [2, 9], "ustar": [2, 8, 9], "threshold": [2, 4, 6, 7, 8, 9], "found": [2, 9], "quantil": [2, 9], "onli": [2, 4, 6, 8, 9], "when": [2, 6, 7, 8, 9], "nee": [2, 4, 7, 8, 9], "ta": [2, 4, 7, 8, 9], "valid": [2, 9], "eddypro2nc": 2, "latter": [2, 8], "ha": [2, 4, 5, 6, 7, 8, 9], "own": [2, 8], "repositori": 2, "now": 2, "http": [2, 4, 5, 7, 8, 9], "com": 2, "mcuntz": [2, 5], "loggertool": 2, "tool": [2, 4, 8], "from": [2, 4, 5, 6, 7, 8, 9], "rather": 2, "than": [2, 4, 8, 9], "local": [2, 7], "copi": [2, 4, 7, 8, 9], "new": 2, "pip": [2, 5], "action": 2, "markdown": 2, "restructur": 2, "text": 2, "preserv": 2, "trail": 2, "whitespac": 2, "float": [2, 4, 6, 7, 8, 9], "string": [2, 4, 7, 8, 9], "read": [2, 3], "undef": [2, 4, 6, 7, 8, 9], "default": [2, 4, 6, 7, 8, 9], "option": [2, 4, 6, 7, 8, 9], "good": [2, 8], "fill": [2, 3, 4, 7], "algorithm": [2, 4, 7, 8, 9], "check": [2, 3], "gregorian": 2, "calendar": [2, 4, 7, 9], "exist": [2, 8], "skip": [2, 4, 7, 8, 9], "script": [2, 5, 8], "convert": 2, "eddypro": [2, 8], "output": [2, 3, 4], "netcdf": 2, "build": 2, "cibuildwheel": 2, "make": [2, 5, 8], "pure": 2, "wheel": 2, "subpackag": [2, 3], "automat": 2, "No": [2, 9], "bootstrap": [2, 8, 9], "u": [2, 3], "per": [2, 8, 
9], "season": [2, 7, 8, 9], "muli": 2, "year": [2, 4, 8, 9], "due": 2, "miss": [2, 4, 6, 7, 8, 9], "meteo": [2, 8], "compat": 2, "flake8": [2, 4, 6, 9], "refin": 2, "python3": 2, "linux": 2, "maco": 2, "window": [2, 4, 6, 8], "travisci": 2, "twine": 2, "public": 2, "about": 3, "quick": 3, "usag": 3, "guid": 3, "europ": 3, "fluxdata": 3, "eu": 3, "instal": 3, "user": [3, 5], "configur": [3, 5], "datafram": [3, 4, 6, 7, 9], "dai": [3, 4, 6, 7, 9], "night": [3, 6, 7, 9], "spike": [3, 6], "outlier": 3, "filter": [3, 6, 9], "net": [3, 4, 7], "exchang": [3, 4, 7], "gap": [3, 4, 7], "imput": 3, "uncertainti": [3, 4], "write": 3, "api": [3, 8], "refer": [3, 7, 8], "changelog": 3, "measur": 4, "while": [4, 6, 7, 8, 9], "depart": [4, 6, 7, 9], "comput": [4, 6, 7, 9], "hydrosystem": [4, 6, 7, 9], "helmholtz": [4, 6, 7, 9], "centr": [4, 6, 7, 9], "environment": [4, 6, 7, 9], "research": [4, 6, 7, 9], "leipzig": [4, 6, 7, 9], "germani": [4, 6, 7, 9], "continu": [4, 6, 7, 9], "institut": [4, 6, 7, 9], "nation": [4, 6, 7, 9], "recherch": [4, 6, 7, 9], "pour": [4, 6, 7, 9], "l": [4, 6, 7, 9], "agricultur": [4, 6, 7, 9], "aliment": [4, 6, 7, 9], "environn": [4, 6, 7, 9], "inra": [4, 6, 7, 9], "nanci": [4, 6, 7, 9], "franc": [4, 6, 7, 9], "2012": [4, 7], "follow": [4, 6, 7, 8, 9], "mar": [4, 7], "port": [4, 7], "2013": [4, 7, 8], "input": [4, 6, 7, 8, 9], "can": [4, 6, 7, 8, 9], "nd": 4, "arrai": [4, 6, 7, 9], "2014": [4, 6, 7, 9], "bug": 4, "longestmarginalgap": 4, "work": [4, 6, 9], "time": [4, 6, 7, 8, 9], "seri": [4, 6, 7, 8, 9], "edg": 4, "renam": [4, 8], "longgap": [4, 8], "keyword": [4, 7, 8], "fulldai": 4, "panda": [4, 5, 6, 7, 8, 9], "": [4, 5, 6, 7, 8, 9], "improv": [4, 6, 7, 9], "oct": [4, 6, 9], "dfin": [4, 6, 7, 9], "none": [4, 6, 7, 8, 9], "timeformat": [4, 7, 8, 9], "y": [4, 7, 8, 9], "m": [4, 7, 8, 9], "d": [4, 7, 8, 9], "h": [4, 7, 8, 9], "colhead": [4, 6, 7, 9], "sw_dev": [4, 8], "50": [4, 8, 9], "ta_dev": [4, 8], "vpd_dev": [4, 8], "60": [4, 8], "fals": [4, 6, 7, 
8, 9], "9999": [4, 6, 7, 8, 9], "ddof": 4, "err": [4, 8], "errmean": 4, "verbos": [4, 8], "sourc": [4, 6, 7, 9], "margin": [4, 8], "sampl": [4, 8], "md": [4, 8], "accord": 4, "reichstein": [4, 7, 8], "2005": [4, 7, 8], "look": [4, 8], "meteorolog": 4, "condit": [4, 8], "defin": [4, 8], "maximum": [4, 6, 8, 9], "deviat": [4, 6, 8], "certain": 4, "averag": [4, 8, 9], "do": [4, 8], "same": [4, 6, 7, 8, 9], "search": 4, "everi": 4, "point": [4, 8], "calcul": [4, 6, 7, 8], "standard": [4, 5, 6, 8], "2008": [4, 6, 8, 9], "paramet": [4, 6, 7, 8, 9], "well": [4, 8, 9], "variabl": [4, 7, 8, 9], "incom": [4, 6, 7, 8, 9], "short": [4, 7], "wave": [4, 7], "radiat": [4, 6, 7, 8, 9], "air": [4, 7, 8, 9], "temperatur": [4, 7, 8, 9], "vapour": [4, 7, 8], "pressur": [4, 7, 8], "deficit": [4, 7, 8], "column": [4, 6, 7, 8, 9], "sw_in": [4, 6, 7, 8, 9], "start": [4, 6, 7, 8, 9], "w": [4, 7, 8], "ta_": [4, 7, 8, 9], "deg": [4, 9], "vpd": [4, 7, 8], "hpa": [4, 8], "index": [4, 7, 8, 9], "taken": [4, 7, 8, 9], "me": [4, 6, 7, 9], "In": [4, 6, 7, 9], "case": [4, 6, 7, 8, 9], "possibli": [4, 7, 9], "dateformat": [4, 7, 9], "must": [4, 6, 7, 9], "given": [4, 5, 6, 7, 8, 9], "shape": [4, 6, 7, 8, 9], "non": [4, 6, 7, 8, 9], "zero": [4, 6, 7, 8, 9], "treat": [4, 6, 7, 8, 9], "rule": [4, 7, 9], "If": [4, 6, 7, 8, 9], "df": [4, 6, 7, 8, 9], "head": [4, 6, 7, 9], "array_lik": [4, 6, 7, 9], "1d": [4, 7, 9], "str": [4, 6, 7, 8, 9], "strftime": [4, 7, 8, 9], "datetim": [4, 7, 8, 9], "doc": [4, 7, 8, 9], "org": [4, 7, 8, 9], "librari": [4, 7, 8, 9], "html": [4, 7, 8, 9], "strptime": [4, 7, 8, 9], "behavior": [4, 7, 8, 9], "colh": [4, 6, 7], "name": [4, 6, 7, 8, 9], "mandatori": [4, 7, 9], "int": [4, 6, 8, 9], "avoid": [4, 8], "extrapol": [4, 8], "longer": [4, 8], "bool": [4, 6, 7, 8, 9], "true": [4, 6, 7, 8, 9], "begin": 4, "larg": [4, 8], "next": 4, "end": 4, "last": 4, "have": [4, 6, 7, 8, 9], "np": [4, 6, 7, 8], "nan": [4, 6, 7, 8], "allow": [4, 6, 7], "delta": 4, "degre": 4, "freedom": 4, 
"divisor": 4, "n": 4, "where": [4, 6, 7, 8, 9], "repres": [4, 8], "number": [4, 8, 9], "element": 4, "gener": 4, "tupl": [4, 9], "reshap": 4, "level": 4, "veri": [4, 8], "filled_data": 4, "quality_class": 4, "err_estim": 4, "mean_estim": 4, "type": [4, 6, 7, 8, 9], "vicin": [4, 8], "first": [4, 6, 8], "cycl": 4, "gcb": [4, 8], "doe": [4, 8], "On": [4, 7], "separ": [4, 7, 8], "assimil": [4, 7], "respir": [4, 7, 8], "review": [4, 7], "11": [4, 7], "1424": [4, 7], "1439": [4, 7], "in\ufb02uenc": 4, "observ": [4, 7, 9], "\ufb02ux": 4, "invers": 4, "model": 4, "1311": 4, "1324": 4, "import": [4, 7, 8], "fread": [4, 7], "date2dec": [4, 7], "dec2dat": [4, 7], "ifil": [4, 7], "test_gapfil": 4, "csv": [4, 7, 8], "tharandt": 4, "1998": 4, "onlin": 4, "dat": [4, 7], "transpos": [4, 7], "ndat": [4, 7], "header": [4, 7, 8], "head1": [4, 7], "idx": [4, 7], "le": [4, 8], "rg": [4, 7], "tair": [4, 7], "append": [4, 7, 8], "fc": [4, 7, 8, 9], "qcnee": 4, "astyp": [4, 8], "qcle": 4, "qch": 4, "day_id": 4, "hour_id": 4, "hour": 4, "ntime": 4, "ones": 4, "dtype": [4, 8], "hh": [4, 8], "mn": 4, "rint": [4, 8], "y0": 4, "yr": [4, 7], "mo": [4, 7], "dy": [4, 7], "hr": [4, 7], "mi": [4, 7], "jdate": [4, 7], "adat": [4, 7], "dat_f": 4, "flag_f": 4, "print": [4, 7, 8], "11006": 4, "11012": 4, "2f": 4, "18": 4, "68": 4, "15": [4, 6, 7, 8], "63": 4, "19": 4, "61": 4, "54": 4, "12": 4, "40": 4, "33": 4, "dat_std": 4, "3f": 4, "372": 4, "13": 4, "118": 4, "477": 4, "000": 4, "dat_err": 4, "kk": 4, "ab": 4, "100": [4, 7, 8], "28": 4, "83": 4, "dat_mean": 4, "677": 4, "633": 4, "610": 4, "collect": [5, 8], "complet": 5, "avail": [5, 8], "io": 5, "an": [5, 8, 9], "directori": [5, 8], "simpli": [5, 8], "call": [5, 8], "postproc_europ": [5, 8], "hesseflux_exampl": [5, 8], "cfg": [5, 8], "govern": [5, 8], "configpars": [5, 8], "highli": [5, 8], "comment": [5, 8], "through": [5, 8], "easiest": 5, "wai": 5, "via": [5, 8], "scipi": 5, "project": 5, "borrow": 5, "heavili": 5, "welltestpi": 5, "detect": 
[6, 8], "median": [6, 8], "absolut": [6, 8, 9], "differ": [6, 8], "origin": [6, 8, 9], "maintain": [6, 9], "sinc": [6, 9], "iter": 6, "isdai": [6, 7, 8, 9], "nscan": [6, 8], "720": 6, "nfill": [6, 8], "48": 6, "z": [6, 8], "7": [6, 8, 9], "deriv": [6, 8], "swthr": [6, 7, 8, 9], "10": [6, 7, 8, 9], "plot": [6, 8, 9], "vovari": 6, "mad": [6, 8], "should": [6, 8], "appli": [6, 8], "along": 6, "axi": [6, 8], "each": [6, 8, 9], "length": [6, 7, 9], "size": 6, "result": 6, "mask": 6, "middl": [6, 8], "Then": [6, 8], "act": 6, "raw": [6, 8], "2nd": 6, "determin": [6, 7, 8, 9], "shortwav": [6, 7, 8, 9], "pdf": [6, 9], "everywher": [6, 9], "except": [6, 8, 9], "photosynthesi": 7, "gpp": [7, 8], "reco": [7, 8], "co2": [7, 8, 9], "wrapper": 7, "individu": [7, 8], "nov": 7, "generel": 7, "cost": 7, "cost_ab": 7, "fmin_tnc": 7, "param": 7, "nogppnight": [7, 8], "either": [7, 9], "fit": 7, "v": 7, "nighttim": [7, 8], "falg": 7, "sever": [7, 8], "over": 7, "order": 7, "fc_": [7, 9], "nee_": [7, 9], "umol": [7, 9], "k": [7, 8], "need": 7, "pa": [7, 8], "light": [7, 8], "respons": [7, 8], "curv": [7, 8], "equal": [7, 9], "two": [7, 8], "neg": [7, 8], "forc": 7, "2001": 7, "strategi": [7, 8], "defens": 7, "annual": 7, "sum": 7, "acricultur": 7, "forest": [7, 8, 9], "meteorologi": 7, "107": 7, "43": 7, "69": 7, "approach": 7, "critic": 7, "issu": 7, "evalu": 7, "16": 7, "187": 7, "208": 7, "test_nee2gpp": 7, "4": [7, 8], "273": [7, 8], "1120": 7, "1128": 7, "9": 7, "99900000e": 7, "03": 7, "40606871e": 7, "00": 7, "8": 7, "31942152e": 7, "06242542e": 7, "01": [7, 8, 9], "49245664e": 7, "12381973e": 7, "68311981": 7, "81012431": 7, "9874173": 7, "17108871": 7, "38759152": 7, "64372415": 7, "90076664": 7, "18592735": 7, "33166157e": 7, "18228013e": 7, "04092252e": 7, "19395317e": 7, "08427448e": 7, "78457540e": 7, "63212545e": 7, "88902165e": 7, "74243873e": 7, "51364527e": 7, "28786696": 7, "34594516": 7, "43893276": 7, "5495954": 7, "70029545": 7, "90849165": 7, "15074873": 7, 
"46137527": 7, "here": 8, "we": 8, "happi": 8, "discuss": 8, "ani": 8, "directli": 8, "contact": 8, "shown": 8, "bring": 8, "submit": 8, "databas": 8, "predat": 8, "somewhat": 8, "precursor": 8, "henc": 8, "known": 8, "unit": 8, "atmospher": 8, "which": [8, 9], "kpa": 8, "one": [8, 9], "line": 8, "There": 8, "littl": 8, "helper": 8, "fluxdata_unit": 8, "bin": 8, "second": 8, "run": 8, "output_file_of_postproc_europ": 8, "templat": 8, "almost": 8, "self": 8, "explanatori": 8, "like": 8, "most": 8, "part": 8, "sy": 8, "configfil": 8, "argv": 8, "interpol": 8, "analys": 8, "section": 8, "control": 8, "shall": 8, "perform": 8, "postswitch": 8, "biogeoci": 8, "fluxerr": 8, "And": 8, "program": 8, "switch": 8, "getboolean": 8, "boolean": 8, "thei": 8, "would": 8, "postio": 8, "comma": 8, "list": 8, "singl": 8, "inputfil": 8, "hes_europ": 8, "fluxdata_2016": 8, "txt": 8, "delimit": 8, "read_csv": 8, "builtin": 8, "sniffer": 8, "slow": 8, "pydata": 8, "stabl": 8, "skiprow": 8, "being": 8, "ignor": 8, "analysi": 8, "get": 8, "getfloat": 8, "note": 8, "without": 8, "quot": 8, "filenam": 8, "empti": 8, "try": 8, "open": 8, "gui": 8, "choos": 8, "pd": 8, "parser": 8, "lambda": 8, "to_datetim": 8, "infil": 8, "parse_d": 8, "date_pars": 8, "index_col": 8, "assum": 8, "adapt": 8, "easili": 8, "still": 8, "ameriflux": 8, "moment": 8, "give": 8, "suppos": 8, "command": 8, "abov": [8, 9], "Not": 8, "happen": 8, "dff": 8, "qualiti": 8, "cell": 8, "other": 8, "immedi": 8, "fillna": 8, "inplac": 8, "deep": 8, "differenti": 8, "between": [8, 9], "20": [8, 9], "distinguish": 8, "greater": [8, 9], "appropri": 8, "sw_in_1_1_1": 8, "kelvin": 8, "hta": 8, "hout": 8, "_findfirststart": 8, "max": 8, "tkelvin": 8, "els": 8, "loc": 8, "find": 8, "occurr": 8, "until": 8, "vpd_pi_1_1_1": 8, "rel": 8, "humid": 8, "rh_": 8, "esat": 8, "satur": 8, "pj": 8, "hvpd": 8, "len": 8, "rais": 8, "valueerror": 8, "ta_id": 8, "rh_id": 8, "tk": 8, "rh": 8, "vpd_id": 8, "further": 8, "assur": 8, "vpdpa": 8, 
"1000": 8, "elif": 8, "final": 8, "interv": [8, 9], "dtsec": 8, "ntdai": 8, "dsec": 8, "86400": 8, "A": 8, "week": 8, "chunk": 8, "postmad": 8, "help": 8, "scan": 8, "onc": 8, "consid": 8, "befor": 8, "itself": 8, "robust": 8, "statist": 8, "howev": 8, "within": 8, "inner": 8, "For": 8, "around": 8, "normal": 8, "strong": 8, "curvatur": 8, "current": 8, "implement": 8, "might": 8, "smooth": 8, "soil": 8, "moistur": 8, "hz": 8, "mauder": 8, "agric": 8, "_pi": 8, "g": 8, "le_pi": 8, "houtlier": 8, "h_": 8, "h_pi": 8, "sflag": 8, "hf": 8, "ii": 8, "enumer": 8, "correspond": 8, "just": 8, "keep": 8, "track": 8, "postustar": 8, "min": 8, "ustarmin": [8, 9], "boostrap": 8, "nboot": [8, 9], "signific": 8, "class": [8, 9], "plateaucrit": [8, 9], "95": [8, 9], "otherwis": [8, 9], "influenc": 8, "applyustarflag": 8, "minimum": [8, 9], "suggest": 8, "land": 8, "cover": 8, "divid": 8, "less": 8, "took": 8, "99": 8, "take": [8, 9], "four": 8, "One": 8, "them": 8, "experi": 8, "hfilt": 8, "assert": 8, "could": 8, "ffsave": 8, "to_numpi": 8, "assign": 8, "ustar_test_1_1_1": 8, "hustar": 8, "percentil": [8, 9], "smaller": 8, "want": 8, "integr": 8, "turbul": 8, "characterist": 8, "itc": 8, "were": 8, "These": 8, "right": 8, "produc": 8, "fc_ssitc_test_1_1_1": 8, "fc_1_1_1": 8, "primari": 8, "product": 8, "firstli": 8, "glob": 8, "biolo": 8, "secondli": 8, "postpartit": 8, "mani": 8, "peopl": 8, "unaesthet": 8, "esteem": 8, "correct": 8, "behaviour": 8, "reflect": 8, "hpart": 8, "astr": 8, "dfpartn": 8, "startswith": 8, "suff": 8, "dfpartd": 8, "concat": 8, "dn": 8, "gg": 8, "so": 8, "postgap": 8, "rang": 8, "actual": 8, "long": 8, "summari": 8, "fig": 8, "a1": 8, "invok": 8, "hfill": 8, "gpp_1_1_1": 8, "reco_1_1_1": 8, "gpp_1_1_2": 8, "reco_1_1_2": 8, "gpp_pi_1_1_1": 8, "reco_pi_1_1_1": 8, "gpp_pi_1_1_2": 8, "reco_pi_1_1_2": 8, "df_f": 8, "dff_f": 8, "hdrop": 8, "def": 8, "_add_f": 8, "_": 8, "join": 8, "split": 8, "f": 8, "indic": 8, "translat": 8, "present": [8, 9], "h_f": 8, 
"le_f": 8, "fc_f": 8, "h_pi_f": 8, "le_pi_f": 8, "nee_f": 8, "nee_pi_f": 8, "gpp_f_1_1_1": 8, "reco_f_1_1_1": 8, "gpp_f_1_1_2": 8, "reco_f_1_1_2": 8, "gpp_pi_f_1_1_1": 8, "reco_pi_f_1_1_1": 8, "gpp_pi_f_1_1_2": 8, "reco_pi_f_1_1_2": 8, "colin": 8, "nee_pi_err_1_1_1": 8, "colout": 8, "cc": 8, "recommend": 8, "softwar": 8, "tk3": 8, "to_csv": 8, "outputfil": 8, "na_rep": 8, "date_format": 8, "ask": 8, "path": 8, "suffix": 8, "outundef": 8, "prepend": 8, "flag_": 8, "outflagcol": 8, "prefix": 8, "alwai": 8, "whole": 8, "back": 8, "exclud": 8, "_add_flag": 8, "dcol": 8, "_test_": 8, "occ": 8, "dff1": 8, "That": 8, "folk": 8, "friction": 9, "veloc": 9, "multi": 9, "nmon": 9, "ntaclass": 9, "corrcheck": 9, "nustarclass": 9, "below": 9, "correl": 9, "reduct": 9, "boot": 9, "strap": 9, "confid": 9, "month": 9, "combin": 9, "coeffici": 9, "smallest": 9, "higher": 9, "nseason": 9, "least": 9, "full": 9}, "objects": {"": [[0, 0, 0, "-", "hesseflux"]], "hesseflux": [[4, 0, 0, "-", "gapfill"], [6, 0, 0, "-", "madspikes"], [7, 0, 0, "-", "nee2gpp"], [9, 0, 0, "-", "ustarfilter"]], "hesseflux.gapfill": [[4, 1, 1, "", "gapfill"]], "hesseflux.madspikes": [[6, 1, 1, "", "madspikes"]], "hesseflux.nee2gpp": [[7, 1, 1, "", "nee2gpp"]], "hesseflux.ustarfilter": [[9, 1, 1, "", "ustarfilter"]]}, "objtypes": {"0": "py:module", "1": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "function", "Python function"]}, "titleterms": {"api": 0, "refer": 0, "subpackag": 0, "author": 1, "changelog": 2, "content": 3, "gapfil": 4, "note": [4, 7, 9], "hesseflux": 5, "about": 5, "document": 5, "quick": 5, "usag": 5, "guid": [5, 8], "post": [5, 8], "process": [5, 8], "eddi": [5, 8], "covari": [5, 8], "data": [5, 8], "i": 5, "europ": [5, 8], "fluxdata": [5, 8], "eu": [5, 8], "format": [5, 8], "instal": 5, "licens": 5, "madspik": 6, "nee2gpp": 7, "user": 8, "file": 8, "read": 8, "configur": 8, "The": 8, "flag": 8, "datafram": 8, "dai": 8, "night": 8, "check": 8, "spike": 8, 
"outlier": 8, "u": 8, "filter": 8, "partit": 8, "net": 8, "ecosystem": 8, "exchang": 8, "gap": 8, "fill": 8, "imput": 8, "uncertainti": 8, "estim": 8, "flux": 8, "write": 8, "output": 8, "ustarfilt": 9}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.viewcode": 1, "sphinx": 57}, "alltitles": {"API Reference": [[0, "module-hesseflux"]], "Subpackages": [[0, "subpackages"]], "Authors": [[1, "authors"]], "Changelog": [[2, "changelog"]], "Contents": [[3, "contents"]], "gapfill": [[4, "module-hesseflux.gapfill"]], "Notes": [[4, null], [7, null], [9, null]], "hesseflux": [[5, "hesseflux"]], "About hesseflux": [[5, "about-hesseflux"]], "Documentation": [[5, "documentation"]], "Quick usage guide": [[5, "quick-usage-guide"]], "Post-processing Eddy covariance data that is in europe-fluxdata.eu format": [[5, "post-processing-eddy-covariance-data-that-is-in-europe-fluxdata-eu-format"]], "Installation": [[5, "installation"]], "License": [[5, "license"]], "madspikes": [[6, "module-hesseflux.madspikes"]], "nee2gpp": [[7, "module-hesseflux.nee2gpp"]], "User Guide": [[8, "user-guide"]], "europe-fluxdata.eu file format": [[8, "europe-fluxdata-eu-file-format"]], "Post-processing Eddy covariance data": [[8, "post-processing-eddy-covariance-data"]], "Reading the configuration file": [[8, "reading-the-configuration-file"]], "Read the data": [[8, "read-the-data"]], "The flag dataframe": [[8, "the-flag-dataframe"]], "Day / night": [[8, "day-night"]], "Data check": [[8, "data-check"]], "Spike / outlier flagging": [[8, "spike-outlier-flagging"]], "u* filtering": [[8, "u-filtering"]], "Partitioning of Net Ecosystem Exchange": [[8, "partitioning-of-net-ecosystem-exchange"]], "Gap-filling / Imputation": [[8, 
"gap-filling-imputation"]], "Uncertainty estimates of flux data": [[8, "uncertainty-estimates-of-flux-data"]], "Writing the output file": [[8, "writing-the-output-file"]], "ustarfilter": [[9, "module-hesseflux.ustarfilter"]]}, "indexentries": {"hesseflux": [[0, "module-hesseflux"]], "module": [[0, "module-hesseflux"], [4, "module-hesseflux.gapfill"], [6, "module-hesseflux.madspikes"], [7, "module-hesseflux.nee2gpp"], [9, "module-hesseflux.ustarfilter"]], "gapfill() (in module hesseflux.gapfill)": [[4, "hesseflux.gapfill.gapfill"]], "hesseflux.gapfill": [[4, "module-hesseflux.gapfill"]], "hesseflux.madspikes": [[6, "module-hesseflux.madspikes"]], "madspikes() (in module hesseflux.madspikes)": [[6, "hesseflux.madspikes.madspikes"]], "hesseflux.nee2gpp": [[7, "module-hesseflux.nee2gpp"]], "nee2gpp() (in module hesseflux.nee2gpp)": [[7, "hesseflux.nee2gpp.nee2gpp"]], "hesseflux.ustarfilter": [[9, "module-hesseflux.ustarfilter"]], "ustarfilter() (in module hesseflux.ustarfilter)": [[9, "hesseflux.ustarfilter.ustarfilter"]]}}) \ No newline at end of file +Search.setIndex({"alltitles": {"API Reference": [[0, "module-hesseflux"]], "About hesseflux": [[5, "about-hesseflux"]], "Authors": [[1, "authors"]], "Changelog": [[2, "changelog"]], "Comparison with REddyProc": [[8, "comparison-with-reddyproc"]], "Contents": [[3, "contents"]], "Data check": [[9, "data-check"]], "Day / night": [[9, "day-night"]], "Documentation": [[5, "documentation"]], "Estimating the ustar threshold distribution": [[8, "estimating-the-ustar-threshold-distribution"]], "Gap-filling / Imputation": [[8, "gap-filling-imputation"], [9, "gap-filling-imputation"]], "Gap-filling or imputation": [[8, "gap-filling-or-imputation"]], "Importing the half-hourly data": [[8, "importing-the-half-hourly-data"]], "Installation": [[5, "installation"]], "License": [[5, "license"]], "Notes": [[4, null], [7, null], [10, null]], "Partitioning of Net Ecosystem Exchange": [[8, "partitioning-of-net-ecosystem-exchange"], [9, 
"partitioning-of-net-ecosystem-exchange"]], "Post-processing Eddy covariance data": [[8, "post-processing-eddy-covariance-data"], [9, "post-processing-eddy-covariance-data"]], "Post-processing Eddy covariance data that is in europe-fluxdata.eu format": [[5, "post-processing-eddy-covariance-data-that-is-in-europe-fluxdata-eu-format"]], "Quick usage guide": [[5, "quick-usage-guide"]], "Read the data": [[9, "read-the-data"]], "Reading the configuration file": [[9, "reading-the-configuration-file"]], "Spike / outlier flagging": [[9, "spike-outlier-flagging"]], "Subpackages": [[0, "subpackages"]], "The flag dataframe": [[9, "the-flag-dataframe"]], "Uncertainty estimates of flux data": [[8, "uncertainty-estimates-of-flux-data"], [9, "uncertainty-estimates-of-flux-data"]], "User Guide": [[9, "user-guide"]], "Writing the output file": [[8, "writing-the-output-file"], [9, "writing-the-output-file"]], "europe-fluxdata.eu file format": [[9, "europe-fluxdata-eu-file-format"]], "gapfill": [[4, "module-hesseflux.gapfill"]], "hesseflux": [[5, "hesseflux"]], "madspikes": [[6, "module-hesseflux.madspikes"]], "nee2gpp": [[7, "module-hesseflux.nee2gpp"]], "u* filtering": [[9, "u-filtering"]], "ustarfilter": [[10, "module-hesseflux.ustarfilter"]]}, "docnames": ["api", "authors", "changelog", "contents", "gapfill", "index", "madspikes", "nee2gpp", "reddyproc", "userguide", "ustarfilter"], "envversion": {"sphinx": 61, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.viewcode": 1}, "filenames": ["api.rst", "authors.rst", "changelog.rst", "contents.rst", "gapfill.rst", "index.rst", "madspikes.rst", "nee2gpp.rst", "reddyproc.rst", "userguide.rst", "ustarfilter.rst"], "indexentries": {}, "objects": {"": [[0, 0, 0, "-", 
"hesseflux"]], "hesseflux": [[4, 0, 0, "-", "gapfill"], [6, 0, 0, "-", "madspikes"], [7, 0, 0, "-", "nee2gpp"], [10, 0, 0, "-", "ustarfilter"]], "hesseflux.gapfill": [[4, 1, 1, "", "gapfill"]], "hesseflux.madspikes": [[6, 1, 1, "", "madspikes"]], "hesseflux.nee2gpp": [[7, 1, 1, "", "nee2gpp"]], "hesseflux.ustarfilter": [[10, 1, 1, "", "ustarfilter"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "function", "Python function"]}, "objtypes": {"0": "py:module", "1": "py:function"}, "terms": {"": [4, 5, 6, 7, 8, 9, 10], "0": [0, 2, 4, 6, 7, 8, 9, 10], "00": 7, "000": 4, "01": [7, 8, 9, 10], "02d": 8, "03": 7, "03d": 8, "04092252e": 7, "05": 8, "06242542e": 7, "08427448e": 7, "1": [0, 2, 4, 6, 7, 8, 9, 10], "10": [6, 7, 8, 9, 10], "100": [4, 7, 8, 9], "1000": 9, "100l": 8, "107": 7, "11": [4, 7], "11006": 4, "11012": 4, "1120": 7, "1128": 7, "118": 4, "12": 4, "12381973e": 7, "13": 4, "1311": 4, "1324": 4, "1424": [4, 7], "1439": [4, 7], "15": [4, 6, 7, 8, 9], "15074873": 7, "16": 7, "160": 8, "17108871": 7, "1740000": 8, "18": 4, "18228013e": 7, "18592735": 7, "187": 7, "19": 4, "19395317e": 7, "1998": [4, 8], "1998001": 8, "1998003": 8, "1998006": 8, "1998009": 8, "1998012": 8, "1d": [4, 7, 10], "2": [0, 2, 4, 6, 7, 8, 9, 10], "20": [8, 9, 10], "2001": 7, "2005": [4, 7, 8, 9], "2006": [0, 5, 6, 8, 9, 10], "2008": [4, 6, 8, 9, 10], "2009": [0, 1, 5], "2010": [0, 5, 7, 8, 9], "2012": [4, 7], "2013": [4, 7, 8, 9], "2014": [4, 6, 7, 10], "2017": 0, "2020": [0, 1, 2, 4, 6, 7, 10], "2021": [0, 2, 4, 6, 10], "2022": [0, 1, 2, 4, 5, 6, 10], "2023": [0, 2, 10], "2024": [6, 7, 10], "208": 7, "2144982": 8, "2494333": [], "2708250": 8, "2711558": [], "273": [7, 8, 9], "2737381": 8, "28": 4, "28786696": 7, "2f": [4, 8], "2nd": 6, "3": [0, 2, 4, 7, 8, 9, 10], "30": 8, "3000": 8, "31": [], "3160274": [], "31942152e": 7, "32": 8, "3227250": 8, "33": [4, 8], "33166157e": 7, "3349500": [], "3369231": 8, "34": 8, "34594516": 7, "35": 8, "3520000": 8, "36": 8, 
"3683221": [], "37": 8, "3704545": 8, "372": 4, "37475": 8, "3747500": 8, "3785417": 8, "38": 8, "38759152": 7, "3880595": [], "39": 8, "3900000": [], "3966071": 8, "3f": 4, "4": [7, 8, 9], "40": [4, 8], "4028709": [], "40606871e": 7, "41625": 8, "4162500": 8, "4190000": 8, "42": [], "43": 7, "4326667": [], "43893276": 7, "44": 8, "4407917": 8, "4429643": 8, "4466209": [], "46137527": 7, "4686845": [], "477": 4, "48": 6, "49245664e": 7, "4d": 8, "5": [0, 2, 4, 8, 9, 10], "50": [4, 8, 9, 10], "51364527e": 7, "5143036": 8, "5218058": [], "54": 4, "5495954": 7, "5512931": [], "5662312": 8, "6": [2, 4, 7, 8, 9], "60": [4, 8, 9], "6012429": 8, "61": 4, "610": 4, "6220412": [], "63": 4, "63212545e": 7, "633": 4, "64372415": 7, "6449534": 8, "677": 4, "68": 4, "68311981": 7, "69": 7, "7": [6, 8, 9, 10], "70029545": 7, "720": 6, "74243873e": 7, "78457540e": 7, "8": 7, "81012431": 7, "83": 4, "86400": 9, "88902165e": 7, "9": 7, "90": [2, 10], "90076664": 7, "90849165": 7, "95": [8, 9, 10], "9874173": 7, "99": 9, "99900000e": 7, "9999": [4, 6, 7, 8, 9, 10], "A": [8, 9], "And": 9, "By": 8, "For": 9, "If": [4, 6, 7, 8, 9, 10], "In": [4, 6, 7, 10], "It": [0, 1, 5, 7, 8, 9], "No": [2, 10], "Not": 9, "On": [4, 7], "One": 9, "That": [8, 9], "The": [0, 2, 3, 4, 5, 6, 7, 8, 10], "Then": [6, 9], "There": [8, 9], "These": 9, "_": [8, 9], "_add_f": 9, "_add_flag": 9, "_err": 8, "_f": 8, "_findfirststart": [8, 9], "_pi": [8, 9], "_test_": 9, "a1": [8, 9], "ab": 4, "about": 3, "abov": [9, 10], "absolut": [6, 9, 10], "accord": 4, "acricultur": 7, "act": 6, "action": 2, "actual": [8, 9], "ad": 0, "adapt": 9, "adat": [4, 7], "add": [2, 7, 8, 9], "after": [2, 4, 8, 9], "aggfunc": 8, "aggregationmod": 8, "agric": [8, 9], "agricultur": [4, 6, 7, 10], "air": [4, 7, 8, 9, 10], "al": [0, 4, 5, 6, 7, 8, 9, 10], "algorithm": [2, 4, 7, 8, 9, 10], "aliment": [4, 6, 7, 10], "all": [0, 2, 5, 7, 8, 9, 10], "allow": [4, 6, 7, 8], "almost": 9, "along": 6, "also": [0, 2, 4, 6, 7, 8, 9, 10], "alwai": [8, 
9], "ameriflux": 9, "an": [5, 8, 9, 10], "analys": 9, "analysi": 9, "anchor": 8, "ani": [8, 9], "annual": [7, 8], "anymor": 2, "api": [3, 9], "append": [4, 7, 8, 9], "appli": [6, 8, 9], "applyustarflag": 9, "approach": 7, "appropri": 9, "apr": [0, 4, 6, 7, 10], "april": 8, "ar": [2, 4, 6, 7, 8, 9, 10], "argv": 9, "arndt": [1, 6, 7, 10], "around": [8, 9], "arrai": [4, 6, 7, 10], "array_lik": [4, 6, 7, 10], "ask": [8, 9], "assert": 9, "assign": 9, "assimil": [4, 7], "assum": [8, 9], "assur": 9, "astr": 9, "astyp": [4, 8, 9], "atmospher": 9, "aug": [0, 2, 6, 7, 10], "author": [0, 3, 4, 6, 10], "automat": 2, "avail": [5, 8, 9], "averag": [4, 9, 10], "avoid": [4, 8, 9], "axi": [6, 8, 9], "back": 9, "backend": 10, "basic": [0, 5, 8, 9], "bbox_inch": 8, "becaus": [2, 8], "befor": [8, 9], "begin": 4, "behavior": [4, 7, 9, 10], "behaviour": [8, 9], "being": 9, "below": 10, "between": [8, 9, 10], "bin": 9, "biogeoci": 9, "biogeosci": [0, 4, 5, 6, 8, 9, 10], "biolo": [8, 9], "biologi": [0, 4, 5, 7, 8, 9], "bool": [4, 6, 7, 8, 9, 10], "boolean": 9, "boostrap": [8, 9], "boot": 10, "bootstrap": [2, 8, 9, 10], "borrow": 5, "both": 8, "bring": 9, "bug": 4, "bugfix": [0, 2, 10], "build": 2, "builtin": 9, "c": [1, 4, 5, 7, 8, 9, 10], "ca": 1, "calcul": [4, 6, 7, 8, 9], "calendar": [2, 4, 7, 10], "call": [5, 8, 9], "can": [4, 6, 7, 8, 9, 10], "cannot": [2, 9], "case": [4, 6, 7, 8, 9, 10], "cbar_kw": 8, "cc": [8, 9], "cell": 9, "centr": [4, 6, 7, 10], "certain": 4, "cfg": [5, 8, 9], "cftime": [0, 2], "chang": [0, 2, 4, 5, 7, 8, 9], "changelog": 3, "characterist": 9, "check": [2, 3], "choos": [8, 9], "chunk": 9, "cibuildwheel": 2, "class": [8, 9, 10], "clear": 8, "cmap": 8, "co2": [7, 9, 10], "code": [0, 8, 9], "coeffici": 10, "colh": [4, 6, 7], "colhead": [4, 6, 7, 10], "colin": [8, 9], "collect": [5, 9], "colout": [8, 9], "column": [4, 6, 7, 8, 9, 10], "com": 2, "combin": 10, "comma": 9, "command": 9, "comment": [5, 9], "compar": 8, "comparison": [], "compat": 2, "complet": 5, 
"comput": [4, 6, 7, 10], "concat": [8, 9], "condit": [4, 8, 9], "confid": 10, "config": [2, 8, 9], "configfil": 9, "configpars": [5, 9], "configur": [3, 5, 8], "consid": 9, "const": [0, 2], "contact": 9, "continu": [4, 6, 7, 10], "contributor": 1, "control": [8, 9], "convert": [2, 8], "copi": [2, 4, 7, 8, 9, 10], "copyright": [0, 1, 4, 5, 6, 7, 10], "corrcheck": 10, "correct": [8, 9], "correl": 10, "correspond": [8, 9], "cost": 7, "cost_ab": 7, "could": 9, "covari": [0, 3, 4, 7, 10], "cover": 9, "coveral": [0, 2], "creat": [1, 8], "critic": 7, "csv": [4, 7, 8, 9], "cuntz": [0, 1, 4, 5, 6, 7, 10], "current": 9, "curv": [7, 8, 9], "curvatur": 9, "cycl": 4, "d": [4, 7, 9, 10], "dai": [3, 4, 6, 7, 8, 10], "daili": 8, "dat": [4, 7, 8], "dat_err": 4, "dat_f": 4, "dat_mean": 4, "dat_std": 4, "data": [0, 2, 3, 4, 6, 7, 10], "databas": 9, "datafram": [3, 4, 6, 7, 8, 10], "dataset": 8, "date": [0, 2, 4, 7, 8, 9, 10], "date2dec": [4, 7], "date_format": [8, 9], "date_pars": [], "dateformat": [4, 7, 10], "datetim": [4, 7, 8, 9, 10], "day_id": 4, "daytim": [0, 5, 6, 7, 8, 9, 10], "dcol": 9, "ddof": 4, "de": [0, 1, 4, 6, 7, 8, 9, 10], "dec2dat": [4, 7], "decemb": 8, "decimal_hour": 8, "deep": [8, 9], "def": [8, 9], "default": [2, 4, 6, 7, 8, 9, 10], "defens": 7, "deficit": [4, 7, 8, 9], "defin": [4, 8, 9], "deg": [4, 10], "degre": 4, "delimit": [8, 9], "delta": 4, "depart": [4, 6, 7, 10], "depend": [0, 2], "deriv": [6, 9], "describ": [0, 5, 8, 9], "detail": [0, 4, 5, 6, 7, 10], "detect": [6, 9], "determin": [6, 7, 8, 9, 10], "dev": 8, "develop": 0, "deviat": [4, 6, 8, 9], "df": [4, 6, 7, 8, 9, 10], "df_f": [8, 9], "dff": [8, 9], "dff1": [8, 9], "dff_f": [8, 9], "dfin": [4, 6, 7, 10], "dfpartd": [8, 9], "dfpartn": [8, 9], "dfplot": 8, "dfr": 8, "differ": [6, 8, 9], "differenti": 9, "directli": 9, "directori": [5, 8, 9], "discuss": 9, "disentangl": 8, "disk": 8, "distinguish": 9, "distribut": [1, 4, 5, 9], "divid": 9, "divisor": 4, "dn": [8, 9], "do": [4, 8, 9, 10], "doc": [4, 7, 
9, 10], "docstr": [0, 4, 6, 7, 10], "docu": 0, "document": [0, 2, 3, 4, 7, 9, 10], "doe": [4, 8, 9], "doi": 8, "done": 8, "dot": [0, 1, 4, 6, 7, 9, 10], "dplyr": 8, "drop": [2, 8, 9], "dropna": 8, "dsec": 9, "dtsec": 9, "dtype": [4, 9], "due": [2, 8], "dy": [4, 7], "e": [1, 4, 6, 8, 9, 10], "each": [6, 8, 9, 10], "easiest": 5, "easili": 9, "ecosystem": [0, 3, 4, 5, 7], "eddi": [0, 3, 4, 6, 7, 10], "eddy2nc": 0, "eddydata": 8, "eddydatawithposix": 8, "eddypro": [2, 8, 9], "eddypro2nc": 2, "edg": 4, "either": [7, 10], "element": 4, "elif": 9, "els": [8, 9], "empti": [8, 9], "en": [0, 2], "end": 4, "eng": [0, 2, 4, 7], "enumer": [8, 9], "environment": [4, 6, 7, 10], "environn": [4, 6, 7, 10], "eproc": 8, "equal": [7, 8, 10], "err": [4, 8, 9], "err_estim": 4, "errmean": 4, "error": [0, 2, 4, 8, 9], "esat": 9, "esteem": [8, 9], "estim": [0, 2, 3, 4, 7, 10], "et": [0, 4, 5, 6, 7, 8, 9, 10], "etc": [0, 2, 8], "eu": 3, "europ": 3, "evalu": 7, "everi": 4, "everywher": [6, 10], "exampl": [2, 4, 5, 7, 8, 9], "example_detha98": 8, "excel": [0, 2], "except": [6, 9, 10], "exchang": [3, 4, 7], "exclud": [8, 9], "exist": [2, 9], "experi": 9, "explanatori": 9, "extend": 8, "extens": [0, 5, 8, 9], "extrapol": [4, 8, 9], "f": [8, 9], "falg": 7, "fals": [4, 6, 7, 8, 9, 10], "fc": [4, 7, 8, 9, 10], "fc_": [7, 10], "fc_1_1_1": [8, 9], "fc_f": [8, 9], "fc_ssitc_test_1_1_1": 9, "fconverttimetoposix": 8, "feb": [0, 2, 4, 7], "februari": 8, "ffsave": [8, 9], "fgui": [0, 2], "fifth": 8, "fig": [8, 9], "file": [1, 2, 3, 4, 5, 7], "filenam": [8, 9], "fill": [2, 3, 4, 7], "filled_data": 4, "fillna": 9, "filter": [3, 6, 8, 10], "filterlongrun": 8, "final": 9, "find": [8, 9], "fingerprint": 8, "finish": [0, 2], "first": [4, 6, 8, 9], "firstli": [8, 9], "fit": 7, "five": 8, "flag": [0, 2, 3, 4, 6, 7, 8, 10], "flag_": [8, 9], "flag_f": 4, "flake8": [2, 4, 6, 10], "floadtxtintodatafram": 8, "float": [2, 4, 6, 7, 8, 9, 10], "flux": [0, 2, 3, 4, 5, 7, 10], "fluxdata": [3, 8], "fluxdata_2016": [8, 9], 
"fluxdata_unit": 9, "fluxerr": 9, "fmin_tnc": 7, "folk": [8, 9], "follow": [4, 6, 7, 9, 10], "forc": 7, "forest": [7, 8, 9, 10], "format": [0, 3, 4, 6, 7, 8, 10], "found": [2, 10], "four": [8, 9], "fr": [0, 5, 8, 9], "franc": [4, 6, 7, 10], "fread": [4, 7], "freedom": 4, "friction": [8, 10], "from": [2, 4, 5, 6, 7, 8, 9, 10], "ftimeformat": 2, "full": 10, "fulldai": 4, "function": [0, 2, 4, 5, 6, 7, 8, 9, 10], "further": 9, "g": [8, 9], "gap": [3, 4, 7], "gapfil": [0, 2, 3, 8, 9], "gcb": [4, 9], "gener": 4, "generel": 7, "germani": [4, 6, 7, 10], "get": [8, 9], "getboolean": 9, "getexamplepath": 8, "getfloat": 9, "getrefclass": 8, "gg": [8, 9], "github": [0, 2, 5], "give": [8, 9], "given": [4, 5, 6, 7, 8, 9, 10], "glob": [8, 9], "global": [0, 4, 5, 7, 8, 9], "good": [2, 8, 9], "govern": [5, 9], "gpp": [7, 8, 9], "gpp_1_1_1": [8, 9], "gpp_1_1_2": [8, 9], "gpp_f_1_1_1": [8, 9], "gpp_f_1_1_2": [8, 9], "gpp_pi_1_1_1": [8, 9], "gpp_pi_1_1_2": [8, 9], "gpp_pi_f_1_1_1": [8, 9], "gpp_pi_f_1_1_2": [8, 9], "greater": [8, 9, 10], "gregorian": 2, "gross": [0, 2, 8, 9], "gui": [8, 9], "guid": 3, "h": [4, 7, 8, 9, 10], "h_": [8, 9], "h_1_1_1": 8, "h_f": [8, 9], "h_pi": [8, 9], "h_pi_f": [8, 9], "ha": [2, 4, 5, 6, 7, 9, 10], "half": [], "happen": 9, "happi": 9, "have": [4, 6, 7, 8, 9, 10], "hdrop": [8, 9], "he": [0, 5, 9], "head": [4, 6, 7, 10], "head1": [4, 7], "header": [4, 7, 8, 9], "heat": 8, "heatmap": 8, "heavili": 5, "helmholtz": [4, 6, 7, 10], "help": [8, 9], "helper": 9, "henc": 9, "here": [8, 9], "hes_europ": [8, 9], "hesseflux": [0, 3, 8, 9], "hesseflux_exampl": [5, 8, 9], "hf": [8, 9], "hfill": [8, 9], "hfilt": 9, "hh": [4, 8, 9], "higher": 10, "highli": [5, 9], "histori": [0, 4, 6, 10], "hmeteo": 8, "horizont": 8, "hour": [4, 8], "hour_id": 4, "hourli": [], "hout": [8, 9], "houtlier": 9, "howev": [8, 9], "hpa": [4, 9], "hpart": [8, 9], "hr": [4, 7], "hta": 9, "html": [4, 7, 9, 10], "http": [2, 4, 5, 7, 9, 10], "humid": 9, "hustar": 9, "hvpd": 9, "hydrosystem": [4, 6, 
7, 10], "hz": 9, "i": [0, 1, 3, 4, 6, 7, 8, 9, 10], "ico": [0, 5, 8, 9], "identifi": 8, "idx": [4, 7], "ifil": [4, 7, 8], "ignor": 9, "ii": [8, 9], "iic": 8, "iloc": 8, "immedi": 9, "implement": [8, 9], "import": [4, 7, 9], "improv": [4, 6, 7, 10], "imput": 3, "includ": [0, 2, 5, 8, 9], "incom": [4, 6, 7, 8, 9, 10], "index": [4, 7, 8, 9, 10], "index_col": 9, "indic": [8, 9], "individu": [7, 8, 9], "infil": 9, "influenc": [8, 9], "init": 8, "initi": 2, "inner": 9, "inplac": [8, 9], "input": [4, 6, 7, 8, 9, 10], "inputfil": 9, "inra": [4, 6, 7, 10], "instal": [3, 8], "instead": [0, 2, 4, 9, 10], "institut": [4, 6, 7, 10], "int": [4, 6, 7, 8, 9, 10], "integr": 9, "interpol": 9, "interv": [9, 10], "invers": 4, "invok": [8, 9], "in\ufb02uenc": 4, "io": 5, "irr": 8, "isdai": [6, 7, 8, 9, 10], "isna": 8, "issu": 7, "istrydownload": 8, "isusingonebigseasononfewrecord": 8, "itc": 9, "iter": 6, "its": [2, 4, 9], "itself": 9, "j": 8, "jan": [0, 2, 10], "januari": 8, "jdate": [4, 7], "join": [8, 9], "jul": [0, 2, 10], "julian": 1, "jun": [0, 2, 4, 6, 7, 10], "june": 8, "just": 9, "k": [7, 8, 9], "keep": 9, "kelvin": 9, "keyword": [4, 7, 8, 9], "kk": 4, "know": 8, "known": 9, "kpa": 9, "l": [4, 6, 7, 10], "label": 8, "lambda": [8, 9], "land": 9, "larg": [4, 9], "lasslop": [0, 4, 5, 7, 8, 9], "last": 4, "latent": 8, "later": 8, "latter": [2, 8, 9], "le": [4, 8, 9], "le_1_1_1": 8, "le_f": [8, 9], "le_pi": [8, 9], "le_pi_f": [8, 9], "lead": 8, "least": 10, "leipzig": [4, 6, 7, 10], "len": [8, 9], "length": [6, 7, 8, 10], "less": 9, "level": 4, "librari": [4, 7, 8, 9, 10], "licens": [0, 1, 3, 4, 6, 7, 10], "light": [7, 8, 9], "like": [8, 9], "line": 9, "linux": 2, "list": [8, 9], "littl": 9, "load": 8, "loc": [8, 9], "local": [2, 7], "loggertool": 2, "logtool": [0, 2], "long": [8, 9], "longer": [4, 8, 9], "longestmarginalgap": 4, "longgap": [4, 8, 9], "look": [4, 8, 9], "low": 8, "m": [4, 7, 8, 9, 10], "maco": 2, "macu": [0, 1, 4, 6, 7, 9, 10], "mad": [6, 9], "madspik": [0, 3, 9], 
"mai": [0, 1, 2, 4, 6, 7, 8, 9, 10], "mail": 1, "main": [1, 9], "maintain": [6, 10], "make": [2, 5, 8, 9], "mandatori": [4, 7, 10], "mani": [8, 9], "mar": [4, 7], "march": 8, "margin": [4, 8, 9], "markdown": 2, "mask": 6, "matplotlib": 8, "matthia": [0, 1, 4, 5, 6, 7, 10], "mauder": [8, 9], "max": [8, 9], "maximum": [4, 6, 9, 10], "mc": [0, 1, 4, 6, 7, 9, 10], "mcuntz": [2, 5], "md": [4, 8, 9], "me": [4, 6, 7, 10], "mean": [0, 2, 4, 8, 9, 10], "mean_estim": 4, "measur": 4, "median": [6, 9], "meteo": [2, 8, 9], "meteorolog": [4, 8], "meteorologi": [7, 8], "method": [0, 5, 7, 8, 9, 10], "mi": [4, 7], "middl": [6, 9], "might": 9, "min": 9, "minim": 8, "minimum": [9, 10], "minrecordswithinseason": 8, "minrecordswithintemp": 8, "minrecordswithinyear": 8, "miss": [2, 4, 6, 7, 8, 9, 10], "mit": [0, 1, 4, 5, 6, 7, 10], "mn": 4, "mo": [4, 7], "model": 4, "modul": [0, 4, 6, 7, 9, 10], "moistur": 9, "mol": 8, "moment": 9, "month": [8, 10], "more": [0, 2, 9], "most": 9, "move": [0, 2, 4, 6], "mu": 8, "mueller": 1, "muli": 2, "multi": 10, "must": [4, 6, 7, 10], "m\u00fcller": [1, 5], "n": 4, "na": 8, "na_rep": [8, 9], "name": [4, 6, 7, 8, 9, 10], "nan": [4, 6, 7, 8, 9], "nanci": [4, 6, 7, 10], "nation": [4, 6, 7, 10], "nboot": [8, 9, 10], "nd": 4, "ndat": [4, 7], "nee": [2, 4, 7, 8, 9, 10], "nee2gpp": [0, 3, 8, 9], "nee_": [7, 10], "nee_f": [8, 9], "nee_pi_err_1_1_1": [8, 9], "nee_pi_f": [8, 9], "need": [7, 8], "neg": [7, 8, 9], "net": [3, 4, 7], "netcdf": 2, "new": [2, 8], "next": 4, "nfill": [6, 9], "night": [3, 6, 7, 8, 10], "nighttim": [7, 8, 9], "nmon": 10, "nogppnight": [7, 8, 9], "non": [4, 6, 7, 8, 9, 10], "none": [4, 6, 7, 8, 9, 10], "normal": 9, "notabl": [2, 9], "note": [8, 9], "nov": 7, "now": 2, "np": [4, 6, 7, 8, 9, 10], "nsampl": 8, "nscan": [6, 9], "nseason": 10, "ntaclass": 10, "ntdai": 9, "ntime": 4, "number": [4, 9, 10], "numpi": [0, 4, 5, 6, 7, 8, 9, 10], "nustarclass": 10, "observ": [4, 7, 10], "occ": [8, 9], "occurr": 9, "oct": [4, 6, 10], "off": 8, "old": 
0, "onc": 9, "one": [8, 9, 10], "ones": 4, "onli": [2, 4, 6, 8, 9, 10], "onlin": 4, "open": [8, 9], "option": [2, 4, 6, 7, 8, 9, 10], "order": 7, "org": [4, 7, 9, 10], "orient": 8, "origin": [6, 8, 9, 10], "other": [8, 9], "otherwis": [9, 10], "out": 8, "outflagcol": [8, 9], "outlier": 3, "output": [2, 3, 4], "output_file_of_postproc_europ": 9, "outputfil": [8, 9], "outundef": [8, 9], "over": 7, "own": [2, 9], "p": 8, "pa": [7, 8, 9], "packag": [0, 2, 5, 8, 9], "page": 0, "panda": [4, 5, 6, 7, 8, 9, 10], "papal": [0, 5, 6, 8, 9, 10], "param": 7, "paramet": [4, 6, 7, 9, 10], "parse_d": 9, "parser": 8, "part": 9, "partit": [0, 2, 3, 5, 7], "password": [0, 2], "path": [8, 9], "pd": [8, 9], "pdf": [6, 10], "peopl": [8, 9], "per": [2, 8, 9, 10], "percentil": [8, 9, 10], "perform": [8, 9], "period": 8, "photosynthesi": [7, 8], "piayda": [1, 6, 7, 10], "pip": [2, 5], "piv": 8, "pivot_t": 8, "pj": 9, "plat": 10, "plateaucrit": [9, 10], "plot": [6, 8, 9, 10], "plt": 8, "plu": [0, 5, 8, 9], "png": 8, "point": [4, 8, 9], "port": [4, 7], "posix": 8, "possibl": [0, 2, 4, 7, 9], "possibli": [4, 7, 10], "post": [0, 2, 3], "postgap": [8, 9], "postio": [8, 9], "postmad": 9, "postpartit": [8, 9], "postproc_europ": [5, 8, 9], "postswitch": 9, "postustar": 9, "pour": [4, 6, 7, 10], "precursor": 9, "predat": 9, "prefix": [8, 9], "prepend": [8, 9], "present": [8, 9, 10], "preserv": 2, "pressur": [4, 7, 8, 9], "primari": [8, 9], "print": [4, 7, 8, 9], "prob": 8, "process": [0, 2, 3], "produc": 9, "product": [8, 9], "program": 9, "project": 5, "provid": [0, 4, 5, 6, 7, 8, 9, 10], "public": 2, "pure": 2, "py": [0, 2, 5, 8, 9], "pydata": 9, "pyjam": [0, 2, 5, 9], "pylab": 8, "pypi": 0, "pyproject": [0, 2], "pyreadr": 8, "python": [2, 4, 5, 7, 8, 9, 10], "python3": 2, "qch": 4, "qcle": 4, "qcnee": 4, "qualiti": [8, 9], "quality_class": 4, "quantil": [2, 10], "quick": 3, "quot": 9, "r": [0, 5, 8, 9], "r5": 8, "r_fill": 8, "r_sdata": 8, "r_season": 8, "r_ustar_annu": 8, "r_ustar_season": 8, 
"radiat": [4, 6, 7, 8, 9, 10], "rais": 9, "random": 8, "rang": [8, 9], "rather": 2, "rau": [1, 6, 10], "raw": [6, 8, 9], "rd": 8, "rdata": 8, "rdylbu_r": 8, "read": [2, 3, 8], "read_csv": [8, 9], "read_r": 8, "readthedoc": [0, 2], "realis": 8, "recherch": [4, 6, 7, 10], "reco": [7, 8, 9], "reco_1_1_1": [8, 9], "reco_1_1_2": [8, 9], "reco_f_1_1_1": [8, 9], "reco_f_1_1_2": [8, 9], "reco_pi_1_1_1": [8, 9], "reco_pi_1_1_2": [8, 9], "reco_pi_f_1_1_1": [8, 9], "reco_pi_f_1_1_2": [8, 9], "recommend": [8, 9], "reddyproc": [0, 5, 9], "reduct": 10, "refactor": 0, "refer": [3, 7, 8, 9], "refin": 2, "reflect": [8, 9], "regist": 10, "reichstein": [4, 7, 8, 9], "rel": 9, "releas": [2, 7], "remov": [0, 2, 6, 7, 8, 10], "renam": [4, 8, 9], "replac": 8, "repositori": 2, "repres": [4, 9], "reproduc": 8, "requir": [0, 2, 5], "research": [4, 6, 7, 10], "reset": 8, "reshap": 4, "respir": [4, 7, 8, 9], "respons": [7, 8, 9], "restructur": 2, "result": [6, 8], "return": [0, 2, 4, 6, 7, 8, 9, 10], "review": [4, 7], "rg": [4, 7, 8], "rh": [8, 9], "rh_": 9, "rh_1_1_1": 8, "rh_id": 9, "right": 9, "rint": [4, 9], "robust": 9, "routin": [0, 2, 4, 7, 9], "row": 8, "rr": 8, "rseason": 8, "rst": [0, 4, 6, 10], "rule": [4, 7, 10], "run": [8, 9], "rustar": 8, "rustara": 8, "same": [4, 6, 7, 8, 9, 10], "sampl": [4, 8, 9], "satur": 9, "savefig": 8, "saverd": 8, "scan": 9, "scipi": 5, "script": [2, 5, 8, 9], "sdata": 8, "sdatetim": 8, "seaborn": 8, "search": 4, "season": [2, 7, 8, 9, 10], "seasonout": [0, 2, 8, 9, 10], "seasonyear": 8, "sebastian": [1, 5], "second": [8, 9], "secondli": [8, 9], "section": [8, 9], "seddyproc": 8, "see": [0, 1, 4, 5, 6, 7, 8, 9, 10], "seem": 8, "self": 9, "sensibl": 8, "sep": [0, 2, 8, 9], "separ": [4, 7, 9], "seri": [4, 6, 7, 8, 9, 10], "sestimateustarscenario": 8, "set": [0, 2, 6, 7, 8, 9, 10], "set_index": 8, "setup": [0, 2], "sever": [7, 9], "sflag": 9, "sgetestimatedustarthresholddistribut": 8, "sgetustarscenario": 8, "shall": 9, "shape": [4, 6, 7, 8, 9, 10], 
"short": [4, 7], "shortwav": [6, 7, 8, 9, 10], "should": [6, 8, 9], "show": 8, "shown": 9, "shrink": 8, "si": 8, "signific": 9, "similar": [0, 4, 5, 8, 9], "simpli": [5, 9], "sinc": [6, 10], "singl": [8, 9], "site": [0, 5, 9], "size": 6, "skip": [2, 4, 7, 9, 10], "skipna": 8, "skiprow": [8, 9], "slightli": 8, "slow": 9, "smaller": 9, "smallest": 10, "smdsgapfillustarscen": 8, "smooth": 9, "sn": 8, "sniffer": 9, "so": [8, 9], "softwar": [8, 9], "soil": 9, "some": [0, 5, 8, 9], "somewhat": 9, "sourc": [4, 6, 7, 10], "spike": [3, 6], "split": [8, 9], "splotfingerprinti": 8, "spring": 8, "spruce": 8, "stabl": 9, "stamp": 8, "standard": [4, 5, 6, 9], "start": [4, 6, 7, 9, 10], "startswith": 9, "station": 8, "statist": 9, "stemp": 8, "step": [0, 5, 6, 8, 9], "still": 9, "str": [4, 6, 7, 8, 9, 10], "strang": 8, "strap": 10, "strategi": [7, 8, 9], "stretch": 8, "strftime": [4, 7, 9, 10], "string": [2, 4, 7, 9, 10], "strong": 9, "strptime": [4, 7, 9, 10], "structur": [0, 2, 5], "sub": 8, "submit": 9, "subpackag": [2, 3], "suff": [8, 9], "suffix": [8, 9], "suggest": 9, "sum": [7, 8], "summari": [8, 9], "support": [0, 2], "suppos": 9, "sustar_detail": 8, "sw_dev": [4, 8, 9], "sw_in": [4, 6, 7, 8, 9, 10], "sw_in_1_1_1": [8, 9], "switch": 9, "swthr": [6, 7, 8, 9, 10], "sy": 9, "t": 8, "ta": [2, 4, 7, 8, 9, 10], "ta_": [4, 7, 8, 9, 10], "ta_1_1_1": 8, "ta_dev": [4, 8, 9], "ta_id": 9, "tab": 8, "taclass": 8, "tair": [4, 7, 8], "take": [8, 9, 10], "taken": [4, 7, 9, 10], "temperatur": [4, 7, 8, 9, 10], "templat": 9, "temporarili": 8, "test": 2, "test_gapfil": 4, "test_nee2gpp": 7, "text": [2, 8], "tha": 8, "tha_1998_nee_ori": 8, "than": [2, 4, 8, 9, 10], "tharandt": [4, 8], "thei": 9, "them": 9, "thi": [2, 4, 6, 7, 8, 9, 10], "thirdli": 8, "three": 8, "threshold": [2, 4, 6, 7, 9, 10], "through": [5, 9], "thuenen": 1, "tight": 8, "time": [4, 6, 7, 8, 9, 10], "timecolumn": 2, "timeformat": [4, 7, 8, 9, 10], "timestamp": 8, "tino": [1, 6, 10], "tk": 9, "tk3": 9, "tkelvin": 9, 
"to_csv": [8, 9], "to_datetim": 8, "to_numpi": 9, "toml": [0, 2], "took": 9, "tool": [2, 4, 9], "track": 9, "trail": 2, "translat": [8, 9], "transpos": [4, 7], "travisci": 2, "treat": [4, 6, 7, 9, 10], "true": [4, 6, 7, 8, 9, 10], "try": [8, 9], "ts_1_1_1": 8, "tsoil": 8, "tupl": [4, 10], "turbul": 9, "twine": 2, "two": [7, 8, 9], "txt": [8, 9], "type": [4, 6, 7, 9, 10], "typic": 8, "u": [2, 3, 8], "ufz": [1, 4, 6, 7, 10], "umol": [7, 10], "unaesthet": [8, 9], "uncertainti": [3, 4], "undef": [2, 4, 6, 7, 8, 9, 10], "under": [1, 5, 7, 9], "unflag": 8, "unit": [8, 9], "until": 9, "updat": [0, 2], "us": [0, 2, 4, 5, 6, 7, 8, 9, 10], "usag": 3, "useabl": 0, "user": [3, 5], "ustar": [2, 9, 10], "ustar_1_1_1": 8, "ustar_test_1_1_1": 9, "ustaraggr": 8, "ustarclass": 8, "ustarfilt": [0, 2, 3, 8, 9], "ustarmin": [8, 9, 10], "uwaterloo": 1, "v": 7, "v1": [0, 2], "v2": [0, 2], "v3": [0, 2], "v4": [0, 2], "v5": [0, 2], "valid": [2, 10], "valu": [0, 2, 4, 6, 7, 8, 9, 10], "valueerror": 9, "vapour": [4, 7, 8, 9], "vari": 8, "variabl": [4, 7, 8, 9, 10], "veloc": [8, 10], "verbos": [4, 8, 9], "veri": [4, 9], "via": [5, 9], "vicin": [4, 8, 9], "vignett": 8, "visualis": 8, "vovari": 6, "vpd": [4, 7, 8, 9], "vpd_1_1_1": 8, "vpd_dev": [4, 8, 9], "vpd_id": 9, "vpd_pi_1_1_1": 9, "vpdpa": 9, "w": [4, 7, 9], "wa": [0, 2, 4, 6, 7, 8, 9, 10], "wai": 5, "want": 9, "wave": [4, 7], "we": [8, 9], "week": 9, "well": [4, 8, 9, 10], "welltestpi": 5, "were": [8, 9], "wheel": 2, "when": [2, 6, 7, 9, 10], "where": [4, 6, 7, 8, 9, 10], "which": [8, 9, 10], "while": [4, 6, 7, 9, 10], "whitespac": 2, "whole": [8, 9], "window": [2, 4, 6, 9], "winter": 8, "within": [8, 9], "without": 9, "work": [4, 6, 8, 10], "workflow": 8, "would": [8, 9], "wrapper": 7, "write": 3, "written": [0, 4, 6, 7, 8, 9, 10], "x": 8, "xticklabel": 8, "y": [4, 7, 8, 9, 10], "y0": 4, "ydh": 8, "year": [2, 4, 8, 9, 10], "you": 8, "yr": [4, 7], "yticklabel": 8, "z": [6, 9], "zero": [4, 6, 7, 8, 9, 10], "\ufb02ux": 4}, "titles": ["API 
Reference", "Authors", "Changelog", "Contents", "gapfill", "hesseflux", "madspikes", "nee2gpp", "Comparison with REddyProc", "User Guide", "ustarfilter"], "titleterms": {"The": 9, "about": 5, "api": 0, "author": 1, "changelog": 2, "check": 9, "comparison": 8, "configur": 9, "content": 3, "covari": [5, 8, 9], "dai": 9, "data": [5, 8, 9], "datafram": 9, "distribut": 8, "document": 5, "ecosystem": [8, 9], "eddi": [5, 8, 9], "estim": [8, 9], "eu": [5, 9], "europ": [5, 9], "exchang": [8, 9], "file": [8, 9], "fill": [8, 9], "filter": 9, "flag": 9, "flux": [8, 9], "fluxdata": [5, 9], "format": [5, 9], "gap": [8, 9], "gapfil": 4, "guid": [5, 9], "half": 8, "hesseflux": 5, "hourli": 8, "i": 5, "import": 8, "imput": [8, 9], "instal": 5, "licens": 5, "madspik": 6, "nee2gpp": 7, "net": [8, 9], "night": 9, "note": [4, 7, 10], "outlier": 9, "output": [8, 9], "partit": [8, 9], "post": [5, 8, 9], "process": [5, 8, 9], "quick": 5, "read": 9, "reddyproc": 8, "refer": 0, "spike": 9, "subpackag": 0, "threshold": 8, "u": 9, "uncertainti": [8, 9], "usag": 5, "user": 9, "ustar": 8, "ustarfilt": 10, "write": [8, 9]}}) \ No newline at end of file diff --git a/docs/html/userguide.html b/docs/html/userguide.html index d07d62d..63bcba1 100644 --- a/docs/html/userguide.html +++ b/docs/html/userguide.html @@ -1,18 +1,17 @@ - - + - + - User Guide — hesseflux 5.1.dev0 documentation - - - - - - + User Guide — hesseflux 5.1.dev2 documentation + + + + + + @@ -20,8 +19,9 @@ + + - @@ -34,52 +34,56 @@
    -

    User Guide¶

    -

    hesseflux collects functions used for processing Eddy covariance data of the -ICOS ecosystem site FR-Hes.

    -

    The post-processing functionality for Eddy flux data is similar to the R-package -REddyProc and includes basically the steps described in Papale et al. -(Biogeosciences, 2006) plus some extensions such as the daytime method of flux -partitioning (Lasslop et al., Global Change Biology 2010) and the estimation -of uncertainties on the fluxes as in Lasslop et al. (Biogeosci, 2008).

    -

    Only the post-processing steps are described here. We are happy to discuss any -processing or post-processing directly. Contact us at mc (at) macu (dot) de.

    +

    User Guide¶

    +

    hesseflux collects functions used for processing Eddy covariance +data of the ICOS ecosystem site FR-Hes.

    +

    The post-processing functionality for Eddy flux data is similar to the +R-package REddyProc and includes basically the steps described in +Papale et al. (Biogeosciences, 2006) plus some extensions such as +the daytime method of flux partitioning (Lasslop et al., Global +Change Biology 2010) and the estimation of uncertainties on the +fluxes as in Lasslop et al. (Biogeosci, 2008).

    +

    Only the post-processing steps are described here. We are happy to +discuss any processing or post-processing directly. Contact us at mc +(at) macu (dot) de.

    -

    europe-fluxdata.eu file format¶

    -

    The first processing steps at the ICOS ecosystem site FR-Hes (not shown) brings -the data in a format that can be submitted to the database -europe-fluxdata.eu. The database predates ICOS and is somewhat a precursor of -the ICOS data processing.

    -

    The file format of europe-fluxdata.eu is hence very similar to the ICOS -format. The only known difference to us is the unit of atmospheric pressure, -which is in hPa in europe-fluxdata.eu and in kPa in ICOS ecosystems. The -file format has notably one header line with variable names. There are no units -in the file. hesseflux provides a little helper script -europe-fluxdata_units.py in the bin directory that adds a second header line -with units. The script can be run on the output as:

    +

    europe-fluxdata.eu file format¶

    +

    The first processing steps at the ICOS ecosystem site FR-Hes (not +shown) brings the data in a format that can be submitted to the +database europe-fluxdata.eu. The database predates ICOS and is +somewhat a precursor of the ICOS data processing.

    +

    The file format of europe-fluxdata.eu is hence very similar to the +ICOS format. The only known difference to us is the unit of +atmospheric pressure, which is in hPa in europe-fluxdata.eu and in +kPa in ICOS ecosystems. The file format has notably one header line +with variable names. There are no units in the file. hesseflux +provides a little helper script europe-fluxdata_units.py in the +bin directory that adds a second header line with units. The script +can be run on the output as:

    python europe-fluxdata_units.py output_file_of_postproc_europe-fluxdata.csv
     
    -

    Post-processing Eddy covariance data¶

    -

    The script postproc_europe-fluxdata.py in the example directory provides a -template for post-processing data that is in the europe-fluxdata.eu file -format. It basically makes all steps described in Papale et al. -(Biogeosciences, 2006). The script is governed by a configuration file in -Python’s standard configparser format. The example configuration file -hesseflux_example.cfg in the example directory is highly commented and -should be (almost) self-explanatory. The script is called like:

    +

    Post-processing Eddy covariance data¶

    +

    The script postproc_europe-fluxdata.py in the example directory +provides a template for post-processing data that is in the +europe-fluxdata.eu file format. It basically makes all steps +described in Papale et al. (Biogeosciences, 2006). The script is +governed by a configuration file in Python’s standard +configparser format. The example configuration file +hesseflux_example.cfg in the example directory is highly commented +and should be (almost) self-explanatory. The script is called like:

    python postproc_europe-fluxdata.py hesseflux_example.cfg
     
    -

    This script should be taken as a template for one’s own post-processing but -includes most standard post-processing steps.

    +

    This script should be taken as a template for one’s own +post-processing but includes most standard post-processing steps.

    Here we describe the main parts of the post-processing script.

    -

    Reading the configuration file¶

    -

    The script postproc_europe-fluxdata.py starts by reading the configuration -file hesseflux_example.cfg:

    +

    Reading the configuration file¶

    +

    The script postproc_europe-fluxdata.py starts by reading the +configuration file hesseflux_example.cfg:

    -

    Read the data¶

    -

    The script would then read in the data. The section in the configuration file is:

    +

    Read the data¶

    +

    The script would then read in the data. The section in the +configuration file is:

    -

    pandas will use the first column as index (index_col=0), assuming that -these are dates (parse_dates=[0]) in the format timeformat, where columns -are separated by sep. The defaults follow the europe-fluxdata.eu format but -similar formats may be used, and script and/or configuration file can be adapted -easily. Only variable names (still) have to follow europe-fluxdata.eu, -ICOS or Ameriflux format at the moment. If the input file has a second +

    pandas will use the first column as index (index_col=0), +assuming that these are dates (parse_dates=[0]) in the format +timeformat, where columns are separated by sep. The defaults +follow the europe-fluxdata.eu format but similar formats may be +used, and script and/or configuration file can be adapted easily. Only +variable names (still) have to follow europe-fluxdata.eu, ICOS +or Ameriflux format at the moment. If the input file has a second header line with units, one can skip it giving skiprows=[1] (not skiprows=1).

    -

    All input files are supposed to be in the same format if inputfile is a -comma-separated list of filenames, and they will be read with the same command -above. The pandas dataframes (df) will simply be appended.

    +

    All input files are supposed to be in the same format if inputfile +is a comma-separated list of filenames, and they will be read with the +same command above. The pandas dataframes (df) will simply be +appended.

    -

    The flag dataframe¶

    -

    All Not-a-Number (NaN) values will be set to undef and will be ignored in the -following.

    -

    This happens via a second dataframe (dff), having the same columns and index -as the input dataframe df, representing quality flags. All cells that have a -value other than 0 in the flag dataframe dff will be ignored in the -dataframe df. This means all cells of df with undef will be set to 2 in -dff immediately:

    +

    The flag dataframe¶

    +

    All Not-a-Number (NaN) values will be set to undef and will be +ignored in the following.

    +

    This happens via a second dataframe (dff), having the same columns +and index as the input dataframe df, representing quality flags. All +cells that have a value other than 0 in the flag dataframe dff +will be ignored in the dataframe df. This means all cells of df +with undef will be set to 2 in dff immediately:

    # NaN -> undef
     df.fillna(undef, inplace=True)
     
    @@ -208,13 +217,14 @@ 

    The flag dataframe -

    Day / night¶

    -

    Most post-processing routines differentiate between daytime and nighttime data. -Papale et al. (Biogeosciences, 2006) use a threshold of 20 W m-2 of -global radiation to distinguish between day and night. REddyProc uses -incoming shortwave radiation greater than 10 W m2 as daytime. The -shortwave radiation threshold swthr (same name as in ReddyProc) can be used to -define the appropriate threshold. The default is 10 W m2. The column +

    Day / night¶

    +

    Most post-processing routines differentiate between daytime and +nighttime data. Papale et al. (Biogeosciences, 2006) use a +threshold of 20 W m-2 of global radiation to distinguish +between day and night. REddyProc uses incoming shortwave radiation +greater than 10 W m2 as daytime. The shortwave radiation +threshold swthr (same name as in ReddyProc) can be used to define +the appropriate threshold. The default is 10 W m2. The column SW_IN_1_1_1 has to exist in the input data.

    # day / night
     isday = df['SW_IN_1_1_1'] > swthr
    @@ -222,9 +232,9 @@ 

    Day / night -

    Data check¶

    -

    postproc_europe-fluxdata.py checks the units of air temperature (i.e. the -first column starting with TA_).

    +

    Data check¶

    +

    postproc_europe-fluxdata.py checks the units of air temperature +(i.e. the first column starting with TA_).

    # Check Ta in Kelvin
     hta = ['TA_']
     hout = _findfirststart(hta, df.columns)
    @@ -235,14 +245,16 @@ 

    Data checkdf.loc[dff[hout[0]]==0, hout[0]] += tkelvin

    -

    _findfirststart(starts, names)() is a helper function that finds the first -occurrence in names that starts with the string starts. This helper function -is used for the moment until hesseflux has the functionality that the user -can give individual variable names.

    -

    The script calculates air vapour pressure deficit VPD_PI_1_1_1 from air -temperature and relative humidity (i.e. the first column starting with RH_) if -not given in input data using the function esat() of pyjams for -saturation vapour pressure:

    +

    _findfirststart(starts, names)() is a helper function that finds +the first occurrence in names that starts with the string +starts. This helper function is used for the moment until +hesseflux has the functionality that the user can give individual +variable names.

    +

    The script calculates air vapour pressure deficit VPD_PI_1_1_1 from +air temperature and relative humidity (i.e. the first column starting +with RH_) if not given in input data using the function +esat() of pyjams for saturation vapour +pressure:

    import numpy as np
     import pyjams as pj
     
    @@ -288,8 +300,8 @@ 

    Data checkdf.loc[dff[hout[0]] == 0, hout[0]] *= vpdpa

    -

    And finally determines the time intervals of the input data dtsec (s) and the -number of time steps per day ntday.

    +

    And finally determines the time intervals of the input data +dtsec (s) and the number of time steps per day ntday.

    # time stepping
     dsec = (df.index[1] - df.index[0]).seconds
     ntday = np.rint(86400 / dsec).astype(int)
    @@ -297,11 +309,12 @@ 

    Data check -

    Spike / outlier flagging¶

    -

    If outlier=True is set in the configuration file, spikes will be detected with -the method given in Papale et al. (Biogeosciences, 2006). A median absolute -deviation (MAD) filter will be used on the second derivatives of the time series -in two-week chunks. The section in hesseflux_example.cfg looks like:

    +

    Spike / outlier flagging¶

    +

    If outlier=True is set in the configuration file, spikes will be +detected with the method given in Papale et al. (Biogeosciences, +2006). A median absolute deviation (MAD) filter will be used on the +second derivatives of the time series in two-week chunks. The section +in hesseflux_example.cfg looks like:

    [POSTMAD]
     # spike / outlier detection, see help(hesseflux.madspikes)
     # scan window in days for spike detection
    @@ -318,26 +331,28 @@ 

    Spike / outlier flaggingderiv = 2

    -

    nfill is the number of days that are treated at once. nfill=1 means that the -time series will be stepped through day by day. nscan are the days to be -considered when calculating the mean absolute deviations. nscan=15 means that -7 days before the fill day, the fill day itself, and 7 days after the fill day -will be used for the robust statistic. However, only spikes detected within the -inner nfill days will be flagged in the nscan days. Spikes will be detected -if they deviate more than z mean absolute deviations from the median.

    -

    For example, nfill=3, nscan=15, and z=7 means that the time series will be -treated in steps of 3 days. Each 3 days, MAD statistics will be calculated using -15 days around the middle of the 3 days. Then all values within the 3 days that -deviate more 7 mean absolute deviations from the median of the 15 days will be -flagged.

    -

    deriv=2 applies the MAD filter to the second derivatives. A spike has -normally a strong curvature and hence a large second derivative. deriv=1 is -currently not implemented. deriv=0 applies the filter to the raw time series. -This might be useful to find outliers in smooth time series such as soil -moisture. deriv=0 is also used on the 20 Hz Eddy raw data in the quality and -uncertainty strategy of Mauder et al. (Agric Forest Meteo, 2013).

    -

    The default values, if options are not given in the configuration file, are -nscan=15, nfill=1, z=7, and deriv=2.

    +

    nfill is the number of days that are treated at once. nfill=1 +means that the time series will be stepped through day by day. nscan +are the days to be considered when calculating the mean absolute +deviations. nscan=15 means that 7 days before the fill day, the fill +day itself, and 7 days after the fill day will be used for the robust +statistic. However, only spikes detected within the inner nfill days +will be flagged in the nscan days. Spikes will be detected if they +deviate more than z mean absolute deviations from the median.

    +

    For example, nfill=3, nscan=15, and z=7 means that the time +series will be treated in steps of 3 days. Each 3 days, MAD statistics +will be calculated using 15 days around the middle of the 3 days. Then +all values within the 3 days that deviate more 7 mean absolute +deviations from the median of the 15 days will be flagged.

    +

    deriv=2 applies the MAD filter to the second derivatives. A spike +has normally a strong curvature and hence a large second +derivative. deriv=1 is currently not implemented. deriv=0 applies +the filter to the raw time series. This might be useful to find +outliers in smooth time series such as soil moisture. deriv=0 is +also used on the 20 Hz Eddy raw data in the quality and uncertainty +strategy of Mauder et al. (Agric Forest Meteo, 2013).

    +

    The default values, if options are not given in the configuration +file, are nscan=15, nfill=1, z=7, and deriv=2.

    postproc_europe-fluxdata.py calls the spike detection like this:

    # assume *_PI variables after raw variables, e.g. LE before LE_PI,
     # if available
    @@ -352,15 +367,15 @@ 

    Spike / outlier flaggingdff.loc[sflag[hh] == 2, hh] = 3

    -

    The function madspikes() returns flag columns for the input variables -where spiked data is flagged as 2. The scripts sets the corresponding columns in -the flag dataframe dff to 3 (3 is used just to keep track where the flag was -set).

    +

    The function madspikes() returns flag +columns for the input variables where spiked data is flagged as 2. The +scripts sets the corresponding columns in the flag dataframe dff to +3 (3 is used just to keep track where the flag was set).

    -

    u* filtering¶

    -

    If ustar=True is set in the configuration file, a u*-filter will be applied -following Papale et al. (Biogeosciences, 2006).

    +

    u* filtering¶

    +

    If ustar=True is set in the configuration file, a u*-filter will be +applied following Papale et al. (Biogeosciences, 2006).

    The section in hesseflux_example.cfg looks like:

    [POSTUSTAR]
     # ustar filtering, see help(hesseflux.ustarfilter)
    @@ -385,24 +400,27 @@ 

    u* filteringapplyustarflag = True

    -

    A minimum threshold ustarmin is defined under which data is flagged by -default. Papale et al. (Biogeosciences, 2006) suggest 0.1 for forests and -0.01 for other land cover types. postproc_europe-fluxdata.py sets 0.01 as its -default value. Uncertainty of the u* threshold is calculated via bootstrapping -in Papale et al. nboot gives the number of bootstrapping for the uncertainty -estimate of the u* threshold. The algorithm divides the input data in 6 -temperature classes and 20 u* classes within each temperature class per season. -It then determines the threshold for each season as the average u* of the u* -class where the average CO2 flux is less than plateaucrit times the average of -all CO2 fluxes with u* greater than the u* class. Papale et al. -(Biogeosciences, 2006) took 6 temperature classes and plateaucrit=0.99, -while REddyProc takes 7 temperature classes and plateaucrit=0.95, which are -also the defaults in hesseflux. Papale et al. (Biogeosciences, 2006) also -used the maximum of the four seasonal u* thresholds as the threshold applied to -all the year. If seasonout=True, the seasonal u* thresholds will be applied -instead of the maximum of four seasonal u* thresholds. One can also set -applyustarflag=False to just calculate the u* thresholds without applying them -to experiment with different parameter values.

    +

    A minimum threshold ustarmin is defined under which data is flagged +by default. Papale et al. (Biogeosciences, 2006) suggest 0.1 for +forests and 0.01 for other land cover +types. postproc_europe-fluxdata.py sets 0.01 as its default +value. Uncertainty of the u* threshold is calculated via bootstrapping +in Papale et al. nboot gives the number of bootstrapping for the +uncertainty estimate of the u* threshold. The algorithm divides the +input data in 6 temperature classes and 20 u* classes within each +temperature class per season. It then determines the threshold for +each season as the average u* of the u* class where the average CO2 +flux is less than plateaucrit times the average of all CO2 fluxes +with u* greater than the u* class. Papale et al. (Biogeosciences, +2006) took 6 temperature classes and plateaucrit=0.99, while +REddyProc takes 7 temperature classes and plateaucrit=0.95, which +are also the defaults in hesseflux. Papale et +al. (Biogeosciences, 2006) also used the maximum of the four +seasonal u* thresholds as the threshold applied to all the year. If +seasonout=True, the seasonal u* thresholds will be applied instead +of the maximum of four seasonal u* thresholds. One can also set +applyustarflag=False to just calculate the u* thresholds without +applying them to experiment with different parameter values.

    The u*-filtering is then performed as:

    hfilt = ['NEE', 'USTAR', 'TA_']
     hout = _findfirststart(hfilt, df.columns)
    @@ -435,27 +453,29 @@ 

    u* filteringdff.loc[flag == 2, hh] = 5

    -

    The function ustarfilter() returns the u* 5, 50 and 95 percentiles of the -bootstrapped u* thresholds as well as flag columns, which is 0 except where u* -is smaller than the median u*-threshold. The scripts sets the columns of the -Eddy fluxes in the flag dataframe dff to 5 (5 to keep track where the flag was -set).

    -

    One might not want to do u* filtering, but use for example Integral Turbulence -Characteristics (ITC) that were calculated, for example, with -EddyPro(R). These should be set right at the start after reading the input -data into the dataframe df and producing the flag dataframe dff like:

    +

    The function ustarfilter() returns the u* +5, 50 and 95 percentiles of the bootstrapped u* thresholds as well as +flag columns, which are 0 except where u* is smaller than the median +u*-threshold. The script sets the columns of the Eddy fluxes in the +flag dataframe dff to 5 (5 to keep track where the flag was set).

    +

    One might not want to do u* filtering, but use, for example, Integral +Turbulence Characteristics (ITC) that were calculated +with EddyPro(R). These should be set right at the start +after reading the input data into the dataframe df and producing the +flag dataframe dff like:

    dff.loc[df['FC_SSITC_TEST_1_1_1']>0, 'FC_1_1_1'] = 2
     
    -

    Partitioning of Net Ecosystem Exchange¶

    -

    If partition=True is set in the configuration file, two estimates of Gross -Primary Productivity (GPP) and Ecosystem Respiration (RECO) are calculated: -firstly with the method of Reichstein et al. (Glob Change Biolo, 2005) using -nighttime data only, and secondly with the method of Lasslop et al. (Glob -Change Biolo, 2010) using a light-response curve on ‘daytime’ data. The -configuration hesseflux_example.cfg gives only one option in this section:

    +

    Partitioning of Net Ecosystem Exchange¶

    +

    If partition=True is set in the configuration file, two estimates of +Gross Primary Productivity (GPP) and Ecosystem Respiration (RECO) are +calculated: firstly with the method of Reichstein et al. (Glob Change +Biolo, 2005) using nighttime data only, and secondly with the method +of Lasslop et al. (Glob Change Biolo, 2010) using a light-response +curve on ‘daytime’ data. The configuration hesseflux_example.cfg +gives only one option in this section:

    [POSTPARTITION]
     # partitioning, see help(hesseflux.nee2gpp)
     # if True, set GPP=0 at night
    @@ -463,11 +483,11 @@ 

    Partitioning of Net Ecosystem Exchangenogppnight = False

    -

    Many people find it unaesthetic that the ‘daytime’ method gives negative GPP at -night. We esteem this the correct behaviour, reflecting the uncertainty in the -gross flux estimates. However, one can set nogppnight=True to set GPP=0 at -night and RECO=NEE in this case, the latter having then all variability of the -net fluxes.

    +

    Many people find it unaesthetic that the ‘daytime’ method gives +negative GPP at night. We esteem this the correct behaviour, +reflecting the uncertainty in the gross flux estimates. However, one +can set nogppnight=True to set GPP=0 at night and RECO=NEE in this +case, the latter having then all variability of the net fluxes.

    The partitioning is calculated as:

    hpart = ['NEE', 'SW_IN', 'TA_', 'VPD']
     hout = _findfirststart(hpart, df.columns)
    @@ -505,11 +525,12 @@ 

    Partitioning of Net Ecosystem Exchange -

    Gap-filling / Imputation¶

    -

    Marginal Distribution Sampling (MDS) of Reichstein et al. (Glob Change Biolo, -2005) is implemented as imputation or so-called gap-filling algorithm. The -algorithm looks for similar conditions in the vicinity of a missing data point, -if option fill=True. The configuration file is:

    +

    Gap-filling / Imputation¶

    +

    Marginal Distribution Sampling (MDS) of Reichstein et al. (Glob +Change Biolo, 2005) is implemented as imputation or so-called +gap-filling algorithm. The algorithm looks for similar conditions in +the vicinity of a missing data point, if option fill=True. The +configuration file is:

    [POSTGAP]
     # gap-filling with MDS, see help(hesseflux.gapfill)
     # max deviation of SW_IN
    @@ -525,12 +546,13 @@ 

    Gap-filling / Imputationlonggap = 60

    -

    If a flux data point is missing, times with incoming shortwave radiation in the -range of sw_dev around the actual shortwave radiation will be looked for, as -well as air temperatures within ta_dev and air vapour pressure deficit within -vpd_dev. The mean of flux values at the similar conditions is then taken as -fill value. The function does not fill long gaps longer than longgap days. A -good summary is given in Fig. A1 of Reichstein et al. (Glob Change Biolo, +

    If a flux data point is missing, times with incoming shortwave +radiation in the range of sw_dev around the actual shortwave +radiation will be looked for, as well as air temperatures within +ta_dev and air vapour pressure deficit within vpd_dev. The mean of +flux values under similar conditions is then taken as fill value. The +function does not fill gaps longer than longgap days. A good +summary is given in Fig. A1 of Reichstein et al. (Glob Change Biolo, 2005).

    The script invokes MDS as:

    hfill = ['SW_IN', 'TA_', 'VPD']
    @@ -564,19 +586,22 @@ 

    Gap-filling / Imputationdff = pd.concat([dff, dff_f], axis=1)

    -

    The function gapfill() returns the filled columns df_f as well as flag -columns dff_f indicating fill quality. Fill quality A-C of Reichstein et al. -(Glob Change Biolo, 2005) are translated to quality flags 1-3.

    +

    The function gapfill() returns the filled +columns df_f as well as flag columns dff_f indicating fill +quality. Fill quality A-C of Reichstein et al. (Glob Change Biolo, +2005) are translated to quality flags 1-3.

    -

    Uncertainty estimates of flux data¶

    +

    Uncertainty estimates of flux data¶

    Lasslop et al. (Biogeosci, 2008) presented an algorithm to estimate -uncertainties of Eddy covariance fluxes using Marginal Distribution Sampling -(MDS). The gap-filling function gapfill() can be used for uncertainty -estimation giving the keyword err=True. The same thresholds as for gap-filling -are used.

    -

    The script postproc_europe-fluxdata.py uses the function gapfill() to -calculate flux uncertainties like:

    +uncertainties of Eddy covariance fluxes using Marginal Distribution +Sampling (MDS). The gap-filling function +gapfill() can be used for uncertainty +estimation giving the keyword err=True. The same thresholds as for +gap-filling are used.

    +

    The script postproc_europe-fluxdata.py uses the function +gapfill() to calculate flux uncertainties +like:

    hfill = ['SW_IN', 'TA_', 'VPD']
     hout = _findfirststart(hfill, df.columns)
     assert len(hout) == 3, 'Could not find SW_IN, TA or VPD in input file.'
    @@ -613,15 +638,15 @@ 

    Uncertainty estimates of flux datadff[colout[cc]] = dff[colin[cc]]

    -

    We recommend, however, to calculate flux uncertainties with the Eddy covariance -raw data as described in Mauder et al. (Agric Forest Meteo, 2013). This is -for example implemented in the processing softwares EddyPro(R) or -TK3.

    +

    We recommend, however, calculating flux uncertainties with the Eddy +covariance raw data as described in Mauder et al. (Agric Forest +Meteo, 2013). This is for example implemented in the processing +software EddyPro(R) or TK3.

    -

    Writing the output file¶

    -

    The dataframe is written to the output file with pandas -pandas.Dataframe.to_csv():

    +

    Writing the output file¶

    +

    The dataframe is written to the output file with pandas +to_csv() method:

    df.to_csv(outputfile, sep=sep, na_rep=str(undef), index=True,
               date_format=timeformat)
     
    @@ -641,13 +666,15 @@

    Writing the output fileoutflagcols = False

    -

    If outputfile is missing or empty, the script will try to open a GUI, where -one can choose an output directory and the filename will then be name of the -configuration file with the suffix ‘.csv’. If outundef=True then all values in -df with a flag value in dff greater than zero will be set to undef. The -script can also add flag columns, prefixed with flag_, for each column in -df, if outflagcols=True. The script will always output the columns with the -flags for fill quality if gap-filling was performed: option fill=True.

    +

    If outputfile is missing or empty, the script will try to open a +GUI, where one can choose an output directory and the filename will +then be the name of the configuration file with the suffix ‘.csv’. If +outundef=True then all values in df with a flag value in dff +greater than zero will be set to undef. The script can also add flag +columns, prefixed with flag_, for each column in df, if +outflagcols=True. The script will always output the columns with the +flags for fill quality if gap-filling was performed: option +fill=True.

    The whole code to write the output file is: