diff --git a/.codespellrc b/.codespellrc
index a11a40ac..439f6ea7 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,5 +1,5 @@
 [codespell]
-skip = *.nc,*.ipynb,./local_work,./float_source,./binder,./.github,*.log,./.git,./docs/_build,./docs/_static,./argopy/tests/test_data,./build,./docs/mycache_folder,./docs/examples/cache_bgc
+skip = *.nc,*.ipynb,./local_work,./float_source,./binder,./.github,*.log,./.git,./docs/_build,./docs/_static,./argopy/tests/test_data,./build,./docs/mycache_folder,./docs/examples/cache_bgc,./argopy/static/assets/*.json
 count =
 quiet-level = 3
 ignore-words-list = PRES, pres, idel
\ No newline at end of file
diff --git a/.flake8 b/.flake8
index c9f9129c..845125b6 100644
--- a/.flake8
+++ b/.flake8
@@ -10,7 +10,8 @@ ignore =
     E501,
     # line break before binary operator
     W503
-
+    # whitespace before ':' (https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#e203)
+    E203
 exclude =
     # No need to traverse our git directory
     .git,
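Note on the E203 addition above: Black deliberately formats slices with complex bounds using a space before ':', which flake8 reports as E203 unless it is ignored. A minimal illustration of the formatting this exception tolerates (names are hypothetical):

    # Black output that flake8 would otherwise flag as E203:
    chunk = values[lower_bound + offset : upper_bound + offset]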
diff --git a/.github/workflows/pytests-upstream.yml b/.github/workflows/pytests-upstream.yml
index d98bc65f..7cde501c 100644
--- a/.github/workflows/pytests-upstream.yml
+++ b/.github/workflows/pytests-upstream.yml
@@ -74,7 +74,7 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.9", "3.10"]
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
 
     steps:
@@ -200,7 +200,7 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.9", "3.10"]
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
 
     steps:
diff --git a/.github/workflows/pytests.yml b/.github/workflows/pytests.yml
index 43d77dec..23afc8f5 100644
--- a/.github/workflows/pytests.yml
+++ b/.github/workflows/pytests.yml
@@ -52,7 +52,7 @@ jobs:
       max-parallel: 12
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.9", "3.10"]
         os: ["ubuntu-latest", "windows-latest", "macos-latest"]
         experimental: [false]
 
@@ -174,7 +174,7 @@ jobs:
       max-parallel: 12
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.9", "3.10"]
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
         experimental: [false]
 
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index b71b0dde..905f2f94 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -6,7 +6,7 @@ In the interest of fostering an open and welcoming environment, we as
 contributors and maintainers pledge to making participation in our project and
 our community a harassment-free experience for everyone, regardless of age, body
 size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
+level of experience, education, socioeconomic status, nationality, personal
 appearance, race, religion, or sexual identity and orientation.
 
 ## Our Standards
diff --git a/argopy/__init__.py b/argopy/__init__.py
index 3eef4a01..824dbf70 100644
--- a/argopy/__init__.py
+++ b/argopy/__init__.py
@@ -29,7 +29,6 @@
 
 # Other Import
 # from . import utils  # noqa: E402
-from . import utilities  # noqa: E402  # being deprecated until 0.1.15, then remove
 from . import stores  # noqa: E402
 from . import errors  # noqa: E402
 from . import plot  # noqa: E402
@@ -72,7 +71,6 @@
     "ArgoDOI",  # Class
 
     # Submodules:
-    "utilities",  # being deprecated until 0.1.15, then remove
     # "utils",
     "errors",
     "plot",
diff --git a/argopy/data_fetchers/argovis_data.py b/argopy/data_fetchers/argovis_data.py
index 0fc4b6b9..a8362b0e 100644
--- a/argopy/data_fetchers/argovis_data.py
+++ b/argopy/data_fetchers/argovis_data.py
@@ -1,9 +1,3 @@
-#!/bin/env python
-# -*coding: UTF-8 -*-
-#
-# Argo data fetcher for Argovis.
-#
-
 import numpy as np
 import pandas as pd
 import xarray as xr
@@ -23,7 +17,7 @@
 access_points = ["wmo", "box"]
 exit_formats = ["xarray"]
 dataset_ids = ["phy"]  # First is default
-api_server = "https://argovis-api.colorado.edu" 
+api_server = "https://argovis-api.colorado.edu"
 api_server_check = "https://argovis-api.colorado.edu/ping"
 
 log = logging.getLogger("argopy.argovis.data")
@@ -58,7 +52,7 @@ def __init__(
         chunks: str = "auto",
         chunks_maxsize: dict = {},
         api_timeout: int = 0,
-        **kwargs
+        **kwargs,
     ):
         """Instantiate an Argovis Argo data loader
 
@@ -95,7 +89,7 @@ def __init__(
             "cachedir": cachedir,
             "timeout": timeout,
             # "size_policy": "head",  # deprecated
-            "client_kwargs": {"headers": {'x-argokey': OPTIONS['argovis_api_key']}},
+            "client_kwargs": {"headers": {"x-argokey": OPTIONS["argovis_api_key"]}},
         }
         self.fs = kwargs["fs"] if "fs" in kwargs else httpstore(**self.store_opts)
 
@@ -134,9 +128,12 @@ def __repr__(self):
         summary = ["<datafetcher.argovis>"]
         summary.append("Name: %s" % self.definition)
         summary.append("API: %s" % api_server)
-        api_key = self.fs.fs.client_kwargs['headers']['x-argokey']
-        if api_key == DEFAULT['argovis_api_key']:
-            summary.append("API KEY: '%s' (get a free key at https://argovis-keygen.colorado.edu)" % api_key)
+        api_key = self.fs.fs.client_kwargs["headers"]["x-argokey"]
+        if api_key == DEFAULT["argovis_api_key"]:
+            summary.append(
+                "API KEY: '%s' (get a free key at https://argovis-keygen.colorado.edu)"
+                % api_key
+            )
         else:
             summary.append("API KEY: '%s'" % api_key)
         summary.append("Domain: %s" % format_oneline(self.cname()))
@@ -286,24 +283,32 @@ def json2dataframe(self, profiles):
         for profile in data:
             # construct metadata dictionary that will be repeated for each level
             metadict = {
-                'date': profile['timestamp'],
-                'date_qc': profile['timestamp_argoqc'],
-                'lat': profile['geolocation']['coordinates'][1],
-                'lon': profile['geolocation']['coordinates'][0],
-                'cycle_number': profile['cycle_number'],
-                'DATA_MODE': profile['data_info'][2][0][1],
-                'DIRECTION': profile['profile_direction'],
-                'platform_number': profile['_id'].split('_')[0],
-                'position_qc': profile['geolocation_argoqc'],
-                'index': 0
+                "date": profile["timestamp"],
+                "date_qc": profile["timestamp_argoqc"],
+                "lat": profile["geolocation"]["coordinates"][1],
+                "lon": profile["geolocation"]["coordinates"][0],
+                "cycle_number": profile["cycle_number"],
+                "DATA_MODE": profile["data_info"][2][0][1],
+                "DIRECTION": profile["profile_direction"],
+                "platform_number": profile["_id"].split("_")[0],
+                "position_qc": profile["geolocation_argoqc"],
+                "index": 0,
             }
             # construct a row for each level in the profile
-            for i in range(len(profile['data'][profile['data_info'][0].index('pressure')])):
+            for i in range(
+                len(profile["data"][profile["data_info"][0].index("pressure")])
+            ):
                 row = {
-                    'temp': profile['data'][profile['data_info'][0].index('temperature')][i],
-                    'pres': profile['data'][profile['data_info'][0].index('pressure')][i],
-                    'psal': profile['data'][profile['data_info'][0].index('salinity')][i],
-                    **metadict
+                    "temp": profile["data"][
+                        profile["data_info"][0].index("temperature")
+                    ][i],
+                    "pres": profile["data"][profile["data_info"][0].index("pressure")][
+                        i
+                    ],
+                    "psal": profile["data"][profile["data_info"][0].index("salinity")][
+                        i
+                    ],
+                    **metadict,
                 }
                 rows.append(row)
         df = pd.DataFrame(rows)
@@ -375,8 +380,8 @@ def to_xarray(self, errors: str = "ignore"):
         ds.attrs["Fetched_from"] = self.server
         try:
             ds.attrs["Fetched_by"] = getpass.getuser()
-        except:
-            ds.attrs["Fetched_by"] = 'anonymous'
+        except:  # noqa: E722
+            ds.attrs["Fetched_by"] = "anonymous"
         ds.attrs["Fetched_date"] = pd.to_datetime("now", utc=True).strftime("%Y/%m/%d")
         ds.attrs["Fetched_constraints"] = self.cname()
         ds.attrs["Fetched_uri"] = self.uri
@@ -435,9 +440,9 @@ def init(self, WMO=[], CYC=None, **kwargs):
     def get_url(self, wmo: int, cyc: int = None) -> str:
         """Return path toward the source file of a given wmo/cyc pair"""
         if cyc is None:
-            return  f'{self.server}/argo?platform={str(wmo)}&data=pressure,temperature,salinity'
+            return f"{self.server}/argo?platform={str(wmo)}&data=pressure,temperature,salinity"
         else:
-            return f'{self.server}/argo?id={str(wmo)}_{str(cyc).zfill(3)}&data=pressure,temperature,salinity'
+            return f"{self.server}/argo?id={str(wmo)}_{str(cyc).zfill(3)}&data=pressure,temperature,salinity"
 
     @property
     def uri(self):
@@ -488,10 +493,7 @@ def init(self, box: list, **kwargs):
 
     def get_url(self):
         """Return the URL used to download data"""
-        shape = [
-                    [self.BOX[0], self.BOX[2]],  # ll
-                    [self.BOX[1], self.BOX[3]]  # ur
-                ]
+        shape = [[self.BOX[0], self.BOX[2]], [self.BOX[1], self.BOX[3]]]  # ll  # ur
         strShape = str(shape).replace(" ", "")
         url = self.server + "/argo?data=pressure,temperature,salinity&box=" + strShape
         url += "&startDate={}".format(
@@ -558,4 +560,4 @@ def uri(self):
             for box in boxes:
                 urls.append(Fetch_box(box=box, ds=self.dataset_id).get_url())
 
-        return self.url_encode(urls)
\ No newline at end of file
+        return self.url_encode(urls)
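Note on the json2dataframe reformatting above: in the Argovis response handled here, each profile carries parallel per-level arrays in profile["data"], with parameter names listed in profile["data_info"][0]. A minimal sketch of the row construction, assuming that layout and the metadict built in the loop:

    # map a parameter name to its column in the parallel "data" arrays:
    names = profile["data_info"][0]
    pres = profile["data"][names.index("pressure")]
    temp = profile["data"][names.index("temperature")]
    psal = profile["data"][names.index("salinity")]
    # one row per vertical level, profile metadata repeated on every row:
    rows = [dict(pres=p, temp=t, psal=s, **metadict) for p, t, s in zip(pres, temp, psal)]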
diff --git a/argopy/data_fetchers/erddap_data.py b/argopy/data_fetchers/erddap_data.py
index 8300c39a..c0b27980 100644
--- a/argopy/data_fetchers/erddap_data.py
+++ b/argopy/data_fetchers/erddap_data.py
@@ -27,8 +27,10 @@
 from ..utils.format import format_oneline
 from ..stores import httpstore
 from ..errors import ErddapServerError, DataNotFound
-from ..stores import indexstore_pd as ArgoIndex  # make sure we work with the Pandas index store
-from ..utils import is_list_of_strings, to_list,Chunker
+from ..stores import (
+    indexstore_pd as ArgoIndex,
+)  # make sure we work with the Pandas index store
+from ..utils import is_list_of_strings, to_list, Chunker
 from .proto import ArgoDataFetcherProto
 
 
@@ -159,11 +161,15 @@ def __init__(  # noqa: C901
             # This will be used to:
             # - retrieve the list of BGC variables to ask the erddap server
             # - get <param>_data_mode information because we can't get it from the server
-            self.indexfs = kwargs['indexfs'] if 'indexfs' in kwargs else ArgoIndex(
-                index_file='argo_synthetic-profile_index.txt',  # the only available in the erddap
-                cache=kwargs['cache_index'] if 'cache_index' in kwargs else cache,
-                cachedir=cachedir,
-                timeout=timeout,
+            self.indexfs = (
+                kwargs["indexfs"]
+                if "indexfs" in kwargs
+                else ArgoIndex(
+                    index_file="argo_synthetic-profile_index.txt",  # the only available in the erddap
+                    cache=kwargs["cache_index"] if "cache_index" in kwargs else cache,
+                    cachedir=cachedir,
+                    timeout=timeout,
+                )
             )
 
             # To handle bugs in the erddap server, we need the list of parameters on the server:
@@ -615,7 +621,9 @@ def getNfromncHeader(url):
         return N
 
 
-    def post_process(self, this_ds, add_dm: bool = True, URI: list = None):  # noqa: C901
+    def post_process(
+        self, this_ds, add_dm: bool = True, URI: list = None
+    ):  # noqa: C901
         """Post-process a xarray.DataSet created from a netcdf erddap response
 
         This method can also be applied on a regular dataset to re-enforce format compliance
@@ -667,8 +675,8 @@ def post_process(self, this_ds, add_dm: bool = True, URI: list = None):  # noqa:
 
         # In the case of a parallel download, this is a trick to preserve the chunk uri in the chunk dataset:
         # (otherwise all chunks have the same list of uri)
-        Fetched_url = this_ds.attrs.get('Fetched_url', False)
-        Fetched_constraints = this_ds.attrs.get('Fetched_constraints', False)
+        Fetched_url = this_ds.attrs.get("Fetched_url", False)
+        Fetched_constraints = this_ds.attrs.get("Fetched_constraints", False)
 
         # Finally overwrite erddap attributes with those from argopy:
         raw_attrs = this_ds.attrs.copy()
@@ -691,12 +699,14 @@ def post_process(self, this_ds, add_dm: bool = True, URI: list = None):  # noqa:
         this_ds.attrs["Fetched_from"] = self.erddap.server
         try:
             this_ds.attrs["Fetched_by"] = getpass.getuser()
-        except:
-            this_ds.attrs["Fetched_by"] = 'anonymous'
+        except:  # noqa: E722
+            this_ds.attrs["Fetched_by"] = "anonymous"
         this_ds.attrs["Fetched_date"] = pd.to_datetime("now", utc=True).strftime(
             "%Y/%m/%d"
         )
-        this_ds.attrs["Fetched_constraints"] = self.cname() if not Fetched_constraints else Fetched_constraints
+        this_ds.attrs["Fetched_constraints"] = (
+            self.cname() if not Fetched_constraints else Fetched_constraints
+        )
         this_ds.attrs["Fetched_uri"] = URI if not Fetched_url else Fetched_url
         this_ds = this_ds[np.sort(this_ds.data_vars)]
 
diff --git a/argopy/data_fetchers/gdacftp_data.py b/argopy/data_fetchers/gdacftp_data.py
index aae219d7..10442340 100644
--- a/argopy/data_fetchers/gdacftp_data.py
+++ b/argopy/data_fetchers/gdacftp_data.py
@@ -283,7 +283,7 @@ def _preprocess_multiprof(self, ds):
         ds.attrs["Fetched_from"] = self.server
         try:
             ds.attrs["Fetched_by"] = getpass.getuser()
-        except:
+        except:  # noqa: E722
             ds.attrs["Fetched_by"] = 'anonymous'
         ds.attrs["Fetched_date"] = pd.to_datetime("now", utc=True).strftime("%Y/%m/%d")
         ds.attrs["Fetched_constraints"] = self.cname()
@@ -352,7 +352,7 @@ def to_xarray(self, errors: str = "ignore"):
         ds.attrs["Fetched_from"] = self.server
         try:
             ds.attrs["Fetched_by"] = getpass.getuser()
-        except:
+        except:  # noqa: E722
             ds.attrs["Fetched_by"] = 'anonymous'
         ds.attrs["Fetched_date"] = pd.to_datetime("now", utc=True).strftime("%Y/%m/%d")
         ds.attrs["Fetched_constraints"] = self.cname()
diff --git a/argopy/plot/plot.py b/argopy/plot/plot.py
index 8db1b22e..fd5486ca 100644
--- a/argopy/plot/plot.py
+++ b/argopy/plot/plot.py
@@ -43,34 +43,39 @@
 log = logging.getLogger("argopy.plot.plot")
 
 
-def open_sat_altim_report(WMO: Union[str, list] = None, embed: Union[str, None] = "dropdown", **kwargs):
-    """ Insert the CLS Satellite Altimeter Report figure in notebook cell
+def open_sat_altim_report(
+    WMO: Union[str, list] = None, embed: Union[str, None] = "dropdown", **kwargs
+):
+    """Insert the CLS Satellite Altimeter Report figure in notebook cell
 
-        This is the method called when using the facade fetcher methods ``plot``::
+    This is the method called when using the facade fetcher methods ``plot``::
 
-            DataFetcher().float(6902745).plot('qc_altimetry')
+        DataFetcher().float(6902745).plot('qc_altimetry')
 
-        Parameters
-        ----------
-        WMO: int or list
-            The float WMO to display. By default, this is set to None and will insert the general dashboard.
-        embed: str, default='dropdown'
-            Set the embedding method. If set to None, simply return the list of urls to figures.
-            Possible values are: ``dropdown``, ``slide`` and ``list``.
+    Parameters
+    ----------
+    WMO: int or list
+        The float WMO to display. By default, this is set to None and will insert the general dashboard.
+    embed: str, default='dropdown'
+        Set the embedding method. If set to None, simply return the list of urls to figures.
+        Possible values are: ``dropdown``, ``slide`` and ``list``.
 
-        Returns
-        -------
-        list of Image with ``list`` embed or a dict with URLs
+    Returns
+    -------
+    list of Image with ``list`` embed or a dict with URLs
 
-        Notes
-        -----
-        Requires IPython to work as expected. If IPython is not available only URLs are returned.
+    Notes
+    -----
+    Requires IPython to work as expected. If IPython is not available only URLs are returned.
 
     """
-    warnUnless(has_ipython, "requires IPython to work as expected, only URLs are returned otherwise")
+    warnUnless(
+        has_ipython,
+        "requires IPython to work as expected, only URLs are returned otherwise",
+    )
 
-    if 'api_server' in kwargs:
-        api_server = kwargs['api_server']
+    if "api_server" in kwargs:
+        api_server = kwargs["api_server"]
     else:
         api_server = "https://data-argo.ifremer.fr"
 
@@ -79,7 +84,10 @@ def open_sat_altim_report(WMO: Union[str, list] = None, embed: Union[str, None]
     urls = []
     urls_dict = {}
     for this_wmo in WMOs:
-        url = "%s/etc/argo-ast9-item13-AltimeterComparison/figures/%i.png" % (api_server, this_wmo)
+        url = "%s/etc/argo-ast9-item13-AltimeterComparison/figures/%i.png" % (
+            api_server,
+            this_wmo,
+        )
         log.debug(url)
         if has_ipython and embed == "list":
             urls.append(Image(url, embed=True))
@@ -90,19 +98,25 @@ def open_sat_altim_report(WMO: Union[str, list] = None, embed: Union[str, None]
     # Prepare rendering:
     if has_ipython and embed is not None:
         if has_ipywidgets and embed == "dropdown":
+
             def f(Float):
                 return Image(url=urls_dict[int(Float)])
+
             return ipywidgets.interact(f, Float=[str(wmo) for wmo in WMOs])
         elif has_ipywidgets and embed == "slide":
+
             def f(Float):
                 return Image(url=urls[Float])
+
             return ipywidgets.interact(
                 f, Float=ipywidgets.IntSlider(min=0, max=len(urls) - 1, step=1)
             )
         elif embed == "list":
             return display(*urls)
         else:
-            raise ValueError("Invalid value for 'embed' argument. Must be: 'dropdown', 'slide', 'list' or None")
+            raise ValueError(
+                "Invalid value for 'embed' argument. Must be: 'dropdown', 'slide', 'list' or None"
+            )
     else:
         return urls_dict
 
@@ -117,7 +131,7 @@ def plot_trajectory(
     with_seaborn: bool = has_seaborn,
     **kwargs
 ):
-    """ Plot trajectories for an Argo index dataframe
+    """Plot trajectories for an Argo index dataframe
 
     This function is called by the Data and Index fetchers method 'plot' with the 'trajectory' option::
 
@@ -161,9 +175,18 @@ def plot_trajectory(
         # Set up the figure and axis:
         defaults = {"figsize": (10, 6), "dpi": 90}
         if with_cartopy:
-            opts = {**defaults, **{'x': 'longitude', 'y': 'latitude', 'hue': 'wmo',
-                                   'traj': True, 'legend': add_legend, 'set_global': set_global,
-                                   'cmap': palette}}
+            opts = {
+                **defaults,
+                **{
+                    "x": "longitude",
+                    "y": "latitude",
+                    "hue": "wmo",
+                    "traj": True,
+                    "legend": add_legend,
+                    "set_global": set_global,
+                    "cmap": palette,
+                },
+            }
             opts = {**opts, **kwargs}
             return scatter_map(df, **opts)
         else:
@@ -238,7 +261,7 @@ def bar_plot(
     with_seaborn: bool = has_seaborn,
     **kwargs
 ):
-    """ Create a bar plot for an Argo index dataframe
+    """Create a bar plot for an Argo index dataframe
 
 
     This is the method called when using the facade fetcher methods ``plot`` with the ``dac`` or ``profiler`` arguments::
@@ -284,24 +307,24 @@ def bar_plot(
 
 
 def scatter_map(  # noqa: C901
-        data: Union[xr.Dataset, pd.core.frame.DataFrame],
-        x: Union[str] = None,
-        y: Union[str] = None,
-        hue: Union[str] = None,
-        markersize: int = 36,
-        markeredgesize: float = 0.5,
-        markeredgecolor: str = 'default',
-        cmap: Union[str] = None,
-        traj: bool = True,
-        traj_axis: Union[str] = None,
-        traj_color: str = 'default',
-        legend: bool = True,
-        legend_title: str = 'default',
-        legend_location: Union[str, int] = 0,
-        cbar: bool = False,
-        cbarlabels: Union[str, list] = 'auto',
-        set_global: bool = False,
-        **kwargs
+    data: Union[xr.Dataset, pd.core.frame.DataFrame],
+    x: Union[str] = None,
+    y: Union[str] = None,
+    hue: Union[str] = None,
+    markersize: int = 36,
+    markeredgesize: float = 0.5,
+    markeredgecolor: str = "default",
+    cmap: Union[str] = None,
+    traj: bool = True,
+    traj_axis: Union[str] = None,
+    traj_color: str = "default",
+    legend: bool = True,
+    legend_title: str = "default",
+    legend_location: Union[str, int] = 0,
+    cbar: bool = False,
+    cbarlabels: Union[str, list] = "auto",
+    set_global: bool = False,
+    **kwargs
 ):
     """Try-to-be generic function to create a scatter plot on a map from **argopy** :class:`xarray.Dataset` or :class:`pandas.DataFrame` data
 
@@ -393,40 +416,48 @@ def scatter_map(  # noqa: C901
 
     if isinstance(data, xr.Dataset) and data.argo._type == "point":
         # data = data.argo.point2profile(drop=True)
-        raise InvalidDatasetStructure('Function only available to a collection of profiles')
+        raise InvalidDatasetStructure(
+            "Function only available to a collection of profiles"
+        )
 
     # Try to guess the default hue, i.e. name for WMO:
     def guess_trajvar(data):
-        for v in ['WMO', 'PLATFORM_NUMBER']:
+        for v in ["WMO", "PLATFORM_NUMBER"]:
             if v.lower() in data:
                 return v.lower()
             if v.upper() in data:
                 return v.upper()
-        raise ValueError("Can't guess the variable name for default hue/trajectory grouping (WMO)")
+        raise ValueError(
+            "Can't guess the variable name for default hue/trajectory grouping (WMO)"
+        )
+
     hue = guess_trajvar(data) if hue is None else hue
 
     if isinstance(data, xr.Dataset) and data.argo.N_LEVELS > 1:
-        warnings.warn("More than one N_LEVELS found in this dataset, scatter_map will use the first level only")
+        warnings.warn(
+            "More than one N_LEVELS found in this dataset, scatter_map will use the first level only"
+        )
         data = data.isel(N_LEVELS=0)
 
     # Try to guess the colormap to use as a function of the 'hue' variable:
     def guess_cmap(hue):
         if hue.lower() in ArgoColors().list_valid_known_colormaps:
             cmap = hue.lower()
-        elif 'qc' in hue.lower():
-            cmap = 'qc'
-        elif 'mode' in hue.lower():
-            cmap = 'data_mode'
-        elif 'status_code' in hue.lower():
-            cmap = 'deployment_status'
+        elif "qc" in hue.lower():
+            cmap = "qc"
+        elif "mode" in hue.lower():
+            cmap = "data_mode"
+        elif "status_code" in hue.lower():
+            cmap = "deployment_status"
         else:
-            cmap = STYLE['palette']
+            cmap = STYLE["palette"]
         return cmap
+
     cmap = guess_cmap(hue) if cmap is None else cmap
 
     # Try to guess the x and y variables:
     def guess_xvar(data):
-        for v in ['lon', 'long', 'longitude', 'x']:
+        for v in ["lon", "long", "longitude", "x"]:
             if v.lower() in data:
                 return v.lower()
             if v.upper() in data:
@@ -434,15 +465,18 @@ def guess_xvar(data):
 
         if isinstance(data, xr.Dataset):
             for v in data.coords:
-                if '_CoordinateAxisType' in data[v].attrs and data[v].attrs['_CoordinateAxisType'] == 'Lon':
+                if (
+                    "_CoordinateAxisType" in data[v].attrs
+                    and data[v].attrs["_CoordinateAxisType"] == "Lon"
+                ):
                     return v
-                if 'axis' in data[v].attrs and data[v].attrs['axis'] == 'X':
+                if "axis" in data[v].attrs and data[v].attrs["axis"] == "X":
                     return v
 
         raise ValueError("Can't guess the variable name for longitudes")
 
     def guess_yvar(data):
-        for v in ['lat', 'lati', 'latitude', 'y']:
+        for v in ["lat", "lati", "latitude", "y"]:
             if v.lower() in data:
                 return v.lower()
             if v.upper() in data:
@@ -450,28 +484,36 @@ def guess_yvar(data):
 
         if isinstance(data, xr.Dataset):
             for v in data.coords:
-                if '_CoordinateAxisType' in data[v].attrs and data[v].attrs['_CoordinateAxisType'] == 'Lat':
+                if (
+                    "_CoordinateAxisType" in data[v].attrs
+                    and data[v].attrs["_CoordinateAxisType"] == "Lat"
+                ):
                     return v
-                if 'axis' in data[v].attrs and data[v].attrs['axis'] == 'Y':
+                if "axis" in data[v].attrs and data[v].attrs["axis"] == "Y":
                     return v
 
         raise ValueError("Can't guess the variable name for latitudes")
+
     x = guess_xvar(data) if x is None else x
     y = guess_yvar(data) if y is None else y
 
     # Adjust legend title:
-    if legend_title == 'default':
+    if legend_title == "default":
         legend_title = str(hue)
 
     # Load Argo colors:
-    nHue = len(data.groupby(hue).first()) if isinstance(data, pd.DataFrame) else len(data.groupby(hue))
+    nHue = (
+        len(data.groupby(hue).first())
+        if isinstance(data, pd.DataFrame)
+        else len(data.groupby(hue))
+    )
     mycolors = ArgoColors(cmap, nHue)
 
     COLORS = mycolors.COLORS
-    if markeredgecolor == 'default':
-        markeredgecolor = COLORS['DARKBLUE']
+    if markeredgecolor == "default":
+        markeredgecolor = COLORS["DARKBLUE"]
 
-    if traj_color == 'default':
+    if traj_color == "default":
         traj_color = markeredgecolor
 
     # Try to guess the trajectory grouping variable, i.e. name for WMO
@@ -482,7 +524,13 @@ def guess_yvar(data):
 
     subplot_kw = {"projection": ccrs.PlateCarree()}
     fig, ax = plt.subplots(**{**defaults, **kwargs}, subplot_kw=subplot_kw)
-    ax.add_feature(land_feature, color=COLORS['BLUE'], edgecolor=COLORS['CYAN'], linewidth=.1, alpha=0.3)
+    ax.add_feature(
+        land_feature,
+        color=COLORS["BLUE"],
+        edgecolor=COLORS["CYAN"],
+        linewidth=0.1,
+        alpha=0.3,
+    )
 
     # vmin = data[hue].min() if vmin == 'auto' else vmin
     # vmax = data[hue].max() if vmax == 'auto' else vmax
@@ -490,48 +538,58 @@ def guess_yvar(data):
     patches = []
     for k, [name, group] in enumerate(data.groupby(hue)):
         if mycolors.registered and name not in mycolors.lookup:
-            log.info("Found '%s' values not available in the '%s' colormap" % (name, mycolors.definition['name']))
+            log.info(
+                "Found '%s' values not available in the '%s' colormap"
+                % (name, mycolors.definition["name"])
+            )
         else:
             scatter_opts = {
-                'color': mycolors.lookup[name] if mycolors.registered else mycolors.cmap(k),
-                'label': "%s: %s" % (name, mycolors.ticklabels[name]) if mycolors.registered else name,
-                'zorder': 10,
-                'sizes': [markersize],
-                'edgecolor': markeredgecolor,
-                'linewidths': markeredgesize,
+                "color": mycolors.lookup[name]
+                if mycolors.registered
+                else mycolors.cmap(k),
+                "label": "%s: %s" % (name, mycolors.ticklabels[name])
+                if mycolors.registered
+                else name,
+                "zorder": 10,
+                "sizes": [markersize],
+                "edgecolor": markeredgecolor,
+                "linewidths": markeredgesize,
             }
             if isinstance(data, pd.DataFrame) and not legend:
-                scatter_opts['legend'] = False  # otherwise Pandas will add a legend even if we set legend=False
-            sc = group.plot.scatter(
-                x=x, y=y,
-                ax=ax,
-                **scatter_opts
-            )
+                scatter_opts[
+                    "legend"
+                ] = False  # otherwise Pandas will add a legend even if we set legend=False
+            sc = group.plot.scatter(x=x, y=y, ax=ax, **scatter_opts)
             patches.append(sc)
 
     if cbar:
-        if cbarlabels == 'auto':
+        if cbarlabels == "auto":
             cbarlabels = None
-        mycolors.cbar(ticklabels=cbarlabels,
-                      ax=ax,
-                      cax=sc,
-                      fraction=0.03, label=legend_title)
+        mycolors.cbar(
+            ticklabels=cbarlabels, ax=ax, cax=sc, fraction=0.03, label=legend_title
+        )
 
     if traj:
         for k, [name, group] in enumerate(data.groupby(traj_axis)):
-            ax.plot(group[x], group[y],
-                    color=traj_color,
-                    linewidth=0.5,
-                    label="_nolegend_",
-                    zorder=2,
-                    )
+            ax.plot(
+                group[x],
+                group[y],
+                color=traj_color,
+                linewidth=0.5,
+                label="_nolegend_",
+                zorder=2,
+            )
 
     if set_global:
         ax.set_global()
 
-    latlongrid(ax, dx="auto", dy="auto",
-               label_style_arg={'color': COLORS['BLUE'], 'fontsize': 10},
-               **{"color": COLORS['BLUE'], "alpha": 0.7})
+    latlongrid(
+        ax,
+        dx="auto",
+        dy="auto",
+        label_style_arg={"color": COLORS["BLUE"], "fontsize": 10},
+        **{"color": COLORS["BLUE"], "alpha": 0.7}
+    )
     ax.get_xaxis().set_visible(False)
     ax.get_yaxis().set_visible(False)
 
@@ -546,54 +604,55 @@ def guess_yvar(data):
         )
 
     for spine in ax.spines.values():
-        spine.set_edgecolor(COLORS['DARKBLUE'])
+        spine.set_edgecolor(COLORS["DARKBLUE"])
 
-    ax.set_title('')
+    ax.set_title("")
 
     return fig, ax
 
 
-def scatter_plot(ds: xr.Dataset,
-                  this_param,
-                  this_x='TIME',
-                  this_y='PRES',
-                  figsize=(18, 6),
-                  cmap=None,
-                  vmin=None,
-                  vmax=None,
-                  s=4,
-                  bgcolor='lightgrey',
-                  ):
+def scatter_plot(
+    ds: xr.Dataset,
+    this_param,
+    this_x="TIME",
+    this_y="PRES",
+    figsize=(18, 6),
+    cmap=None,
+    vmin=None,
+    vmax=None,
+    s=4,
+    bgcolor="lightgrey",
+):
     """A quick-and-dirty parameter scatter plot for one variable"""
     warnUnless(has_mpl, "requires matplotlib installed")
 
     if cmap is None:
-        cmap = mpl.colormaps['gist_ncar']
+        cmap = mpl.colormaps["gist_ncar"]
 
     def get_vlabel(this_ds, this_v):
         attrs = this_ds[this_v].attrs
-        if 'standard_name' in attrs:
-            name = attrs['standard_name']
-        elif 'long_name' in attrs:
-            name = attrs['long_name']
+        if "standard_name" in attrs:
+            name = attrs["standard_name"]
+        elif "long_name" in attrs:
+            name = attrs["long_name"]
         else:
             name = this_v
-        units = attrs['units'] if 'units' in attrs else None
+        units = attrs["units"] if "units" in attrs else None
         return "%s\n[%s]" % (name, units) if units else name
 
     # Read variables for the plot:
     x, y = ds[this_x], ds[this_y]
     if "INTERPOLATED" in this_y:
-        x_bounds, y_bounds = np.meshgrid(x, y, indexing='ij')
+        x_bounds, y_bounds = np.meshgrid(x, y, indexing="ij")
     c = ds[this_param]
 
     #
     fig, ax = plt.subplots(dpi=90, figsize=figsize)
 
-    if vmin == 'attrs':
-        vmin = c.attrs['valid_min'] if 'valid_min' in c.attrs else None
-    if vmax == 'attrs':
-        vmax = c.attrs['valid_max'] if 'valid_max' in c.attrs else None
+    if vmin == "attrs":
+        vmin = c.attrs["valid_min"] if "valid_min" in c.attrs else None
+    if vmax == "attrs":
+        vmax = c.attrs["valid_max"] if "valid_max" in c.attrs else None
     if vmin is None:
         vmin = np.percentile(c, 10)
     if vmax is None:
@@ -605,21 +664,21 @@ def get_vlabel(this_ds, this_v):
         m = ax.scatter(x, y, c=c, cmap=cmap, s=s, vmin=vmin, vmax=vmax)
         ax.set_facecolor(bgcolor)
 
-    cbar = fig.colorbar(m, shrink=0.9, extend='both', ax=ax)
+    cbar = fig.colorbar(m, shrink=0.9, extend="both", ax=ax)
     cbar.ax.set_ylabel(get_vlabel(ds, this_param), rotation=90)
 
     ylim = ax.get_ylim()
-    if 'PRES' in this_y:
+    if "PRES" in this_y:
         ax.invert_yaxis()
         y_bottom, y_top = np.max(ylim), np.min(ylim)
     else:
         y_bottom, y_top = ylim
 
-    if this_x == 'CYCLE_NUMBER':
+    if this_x == "CYCLE_NUMBER":
         ax.set_xlim([np.min(ds[this_x]) - 1, np.max(ds[this_x]) + 1])
-    elif this_x == 'TIME':
+    elif this_x == "TIME":
         ax.set_xlim([np.min(ds[this_x]), np.max(ds[this_x])])
-    if 'PRES' in this_y:
+    if "PRES" in this_y:
         ax.set_ylim([y_bottom, 0])
 
     #
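Note on the scatter_map signature reformatted above: for reference, a usage sketch consistent with how plot_trajectory calls it (the "longitude", "latitude" and "wmo" column names are taken from that call site and assumed to exist in the dataframe):

    fig, ax = scatter_map(
        df,
        x="longitude",
        y="latitude",
        hue="wmo",
        traj=True,
        legend=True,
        set_global=False,
    )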
diff --git a/argopy/plot/utils.py b/argopy/plot/utils.py
index ef1d2665..24561a42 100644
--- a/argopy/plot/utils.py
+++ b/argopy/plot/utils.py
@@ -22,11 +22,11 @@ def _importorskip(modname):
 
 
 if has_mpl:
-    import matplotlib as mpl
-    import matplotlib.pyplot as plt
+    import matplotlib as mpl  # noqa: F401
+    import matplotlib.pyplot as plt  # noqa: F401
     import matplotlib.ticker as mticker
-    import matplotlib.cm as cm
-    import matplotlib.colors as mcolors
+    import matplotlib.cm as cm  # noqa: F401
+    import matplotlib.colors as mcolors  # noqa: F401
 
 
 if has_cartopy:
diff --git a/argopy/related/argo_documentation.py b/argopy/related/argo_documentation.py
index 95e8fc74..bc8a5a4f 100644
--- a/argopy/related/argo_documentation.py
+++ b/argopy/related/argo_documentation.py
@@ -49,7 +49,7 @@ def parse(self, file):
             try:
                 with self.fs.open(file, 'r', encoding="utf-8") as f:
                     TXTlines = f.readlines()
-            except:
+            except:  # noqa: E722
                 with self.fs.open(file, 'r', encoding="latin-1") as f:
                     TXTlines = f.readlines()
 
diff --git a/argopy/stores/argo_index_proto.py b/argopy/stores/argo_index_proto.py
index d62c9dfa..6d7fa312 100644
--- a/argopy/stores/argo_index_proto.py
+++ b/argopy/stores/argo_index_proto.py
@@ -26,7 +26,7 @@
 except ModuleNotFoundError:
     pass
 
-from .argo_index_proto_s3 import search_s3
+# from .argo_index_proto_s3 import search_s3
 
 log = logging.getLogger("argopy.stores.index")
 
@@ -457,7 +457,7 @@ def _write(self, fs, path, obj, fmt="pq"):
         if isinstance(fs, memorystore):
             fs.fs.touch(this_path)  # Fix for https://github.com/euroargodev/argopy/issues/345
             # fs.fs.touch(this_path)  # Fix for https://github.com/euroargodev/argopy/issues/345
-            # This is an f* mistery to me, why do we need 2 calls to trigger file creation FOR REAL ????
+            # This is an f* mystery to me, why do we need 2 calls to trigger file creation FOR REAL ????
             # log.debug("memorystore touched this path before open context: '%s'" % this_path)
         with fs.open(this_path, "wb") as handle:
             write_this[fmt](obj, handle)
diff --git a/argopy/stores/argo_index_proto_s3.py b/argopy/stores/argo_index_proto_s3.py
index 7ce7e778..a9d04015 100644
--- a/argopy/stores/argo_index_proto_s3.py
+++ b/argopy/stores/argo_index_proto_s3.py
@@ -17,6 +17,7 @@
     HAS_PYARROW = True
 except ModuleNotFoundError:
     HAS_PYARROW = False
+
     class pa:
         @property
         def Table(self):
@@ -83,7 +84,7 @@ def __init__(self):
             try:
                 access_key = self.fs._request_signer._credentials.get_frozen_credentials().access_key
                 log.debug("Found AWS Credentials for access_key='%s'" % access_key)
-            except:
+            except:  # noqa: E722
                 pass
         else:
             self.fs = boto3.client('s3', config=Config(signature_version=UNSIGNED))
@@ -160,7 +161,7 @@ def query(self, sql_expression: str) -> str:
                                     "CompressionType": self.CompressionType},
                 OutputSerialization={"CSV": {}},
             )
-        except:
+        except:  # noqa: E722
             # log.debug(boto3.set_stream_logger('botocore', level='DEBUG'))
             raise
 
diff --git a/argopy/stores/filesystems.py b/argopy/stores/filesystems.py
index 8855cec8..482fbcbd 100644
--- a/argopy/stores/filesystems.py
+++ b/argopy/stores/filesystems.py
@@ -259,7 +259,7 @@ def cachepath(self, uri: str, errors: str = "raise"):
             )
 
     def _clear_cache_item(self, uri):
-        """Remove medadata and file for fsspec cache uri"""
+        """Remove metadata and file for fsspec cache uri"""
         fn = os.path.join(self.fs.storage[-1], "cache")
         self.fs.load_cache()  # Read set of stored blocks from file and populate self.cached_files
         cache = self.cached_files[-1]
diff --git a/argopy/utilities.py b/argopy/utilities.py
deleted file mode 100644
index a2f232ff..00000000
--- a/argopy/utilities.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import warnings
-import importlib
-import inspect
-from functools import wraps
-
-warnings.filterwarnings("default", category=DeprecationWarning, module=__name__)
-
-
-def refactored(func1):
-
-    rel = importlib.import_module('argopy.related')
-    utils = importlib.import_module('argopy.utils')
-    in_related = hasattr(rel, func1.__name__)
-    func2 = getattr(rel, func1.__name__) if in_related else getattr(utils, func1.__name__)
-
-    func1_type = 'function'
-    if inspect.isclass(func1):
-        func1_type = 'class'
-
-    func2_loc = 'utils'
-    if in_related:
-        func2_loc = 'related'
-
-    msg = "The 'argopy.utilities.{name}' {ftype} has moved to 'argopy.{where}.{name}'. \
-You're seeing this message because you called '{name}' imported from 'argopy.utilities'. \
-Please update your script to import '{name}' from 'argopy.{where}'. \
-After 0.1.15, importing 'utilities' will raise an error."
-
-    @wraps(func1)
-    def decorator(*args, **kwargs):
-        # warnings.simplefilter('always', DeprecationWarning)
-        warnings.warn(
-            msg.format(name=func1.__name__, ftype=func1_type, where=func2_loc),
-            category=DeprecationWarning,
-            stacklevel=2
-        )
-        # warnings.simplefilter('default', DeprecationWarning)
-        return func2(*args, **kwargs)
-
-    return decorator
-
-# Argo related dataset and Meta-data fetchers
-
-@refactored
-class TopoFetcher:
-    pass
-
-@refactored
-class ArgoDocs:
-    pass
-
-@refactored
-class ArgoNVSReferenceTables:
-    pass
-
-@refactored
-class OceanOPSDeployments:
-    pass
-
-@refactored
-def get_coriolis_profile_id(*args, **kwargs):
-    pass
-
-@refactored
-def get_ea_profile_page(*args, **kwargs):
-    pass
-
-@refactored
-def load_dict(*args, **kwargs):
-    pass
-
-@refactored
-def mapp_dict(*args, **kwargs):
-    pass
-
-# Checkers
-@refactored
-def is_box(*args, **kwargs):
-    pass
-
-@refactored
-def is_indexbox(*args, **kwargs):
-    pass
-
-@refactored
-def is_list_of_strings(*args, **kwargs):
-    pass
-
-@refactored
-def is_list_of_dicts(*args, **kwargs):
-    pass
-
-@refactored
-def is_list_of_datasets(*args, **kwargs):
-    pass
-
-@refactored
-def is_list_equal(*args, **kwargs):
-    pass
-
-@refactored
-def check_wmo(*args, **kwargs):
-    pass
-
-@refactored
-def is_wmo(*args, **kwargs):
-    pass
-
-@refactored
-def check_cyc(*args, **kwargs):
-    pass
-
-@refactored
-def is_cyc(*args, **kwargs):
-    pass
-
-@refactored
-def check_index_cols(*args, **kwargs):
-    pass
-
-@refactored
-def check_gdac_path(*args, **kwargs):
-    pass
-
-@refactored
-def isconnected(*args, **kwargs):
-    pass
-
-@refactored
-def isalive(*args, **kwargs):
-    pass
-
-@refactored
-def isAPIconnected(*args, **kwargs):
-    pass
-
-@refactored
-def erddap_ds_exists(*args, **kwargs):
-    pass
-
-@refactored
-def urlhaskeyword(*args, **kwargs):
-    pass
-
-
-# Data type casting:
-
-@refactored
-def to_list(*args, **kwargs):
-    pass
-
-@refactored
-def cast_Argo_variable_type(*args, **kwargs):
-    pass
-
-from .utils.casting import DATA_TYPES
-
-# Decorators
-
-@refactored
-def deprecated(*args, **kwargs):
-    pass
-
-@refactored
-def doc_inherit(*args, **kwargs):
-    pass
-
-# Lists:
-
-@refactored
-def list_available_data_src(*args, **kwargs):
-    pass
-
-@refactored
-def list_available_index_src(*args, **kwargs):
-    pass
-
-@refactored
-def list_standard_variables(*args, **kwargs):
-    pass
-
-@refactored
-def list_multiprofile_file_variables(*args, **kwargs):
-    pass
-
-# Cache management:
-@refactored
-def clear_cache(*args, **kwargs):
-    pass
-
-@refactored
-def lscache(*args, **kwargs):
-    pass
-
-# Computation and performances:
-@refactored
-class Chunker:
-    pass
-
-# Accessories classes (specific objects):
-@refactored
-class float_wmo:
-    pass
-
-@refactored
-class Registry:
-    pass
-
-# Locals (environments, versions, systems):
-@refactored
-def get_sys_info(*args, **kwargs):
-    pass
-
-@refactored
-def netcdf_and_hdf5_versions(*args, **kwargs):
-    pass
-
-@refactored
-def show_versions(*args, **kwargs):
-    pass
-
-@refactored
-def show_options(*args, **kwargs):
-    pass
-
-@refactored
-def modified_environ(*args, **kwargs):
-    pass
-
-
-# Monitors
-@refactored
-def badge(*args, **kwargs):
-    pass
-
-@refactored
-class fetch_status:
-    pass
-
-@refactored
-class monitor_status:
-    pass
-
-# Geo (space/time data utilities)
-@refactored
-def toYearFraction(*args, **kwargs):
-    pass
-
-@refactored
-def YearFraction_to_datetime(*args, **kwargs):
-    pass
-
-@refactored
-def wrap_longitude(*args, **kwargs):
-    pass
-
-@refactored
-def wmo2box(*args, **kwargs):
-    pass
-
-# Computation with datasets:
-@refactored
-def linear_interpolation_remap(*args, **kwargs):
-    pass
-
-@refactored
-def groupby_remap(*args, **kwargs):
-    pass
-
-# Manipulate datasets:
-@refactored
-def drop_variables_not_in_all_datasets(*args, **kwargs):
-    pass
-
-@refactored
-def fill_variables_not_in_all_datasets(*args, **kwargs):
-    pass
-
-# Formatters:
-@refactored
-def format_oneline(*args, **kwargs):
-    pass
-
-@refactored
-def argo_split_path(*args, **kwargs):
-    pass
-
-
-# Loggers
-@refactored
-def warnUnless(*args, **kwargs):
-    pass
-
-@refactored
-def log_argopy_callerstack(*args, **kwargs):
-    pass
-
-if __name__ == "argopy.utilities":
-    warnings.warn(
-        "The 'argopy.utilities' has moved to 'argopy.utils'. After 0.1.15, importing 'utilities' "
-        "will raise an error. Please update your script.",
-        category=DeprecationWarning,
-        stacklevel=2,
-    )
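Note on the deletion above: with this shim removed, "import argopy.utilities" now raises ModuleNotFoundError instead of emitting a DeprecationWarning. A sketch of the migration the removed warning pointed to (symbols shown are ones the shim forwarded; whether a given name lives in argopy.utils or argopy.related follows the lists above):

    # before (deprecated since 0.1.13, removed here):
    from argopy.utilities import check_wmo, TopoFetcher
    # after:
    from argopy.utils import check_wmo
    from argopy.related import TopoFetcher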
diff --git a/argopy/utils/__init__.py b/argopy/utils/__init__.py
index 5c597e8f..5afa936d 100644
--- a/argopy/utils/__init__.py
+++ b/argopy/utils/__init__.py
@@ -1,4 +1,4 @@
-from .checkers import (
+from .checkers import (  # noqa: F401
     is_box,
     is_indexbox,
     is_list_of_strings,
@@ -30,15 +30,21 @@
 from .monitored_threadpool import MyThreadPoolExecutor as MonitoredThreadPoolExecutor
 from .chunking import Chunker
 from .accessories import Registry, float_wmo
-from .locals import (
+from .locals import (  # noqa: F401
     show_versions,
     show_options,
     modified_environ,
-    get_sys_info,   # noqa: F401
+    get_sys_info,  # noqa: F401
     netcdf_and_hdf5_versions,  # noqa: F401
 )
 from .monitors import monitor_status, badge, fetch_status  # noqa: F401
-from .geo import wmo2box, wrap_longitude, conv_lon, toYearFraction, YearFraction_to_datetime
+from .geo import (
+    wmo2box,
+    wrap_longitude,
+    conv_lon,
+    toYearFraction,
+    YearFraction_to_datetime,
+)
 from .compute import linear_interpolation_remap, groupby_remap
 from .transform import (
     fill_variables_not_in_all_datasets,
diff --git a/argopy/utils/casting.py b/argopy/utils/casting.py
index 00cf60e0..1119debb 100644
--- a/argopy/utils/casting.py
+++ b/argopy/utils/casting.py
@@ -20,201 +20,6 @@
     DATA_TYPES = json.load(f)
 
 
-@deprecated("The 'cast_types' utility is deprecated since 0.1.13. It's been replaced by 'cast_Argo_variable_type'. Calling it will raise an error after argopy 0.1.15")
-def cast_types(ds):  # noqa: C901
-    """Make sure variables are of the appropriate types according to Argo
-
-    #todo: This is hard coded, but should be retrieved from an API somewhere.
-    Should be able to handle all possible variables encountered in the Argo dataset.
-
-    Parameter
-    ---------
-    :class:`xarray.DataSet`
-
-    Returns
-    -------
-    :class:`xarray.DataSet`
-    """
-
-    list_str = [
-        "PLATFORM_NUMBER",
-        "DATA_MODE",
-        "DIRECTION",
-        "DATA_CENTRE",
-        "DATA_TYPE",
-        "FORMAT_VERSION",
-        "HANDBOOK_VERSION",
-        "PROJECT_NAME",
-        "PI_NAME",
-        "STATION_PARAMETERS",
-        "DATA_CENTER",
-        "DC_REFERENCE",
-        "DATA_STATE_INDICATOR",
-        "PLATFORM_TYPE",
-        "FIRMWARE_VERSION",
-        "POSITIONING_SYSTEM",
-        "PROFILE_PRES_QC",
-        "PROFILE_PSAL_QC",
-        "PROFILE_TEMP_QC",
-        "PARAMETER",
-        "SCIENTIFIC_CALIB_EQUATION",
-        "SCIENTIFIC_CALIB_COEFFICIENT",
-        "SCIENTIFIC_CALIB_COMMENT",
-        "HISTORY_INSTITUTION",
-        "HISTORY_STEP",
-        "HISTORY_SOFTWARE",
-        "HISTORY_SOFTWARE_RELEASE",
-        "HISTORY_REFERENCE",
-        "HISTORY_QCTEST",
-        "HISTORY_ACTION",
-        "HISTORY_PARAMETER",
-        "VERTICAL_SAMPLING_SCHEME",
-        "FLOAT_SERIAL_NO",
-        "SOURCE",
-        "EXPOCODE",
-        "QCLEVEL",
-    ]
-    list_int = [
-        "PLATFORM_NUMBER",
-        "WMO_INST_TYPE",
-        "WMO_INST_TYPE",
-        "CYCLE_NUMBER",
-        "CONFIG_MISSION_NUMBER",
-    ]
-    list_datetime = [
-        "REFERENCE_DATE_TIME",
-        "DATE_CREATION",
-        "DATE_UPDATE",
-        "JULD",
-        "JULD_LOCATION",
-        "SCIENTIFIC_CALIB_DATE",
-        "HISTORY_DATE",
-        "TIME",
-    ]
-
-    def fix_weird_bytes(x):
-        x = x.replace(b"\xb1", b"+/-")
-        return x
-
-    fix_weird_bytes = np.vectorize(fix_weird_bytes)
-
-    def cast_this(da, type):
-        """Low-level casting of DataArray values"""
-        try:
-            da.values = da.values.astype(type)
-            da.attrs["casted"] = 1
-        except Exception:
-            msg = (
-                "Oops! %s occurred. Fail to cast <%s> into %s for: %s. Encountered unique values: %s"
-                % (sys.exc_info()[0], str(da.dtype), type, da.name, str(np.unique(da)))
-            )
-            log.debug(msg)
-        return da
-
-    def cast_this_da(da):
-        """Cast any DataArray"""
-        v = da.name
-        da.attrs["casted"] = 0
-        if v in list_str and da.dtype == "O":  # Object
-            if v in ["SCIENTIFIC_CALIB_COEFFICIENT"]:
-                da.values = fix_weird_bytes(da.values)
-            da = cast_this(da, str)
-
-        if v in list_int:  # and da.dtype == 'O':  # Object
-            da = cast_this(da, np.int32)
-
-        if v in list_datetime and da.dtype == "O":  # Object
-            if (
-                "conventions" in da.attrs
-                and da.attrs["conventions"] == "YYYYMMDDHHMISS"
-            ):
-                if da.size != 0:
-                    if len(da.dims) <= 1:
-                        val = da.astype(str).values.astype("U14")
-                        # This should not happen, but still ! That's real world data
-                        val[val == "              "] = "nan"
-                        da.values = pd.to_datetime(val, format="%Y%m%d%H%M%S")
-                    else:
-                        s = da.stack(dummy_index=da.dims)
-                        val = s.astype(str).values.astype("U14")
-                        # This should not happen, but still ! That's real world data
-                        val[val == ""] = "nan"
-                        val[val == "              "] = "nan"
-                        #
-                        s.values = pd.to_datetime(val, format="%Y%m%d%H%M%S")
-                        da.values = s.unstack("dummy_index")
-                    da = cast_this(da, "datetime64[s]")
-                else:
-                    da = cast_this(da, "datetime64[s]")
-
-            elif v == "SCIENTIFIC_CALIB_DATE":
-                da = cast_this(da, str)
-                s = da.stack(dummy_index=da.dims)
-                s.values = pd.to_datetime(s.values, format="%Y%m%d%H%M%S")
-                da.values = (s.unstack("dummy_index")).values
-                da = cast_this(da, "datetime64[s]")
-
-        if "QC" in v and "PROFILE" not in v and "QCTEST" not in v:
-            if da.dtype == "O":  # convert object to string
-                da = cast_this(da, str)
-
-            # Address weird string values:
-            # (replace missing or nan values by a '0' that will be cast as an integer later
-
-            if da.dtype == "<U3":  # string, len 3 because of a 'nan' somewhere
-                ii = (
-                    da == "   "
-                )  # This should not happen, but still ! That's real world data
-                da = xr.where(ii, "0", da)
-
-                ii = (
-                    da == "nan"
-                )  # This should not happen, but still ! That's real world data
-                da = xr.where(ii, "0", da)
-
-                # Get back to regular U1 string
-                da = cast_this(da, np.dtype("U1"))
-
-            if da.dtype == "<U1":  # string
-                ii = (
-                    da == ""
-                )  # This should not happen, but still ! That's real world data
-                da = xr.where(ii, "0", da)
-
-                ii = (
-                    da == " "
-                )  # This should not happen, but still ! That's real world data
-                da = xr.where(ii, "0", da)
-
-                ii = (
-                    da == "n"
-                )  # This should not happen, but still ! That's real world data
-                da = xr.where(ii, "0", da)
-
-            # finally convert QC strings to integers:
-            da = cast_this(da, np.int32)
-
-        if da.dtype == "O":
-            # By default, try to cast as float:
-            da = cast_this(da, np.float32)
-
-        if da.dtype != "O":
-            da.attrs["casted"] = 1
-
-        return da
-
-    for v in ds.variables:
-        try:
-            ds[v] = cast_this_da(ds[v])
-        except Exception:
-            print("Oops!", sys.exc_info()[0], "occurred.")
-            print("Fail to cast: %s " % v)
-            print("Encountered unique values:", np.unique(ds[v]))
-            raise
-
-    return ds
-
-
 def cast_Argo_variable_type(ds, overwrite=True):
     """Ensure that all dataset variables are of the appropriate types according to Argo references
 
diff --git a/argopy/utils/checkers.py b/argopy/utils/checkers.py
index 3775b09e..9bc5b778 100644
--- a/argopy/utils/checkers.py
+++ b/argopy/utils/checkers.py
@@ -698,4 +698,4 @@ def has_aws_credentials():
         client = boto3.client('s3')
         return client._request_signer._credentials is not None
     else:
-        raise Exception("boto3 is not available !")
\ No newline at end of file
+        raise Exception("boto3 is not available !")
diff --git a/argopy/utils/format.py b/argopy/utils/format.py
index ae72701d..c2845a7a 100644
--- a/argopy/utils/format.py
+++ b/argopy/utils/format.py
@@ -209,24 +209,33 @@ def erddapuri2fetchobj(uri: str) -> dict:
     """Given an Ifremer ERDDAP URI, return a dictionary with BOX or WMO or (WMO, CYC) fetcher arguments"""
     params = parse_qs(uri)
     result = {}
-    if 'longitude>' in params.keys():
+    if "longitude>" in params.keys():
         # Recreate the box definition:
-        box = [float(params['longitude>'][0]), float(params['longitude<'][0]),
-               float(params['latitude>'][0]), float(params['latitude<'][0]),
-               float(params['pres>'][0]), float(params['pres<'][0])]
+        box = [
+            float(params["longitude>"][0]),
+            float(params["longitude<"][0]),
+            float(params["latitude>"][0]),
+            float(params["latitude<"][0]),
+            float(params["pres>"][0]),
+            float(params["pres<"][0]),
+        ]
         if "time>" in params.keys():
-            box.append(pd.to_datetime(float(params['time>'][0]), unit='s').strftime("%Y-%m-%d"))
-            box.append(pd.to_datetime(float(params['time<'][0]), unit='s').strftime("%Y-%m-%d"))
-        result['box'] = box
-    elif 'platform_number' in params:
-        wmo = params['platform_number'][0].replace("~","").replace("\"","").split("|")
+            box.append(
+                pd.to_datetime(float(params["time>"][0]), unit="s").strftime("%Y-%m-%d")
+            )
+            box.append(
+                pd.to_datetime(float(params["time<"][0]), unit="s").strftime("%Y-%m-%d")
+            )
+        result["box"] = box
+    elif "platform_number" in params:
+        wmo = params["platform_number"][0].replace("~", "").replace('"', "").split("|")
         wmo = check_wmo(wmo)
-        result['wmo'] = wmo
-        if 'cycle_number' in params:
-            cyc = params['cycle_number'][0].replace("~","").replace("\"","").split("|")
+        result["wmo"] = wmo
+        if "cycle_number" in params:
+            cyc = params["cycle_number"][0].replace("~", "").replace('"', "").split("|")
             cyc = check_cyc(cyc)
-            result['cyc'] = cyc
-    if len(result.keys())==0:
+            result["cyc"] = cyc
+    if len(result.keys()) == 0:
         raise ValueError("This is not a typical Argo Ifremer Erddap uri")
     else:
         return result
@@ -240,25 +249,27 @@ def _is_url(self, url):
         return parsed.scheme and parsed.netloc
 
     def __init__(self, obj):
-        if hasattr(obj, 'BOX'):
+        if hasattr(obj, "BOX"):
             self.BOX = obj.BOX
-        elif hasattr(obj, 'WMO'):
+        elif hasattr(obj, "WMO"):
             self.WMO = obj.WMO
-            if hasattr(obj, 'CYC'):
+            if hasattr(obj, "CYC"):
                 self.CYC = obj.CYC
         elif self._is_url(obj) and "/tabledap/" in obj:
             obj = erddapuri2fetchobj(obj)
-            if 'box' in obj.keys():
-                self.BOX = obj['box']
-            elif 'wmo' in obj.keys():
-                self.WMO = obj['wmo']
-                if 'cyc' in obj.keys():
-                    self.CYC = obj['cyc']
+            if "box" in obj.keys():
+                self.BOX = obj["box"]
+            elif "wmo" in obj.keys():
+                self.WMO = obj["wmo"]
+                if "cyc" in obj.keys():
+                    self.CYC = obj["cyc"]
         else:
-            raise ValueError("This class is only available with Erddap uri string requests or an ArgoDataFetcherProto instance")
+            raise ValueError(
+                "This class is only available with Erddap uri string requests or an ArgoDataFetcherProto instance"
+            )
 
     def _format(self, x, typ: str) -> str:
-        """ string formatting helper """
+        """string formatting helper"""
         if typ == "lon":
             if x < 0:
                 x = 360.0 + x
@@ -276,7 +287,7 @@ def __repr__(self):
 
     @property
     def cname(self) -> str:
-        """ Fetcher one line string definition helper """
+        """Fetcher one line string definition helper"""
         cname = "?"
 
         if hasattr(self, "BOX"):
@@ -325,4 +336,4 @@ def cname(self) -> str:
             if hasattr(self, "dataset_id"):
                 cname = self.dataset_id + ";" + cname
 
-        return cname
\ No newline at end of file
+        return cname
diff --git a/argopy/utils/locals.py b/argopy/utils/locals.py
index dccf3b83..737373f3 100644
--- a/argopy/utils/locals.py
+++ b/argopy/utils/locals.py
@@ -8,9 +8,21 @@
 from importlib.metadata import version
 import contextlib
 import copy
+import shutil
+import json
 from ..options import OPTIONS
 
 
+PIP_INSTALLED = {}
+try:
+    # Build a {package name: version} mapping from the `pip list` inventory:
+    reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'list', '--format', 'json'])
+    reqs = json.loads(reqs.decode())
+    for mod in reqs:
+        PIP_INSTALLED[mod['name']] = mod['version']
+except Exception:
+    pass
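+# Illustrative only: PIP_INSTALLED may end up looking like {"pip": "24.2", "pytest-reportlog": "0.4.0"},
+# or remain empty if `pip list` cannot be run from this interpreter.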
+
+
 def get_sys_info():
     """Returns system information as a dict"""
 
@@ -79,6 +91,25 @@ def netcdf_and_hdf5_versions():
     return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
 
 
+def cli_version(cli_name):
+    """Return the version reported by a command line tool, or '-' if it cannot be determined"""
+    try:
+        out = subprocess.run([cli_name, '--version'], capture_output=True)
+        return out.stdout.decode().strip("\n").replace(cli_name, '').strip()
+    except Exception:
+        if shutil.which(cli_name):
+            return "- # installed"
+        else:
+            return "-"
+
+
+def pip_version(pip_name):
+    """Return the pip-installed version of a package, trying '-'/'_' name variants, or '-' if not found"""
+    ver = '-'  # note: avoid shadowing `version` imported from importlib.metadata
+    for name in [pip_name, pip_name.replace("_", "-"), pip_name.replace("-", "_")]:
+        if name in PIP_INSTALLED:
+            ver = PIP_INSTALLED[name]
+    return ver
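+
+
+# Usage sketch for the two helpers above (illustrative values only; actual results depend on the local environment):
+#   cli_version("codespell")        -> e.g. "2.3.0" if the tool is on the PATH, "-" otherwise
+#   pip_version("pytest_reportlog") -> e.g. "0.4.0" if the package is pip-installed, "-" otherwise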
+
+
 def get_version(module_name):
     ver = '-'
     try:
@@ -87,10 +118,19 @@ def get_version(module_name):
         try:
             ver = version(module_name)
         except importlib.metadata.PackageNotFoundError:
-            pass
+            try:
+                # Fall back on the `pip list` inventory, then on the command line tool itself:
+                ver = pip_version(module_name)
+                if ver == '-':
+                    ver = cli_version(module_name)
+            except Exception:
+                pass
+    if all(v == '0' for v in ver.split(".")):  # treat an all-zeros version (e.g. '0.0.0') as not found
+        ver = '-'
     return ver
 
 
 def show_versions(file=sys.stdout, conda=False):  # noqa: C901
     """Print the versions of argopy and its dependencies
 
@@ -112,7 +152,6 @@ def show_versions(file=sys.stdout, conda=False):  # noqa: C901
         "core": sorted(
             [
                 ("argopy", get_version),
-
                 ("xarray", get_version),
                 ("scipy", get_version),
                 ("netCDF4", get_version),
@@ -164,9 +203,8 @@ def show_versions(file=sys.stdout, conda=False):  # noqa: C901
                 ("bottleneck", get_version),
                 ("cftime", get_version),
                 ("cfgrib", get_version),
-                ("conda", get_version),
+                ("codespell", cli_version),
                 ("flake8", get_version),
-                ("nc_time_axis", get_version),
                 ("numpy", get_version),  # will come with xarray and pandas
                 ("pandas", get_version),  # will come with xarray
                 ("pip", get_version),
@@ -178,9 +216,11 @@ def show_versions(file=sys.stdout, conda=False):  # noqa: C901
                 ("sphinx", get_version),
             ]
         ),
-        'pip': sorted([
-            ("pytest-reportlog", get_version),
-        ])
+        "pip": sorted(
+            [
+                ("pytest-reportlog", get_version),
+            ]
+        ),
     }
 
     DEPS_blob = {}
@@ -189,18 +229,10 @@ def show_versions(file=sys.stdout, conda=False):  # noqa: C901
         deps_blob = list()
         for modname, ver_f in deps:
             try:
-                if modname in sys.modules:
-                    mod = sys.modules[modname]
-                else:
-                    mod = importlib.import_module(modname)
+                ver = ver_f(modname)
+                deps_blob.append((modname, ver))
             except Exception:
-                deps_blob.append((modname, "-"))
-            else:
-                try:
-                    ver = ver_f(modname)
-                    deps_blob.append((modname, ver))
-                except Exception:
-                    deps_blob.append((modname, "installed"))
+                deps_blob.append((modname, "installed"))
         DEPS_blob[level] = deps_blob
 
     print("\nSYSTEM", file=file)
diff --git a/argopy/xarray.py b/argopy/xarray.py
index ccd2e594..096e05b8 100644
--- a/argopy/xarray.py
+++ b/argopy/xarray.py
@@ -34,33 +34,33 @@
 class ArgoAccessor:
     """Class registered under scope ``argo`` to access a :class:`xarray.Dataset` object.
 
-        Examples
-        --------
-        - Ensure all variables are of the Argo required dtype with:
-        >>> ds.argo.cast_types()
-        - Convert a collection of points into a collection of profiles:
-        >>> ds.argo.point2profile()
-        - Convert a collection of profiles to a collection of points:
-        >>> ds.argo.profile2point()
-        - Filter measurements according to data mode:
-        >>> ds.argo.filter_data_mode()
-        - Filter measurements according to QC flag values:
-        >>> ds.argo.filter_qc(QC_list=[1, 2], QC_fields='all')
-        - Filter variables according OWC salinity calibration requirements:
-        >>> ds.argo.filter_scalib_pres(force='default')
-        - Interpolate measurements on pressure levels:
-        >>> ds.argo.inter_std_levels(std_lev=[10., 500., 1000.])
-        - Group and reduce measurements by pressure bins:
-        >>> ds.argo.groupby_pressure_bins(bins=[0, 200., 500., 1000.])
-        - Compute and add additional variables to the dataset:
-        >>> ds.argo.teos10(vlist='PV')
-        - Preprocess data for OWC salinity calibration:
-        >>> ds.argo.create_float_source("output_folder")
-
-     """
+    Examples
+    --------
+    - Ensure all variables are of the Argo required dtype with:
+    >>> ds.argo.cast_types()
+    - Convert a collection of points into a collection of profiles:
+    >>> ds.argo.point2profile()
+    - Convert a collection of profiles to a collection of points:
+    >>> ds.argo.profile2point()
+    - Filter measurements according to data mode:
+    >>> ds.argo.filter_data_mode()
+    - Filter measurements according to QC flag values:
+    >>> ds.argo.filter_qc(QC_list=[1, 2], QC_fields='all')
+    - Filter variables according to OWC salinity calibration requirements:
+    >>> ds.argo.filter_scalib_pres(force='default')
+    - Interpolate measurements on pressure levels:
+    >>> ds.argo.interp_std_levels(std_lev=[10., 500., 1000.])
+    - Group and reduce measurements by pressure bins:
+    >>> ds.argo.groupby_pressure_bins(bins=[0, 200., 500., 1000.])
+    - Compute and add additional variables to the dataset:
+    >>> ds.argo.teos10(vlist='PV')
+    - Preprocess data for OWC salinity calibration:
+    >>> ds.argo.create_float_source("output_folder")
+
+    """
 
     def __init__(self, xarray_obj):
-        """ Init """
+        """Init"""
         self._obj = xarray_obj
         self._added = list()  # Will record all new variables added by argo
         # self._register = collections.OrderedDict() # Will register mutable instances of sub-modules like 'plot'
@@ -85,7 +85,9 @@ def __init__(self, xarray_obj):
         elif "PRES" in self._vars:
             self._mode = "standard"
         else:
-            raise InvalidDatasetStructure("Argo dataset structure not recognised (no PRES nor PRES_ADJUSTED")
+            raise InvalidDatasetStructure(
+                "Argo dataset structure not recognised (no PRES nor PRES_ADJUSTED"
+            )
 
     def __repr__(self):
         # import xarray.core.formatting as xrf
@@ -176,7 +178,7 @@ def _add_history(self, txt):
             self._obj.attrs["history"] = txt
 
     def _where(self, cond, other=xr.core.dtypes.NA, drop: bool = False):
-        """ where that preserve dtypes of Argo fields
+        """where that preserve dtypes of Argo fields
 
         Parameters
         ----------
@@ -198,7 +200,7 @@ def _where(self, cond, other=xr.core.dtypes.NA, drop: bool = False):
         return this
 
     def cast_types(self, **kwargs):  # noqa: C901
-        """ Make sure variables are of the appropriate types according to Argo """
+        """Make sure variables are of the appropriate types according to Argo"""
         ds = self._obj
         return cast_Argo_variable_type(ds, **kwargs)
 
@@ -206,14 +208,14 @@ def cast_types(self, **kwargs):  # noqa: C901
     def _dummy_argo_uid(self):
         if self._type == "point":
             return xr.DataArray(
-                                self.uid(
-                                        self._obj["PLATFORM_NUMBER"].values,
-                                        self._obj["CYCLE_NUMBER"].values,
-                                        self._obj["DIRECTION"].values,
-                                ),
-                                dims="N_POINTS",
-                                coords={"N_POINTS": self._obj["N_POINTS"]},
-                                name="dummy_argo_uid",
+                self.uid(
+                    self._obj["PLATFORM_NUMBER"].values,
+                    self._obj["CYCLE_NUMBER"].values,
+                    self._obj["DIRECTION"].values,
+                ),
+                dims="N_POINTS",
+                coords={"N_POINTS": self._obj["N_POINTS"]},
+                name="dummy_argo_uid",
             )
         else:
             raise InvalidDatasetStructure(
@@ -221,7 +223,7 @@ def _dummy_argo_uid(self):
             )
 
     def uid(self, wmo_or_uid, cyc=None, direction=None):
-        """ UID encoder/decoder
+        """UID encoder/decoder
 
         Parameters
         ----------
@@ -242,21 +244,22 @@ def uid(self, wmo_or_uid, cyc=None, direction=None):
         >>> wmo, cyc, drc = uid(unique_float_profile_id) # Decode
 
         """
+
         def encode_direction(x):
-            y = np.where(x == 'A', 1, x.astype(object))
-            y = np.where(y == 'D', -1, y.astype(object))
+            y = np.where(x == "A", 1, x.astype(object))
+            y = np.where(y == "D", -1, y.astype(object))
             try:
                 return y.astype(int)
             except ValueError:
-                raise ValueError('x has un-expected values')
+                raise ValueError("x has un-expected values")
 
         def decode_direction(x):
             x = np.array(x)
             if np.any(np.unique(np.abs(x)) != 1):
-                raise ValueError('x has un-expected values')
-            y = np.where(x == 1, 'A', x)
-            y = np.where(y == '-1', 'D', y)
-            return y.astype('<U1')
+                raise ValueError("x has un-expected values")
+            y = np.where(x == 1, "A", x)
+            y = np.where(y == "-1", "D", y)
+            return y.astype("<U1")
 
         offset = 1e5
 
@@ -286,18 +289,27 @@ def index(self):
         this = self._obj
         dummy_argo_uid = self._dummy_argo_uid
 
-        idx = xr.DataArray(
-            this["TIME"],
-            dims="N_POINTS",
-            coords={"N_POINTS": this["N_POINTS"]},
-        ).groupby(dummy_argo_uid).max().to_dataset()
-
-        for v in ["PLATFORM_NUMBER", "CYCLE_NUMBER", "LONGITUDE", "LATITUDE"]:
-            idx[v] = xr.DataArray(
-                this[v],
+        idx = (
+            xr.DataArray(
+                this["TIME"],
                 dims="N_POINTS",
                 coords={"N_POINTS": this["N_POINTS"]},
-            ).groupby(dummy_argo_uid).max()
+            )
+            .groupby(dummy_argo_uid)
+            .max()
+            .to_dataset()
+        )
+
+        for v in ["PLATFORM_NUMBER", "CYCLE_NUMBER", "LONGITUDE", "LATITUDE"]:
+            idx[v] = (
+                xr.DataArray(
+                    this[v],
+                    dims="N_POINTS",
+                    coords={"N_POINTS": this["N_POINTS"]},
+                )
+                .groupby(dummy_argo_uid)
+                .max()
+            )
 
         df = idx.to_dataframe()
         df = (
@@ -320,23 +332,39 @@ def index(self):
     def domain(self):
         """Space/time domain of the dataset
 
-            This is different from a usual argopy ``box`` because dates are in :class:`numpy.datetime64` format.
+        This is different from a usual argopy ``box`` because dates are in :class:`numpy.datetime64` format.
         """
         this_ds = self._obj
-        if 'PRES_ADJUSTED' in this_ds.data_vars:
-            Pmin = np.nanmin((np.min(this_ds['PRES'].values), np.min(this_ds['PRES_ADJUSTED'].values)))
-            Pmax = np.nanmax((np.max(this_ds['PRES'].values), np.max(this_ds['PRES_ADJUSTED'].values)))
+        if "PRES_ADJUSTED" in this_ds.data_vars:
+            Pmin = np.nanmin(
+                (
+                    np.min(this_ds["PRES"].values),
+                    np.min(this_ds["PRES_ADJUSTED"].values),
+                )
+            )
+            Pmax = np.nanmax(
+                (
+                    np.max(this_ds["PRES"].values),
+                    np.max(this_ds["PRES_ADJUSTED"].values),
+                )
+            )
         else:
-            Pmin = np.min(this_ds['PRES'].values)
-            Pmax = np.max(this_ds['PRES'].values)
-
-        return [np.min(this_ds['LONGITUDE'].values), np.max(this_ds['LONGITUDE'].values),
-                np.min(this_ds['LATITUDE'].values), np.max(this_ds['LATITUDE'].values),
-                Pmin, Pmax,
-                np.min(this_ds['TIME'].values), np.max(this_ds['TIME'].values)]
+            Pmin = np.min(this_ds["PRES"].values)
+            Pmax = np.max(this_ds["PRES"].values)
+
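+        # Return order: [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, time_min, time_max]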
+        return [
+            np.min(this_ds["LONGITUDE"].values),
+            np.max(this_ds["LONGITUDE"].values),
+            np.min(this_ds["LATITUDE"].values),
+            np.max(this_ds["LATITUDE"].values),
+            Pmin,
+            Pmax,
+            np.min(this_ds["TIME"].values),
+            np.max(this_ds["TIME"].values),
+        ]
 
     def point2profile(self, drop: bool = False):  # noqa: C901
-        """ Transform a collection of points into a collection of profiles
+        """Transform a collection of points into a collection of profiles
 
         A "point" is a single location for measurements in space and time
         A "point" is localised as unique UID based on WMO, CYCLE_NUMBER and DIRECTION variable values.
@@ -358,7 +386,7 @@ def point2profile(self, drop: bool = False):  # noqa: C901
         this = self._obj  # Should not be modified
 
         def fillvalue(da):
-            """ Return fillvalue for a dataarray """
+            """Return fillvalue for a dataarray"""
             # https://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.kind.html#numpy.dtype.kind
             if da.dtype.kind in ["U"]:
                 fillvalue = " "
@@ -385,11 +413,16 @@ def fillvalue(da):
             .max()
             .values
         )
-        log.debug("point2profile: New dataset should be [N_PROF=%i, N_LEVELS=%i]" % (N_PROF, N_LEVELS))
+        log.debug(
+            "point2profile: New dataset should be [N_PROF=%i, N_LEVELS=%i]"
+            % (N_PROF, N_LEVELS)
+        )
         assert N_PROF * N_LEVELS >= len(this["N_POINTS"])
         if N_LEVELS == 1:
-            log.debug("point2profile: This dataset has a single vertical level, thus final variables will only have a N_PROF "
-                      "dimension and no N_LEVELS")
+            log.debug(
+                "point2profile: This dataset has a single vertical level, thus final variables will only have a N_PROF "
+                "dimension and no N_LEVELS"
+            )
 
         # Store the initial set of coordinates:
         coords_list = list(this.coords)
@@ -406,7 +439,10 @@ def fillvalue(da):
                 try:
                     count[i_prof, iv] = len(np.unique(prof[vname]))
                 except Exception:
-                    log.error("point2profile: An error happened when dealing with the '%s' data variable" % vname)
+                    log.error(
+                        "point2profile: An error happened when dealing with the '%s' data variable"
+                        % vname
+                    )
                     raise
 
         # Variables with a unique value for each profiles:
@@ -455,7 +491,7 @@ def fillvalue(da):
                     y = new_ds[vname].values
                     x = prof[vname].values
                     try:
-                        y[i_prof, 0: len(x)] = x
+                        y[i_prof, 0 : len(x)] = x
                     except Exception:
                         print(vname, "input", x.shape, "output", y[i_prof, :].shape)
                         raise
@@ -468,14 +504,18 @@ def fillvalue(da):
 
         # Restore coordinate variables:
         new_ds = new_ds.set_coords([c for c in coords_list if c in new_ds])
-        new_ds['N_PROF'] = np.arange(N_PROF)
-        if 'N_LEVELS' in new_ds['LATITUDE'].dims:
-            new_ds['LATITUDE'] = new_ds['LATITUDE'].isel(N_LEVELS=0)  # Make sure LAT is (N_PROF) and not (N_PROF, N_LEVELS)
-            new_ds['LONGITUDE'] = new_ds['LONGITUDE'].isel(N_LEVELS=0)
+        new_ds["N_PROF"] = np.arange(N_PROF)
+        if "N_LEVELS" in new_ds["LATITUDE"].dims:
+            new_ds["LATITUDE"] = new_ds["LATITUDE"].isel(
+                N_LEVELS=0
+            )  # Make sure LAT is (N_PROF) and not (N_PROF, N_LEVELS)
+            new_ds["LONGITUDE"] = new_ds["LONGITUDE"].isel(N_LEVELS=0)
 
         # Misc formatting
         new_ds = new_ds.sortby("TIME")
-        new_ds = new_ds.argo.cast_types() if not drop else cast_Argo_variable_type(new_ds)
+        new_ds = (
+            new_ds.argo.cast_types() if not drop else cast_Argo_variable_type(new_ds)
+        )
         new_ds = new_ds[np.sort(new_ds.data_vars)]
         new_ds.encoding = self.encoding  # Preserve low-level encoding information
         new_ds.attrs = self.attrs  # Preserve original attributes
@@ -485,7 +525,7 @@ def fillvalue(da):
         return new_ds
 
     def profile2point(self):
-        """ Convert a collection of profiles to a collection of points
+        """Convert a collection of profiles to a collection of points
 
         A "point" is a single location for measurements in space and time
         A "point" is localised as unique UID based on WMO, CYCLE_NUMBER and DIRECTION variable values.
@@ -539,7 +579,7 @@ def profile2point(self):
     def filter_data_mode(  # noqa: C901
         self, keep_error: bool = True, errors: str = "raise"
     ):
-        """ Filter variables according to their data mode
+        """Filter variables according to their data mode
 
         This filter applies to <PARAM> and <PARAM_QC>
 
@@ -590,7 +630,7 @@ def safe_where_eq(xds, key, value):
                     xds = xds.drop_vars("TIME")
                     xds = xds.where(xds[key] == value, drop=True)
                     xds["TIME"] = xr.DataArray(
-                        np.empty((len(xds["N_POINTS"]),), dtype='datetime64[ns]'),
+                        np.empty((len(xds["N_POINTS"]),), dtype="datetime64[ns]"),
                         dims="N_POINTS",
                         attrs=TIME.attrs,
                     )
@@ -598,9 +638,9 @@ def safe_where_eq(xds, key, value):
                     return xds
 
         def ds_split_datamode(xds):
-            """ Create one dataset for each of the data_mode
+            """Create one dataset for each of the data_mode
 
-                Split full dataset into 3 datasets
+            Split the full dataset into 3 datasets (R, A and D data modes)
             """
             # Real-time:
             argo_r = safe_where_eq(xds, "DATA_MODE", "R")
@@ -631,7 +671,7 @@ def ds_split_datamode(xds):
         def fill_adjusted_nan(this_ds, vname):
             """Fill in the adjusted field with the non-adjusted wherever it is NaN
 
-               Ensure to have values even for bad QC data in delayed mode
+            Ensures values are available even for bad QC data in delayed mode
             """
             ii = this_ds.where(np.isnan(this_ds[vname + "_ADJUSTED"]), drop=1)[
                 "N_POINTS"
@@ -642,13 +682,13 @@ def fill_adjusted_nan(this_ds, vname):
             return this_ds
 
         def merge_arrays(this_argo_r, this_argo_a, this_argo_d, this_vname):
-            """ Merge one variable from 3 DataArrays
+            """Merge one variable from 3 DataArrays
 
-                Based on xarray merge function with ’no_conflicts’: only values
-                which are not null in all datasets must be equal. The returned
-                dataset then contains the combination of all non-null values.
+            Based on the xarray merge function with 'no_conflicts': only values
+            which are not null in all datasets must be equal. The returned
+            dataset then contains the combination of all non-null values.
 
-                Return a xarray.DataArray
+            Return an xarray.DataArray
             """
 
             def merge_this(a1, a2, a3):
@@ -686,7 +726,7 @@ def merge_this(a1, a2, a3):
         #########
         ds = self._obj
         if "DATA_MODE" not in ds:
-            if errors == 'raise':
+            if errors == "raise":
                 raise InvalidDatasetStructure(
                     "Method only available for dataset with a 'DATA_MODE' variable "
                 )
@@ -753,7 +793,7 @@ def merge_this(a1, a2, a3):
     def filter_qc(  # noqa: C901
         self, QC_list=[1, 2], QC_fields="all", drop=True, mode="all", mask=False
     ):
-        """ Filter data set according to QC values
+        """Filter data set according to QC values
 
         Filter the dataset to keep points where ``all`` or ``any`` of the QC fields has a value in the list of
         integer QC flags.
@@ -815,7 +855,9 @@ def filter_qc(  # noqa: C901
             )
 
         if len(QC_fields) == 0:
-            this.argo._add_history("Variables selected according to QC (but found no QC variables)")
+            this.argo._add_history(
+                "Variables selected according to QC (but found no QC variables)"
+            )
             return this
 
         log.debug(
@@ -852,7 +894,7 @@ def filter_qc(  # noqa: C901
             return this_mask
 
     def filter_scalib_pres(self, force: str = "default", inplace: bool = True):
-        """ Filter variables according to OWC salinity calibration software requirements
+        """Filter variables according to OWC salinity calibration software requirements
 
         By default, this filter will return a dataset with raw PRES, PSAL and TEMP; and if PRES is adjusted,
         PRES variable will be replaced by PRES_ADJUSTED.
@@ -966,11 +1008,13 @@ def filter_researchmode(self) -> xr.Dataset:
 
         # Apply filter
         this = this.argo.filter_data_mode(errors="ignore")
-        if 'DATA_MODE' in this.data_vars:
-            this = this.where(this['DATA_MODE'] == 'D', drop=True)
+        if "DATA_MODE" in this.data_vars:
+            this = this.where(this["DATA_MODE"] == "D", drop=True)
         this = this.argo.filter_qc(QC_list=1)
-        if 'PRES_ERROR' in this.data_vars:  # PRES_ADJUSTED_ERROR was renamed PRES_ERROR by filter_data_mode
-            this = this.where(this['PRES_ERROR'] < 20, drop=True)
+        if (
+            "PRES_ERROR" in this.data_vars
+        ):  # PRES_ADJUSTED_ERROR was renamed PRES_ERROR by filter_data_mode
+            this = this.where(this["PRES_ERROR"] < 20, drop=True)
 
         # Manage output:
         if to_profile:
@@ -982,10 +1026,8 @@ def filter_researchmode(self) -> xr.Dataset:
             this = this.argo.cast_types()
         return this
 
-    def interp_std_levels(self,
-                          std_lev: list or np.array,
-                          axis: str = 'PRES'):
-        """ Interpolate measurements to standard pressure levels
+    def interp_std_levels(self, std_lev: list or np.array, axis: str = "PRES"):
+        """Interpolate measurements to standard pressure levels
 
         Parameters
         ----------
@@ -1075,13 +1117,17 @@ def interp_std_levels(self,
                 z_regridded_dim="Z_LEVELS",
             )
             ds_out[dv].attrs = this_dsp[dv].attrs  # Preserve attributes
-            if 'long_name' in ds_out[dv].attrs:
-                ds_out[dv].attrs['long_name'] = "Interpolated %s" % ds_out[dv].attrs['long_name']
+            if "long_name" in ds_out[dv].attrs:
+                ds_out[dv].attrs["long_name"] = (
+                    "Interpolated %s" % ds_out[dv].attrs["long_name"]
+                )
 
         ds_out = ds_out.rename({"remapped": "%s_INTERPOLATED" % axis})
         ds_out["%s_INTERPOLATED" % axis].attrs = this_dsp[axis].attrs
         if "long_name" in ds_out["%s_INTERPOLATED" % axis].attrs:
-            ds_out["%s_INTERPOLATED" % axis].attrs['long_name'] = "Standard %s levels" % axis
+            ds_out["%s_INTERPOLATED" % axis].attrs["long_name"] = (
+                "Standard %s levels" % axis
+            )
 
         for sv in solovars:
             ds_out[sv] = this_dsp[sv]
@@ -1109,7 +1155,7 @@ def groupby_pressure_bins(
         squeeze: bool = True,
         merge: bool = True,
     ):
-        """ Group measurements by pressure bins
+        """Group measurements by pressure bins
 
         This method can be used to subsample and align an irregular dataset (pressure not being similar in all profiles)
         on a set of pressure bins. The output dataset could then be used to perform statistics along the ``N_PROF`` dimension
@@ -1193,7 +1239,7 @@ def groupby_pressure_bins(
             bins = bins[np.where(h > 0)]
 
         def replace_i_level_values(this_da, this_i_level, new_values_along_profiles):
-            """ Convenience fct to update only one level of a ["N_PROF", "N_LEVELS"] xr.DataArray"""
+            """Convenience fct to update only one level of a ["N_PROF", "N_LEVELS"] xr.DataArray"""
             if this_da.dims == ("N_PROF", "N_LEVELS"):
                 values = this_da.values
                 values[:, this_i_level] = new_values_along_profiles
@@ -1203,11 +1249,11 @@ def replace_i_level_values(this_da, this_i_level, new_values_along_profiles):
             return this_da
 
         def nanmerge(x, y):
-            """ Merge two 1D array
+            """Merge two 1D array
 
-                Given 2 arrays x, y of 1 dimension, return a new array with:
-                - x values where x is not NaN
-                - y values where x is NaN
+            Given two 1D arrays x and y, return a new array with:
+            - x values where x is not NaN
+            - y values where x is NaN
             """
             z = x.copy()
             for i, v in enumerate(x):
@@ -1217,12 +1263,10 @@ def nanmerge(x, y):
 
         merged_is_nan = lambda l1, l2: len(  # noqa: E731
             np.unique(np.where(np.isnan(l1.values + l2.values)))
-        ) == len(
-            l1
-        )
+        ) == len(l1)
 
         def merge_bin_matching_levels(this_ds: xr.Dataset) -> xr.Dataset:
-            """ Levels merger of type 'bins' value
+            """Levels merger of type 'bins' value
 
             Merge pair of lines with the following pattern:
                nan,    VAL, VAL, nan,    VAL, VAL
@@ -1264,7 +1308,7 @@ def merge_bin_matching_levels(this_ds: xr.Dataset) -> xr.Dataset:
             return new_ds
 
         def merge_all_matching_levels(this_ds: xr.Dataset) -> xr.Dataset:
-            """ Levels merger
+            """Levels merger
 
             Merge any pair of levels with a "matching" pattern like this:
                VAL, VAL, VAL, nan, nan, VAL, nan, nan,
@@ -1315,7 +1359,7 @@ def merge_all_matching_levels(this_ds: xr.Dataset) -> xr.Dataset:
                 dv
                 for dv in list(this_dsp.data_vars)
                 if set(["N_LEVELS", "N_PROF"]) == set(this_dsp[dv].dims)
-                and dv not in DATA_TYPES['data']['str']
+                and dv not in DATA_TYPES["data"]["str"]
             ]
         else:
             datavars = [
@@ -1324,7 +1368,7 @@ def merge_all_matching_levels(this_ds: xr.Dataset) -> xr.Dataset:
                 if set(["N_LEVELS", "N_PROF"]) == set(this_dsp[dv].dims)
                 and "QC" not in dv
                 and "ERROR" not in dv
-                and dv not in DATA_TYPES['data']['str']
+                and dv not in DATA_TYPES["data"]["str"]
             ]
 
         # All other variables:
@@ -1388,7 +1432,7 @@ def teos10(  # noqa: C901
         vlist: list = ["SA", "CT", "SIG0", "N2", "PV", "PTEMP"],
         inplace: bool = True,
     ):
-        """ Add TEOS10 variables to the dataset
+        """Add TEOS10 variables to the dataset
 
         By default, adds: 'SA', 'CT'
         Other possible variables: 'SIG0', 'N2', 'PV', 'PTEMP', 'SOUND_SPEED'
@@ -1607,7 +1651,7 @@ def create_float_source(
         do_compression: bool = True,
         debug_output: bool = False,
     ):
-        """ Preprocess data for OWC software calibration
+        """Preprocess data for OWC software calibration
 
         This method can create a FLOAT SOURCE file (i.e. the .mat file that usually goes into /float_source/) for OWC software.
         The FLOAT SOURCE file is saved as:
@@ -1763,7 +1807,7 @@ def preprocess_one_float(
             select: str = "deep",
             debug_output: bool = False,
         ):
-            """ Run the entire preprocessing on a given dataset with one float data """
+            """Run the entire preprocessing on a given dataset with one float data"""
 
             # Add potential temperature:
             if "PTEMP" not in this_one:
@@ -1991,7 +2035,10 @@ def list_N_PROF_variables(self, uid=False):
                 try:
                     count[i_prof, iv] = len(np.unique(prof[vname]))
                 except Exception as e:
-                    print("An error happened when dealing with the '%s' data variable" % vname)
+                    print(
+                        "An error happened when dealing with the '%s' data variable"
+                        % vname
+                    )
                     raise (e)
 
         # Variables with a single unique value for each profile:
@@ -2005,8 +2052,8 @@ def list_N_PROF_variables(self, uid=False):
     def list_WMO_CYC(self):
         """Given a dataset, return a list with all possible (PLATFORM_NUMBER, CYCLE_NUMBER) tuple"""
         profiles = []
-        for wmo, grp in self._obj.groupby('PLATFORM_NUMBER'):
-            [profiles.append((wmo, cyc)) for cyc in np.unique(grp['CYCLE_NUMBER'])]
+        for wmo, grp in self._obj.groupby("PLATFORM_NUMBER"):
+            [profiles.append((wmo, cyc)) for cyc in np.unique(grp["CYCLE_NUMBER"])]
         return profiles
 
 
@@ -2017,18 +2064,19 @@ def open_Argo_dataset(filename_or_obj):
 
 
 class ArgoEngine(BackendEntrypoint):
-    """ Backend for Argo netCDF files based on the xarray netCDF4 engine
+    """Backend for Argo netCDF files based on the xarray netCDF4 engine
 
-        It can open any Argo ".nc" files with 'Argo' in their global attribute 'Conventions'.
+    It can open any Argo ".nc" file with 'Argo' in its global attribute 'Conventions'.
 
-        But it will not be detected as valid backend for netcdf files, so make
-        sure to specify ``engine="argo"`` in :func:`xarray.open_dataset`.
+    But it will not be detected as a valid backend for netCDF files, so make
+    sure to specify ``engine="argo"`` in :func:`xarray.open_dataset`.
 
-        Examples
-        --------
-        >>> import xarray as xr
-        >>> ds = xr.open_dataset("dac/aoml/1901393/1901393_prof.nc", engine='argo')
+    Examples
+    --------
+    >>> import xarray as xr
+    >>> ds = xr.open_dataset("dac/aoml/1901393/1901393_prof.nc", engine='argo')
     """
+
     description = "Open Argo netCDF files (.nc)"
     url = "https://argopy.readthedocs.io/en/latest/generated/argopy.xarray.ArgoEngine.html#argopy.xarray.ArgoEngine"
 
@@ -2050,7 +2098,7 @@ def guess_can_open(self, filename_or_obj):
         except TypeError:
             return False
         if ext in {".nc"}:
-            attrs = xr.open_dataset(filename_or_obj, engine='netcdf4').attrs
-            return 'Conventions' in attrs and 'Argo' in attrs['Conventions']
+            attrs = xr.open_dataset(filename_or_obj, engine="netcdf4").attrs
+            return "Conventions" in attrs and "Argo" in attrs["Conventions"]
         else:
             return False
diff --git a/ci/envs_manager b/ci/envs_manager
index c1668504..de576f40 100755
--- a/ci/envs_manager
+++ b/ci/envs_manager
@@ -19,22 +19,21 @@ source ~/miniconda3/etc/profile.d/conda.sh
 # LIST ALL  AVAILABLE ENVIRONMENTS AND THE ASSOCIATED YAML FILE
 #########################
 declare -A ENV_LIST=(
-['argopy-docs']="requirements/py3.9-docs.yml"
-
-['argopy-py37-all-free']="requirements/py3.7-all-free.yml"
-['argopy-py37-core-free']="requirements/py3.7-core-free.yml"
-
-['argopy-py38-all-free']="requirements/py3.8-all-free.yml"
-['argopy-py38-all-pinned']="requirements/py3.8-all-pinned.yml"
-
-['argopy-py38-core-free']="requirements/py3.8-core-free.yml"
-['argopy-py38-core-pinned']="requirements/py3.8-core-pinned.yml"
+['argopy-docs-dev']="requirements/py3.9-docs-dev.yml"
+['argopy-docs-rtd']="requirements/py3.9-docs-rtd.yml"
 
 ['argopy-py39-all-free']="requirements/py3.9-all-free.yml"
 ['argopy-py39-all-pinned']="requirements/py3.9-all-pinned.yml"
 
 ['argopy-py39-core-pinned']="requirements/py3.9-core-pinned.yml"
 ['argopy-py39-core-free']="requirements/py3.9-core-free.yml"
+
+['argopy-py310-all-free']="requirements/py3.10-all-free.yml"
+['argopy-py310-all-pinned']="requirements/py3.10-all-pinned.yml"
+
+['argopy-py310-core-free']="requirements/py3.10-core-free.yml"
+['argopy-py310-core-pinned']="requirements/py3.10-core-pinned.yml"
+
 )
 
 
diff --git a/ci/requirements/py3.8-all-free.yml b/ci/requirements/py3.10-all-free.yml
similarity index 74%
rename from ci/requirements/py3.8-all-free.yml
rename to ci/requirements/py3.10-all-free.yml
index 6b802b9c..47da7b4b 100644
--- a/ci/requirements/py3.8-all-free.yml
+++ b/ci/requirements/py3.10-all-free.yml
@@ -2,7 +2,7 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python = 3.8
+  - python = 3.10.14
 
 # CORE:
   - aiohttp
@@ -14,7 +14,7 @@ dependencies:
   - requests
   - scipy
   - toolz
-  - xarray<2024.3
+  - xarray < 2024.3 # until https://github.com/pydata/xarray/issues/8909 is solved
 
 # EXT.UTIL:
   - boto3
@@ -26,13 +26,14 @@ dependencies:
 # EXT.PERF:
   - dask
   - distributed
+  - h5netcdf
   - pyarrow
 
 # EXT.PLOT:
   - IPython
   - cartopy
   - ipykernel
-  - ipywidgets <= 7.8.0
+  - ipywidgets
   - matplotlib
   - pyproj
   - seaborn
@@ -43,9 +44,8 @@ dependencies:
   - bottleneck
   - cfgrib
   - cftime
-#  - conda
+  - codespell
   - flake8
-#  - nc-time-axis
   - numpy
   - pandas
   - pip
@@ -54,6 +54,8 @@ dependencies:
   - pytest-env
   - pytest-localftpserver
   - setuptools
+#  - sphinx
 
+# PIP:
   - pip:
-      - pytest-reportlog
\ No newline at end of file
+      - pytest-reportlog
diff --git a/ci/requirements/py3.10-all-pinned.yml b/ci/requirements/py3.10-all-pinned.yml
new file mode 100644
index 00000000..f20410d2
--- /dev/null
+++ b/ci/requirements/py3.10-all-pinned.yml
@@ -0,0 +1,61 @@
+name: argopy-tests
+channels:
+  - conda-forge
+dependencies:
+  - python = 3.10.14
+
+# CORE:
+  - aiohttp = 3.10.4
+  - decorator = 5.1.1
+  - erddapy = 2.2.0
+  - fsspec = 2024.6.1
+  - netCDF4 = 1.7.1
+  - packaging = 24.1
+  - requests = 2.32.3
+  - scipy = 1.14.0
+  - toolz = 0.12.1
+  - xarray = 2024.2.0
+
+# EXT.UTIL:
+  - boto3 = 1.35.0
+  - gsw = 3.6.19
+  - s3fs = 0.4.2
+  - tqdm = 4.66.5
+  - zarr = 2.18.2
+
+# EXT.PERF:
+  - dask = 2024.8.1
+  - distributed = 2024.8.1
+  - h5netcdf = 1.3.0
+  - pyarrow = 17.0.0
+
+# EXT.PLOT:
+  - IPython = 8.26.0
+  - cartopy = 0.23.0
+  - ipykernel = 6.29.5
+  - ipywidgets = 8.1.3
+  - matplotlib = 3.9.2
+  - pyproj = 3.6.1
+  - seaborn = 0.13.2
+
+# DEV:
+  - aiofiles = 24.1.0
+  - black = 24.8.0
+  - bottleneck = 1.4.0
+  - cfgrib = 0.9.14.0
+  - cftime = 1.6.4
+  - codespell = 2.3.0
+  - flake8 = 7.1.1
+  - numpy = 1.26.4
+  - pandas = 2.2.2
+  - pip = 24.2
+  - pytest = 8.3.2
+  - pytest-cov = 5.0.0
+  - pytest-env = 1.1.3
+  - pytest-localftpserver
+  - setuptools = 72.1.0
+#  - sphinx = -
+
+# PIP:
+  - pip:
+      - pytest-reportlog == 0.4.0
\ No newline at end of file
diff --git a/ci/requirements/py3.8-core-free.yml b/ci/requirements/py3.10-core-free.yml
similarity index 81%
rename from ci/requirements/py3.8-core-free.yml
rename to ci/requirements/py3.10-core-free.yml
index 8a0a8740..94f62622 100644
--- a/ci/requirements/py3.8-core-free.yml
+++ b/ci/requirements/py3.10-core-free.yml
@@ -2,7 +2,7 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python = 3.8
+  - python = 3.10.14
 
 # CORE:
   - aiohttp
@@ -14,7 +14,7 @@ dependencies:
   - requests
   - scipy
   - toolz
-  - xarray<2024.3
+  - xarray < 2024.3 # until https://github.com/pydata/xarray/issues/8909 is solved
 
 # EXT.UTIL:
 #  - boto3
@@ -26,6 +26,7 @@ dependencies:
 # EXT.PERF:
 #  - dask
 #  - distributed
+#  - h5netcdf
 #  - pyarrow
 
 # EXT.PLOT:
@@ -43,9 +44,8 @@ dependencies:
   - bottleneck
   - cfgrib
   - cftime
-#  - conda
+  - codespell
   - flake8
-#  - nc-time-axis
   - numpy
   - pandas
   - pip
@@ -56,5 +56,6 @@ dependencies:
   - setuptools
 #  - sphinx
 
+# PIP:
   - pip:
       - pytest-reportlog
\ No newline at end of file
diff --git a/ci/requirements/py3.10-core-pinned.yml b/ci/requirements/py3.10-core-pinned.yml
new file mode 100644
index 00000000..95e4e481
--- /dev/null
+++ b/ci/requirements/py3.10-core-pinned.yml
@@ -0,0 +1,61 @@
+name: argopy-tests
+channels:
+  - conda-forge
+dependencies:
+  - python = 3.10.14
+
+# CORE:
+  - aiohttp = 3.10.4
+  - decorator = 5.1.1
+  - erddapy = 2.2.0
+  - fsspec = 2024.6.1
+  - netCDF4 = 1.7.1
+  - packaging = 24.1
+  - requests = 2.32.3
+  - scipy = 1.14.0
+  - toolz = 0.12.1
+  - xarray = 2024.2.0
+
+# EXT.UTIL:
+#  - boto3 = 1.35.0
+#  - gsw = 3.6.19
+#  - s3fs = 0.4.2
+#  - tqdm = 4.66.5
+#  - zarr = 2.18.2
+
+# EXT.PERF:
+#  - dask = 2024.8.1
+#  - distributed = 2024.8.1
+#  - h5netcdf = 1.3.0
+#  - pyarrow = 17.0.0
+
+# EXT.PLOT:
+#  - IPython = 8.26.0
+#  - cartopy = 0.23.0
+#  - ipykernel = 6.29.5
+#  - ipywidgets = 8.1.3
+#  - matplotlib = 3.9.2
+#  - pyproj = 3.6.1
+#  - seaborn = 0.13.2
+
+# DEV:
+  - aiofiles = 24.1.0
+  - black = 24.8.0
+  - bottleneck = 1.4.0
+  - cfgrib = 0.9.14.0
+  - cftime = 1.6.4
+  - codespell = 2.3.0
+  - flake8 = 7.1.1
+  - numpy = 1.26.4
+  - pandas = 2.2.2
+  - pip = 24.2
+  - pytest = 8.3.2
+  - pytest-cov = 5.0.0
+  - pytest-env = 1.1.3
+  - pytest-localftpserver
+  - setuptools = 72.1.0
+#  - sphinx = -
+
+# PIP:
+  - pip:
+      - pytest-reportlog == 0.4.0
\ No newline at end of file
diff --git a/ci/requirements/py3.8-all-pinned.yml b/ci/requirements/py3.8-all-pinned.yml
deleted file mode 100644
index 56ab3b01..00000000
--- a/ci/requirements/py3.8-all-pinned.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-name: argopy-tests
-channels:
-  - conda-forge
-dependencies:
-  - python = 3.8
-
-# CORE:
-  - aiohttp = 3.8.6
-  - decorator = 5.1.1
-  - erddapy = 2.2.0
-  - fsspec = 2023.9.2
-  - netCDF4 = 1.6.4
-  - packaging = 23.2
-  - requests = 2.31.0
-  - scipy = 1.10.1
-  - toolz = 0.12.0
-  - xarray = 2023.1.0
-
-# EXT.UTIL:
-  - boto3 = 1.28.17
-  - gsw = 3.6.17
-  - s3fs = 2023.9.2
-  - tqdm = 4.66.1
-  - zarr = 2.13.3
-
-# EXT.PERF:
-  - dask = 2023.5.0
-  - distributed = 2023.5.0
-  - pyarrow = 13.0.0
-
-# EXT.PLOT:
-  - IPython = 8.12.2
-  - cartopy = 0.21.1
-  - ipykernel = 6.25.2
-  - ipywidgets = 7.8.0
-  - matplotlib = 3.7.3
-  - pyproj = 3.5.0
-  - seaborn = 0.13.0
-
-# DEV:
-  - aiofiles = 23.1.0
-  - black = 23.9.1
-  - bottleneck = 1.3.7
-  - cfgrib = 0.9.10.4
-  - cftime = 1.6.2
-#  - conda = -
-  - flake8 = 6.1.0
-#  - nc-time-axis = -
-  - numpy = 1.24.4
-  - pandas = 1.5.3
-  - pip = 23.2.1
-  - pytest = 7.4.2
-  - pytest-cov = 4.1.0
-  - pytest-env = 1.0.1
-  - pytest-localftpserver = 1.1.4
-  - setuptools = 68.2.2
-#  - sphinx = -
-
-# PIP:
-  - pip:
-      - pytest-reportlog == 0.4.0
diff --git a/ci/requirements/py3.8-core-pinned.yml b/ci/requirements/py3.8-core-pinned.yml
deleted file mode 100644
index 3eb19957..00000000
--- a/ci/requirements/py3.8-core-pinned.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: argopy-tests
-channels:
-  - conda-forge
-dependencies:
-  - python = 3.8
-
-# CORE:
-  - aiohttp = 3.8.6
-  - decorator = 5.1.1
-  - erddapy = 2.2.0
-  - fsspec = 2023.9.2
-  - netCDF4 = 1.6.4
-  - packaging = 23.2
-  - requests = 2.31.0
-  - scipy = 1.10.1
-  - toolz = 0.12.0
-  - xarray = 2023.1.0
-
-# EXT.UTIL:
-#  - boto3 = 1.28.17
-#  - gsw = 3.6.17
-#  - s3fs = 2023.9.2
-#  - tqdm = 4.66.1
-#  - zarr = 2.13.3
-
-# EXT.PERF:
-#  - dask = 2023.5.0
-#  - distributed = 2023.5.0
-#  - pyarrow = 13.0.0
-
-# EXT.PLOT:
-#  - IPython = 8.12.2
-#  - cartopy = 0.21.1
-#  - ipykernel = 6.25.2
-#  - ipywidgets = 7.8.0
-#  - matplotlib = 3.7.3
-#  - pyproj = 3.5.0
-#  - seaborn = 0.13.0
-
-# DEV:
-  - aiofiles = 23.1.0
-  - black = 23.9.1
-  - bottleneck = 1.3.7
-  - cfgrib = 0.9.10.4
-  - cftime = 1.6.2
-#  - conda = -
-  - flake8 = 6.1.0
-#  - nc-time-axis = -
-  - numpy = 1.24.4
-  - pandas = 1.5.3
-  - pip = 23.2.1
-  - pytest = 7.4.2
-  - pytest-cov = 4.1.0
-  - pytest-env = 1.0.1
-  - pytest-localftpserver = 1.1.4
-  - setuptools = 68.2.2
-#  - sphinx = -
-
-  - pip:
-      - pytest-reportlog == 0.4.0
diff --git a/ci/requirements/py3.9-all-free.yml b/ci/requirements/py3.9-all-free.yml
index 35286507..8219167e 100644
--- a/ci/requirements/py3.9-all-free.yml
+++ b/ci/requirements/py3.9-all-free.yml
@@ -2,7 +2,7 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python=3.9
+  - python = 3.9.19
 
 # CORE:
   - aiohttp
@@ -14,7 +14,7 @@ dependencies:
   - requests
   - scipy
   - toolz
-  - xarray<2024.3
+  - xarray < 2024.3 # until https://github.com/pydata/xarray/issues/8909 is solved
 
 # EXT.UTIL:
   - boto3
@@ -35,6 +35,7 @@ dependencies:
   - ipykernel
   - ipywidgets
   - matplotlib
+  - pyproj
   - seaborn
 
 # DEV:
@@ -43,9 +44,8 @@ dependencies:
   - bottleneck
   - cfgrib
   - cftime
-#  - conda
+  - codespell
   - flake8
-#  - nc-time-axis
   - numpy
   - pandas
   - pip
@@ -54,6 +54,8 @@ dependencies:
   - pytest-env
   - pytest-localftpserver
   - setuptools
+#  - sphinx
 
+# PIP:
   - pip:
       - pytest-reportlog
diff --git a/ci/requirements/py3.9-all-pinned.yml b/ci/requirements/py3.9-all-pinned.yml
index e94f6fb3..62ca9fa9 100644
--- a/ci/requirements/py3.9-all-pinned.yml
+++ b/ci/requirements/py3.9-all-pinned.yml
@@ -2,58 +2,60 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python=3.9
+  - python = 3.9.19
 
 # CORE:
-  - aiohttp = 3.8.5
+  - aiohttp = 3.10.4
   - decorator = 5.1.1
   - erddapy = 2.2.0
-  - fsspec = 2023.9.2
-  - netCDF4 = 1.6.4
-  - packaging = 23.1
-  - requests = 2.31.0
-  - scipy = 1.11.3
-  - toolz = 0.12.0
-  - xarray = 2023.9.0
+  - fsspec = 2024.6.1
+  - netCDF4 = 1.7.1
+  - packaging = 24.1
+  - requests = 2.32.3
+  - scipy = 1.13.1
+  - toolz = 0.12.1
+  - xarray = 2024.2.0
 
 # EXT.UTIL:
-  - boto3 = 1.28.17
-  - gsw = 3.6.17
-  - s3fs = 2023.9.2
-  - tqdm = 4.66.1
-  - zarr = 2.16.1
+  - boto3 = 1.35.0
+  - gsw = 3.6.19
+  - s3fs = 0.4.2
+  - tqdm = 4.66.5
+  - zarr = 2.18.2
 
 # EXT.PERF:
-  - dask = 2023.9.2
-  - distributed = 2023.9.2
-  - h5netcdf = 1.2.0
-  - pyarrow = 13.0.0
+  - dask = 2024.8.0
+  - distributed = 2024.8.0
+  - h5netcdf = 1.3.0
+  - pyarrow = 17.0.0
 
 # EXT.PLOT:
-  - IPython = 8.15.0
-  - cartopy = 0.22.0
-  - ipykernel = 6.25.2
-  - ipywidgets = 8.1.1
-  - matplotlib = 3.8.0
-  - seaborn = 0.12.2
+  - IPython = 8.18.1
+  - cartopy = 0.23.0
+  - ipykernel = 6.29.5
+  - ipywidgets = 8.1.3
+  - matplotlib = 3.9.2
+  - pyproj = 3.6.1
+  - seaborn = 0.13.2
 
 # DEV:
-  - aiofiles = 23.1.0
-  - black = 23.9.1
-  - bottleneck = 1.3.7
-  - cfgrib = 0.9.10.4
-  - cftime = 1.6.2
-#  - conda = -
-  - flake8 = 6.1.0
-#  - nc-time-axis = -
-  - numpy = 1.26.0
-  - pandas = 2.1.1
-  - pip = 23.2.1
-  - pytest = 7.4.2
-  - pytest-cov = 4.1.0
-  - pytest-env = 1.0.1
-  - pytest-localftpserver = 1.1.4
-  - setuptools = 68.2.2
+  - aiofiles = 24.1.0
+  - black = 24.8.0
+  - bottleneck = 1.4.0
+  - cfgrib = 0.9.14.0
+  - cftime = 1.6.4
+  - codespell
+  - flake8 = 7.1.1
+  - numpy = 1.26.4
+  - pandas = 2.2.2
+  - pip = 24.2
+  - pytest = 8.3.2
+  - pytest-cov = 5.0.0
+  - pytest-env = 1.1.3
+  - pytest-localftpserver
+  - setuptools = 72.1.0
+#  - sphinx = -
 
+# PIP:
   - pip:
-      - pytest-reportlog==0.1.2
+      - pytest-reportlog == 0.4.0
diff --git a/ci/requirements/py3.9-core-free.yml b/ci/requirements/py3.9-core-free.yml
index 771eec21..c38813de 100644
--- a/ci/requirements/py3.9-core-free.yml
+++ b/ci/requirements/py3.9-core-free.yml
@@ -2,7 +2,7 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python=3.9
+  - python = 3.9.19
 
 # CORE:
   - aiohttp
@@ -14,7 +14,7 @@ dependencies:
   - requests
   - scipy
   - toolz
-  - xarray<2024.3
+  - xarray < 2024.3 # until https://github.com/pydata/xarray/issues/8909 is solved
 
 # EXT.UTIL:
 #  - boto3
@@ -35,6 +35,7 @@ dependencies:
 #  - ipykernel
 #  - ipywidgets
 #  - matplotlib
+#  - pyproj
 #  - seaborn
 
 # DEV:
@@ -43,9 +44,8 @@ dependencies:
   - bottleneck
   - cfgrib
   - cftime
-#  - conda
+  - codespell
   - flake8
-#  - nc-time-axis
   - numpy
   - pandas
   - pip
@@ -54,6 +54,8 @@ dependencies:
   - pytest-env
   - pytest-localftpserver
   - setuptools
+#  - sphinx
 
+# PIP:
   - pip:
       - pytest-reportlog
diff --git a/ci/requirements/py3.9-core-pinned.yml b/ci/requirements/py3.9-core-pinned.yml
index cd6bd5f5..47051c8b 100644
--- a/ci/requirements/py3.9-core-pinned.yml
+++ b/ci/requirements/py3.9-core-pinned.yml
@@ -2,58 +2,60 @@ name: argopy-tests
 channels:
   - conda-forge
 dependencies:
-  - python=3.9
+  - python = 3.9.19
 
 # CORE:
-  - aiohttp = 3.8.5
+  - aiohttp = 3.10.4
   - decorator = 5.1.1
   - erddapy = 2.2.0
-  - fsspec = 2023.9.2
-  - netCDF4 = 1.6.4
-  - packaging = 23.1
-  - requests = 2.31.0
-  - scipy = 1.11.3
-  - toolz = 0.12.0
-  - xarray = 2023.9.0
+  - fsspec = 2024.6.1
+  - netCDF4 = 1.7.1
+  - packaging = 24.1
+  - requests = 2.32.3
+  - scipy = 1.13.1
+  - toolz = 0.12.1
+  - xarray = 2024.2.0
 
-# # EXT.UTIL:
-#   - boto3 = 1.28.17
-#   - gsw = 3.6.17
-#   - s3fs = 2023.9.2
-#   - tqdm = 4.66.1
-#   - zarr = 2.16.1
+# EXT.UTIL:
+#  - boto3 = 1.35.0
+#  - gsw = 3.6.19
+#  - s3fs = 0.4.2
+#  - tqdm = 4.66.5
+#  - zarr = 2.18.2
 
-# # EXT.PERF:
-#   - dask = 2023.9.2
-#   - distributed = 2023.9.2
-#   - h5netcdf = 1.2.0
-#   - pyarrow = 13.0.0
+# EXT.PERF:
+#  - dask = 2024.8.0
+#  - distributed = 2024.8.0
+#  - h5netcdf = 1.3.0
+#  - pyarrow = 17.0.0
 
-# # EXT.PLOT:
-#   - IPython = 8.15.0
-#   - cartopy = 0.22.0
-#   - ipykernel = 6.25.2
-#   - ipywidgets = 8.1.1
-#   - matplotlib = 3.8.0
-#   - seaborn = 0.12.2
+# EXT.PLOT:
+#  - IPython = 8.18.1
+#  - cartopy = 0.23.0
+#  - ipykernel = 6.29.5
+#  - ipywidgets = 8.1.3
+#  - matplotlib = 3.9.2
+#  - pyproj = 3.6.1
+#  - seaborn = 0.13.2
 
 # DEV:
-  - aiofiles = 23.1.0
-  - black = 23.9.1
-  - bottleneck = 1.3.7
-  - cfgrib = 0.9.10.4
-  - cftime = 1.6.2
-#  - conda = -
-  - flake8 = 6.1.0
-#  - nc-time-axis = -
-  - numpy = 1.26.0
-  - pandas = 2.1.1
-  - pip = 23.2.1
-  - pytest = 7.4.2
-  - pytest-cov = 4.1.0
-  - pytest-env = 1.0.1
-  - pytest-localftpserver = 1.1.4
-  - setuptools = 68.2.2
+  - aiofiles = 24.1.0
+  - black = 24.8.0
+  - bottleneck = 1.4.0
+  - cfgrib = 0.9.14.0
+  - cftime = 1.6.4
+  - codespell
+  - flake8 = 7.1.1
+  - numpy = 1.26.4
+  - pandas = 2.2.2
+  - pip = 24.2
+  - pytest = 8.3.2
+  - pytest-cov = 5.0.0
+  - pytest-env = 1.1.3
+  - pytest-localftpserver
+  - setuptools = 72.1.0
+#  - sphinx = -
 
+# PIP:
   - pip:
-      - pytest-reportlog==0.1.2
+      - pytest-reportlog == 0.4.0
diff --git a/ci/requirements/py3.9-docs.yml b/ci/requirements/py3.9-docs-dev.yml
similarity index 97%
rename from ci/requirements/py3.9-docs.yml
rename to ci/requirements/py3.9-docs-dev.yml
index 28f58ecb..9f883003 100644
--- a/ci/requirements/py3.9-docs.yml
+++ b/ci/requirements/py3.9-docs-dev.yml
@@ -1,4 +1,4 @@
-name: argopy-docs
+name: argopy-docs-dev
 channels:
   - conda-forge
 dependencies:
@@ -18,6 +18,7 @@ dependencies:
   - cartopy
   - cmocean
   - cftime
+  - cfgrib
   - decorator
   - distributed
   - ipython
@@ -40,7 +41,6 @@ dependencies:
   - sphinx-book-theme
   - pydata-sphinx-theme
   - codespell
-  - cfgrib
   - black
   - flake8
   - pytest-cov
diff --git a/ci/requirements/py3.9-docs-rtd.yml b/ci/requirements/py3.9-docs-rtd.yml
new file mode 100644
index 00000000..6efcae9c
--- /dev/null
+++ b/ci/requirements/py3.9-docs-rtd.yml
@@ -0,0 +1,59 @@
+name: argopy-docs-rtd
+channels:
+  - conda-forge
+dependencies:
+  - python=3.9
+  - xarray=2024.2.0
+  - scipy
+  - netcdf4
+  - dask
+#  - toolz
+  - erddapy
+  - fsspec
+  - gsw
+  - aiohttp
+#  - bottleneck
+  - s3fs
+  - boto3
+#  - cartopy
+#  - cmocean
+  - cftime
+  - cfgrib
+  - decorator
+  - distributed
+  - ipython
+  - matplotlib
+#  - numpy
+  - pandas
+#  - packaging
+  - pip
+#  - pytest
+#  - seaborn
+#  - setuptools
+  - zarr
+  - tqdm
+  - ipykernel
+  - ipywidgets
+  - nbsphinx
+  - jinja2
+  - sphinx-issues
+  - sphinx_rtd_theme
+  - sphinx-book-theme
+  - pydata-sphinx-theme
+#  - codespell
+#  - black
+#  - flake8
+#  - pytest-cov
+#  - pytest-env
+  - pip:
+    - Sphinx
+    - numpydoc
+    - readthedocs-sphinx-ext
+    - sphinx-autosummary-accessors
+    - pydocstyle
+    - sphinx-tabs
+    - sphinxext-rediraffe
+#    - sphinxext-opengraph
+    - git+https://github.com/sphinx-contrib/googleanalytics
+    - sphinx-copybutton
+    - sphinx-design
diff --git a/cli/show_versions b/cli/show_versions
index 52e5abbe..f25b2396 100755
--- a/cli/show_versions
+++ b/cli/show_versions
@@ -9,6 +9,16 @@ import locale
 import argparse
 import setuptools
 from importlib.metadata import version
+import shutil
+import json
+
+PIP_INSTALLED = {}
+try:
+    # Build a {package name: version} mapping from the `pip list` inventory:
+    reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'list', '--format', 'json'])
+    reqs = json.loads(reqs.decode())
+    for mod in reqs:
+        PIP_INSTALLED[mod['name']] = mod['version']
+except Exception:
+    pass
 
 
 def get_sys_info():
@@ -79,6 +89,25 @@ def netcdf_and_hdf5_versions():
     return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
 
 
+def cli_version(cli_name):
+    """Return the version reported by a command line tool, or '-' if it cannot be determined"""
+    try:
+        out = subprocess.run([cli_name, '--version'], capture_output=True)
+        return out.stdout.decode().strip("\n").replace(cli_name, '').strip()
+    except Exception:
+        if shutil.which(cli_name):
+            return "- # installed"
+        else:
+            return "-"
+
+
+def pip_version(pip_name):
+    """Return the pip-installed version of a package, trying '-'/'_' name variants, or '-' if not found"""
+    ver = '-'  # note: avoid shadowing `version` imported from importlib.metadata
+    for name in [pip_name, pip_name.replace("_", "-"), pip_name.replace("-", "_")]:
+        if name in PIP_INSTALLED:
+            ver = PIP_INSTALLED[name]
+    return ver
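+
+
+# Usage sketch (illustrative values only; actual results depend on the local environment):
+#   cli_version("codespell") -> e.g. "2.3.0", pip_version("pytest_reportlog") -> e.g. "0.4.0", or "-" when not found.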
+
+
 def get_version(module_name):
     ver = '-'
     try:
@@ -87,7 +116,15 @@ def get_version(module_name):
         try:
             ver = version(module_name)
         except importlib.metadata.PackageNotFoundError:
-            pass
+            try:
+                # Fall back on the `pip list` inventory, then on the command line tool itself:
+                ver = pip_version(module_name)
+                if ver == '-':
+                    ver = cli_version(module_name)
+            except Exception:
+                pass
+    if all(v == '0' for v in ver.split(".")):  # treat an all-zeros version (e.g. '0.0.0') as not found
+        ver = '-'
     return ver
 
 
@@ -151,9 +188,8 @@ def show_versions(file=sys.stdout, conda=False, free=False, core=False):  # noqa
             ("bottleneck", get_version),
             ("cftime", get_version),
             ("cfgrib", get_version),
-            ("conda", get_version),
+            ("codespell", cli_version),
             ("flake8", get_version),
-            ("nc_time_axis", get_version),
             ("numpy", get_version),  # will come with xarray and pandas
             ("pandas", get_version),  # will come with xarray
             ("pip", get_version),
@@ -165,7 +201,7 @@ def show_versions(file=sys.stdout, conda=False, free=False, core=False):  # noqa
             ("sphinx", get_version),
         ]),
         'pip': sorted([
-            ("pytest_reportlog", get_version),
+            ("pytest_reportlog", pip_version),
         ])
     }
 
@@ -176,19 +212,11 @@ def show_versions(file=sys.stdout, conda=False, free=False, core=False):  # noqa
         deps_blob = list()
         for (modname, ver_f) in deps:
             try:
-                if modname in sys.modules:
-                    mod = sys.modules[modname]
-                else:
-                    mod = importlib.import_module(modname)
+                ver = ver_f(modname)
+                deps_blob.append((modname, ver))
             except Exception:
-                deps_blob.append((modname, '-'))
-            else:
-                try:
-                    ver = ver_f(modname)
-                    deps_blob.append((modname, ver))
-                except Exception:
-                    # raise ValueError("Can't get version for '%s'" % modname)
-                    deps_blob.append((modname, "installed"))
+                # raise ValueError("Can't get version for '%s'" % modname)
+                deps_blob.append((modname, "installed"))
         DEPS_blob[level] = deps_blob
 
     # Print:
diff --git a/docs/impact.rst b/docs/impact.rst
index 51202c7d..4e1c83db 100644
--- a/docs/impact.rst
+++ b/docs/impact.rst
@@ -4,24 +4,28 @@ Impact of argopy
 Papers & proceedings mentioning argopy
 --------------------------------------
 
-- Bartlett, Jenna, "An investigation of geostationary satellite imagery to compare developing and non-developing African easterly waves" (2022). Theses and Dissertations. 5600. https://scholarsjunction.msstate.edu/td/5600
+- Bartlett, Jenna, "An investigation of geostationary satellite imagery to compare developing and non-developing African easterly waves" (2022). Theses and Dissertations. 5600. https://scholarsjunction.msstate.edu/td/5600
 
 - Chafik, et.al, "The Faroe-Shetland Channel Jet: Structure, Variability, and Driving Mechanisms", 2023, JGR Oceans, https://doi.org/10.1029/2022JC019083
 
 - Dan E. Kelley, Jaimie Harbin, Clark Richards, "argoFloats: An R Package for Analyzing Argo Data", 2021 Frontiers in Marine Science, https://doi.org/10.3389/fmars.2021.635922
 
-- de Solo, Sofia M., "What makes a hurricane fall apart? A multi-platform assessment of tropical cyclone weakening By" (2021). Theses and Dissertations. 5274. https://scholarsjunction.msstate.edu/td/5274
+- de Solo, Sofia M., "What makes a hurricane fall apart? A multi-platform assessment of tropical cyclone weakening" (2021). Theses and Dissertations. 5274. https://scholarsjunction.msstate.edu/td/5274
 
 - Dunnington et al., (2021). argodata: An R interface to oceanographic data from the International Argo Program. Journal of Open Source Software, 6(68), 3659, https://doi.org/10.21105/joss.03659
 
+- Elipot S., P. Miron, M. Curcic, K. Santana, R. Lumpkin (2024). Clouddrift: a Python package to accelerate the use of Lagrangian data for atmospheric, oceanic, and climate sciences. Journal of Open Source Software, 9(99), 6742, https://joss.theoj.org/papers/10.21105/joss.06742
+
 - Gonzalez A., "The Argo Online School: An e-learning tool to get started with Argo" (2023), The Journal of Open Source Education (Under review)
 
-- Huda, Md Nurul, "Machine Learning for Improvement of Ocean Data Resolution for Weather Forecasting and Climatological Research" (2023). Theses and Dissertations, Virginia Tech, http://hdl.handle.net/10919/116504
+- Huda, Md Nurul, "Machine Learning for Improvement of Ocean Data Resolution for Weather Forecasting and Climatological Research" (2023). Theses and Dissertations, Virginia Tech, http://hdl.handle.net/10919/116504
 
 - Steinberg, J. M., Piecuch, C. G., Hamlington, B. D., Thompson, P. R., & Coats, S. (2024). Influence of deep-ocean warming on coastal sea-level decadal trends in the Gulf of Mexico. Journal of Geophysical Research: Oceans, 129, e2023JC019681. https://doi.org/10.1029/2023JC019681
 
 - Zhang, Y. (2023). Python Data Analysis Techniques in Administrative Information Integration Management System. In: Atiquzzaman, M., Yen, N.Y., Xu, Z. (eds) Proceedings of the 4th International Conference on Big Data Analytics for Cyber-Physical System in Smart City - Volume 2. BDCPS 2022. Lecture Notes on Data Engineering and Communications Technologies, vol 168. Springer, Singapore. https://doi.org/10.1007/978-981-99-1157-8_35
 
+
+
 Other interesting mentions
 --------------------------
 
diff --git a/docs/install.rst b/docs/install.rst
index daf3b775..5669da97 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -32,18 +32,20 @@ Required dependencies
 
 - aiohttp
 - erddapy
-- fsspec < 2023.12.0 (more at :issue:`317`)
+- fsspec
 - netCDF4
 - scipy
 - toolz
-- xarray
+- xarray < 2024.3.0 (because of `this issue <https://github.com/pydata/xarray/issues/8909>`_)
 - requests
+- decorator
+- packaging
 
 Note that Erddapy_ is required because `erddap <https://coastwatch.pfeg.noaa.gov/erddap/information.html>`_ is the default data fetching backend.
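+
+As a minimal, hedged illustration of that default (the float WMO below is arbitrary and only serves the example), the two calls are equivalent:
+
+.. code-block:: python
+
+    from argopy import DataFetcher
+
+    # erddap is the default data source ...
+    ds = DataFetcher().float(6902746).to_xarray()
+    # ... so spelling it out explicitly is equivalent:
+    ds = DataFetcher(src="erddap").float(6902746).to_xarray()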
 
 Requirement dependencies details can be found `here <https://github.com/euroargodev/argopy/network/dependencies#requirements.txt>`_.
 
-The **argopy** software is `continuously tested <https://github.com/euroargodev/argopy/actions?query=workflow%3Atests>`_ under latest OS (Linux, Mac OS and Windows) and with python versions 3.8 and 3.9
+The **argopy** software is `continuously tested <https://github.com/euroargodev/argopy/actions?query=workflow%3Atests>`_ on the latest OS versions (Linux, Mac OS and Windows) and with Python versions 3.9 and 3.10.
 
 Optional dependencies
 ---------------------
diff --git a/docs/requirements.txt b/docs/requirements.txt
index d1a64f19..e8926d1d 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,54 +1,60 @@
+xarray==2024.2.0
+scipy>=1.5
+netcdf4>=1.5.4
+dask>=2021.9.1
+# toolz>=0.8.2
+erddapy>=0.8.0
+fsspec>=0.7.4 #, <2022.3.1
+gsw>=3.4.0
+aiohttp>=3.7.4
+# bottleneck>=1.3.2
+s3fs
+boto3
+cftime # >=1.5.1
+cfgrib # >=0.9.9.1
+decorator
+distributed>=2.30.0
+ipython<=8.26.0
+matplotlib>=3.4.0
+numpy==1.26.4
+pandas<3.0
+# packaging
 pip>=23.1
-# pkgconfig
+# pytest
+# seaborn>=0.11.0
+# setuptools
+zarr>=2.4.0
+tqdm>=4.50.2
 ipykernel>=6.22
-ipython<=8.26.0
 ipywidgets>=8.0
-urllib3<2
-
 nbsphinx>=0.9
-numpydoc>=1.1.0
 jinja2>=3.0.2
 sphinx_issues>=1.2.0
-gitpython>=3.1.2
-sphinx-autosummary-accessors>=0.1.2
-pydata-sphinx-theme>=0.4.3
-readthedocs-sphinx-ext
 sphinx-rtd-theme
 sphinx-book-theme
 # sphinx-bootstrap-theme
+pydata-sphinx-theme>=0.4.3
+Sphinx
+numpydoc>=1.1.0
+readthedocs-sphinx-ext
+sphinx-autosummary-accessors>=0.1.2
+pydocstyle
 sphinx-tabs
+sphinxext-rediraffe
+# sphinxext-opengraph
 git+https://github.com/sphinx-contrib/googleanalytics
 sphinx-copybutton
 sphinx-design
-sphinxext-rediraffe
 
-xarray>=0.16.1,<2024.3
-pandas<3.0
-scipy>=1.5
-scikit-learn>=0.23.2
-dask>=2021.9.1
-distributed>=2.30.0
-# bottleneck>=1.3.2
-# toolz>=0.8.2
 
-erddapy>=0.7.2
-gsw>=3.4.0
+# pkgconfig
+# urllib3<2
 
-s3fs
-boto3
-decorator
+# gitpython>=3.1.2
 
-fsspec>=0.7.4 #, <2022.3.1
-aiohttp>=3.7.4
+# scikit-learn>=0.23.2
 
-matplotlib>=3.4.0
 #cartopy>=0.18.0
-#seaborn>=0.11.0
 #geos>=0.2
 # cmocean
 
-zarr>=2.4.0
-netcdf4>=1.5.4
-cftime>=1.5.1
-cfgrib>=0.9.9.1
-tqdm>=4.50.2
diff --git a/docs/user-guide/working-with-argo-data/owc_workflow_eg.py b/docs/user-guide/working-with-argo-data/owc_workflow_eg.py
index 47f9dfda..e74bb0a0 100644
--- a/docs/user-guide/working-with-argo-data/owc_workflow_eg.py
+++ b/docs/user-guide/working-with-argo-data/owc_workflow_eg.py
@@ -1,4 +1,5 @@
-import os, shutil
+import os
+import shutil
 from pathlib import Path
 
 import pyowc as owc
diff --git a/docs/whats-new.rst b/docs/whats-new.rst
index 9d2665a0..4fb6404d 100644
--- a/docs/whats-new.rst
+++ b/docs/whats-new.rst
@@ -8,12 +8,12 @@ What's New
 |pypi dwn| |conda dwn|
 
 
-Coming up next
---------------
+v0.1.16 (xx Aug. 2024)
+----------------------
 
 **Features and front-end API**
 
-- **Support for AWS S3 index files**. This support is experimental and is primarily made available for benchmarking as part of the `ADMT working group on Argo cloud format activities <https://github.com/OneArgo/ADMT/issues/5>`_. The `ADMT working group discussion items are listed here <https://github.com/OneArgo/ADMT/discussions/categories/wg-on-best-format-to-serve-argo-data-from-the-cloud>`_. Both CORE and BGC index files are supported. The new :class:`ArgoIndex` not only support access to the AWS S3 index files but also implement improved performances for search methods on WMO and cycle numbers, using :class:`boto3.client.select_object_content` SQL queries. Indeed, the ``https`` and ``ftp`` default GDAC server index files are downloaded and loaded in memory before being searched. With ``s3``, index files can directly be queried on the server using SQL syntax; the full index is not necessarily downloaded. (:pr:`326`) by `G. Maze <http://www.github.com/gmaze>`_
+- **Support for AWS S3 index files**. This support is experimental and is primarily made available for benchmarking as part of the `ADMT working group on Argo cloud format activities <https://github.com/OneArgo/ADMT/issues/5>`_. The `ADMT working group discussion items are listed here <https://github.com/OneArgo/ADMT/discussions/categories/wg-on-best-format-to-serve-argo-data-from-the-cloud>`_. Both CORE and BGC index files are supported. The new :class:`ArgoIndex` not only supports access to the AWS S3 index files but also implements improved performance for search methods on WMO and cycle numbers, using :class:`boto3.client.select_object_content` SQL queries. With the default ``https`` and ``ftp`` GDAC servers, index files are downloaded and loaded in memory before being searched; with ``s3``, index files can be queried directly on the server using SQL syntax, so the full index does not necessarily have to be downloaded. (:pr:`326`) by `G. Maze <http://www.github.com/gmaze>`_
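+
+A rough, hedged sketch of the kind of server-side SQL filtering this relies on, calling ``boto3``'s ``select_object_content`` directly (the bucket name, key and column filter below are illustrative assumptions, not the actual GDAC S3 layout handled by :class:`ArgoIndex`):
+
+.. code-block:: python
+
+    import boto3
+
+    s3 = boto3.client("s3")
+    resp = s3.select_object_content(
+        Bucket="some-argo-gdac-bucket",        # hypothetical bucket name
+        Key="ar_index_global_prof.txt.gz",     # hypothetical index file key
+        ExpressionType="SQL",
+        # Illustrative filter: keep index rows whose file path contains a given WMO
+        Expression="SELECT * FROM s3object s WHERE s._1 LIKE '%/6902746/%'",
+        InputSerialization={"CSV": {"FieldDelimiter": ","}, "CompressionType": "GZIP"},
+        OutputSerialization={"CSV": {}},
+    )
+    # Matching rows are streamed back in chunks, so the full index file
+    # never has to be downloaded locally.
+    rows = b"".join(
+        event["Records"]["Payload"] for event in resp["Payload"] if "Records" in event
+    )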
 
 .. code-block:: python
 
@@ -35,10 +35,12 @@ Coming up next
 
 **Internals**
 
-- Update Ifremer erddap server information. The Argo reference for DMQC (returned by the :class:`DataFetcher` fetcher with ``ds='ref'`` argument ) and Argo CTD-reference for DQMC (returned by the :class:`CTDRefDataFetcher` fetcher) now indicate the dataset version used. (:pr:`344`) by `G. Maze <http://www.github.com/gmaze>`_.
+- Drop support for Python 3.8, add support for Python 3.10. (:pr:`379`) by `G. Maze <http://www.github.com/gmaze>`_
 
 - Update :class:`argopy.ArgoNVSReferenceTables` to handle new NVS server output format. (:pr:`378`) by `G. Maze <http://www.github.com/gmaze>`_.
 
+- Update Ifremer erddap server information. The Argo reference for DMQC (returned by the :class:`DataFetcher` fetcher with the ``ds='ref'`` argument) and the Argo CTD-reference for DMQC (returned by the :class:`CTDRefDataFetcher` fetcher) now indicate the dataset version used. (:pr:`344`) by `G. Maze <http://www.github.com/gmaze>`_.
+
 - Pin upper bound on xarray < 2024.3 to fix failing upstream tests because of ``AttributeError: 'ScipyArrayWrapper' object has no attribute 'oindex'``, `reported here <https://github.com/pydata/xarray/issues/8909>`_. (:pr:`326`) by `G. Maze <http://www.github.com/gmaze>`_
 
 - Fix :class:`argopy.ArgoDocs` that was not working with new Archimer webpage design, :issue:`351`. (:pr:`352`) by `G. Maze <http://www.github.com/gmaze>`_.
@@ -55,7 +57,6 @@ Coming up next
 
 - Drop support for erddapy < v0.8.0 (:pr:`344`) by `G. Maze <http://www.github.com/gmaze>`_.
 
-
 v0.1.15 (12 Dec. 2023)
 ----------------------
 
@@ -179,7 +180,7 @@ v0.1.14 (29 Sep. 2023)
 
 - New utility class :class:`utils.MonitoredThreadPoolExecutor` to handle parallelization with a multi-threading Pool that provide a notebook or terminal computation progress dashboard. This class is used by the httpstore open_mfdataset method for erddap requests.
 
-- New utilites to handle a collection of datasets: :func:`utils.drop_variables_not_in_all_datasets` will drop variables that are not in all datasets (the lowest common denominator) and :func:`utils.fill_variables_not_in_all_datasets` will add empty variables to dataset so that all the collection have the same data_vars and coords. These functions are used by stores to concat/merge a collection of datasets (chunks).
+- New utilities to handle a collection of datasets: :func:`utils.drop_variables_not_in_all_datasets` will drop variables that are not in all datasets (the lowest common denominator) and :func:`utils.fill_variables_not_in_all_datasets` will add empty variables to datasets so that all members of the collection have the same data_vars and coords. These functions are used by stores to concat/merge a collection of datasets (chunks).
 
 - :func:`related.load_dict` now relies on :class:`ArgoNVSReferenceTables` instead of static pickle files.
 
@@ -294,7 +295,7 @@ v0.1.14rc2 (27 Jul. 2023)
 
 - New utility class :class:`utils.MonitoredThreadPoolExecutor` to handle parallelization with a multi-threading Pool that provide a notebook or terminal computation progress dashboard. This class is used by the httpstore open_mfdataset method for erddap requests.
 
-- New utilites to handle a collection of datasets: :func:`utils.drop_variables_not_in_all_datasets` will drop variables that are not in all datasets (the lowest common denominator) and :func:`utils.fill_variables_not_in_all_datasets` will add empty variables to dataset so that all the collection have the same data_vars and coords. These functions are used by stores to concat/merge a collection of datasets (chunks).
+- New utilities to handle a collection of datasets: :func:`utils.drop_variables_not_in_all_datasets` will drop variables that are not in all datasets (the lowest common denominator) and :func:`utils.fill_variables_not_in_all_datasets` will add empty variables to datasets so that all members of the collection have the same data_vars and coords. These functions are used by stores to concat/merge a collection of datasets (chunks).
 
 - :func:`related.load_dict` now relies on :class:`ArgoNVSReferenceTables` instead of static pickle files.
 
diff --git a/docs/why.rst b/docs/why.rst
index 315671bb..a24c7803 100644
--- a/docs/why.rst
+++ b/docs/why.rst
@@ -6,7 +6,7 @@ Why argopy ?
 Surprisingly, the Argo community never provided its user base with a Python software to easily access and manipulate Argo measurements:
 **argopy** aims to fill this gap.
 
-Despite, or because, its tremendous success in data management and in developping good practices and well calibrated procedures [ADMT]_, the Argo dataset is very complex: with thousands of different variables, tens of reference tables and a `user manual <http://dx.doi.org/10.13155/29825>`_ more than 100 pages long:
+Despite, or because of, its tremendous success in data management and in developing good practices and well-calibrated procedures [ADMT]_, the Argo dataset is very complex: with thousands of different variables, tens of reference tables and a `user manual <http://dx.doi.org/10.13155/29825>`_ more than 100 pages long:
 **argopy** aims to help you navigate this complex realm.
 
 For non-experts of the Argo dataset, it has become rather complicated to get access to Argo measurements.
diff --git a/readthedocs.yml b/readthedocs.yml
index 269de29c..dc94528c 100644
--- a/readthedocs.yml
+++ b/readthedocs.yml
@@ -12,7 +12,7 @@ build:
 #    image: latest
     os: "ubuntu-22.04"
     tools:
-        python: "3.8"
+        python: "3.9"
 
 python:
    install:
diff --git a/setup.py b/setup.py
index 5187feae..4fb7aaa1 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
 
 setuptools.setup(
     name="argopy",
-    version="0.1.15",
+    version="0.1.16",
     author="argopy Developers",
     author_email="gmaze@ifremer.fr",
     description="A python library for Argo data beginners and experts",
@@ -22,7 +22,6 @@
     package_data={"argopy": ["static/assets/*", "static/css/*"]},
     install_requires=requirements,
     classifiers=[
-        "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         "Topic :: Scientific/Engineering",