Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
129 changes: 129 additions & 0 deletions xarray/backends/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,25 @@ def load_dataset(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> Dataset:
See Also
--------
open_dataset

Examples
--------
>>> import numpy as np
>>> ds = xr.Dataset({"a": (("x",), np.arange(3))})
>>> ds.to_netcdf("example_load_ds.nc")

>>> ds_loaded = xr.load_dataset("example_load_ds.nc")
>>> ds_loaded
<xarray.Dataset> ...
Dimensions: (x: 3)
Dimensions without coordinates: x
Data variables:
a (x) ...

Clean up the example file:
>>> ds_loaded.close()
>>> import os
>>> os.remove("example_load_ds.nc")
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
Expand All @@ -184,6 +203,23 @@ def load_dataarray(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataArra
See Also
--------
open_dataarray

Examples
--------
>>> import numpy as np
>>> da = xr.DataArray(np.arange(3), dims="x", name="a")
>>> da.to_netcdf("example_load_da.nc")

>>> da_loaded = xr.load_dataarray("example_load_da.nc")
>>> da_loaded
<xarray.DataArray 'a' (x: 3)> ...
...
Dimensions without coordinates: x

Clean up the example file:
>>> da_loaded.close()
>>> import os
>>> os.remove("example_load_da.nc")
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
Expand Down Expand Up @@ -575,6 +611,35 @@ class (a subclass of ``BackendEntrypoint``) can also be used.
See Also
--------
open_mfdataset
load_dataset
open_dataarray

Examples
--------
Open a dataset from a netCDF file. First, we create a dummy file for this example:

>>> import numpy as np
>>> ds = xr.Dataset({"a": (("x",), np.arange(3))})
>>> ds.to_netcdf("example.nc")

>>> ds_disk = xr.open_dataset("example.nc")
>>> ds_disk
<xarray.Dataset> ...
Dimensions: (x: 3)
Dimensions without coordinates: x
Data variables:
a (x) ...

Open a dataset from a remote OPeNDAP URL:

>>> ds = xr.open_dataset(
... "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc"
... ) # doctest: +SKIP

Clean up the example file:
>>> ds_disk.close()
>>> import os
>>> os.remove("example.nc")
"""

if cache is None:
Expand Down Expand Up @@ -808,6 +873,26 @@ class (a subclass of ``BackendEntrypoint``) can also be used.
See also
--------
open_dataset
load_dataarray

Examples
--------
Open a DataArray from a netCDF file. First, we create a dummy file:

>>> import numpy as np
>>> da = xr.DataArray(np.arange(3), dims="x", name="a")
>>> da.to_netcdf("example_da.nc")

>>> da_disk = xr.open_dataarray("example_da.nc")
>>> da_disk
<xarray.DataArray 'a' (x: 3)> ...
...
Dimensions without coordinates: x

Clean up the example file:
>>> da_disk.close()
>>> import os
>>> os.remove("example_da.nc")
"""

dataset = open_dataset(
Expand Down Expand Up @@ -1291,6 +1376,31 @@ def open_groups(
xarray.open_datatree
xarray.open_dataset
xarray.DataTree.from_dict

Examples
--------
Open groups from a netCDF file as a dictionary of Datasets. First, create a file with groups:

>>> import numpy as np
>>> ds1 = xr.Dataset({"a": (("x",), np.arange(3))})
>>> ds2 = xr.Dataset({"b": (("y",), np.arange(2))})
>>> ds1.to_netcdf("example_groups.nc", group="group1", mode="w") # doctest: +SKIP
>>> ds2.to_netcdf("example_groups.nc", group="group2", mode="a") # doctest: +SKIP

>>> groups = xr.open_groups("example_groups.nc") # doctest: +SKIP
>>> sorted(groups.keys()) # doctest: +SKIP
['group1', 'group2']
>>> groups["group1"] # doctest: +SKIP
<xarray.Dataset> ...
Dimensions: (x: 3)
Dimensions without coordinates: x
Data variables:
a (x) ...

Clean up the example file:
>>> for ds in groups.values():  # doctest: +SKIP
...     ds.close()
>>> import os # doctest: +SKIP
>>> os.remove("example_groups.nc") # doctest: +SKIP
"""
if cache is None:
cache = chunks is None
Expand Down Expand Up @@ -1554,6 +1664,25 @@ class (a subclass of ``BackendEntrypoint``) can also be used.

Examples
--------
>>> import numpy as np
>>> ds1 = xr.Dataset({"a": (("x",), np.arange(3))}, coords={"x": [0, 1, 2]})
>>> ds2 = xr.Dataset({"a": (("x",), np.arange(3))}, coords={"x": [3, 4, 5]})
>>> ds1.to_netcdf("example_mf_1.nc") # doctest: +SKIP
>>> ds2.to_netcdf("example_mf_2.nc") # doctest: +SKIP
>>> ds = xr.open_mfdataset(
... ["example_mf_1.nc", "example_mf_2.nc"], combine="by_coords"
... ) # doctest: +SKIP
>>> ds # doctest: +SKIP
<xarray.Dataset> ...
Dimensions: (x: 6)
Coordinates:
* x (x) ...
Data variables:
a (x) ...
>>> import os # doctest: +SKIP
>>> os.remove("example_mf_1.nc") # doctest: +SKIP
>>> os.remove("example_mf_2.nc") # doctest: +SKIP

A user might want to pass additional arguments into ``preprocess`` when
applying some operation to many individual files that are being opened. One route
to do this is through the use of ``functools.partial``.
Expand Down
4 changes: 2 additions & 2 deletions xarray/backends/writers.py
Original file line number Diff line number Diff line change
Expand Up @@ -561,13 +561,13 @@ def save_mfdataset(

>>> ds = xr.Dataset(
... {"a": ("time", np.linspace(0, 1, 48))},
... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
... coords={"time": pd.date_range("2010-01-01", freq="MS", periods=48)},
... )
>>> ds
<xarray.Dataset> Size: 768B
Dimensions: (time: 48)
Coordinates:
* time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
* time (time) datetime64[ns] 384B 2010-01-01 2010-02-01 ... 2013-12-01
Data variables:
a (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
>>> years, datasets = zip(*ds.groupby("time.year"))
Expand Down
14 changes: 14 additions & 0 deletions xarray/coding/frequencies.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,20 @@ def infer_freq(index):
If the index is not datetime-like.
ValueError
If there are fewer than three values or the index is not 1D.

See Also
--------
pandas.infer_freq

Examples
--------
>>> import pandas as pd
>>> times = pd.date_range("2000-01-01", periods=5, freq="D")
>>> xr.infer_freq(times)
'D'
>>> times = pd.date_range("2000-01-01", periods=5, freq="2h")
>>> xr.infer_freq(times)
'2h'
"""
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
Expand Down
10 changes: 10 additions & 0 deletions xarray/computation/computation.py
Original file line number Diff line number Diff line change
Expand Up @@ -821,6 +821,16 @@ def polyval(
--------
xarray.DataArray.polyfit
numpy.polynomial.polynomial.polyval

Examples
--------
>>> import numpy as np
>>> x = xr.DataArray(np.arange(5), dims="x")
>>> coeffs = xr.DataArray([1, 2, 3], dims="degree", coords={"degree": [0, 1, 2]})
>>> xr.polyval(x, coeffs)
<xarray.DataArray (x: 5)> ...
...
Dimensions without coordinates: x
"""

if degree_dim not in coeffs._indexes:
Expand Down
20 changes: 20 additions & 0 deletions xarray/conventions.py
Original file line number Diff line number Diff line change
Expand Up @@ -564,6 +564,26 @@ def decode_cf(
Returns
-------
decoded : Dataset

See Also
--------
encode_dataset_coordinates
decode_cf_variable

Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> ds = xr.Dataset({"temp": (("time",), np.arange(5))})
>>> ds["time"] = pd.date_range("2000-01-01", periods=5)
>>> decoded = xr.conventions.decode_cf(ds)
>>> decoded
<xarray.Dataset> ...
Dimensions: (time: 5)
Coordinates:
* time (time) ...
Data variables:
temp (time) ...
"""
from xarray.backends.common import AbstractDataStore
from xarray.core.dataset import Dataset
Expand Down
43 changes: 42 additions & 1 deletion xarray/core/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -787,6 +787,13 @@ def compute(self, **kwargs) -> Self:
Dataset.load_async
DataArray.compute
Variable.compute

Examples
--------
>>> ds = xr.Dataset({"a": (("x",), [1, 2, 3])}).chunk({"x": 1})
>>> computed = ds.compute()
>>> type(computed["a"].data)
<class 'numpy.ndarray'>
"""
new = self.copy(deep=False)
return new.load(**kwargs)
Expand Down Expand Up @@ -831,7 +838,13 @@ def persist(self, **kwargs) -> Self:
See Also
--------
dask.persist
"""

Examples
--------
>>> ds = xr.Dataset({"a": (("x",), [1, 2, 3])}).chunk({"x": 1})
>>> persisted = ds.persist()
>>> type(persisted["a"].data)
<class 'dask.array.core.Array'>"""
new = self.copy(deep=False)
return new._persist_inplace(**kwargs)

Expand Down Expand Up @@ -6185,6 +6198,34 @@ def drop_dims(
obj : Dataset
The dataset without the given dimensions (or any variables
containing those dimensions).

See Also
--------
Dataset.drop_vars
DataArray.drop_vars

Examples
--------
>>> ds = xr.Dataset(
... {"a": (("x", "y"), [[1, 2], [3, 4]]), "b": (("x",), [5, 6])},
... coords={"x": [0, 1], "y": [0, 1]},
... )
>>> ds
<xarray.Dataset> ...
Dimensions: (x: 2, y: 2)
Coordinates:
* x (x) ...
* y (y) ...
Data variables:
a (x, y) ...
b (x) ...
>>> ds.drop_dims("y")
<xarray.Dataset> ...
Dimensions: (x: 2)
Coordinates:
* x (x) ...
Data variables:
b (x) ...
"""
if errors not in ["raise", "ignore"]:
raise ValueError('errors must be either "raise" or "ignore"')
Expand Down
11 changes: 10 additions & 1 deletion xarray/core/options.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,8 +350,17 @@ def get_options():
Get options for xarray.

See Also
----------
--------
set_options

Examples
--------
>>> original_width = xr.get_options()["display_width"]
>>> with xr.set_options(display_width=original_width + 10):
... xr.get_options()["display_width"] == original_width + 10
...
True
>>> xr.get_options()["display_width"] == original_width
True
"""
return FrozenDict(OPTIONS)
15 changes: 15 additions & 0 deletions xarray/structure/chunks.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,21 @@ def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, ..
See Also
--------
dask.array.core.unify_chunks

Examples
--------
>>> import numpy as np # doctest: +SKIP
>>> da1 = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y")).chunk(
... {"x": 1}
... ) # doctest: +SKIP
>>> da2 = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y")).chunk(
... {"y": 1}
... ) # doctest: +SKIP
>>> da1, da2 = xr.unify_chunks(da1, da2) # doctest: +SKIP
>>> da1.chunks # doctest: +SKIP
((1, 1), (3,))
>>> da2.chunks # doctest: +SKIP
((1, 1), (3,))
"""
from xarray.core.dataarray import DataArray

Expand Down
Loading