diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..b1d1ed0
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,36 @@
+name: Tests
+
+on:
+  pull_request:
+  push:
+    branches: [master,main]
+
+jobs:
+  run:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        os: [windows-latest, ubuntu-latest, macos-latest]
+      fail-fast: false
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Setup Micromamba
+      uses: mamba-org/provision-with-micromamba@main
+      with:
+        environment-file: false
+
+    - name: Python ${{ matrix.python-version }}
+      shell: bash -l {0}
+      run: |
+        micromamba create --name TEST python=${{ matrix.python-version }} --file requirements.txt --file requirements-dev.txt --channel conda-forge
+        micromamba activate TEST
+        python -m pip install -e . --no-deps --force-reinstall
+
+    - name: Tests
+      shell: bash -l {0}
+      run: |
+        micromamba activate TEST
+        python -m pytest -rxs tests
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index d994cf1..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-language: python
-
-sudo: false
-
-env:
-  - CONDA="python=2.7"
-  - CONDA="python=3.4"
-  - CONDA="python=3.5"
-
-before_install:
-  - wget http://bit.ly/miniconda -O miniconda.sh
-  - bash miniconda.sh -b -p $HOME/miniconda
-  - export PATH="$HOME/miniconda/bin:$PATH"
-  - conda config --set always_yes yes --set changeps1 no
-  - conda update conda
-  - conda config --add channels conda-forge --force
-  - travis_retry conda create --name TEST $CONDA --file requirements.txt --file requirements-dev.txt
-  - source activate TEST
-  - conda info
-
-install:
-  # Test source distribution.
-  - python setup.py sdist && version=$(python setup.py --version) && pushd dist && pip install yaml2ncml-${version}.tar.gz && popd
-
-script:
-  # Test must be run inside the `test` directory because the yaml2ncml expect the data path to be relative.
-  - find . -type f -name "*.py" | xargs flake8 --max-line-length=100
-  - cd tests && py.test
diff --git a/LICENSE.txt b/LICENSE.txt
index b71aa47..806e1cf 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
-
diff --git a/README.rst b/README.rst
index 861b9e1..de40982 100644
--- a/README.rst
+++ b/README.rst
@@ -36,7 +36,7 @@ roms.yaml
 
     title: "USGS-CMG-COAWST Model: CBLAST2007 Ripples with SWAN-40m res"
     summary: "Simulation of hydrodynamics and bottom stress south of Marthas Vineyard, MA using the COAWST modeling system. These results are from the 40m inner nest of a four-level nested simulation."
-    
+
     project:
     - CMG_Portal
     - Sandy_Portal
@@ -85,9 +85,9 @@ roms.yaml
     pattern: .*test_nc4_[0-9]{4}\.nc$
 
 
-Notes on the YAML file: 
+Notes on the YAML file:
 
-1. The aggregation `dir:` is the directory where the data (e.g. NetCDF files) are located, relative to the directory where the NcML will be. In the above example, the NetCDF files are located in a subdirectory called "Output". If the NetCDF files will be in the same directory as the NcML file, specify `dir: '.'`.
+1. The aggregation `dir:` is the directory where the data (e.g. NetCDF files) are located, relative to the directory where the NcML will be. In the above example, the NetCDF files are located in a subdirectory called "Output". If the NetCDF files will be in the same directory as the NcML file, specify `dir: '.'`.
 
 
 2. Specify that all variables should appear in the aggregation (none excluded) like this:
diff --git a/requirements.txt b/requirements.txt
index 243e56e..525b7c5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
 docopt
 netCDF4
 ruamel.yaml
-six
diff --git a/setup.py b/setup.py
index 5a46928..44b82c4 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
 import os
 import sys
+
 from setuptools import setup
 from setuptools.command.test import test as TestCommand
 
@@ -7,11 +8,12 @@ class PyTest(TestCommand):
     def finalize_options(self):
         TestCommand.finalize_options(self)
-        self.test_args = ['--verbose']
+        self.test_args = ["--verbose"]
         self.test_suite = True
 
     def run_tests(self):
        import pytest
+
        errno = pytest.main(self.test_args)
        sys.exit(errno)
 
@@ -19,11 +21,11 @@ def run_tests(self):
 def extract_version():
     version = None
     fdir = os.path.dirname(__file__)
-    fnme = os.path.join(fdir, 'yaml2ncml', '__init__.py')
+    fnme = os.path.join(fdir, "yaml2ncml", "__init__.py")
     with open(fnme) as fd:
         for line in fd:
-            if (line.startswith('__version__')):
-                _, version = line.split('=')
+            if line.startswith("__version__"):
+                _, version = line.split("=")
                 version = version.strip()[1:-1]
                 break
     return version
@@ -33,35 +35,37 @@ def extract_version():
 
 
 def read(*parts):
-    return open(os.path.join(rootpath, *parts), 'r').read()
+    return open(os.path.join(rootpath, *parts)).read()
 
 
-long_description = '{}\n{}'.format(read('README.rst'), read('CHANGES.txt'))
-LICENSE = read('LICENSE.txt')
+long_description = "{}\n{}".format(read("README.rst"), read("CHANGES.txt"))
+LICENSE = read("LICENSE.txt")
 
-with open('requirements.txt') as f:
+with open("requirements.txt") as f:
     require = f.readlines()
 install_requires = [r.strip() for r in require]
 
-setup(name='yaml2ncml',
-      version=extract_version(),
-      packages=['yaml2ncml'],
-      license=LICENSE,
-      description='ncML aggregation from YAML specifications',
-      long_description=long_description,
-      author='Rich Signell',
-      author_email='rsignell@usgs.gov',
-      install_requires=install_requires,
-      entry_points=dict(console_scripts=[
-          'yaml2ncml = yaml2ncml.yaml2ncml:main']
-      ),
-      url='https://github.com/rsignell-usgs/yaml2ncml',
-      keywords=['YAML', 'ncml'],
-      classifiers=['Development Status :: 4 - Beta',
-                   'Programming Language :: Python :: 2.7',
-                   'Programming Language :: Python :: 3.4',
-                   'Programming Language :: Python :: 3.5',
-                   'License :: OSI Approved :: MIT License'],
-      tests_require=['pytest'],
-      cmdclass=dict(test=PyTest),
-      zip_safe=False)
+setup(
+    name="yaml2ncml",
+    version=extract_version(),
+    packages=["yaml2ncml"],
+    license=LICENSE,
+    description="ncML aggregation from YAML specifications",
+    long_description=long_description,
+    author="Rich Signell",
+    author_email="rsignell@usgs.gov",
+    install_requires=install_requires,
+    entry_points=dict(console_scripts=["yaml2ncml = yaml2ncml.yaml2ncml:main"]),
+    url="https://github.com/rsignell-usgs/yaml2ncml",
+    keywords=["YAML", "ncml"],
+    classifiers=[
+        "Development Status :: 4 - Beta",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "License :: OSI Approved :: MIT License",
+    ],
+    tests_require=["pytest"],
+    cmdclass=dict(test=PyTest),
+    zip_safe=False,
+)
diff --git a/tests/base_roms_test.ncml b/tests/base_roms_test.ncml
index 2272142..b0cadbf 100644
--- a/tests/base_roms_test.ncml
+++ b/tests/base_roms_test.ncml
@@ -547,6 +547,6 @@
-
+
diff --git a/tests/roms_0.yaml b/tests/roms_0.yaml
index fb75531..cc07ae9 100644
--- a/tests/roms_0.yaml
+++ b/tests/roms_0.yaml
@@ -50,6 +50,6 @@ variables:
 
 aggregation:
   time_var: ocean_time
-  dir: data/
+  dir: tests/data/
   sample_file: test_nc4_0001.nc
   pattern: .*test_nc4_[0-9]{4}\.nc$
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 4c7588d..19ef85f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,7 +1,4 @@
-from __future__ import (absolute_import, division, print_function)
-
 import pytest
-from docopt import DocoptExit
 from docopt import docopt
 
 from yaml2ncml import yaml2ncml
@@ -10,18 +7,18 @@
 
 
 def test_noarg_call():
-    with pytest.raises(DocoptExit):
+    with pytest.raises(SystemExit):
         yaml2ncml.main()
 
 
 def test_mandatory_arg():
-    fin = 'roms.yaml'
+    fin = "roms.yaml"
     args = docopt(__doc__, [fin])
-    assert args['INFILE'] == fin
+    assert args["INFILE"] == fin
 
 
 def test_optional_arg():
-    fin = 'test6.ncml'
-    fout = '--output=test6.ncml'
+    fin = "test6.ncml"
+    fout = "--output=test6.ncml"
     args = docopt(__doc__, [fin, fout])
-    assert args['--output'] == fout.split('=')[1]
+    assert args["--output"] == fout.split("=")[1]
diff --git a/tests/test_yaml2ncml.py b/tests/test_yaml2ncml.py
index 1c6b7cf..551ff20 100644
--- a/tests/test_yaml2ncml.py
+++ b/tests/test_yaml2ncml.py
@@ -1,27 +1,29 @@
-from __future__ import (absolute_import, division, print_function)
-
 import subprocess
 import tempfile
+from pathlib import Path
 
 import pytest
 import ruamel.yaml as yaml
 
 from yaml2ncml import build
 
+path = Path(__file__).parent.resolve()
+
 
 def test_call():
-    output = subprocess.check_output(['yaml2ncml', 'roms_0.yaml'])
-    with open('base_roms_test.ncml') as f:
-        expected = f.read()
-    assert output.decode() == expected
+    fname = str(path.joinpath("roms_0.yaml"))
+    output = subprocess.check_output(["yaml2ncml", fname]).decode()
+    output = [line.strip() for line in output.splitlines()]
+    with path.joinpath("base_roms_test.ncml").open() as f:
+        expected = [line.strip() for line in f.read().splitlines()]
+    assert output == expected
 
 
 def test_save_file():
-    outfile = tempfile.mktemp(suffix='.ncml')
-    subprocess.call(['yaml2ncml',
-                     'roms_0.yaml',
-                     '--output={}'.format(outfile)])
-    with open('base_roms_test.ncml') as f:
+    outfile = tempfile.mktemp(suffix=".ncml")
+    fname = str(path.joinpath("roms_0.yaml"))
+    subprocess.call(["yaml2ncml", fname, f"--output={outfile}"])
+    with path.joinpath("base_roms_test.ncml").open() as f:
         expected = f.read()
     with open(outfile) as f:
         output = f.read()
@@ -29,13 +31,11 @@
 
 
 @pytest.fixture
-def load_ymal(fname='roms_1.yaml'):
-    with open(fname, 'r') as stream:
-        yml = yaml.load(stream, Loader=yaml.RoundTripLoader)
-    return yml
+def load_ymal(fname=path.joinpath("roms_1.yaml")):
+    with open(fname) as stream:
+        yield yaml.load(stream, Loader=yaml.RoundTripLoader)
 
 
-def test_bad_yaml():
+def test_bad_yaml(load_ymal):
     with pytest.raises(ValueError):
-        yml = load_ymal(fname='roms_1.yaml')
-        build(yml)
+        build(load_ymal)
diff --git a/yaml2ncml/__init__.py b/yaml2ncml/__init__.py
index c3b64ab..7ea2982 100644
--- a/yaml2ncml/__init__.py
+++ b/yaml2ncml/__init__.py
@@ -1,11 +1,5 @@
-from __future__ import (absolute_import, division, print_function)
+from yaml2ncml.yaml2ncml import build, main
 
-from yaml2ncml.yaml2ncml import main, build
+__version__ = "0.7.3"
 
-
-__version__ = '0.7.2'
-
-__all__ = [
-    'main',
-    'build'
-    ]
+__all__ = ["main", "build"]
\ No newline at end of file
diff --git a/yaml2ncml/yaml2ncml.py b/yaml2ncml/yaml2ncml.py
index 9e85ccd..8410d31 100644
--- a/yaml2ncml/yaml2ncml.py
+++ b/yaml2ncml/yaml2ncml.py
@@ -1,18 +1,11 @@
-from __future__ import (absolute_import, division, print_function)
-
-import os
 import sys
+from pathlib import Path
 
 import netCDF4
 import ruamel.yaml as yaml
-
-from six import raise_from
 from docopt import docopt
 
-__all__ = [
-    'main',
-    'build'
-    ]
+__all__ = ["main", "build"]
 
 __doc__ = """
 Generate ncml based on a yaml file.
@@ -38,9 +31,9 @@ def str_att(name, value):
     if isinstance(value, list):
         try:
-            value = ','.join(value)
+            value = ",".join(value)
         except TypeError as e:
-            raise_from(ValueError('Expected `str` got {!r}'.format(value)), e)
+            raise ValueError(f"Expected `str` got {value}") from e
     msg = '  \n'
     return msg.format(name, value)
 
@@ -48,68 +41,75 @@ def str_att(name, value):
 def header():
     text = '\n\n'
-    text += str_att('Conventions', 'CF-1.6, SGRID-0.1, ACDD-1.3')
-    text += str_att('cdm_data_type', 'Grid')
+    text += str_att("Conventions", "CF-1.6, SGRID-0.1, ACDD-1.3")
+    text += str_att("cdm_data_type", "Grid")
     return text
 
 
 def footer(text):
-    text += '\n'
+    text += "\n"
     return text
 
 
 def add_global_atts(text, a):
-    d = a['dataset']
+    d = a["dataset"]
     for key, value in d.items():
         # Handle simple attribute pairs first.
-        if key in ['id', 'license', 'summary', 'title', 'project',
-                   'naming_authority', 'references',
-                   'acknowledgment','keywords_vocabulary']:
+        if key in [
+            "id",
+            "license",
+            "summary",
+            "title",
+            "project",
+            "naming_authority",
+            "references",
+            "acknowledgment",
+            "keywords_vocabulary",
+        ]:
             text += str_att(key, value)
-        elif key in ['creator', 'publisher']:
+        elif key in ["creator", "publisher"]:
             email = value.get("email", None)
             if email:
-                text += str_att('_'.join([key, 'email']), email)
+                text += str_att("_".join([key, "email"]), email)
             url = value.get("url", None)
             if url:
-                text += str_att('_'.join([key, 'url']), url)
+                text += str_att("_".join([key, "url"]), url)
             name = value.get("name", None)
             if name:
-                text += str_att('_'.join([key, 'name']), name)
+                text += str_att("_".join([key, "name"]), name)
             institution = value.get("institution", None)
             if institution:
-                text += str_att(key, institution)
-        elif key in ['contributor']:
+                text += str_att("_".join([key, "institution"]), institution)
+        elif key in ["contributor"]:
             role = value.get("role", None)
-            text += str_att('_'.join([key, 'role']), role)
+            text += str_att("_".join([key, "role"]), role)
             email = value.get("email", None)
             if email:
-                text += str_att('_'.join([key, 'email']), email)
+                text += str_att("_".join([key, "email"]), email)
             url = value.get("url", None)
             if url:
-                text += str_att('_'.join([key, 'url']), url)
+                text += str_att("_".join([key, "url"]), url)
             name = value.get("name", None)
             if name:
-                text += str_att('_'.join([key, 'name']), name)
+                text += str_att("_".join([key, "name"]), name)
             institution = value.get("institution", None)
             if institution:
-                text += str_att('_'.join([key, 'institution']), institution)
-        elif key in ['date']:
+                text += str_att("_".join([key, "institution"]), institution)
+        elif key in ["date"]:
             created = value.get("created", None)
             if created:
-                text += str_att('_'.join([key, 'creation']), created)
+                text += str_att("_".join([key, "creation"]), created)
             modified = value.get("modified", None)
             if modified:
-                text += str_att('_'.join([key, 'revision']), modified)
+                text += str_att("_".join([key, "revision"]), modified)
             issued = value.get("issued", None)
             if issued:
-                text += str_att('_'.join([key, 'publication']), issued)
+                text += str_att("_".join([key, "publication"]), issued)
     return text
 
 
 def add_bed_coord(text, a):
-    ncfile = os.path.join(a['aggregation']['dir'],
-                          a['aggregation']['sample_file'])
+    ncfile = Path(a["aggregation"]["dir"], a["aggregation"]["sample_file"])
     nc = netCDF4.Dataset(ncfile)
     bed_coord_var = """
@@ -118,43 +118,50 @@ def add_bed_coord(text, a):
 \n    """
-    if 'Nbed' in nc.dimensions.keys():
+    if "Nbed" in nc.dimensions.keys():
         text += bed_coord_var
     return text
 
 
 def add_var_atts(text, a):
-    ncfile = os.path.join(a['aggregation']['dir'],
-                          a['aggregation']['sample_file'])
+    ncfile = Path(a["aggregation"]["dir"], a["aggregation"]["sample_file"])
     nc = netCDF4.Dataset(ncfile)
     ncv = nc.variables
 
     # Get a list of all variables more than 1D.
     vars = [var for var, vart in ncv.items() if vart.ndim > 1]
+    # identify all the rho, u and v vars
 
-# identify all the rho, u and v vars
-
-    rho_vars = [var for var in vars if 'eta_rho' in
-                ncv[var].dimensions and 'xi_rho' in ncv[var].dimensions]
-    u_vars = [var for var in vars if 'eta_u' in
-              ncv[var].dimensions and 'xi_u' in ncv[var].dimensions]
-    v_vars = [var for var in vars if 'eta_v' in
-              ncv[var].dimensions and 'xi_v' in ncv[var].dimensions]
+    rho_vars = [
+        var
+        for var in vars
+        if "eta_rho" in ncv[var].dimensions and "xi_rho" in ncv[var].dimensions
+    ]
+    u_vars = [
+        var
+        for var in vars
+        if "eta_u" in ncv[var].dimensions and "xi_u" in ncv[var].dimensions
+    ]
+    v_vars = [
+        var
+        for var in vars
+        if "eta_v" in ncv[var].dimensions and "xi_v" in ncv[var].dimensions
+    ]
 
     vars_all = set(vars)
-    vars_include = set(a['variables']['include'])
-    vars_exclude = set(a['variables']['exclude'])
+    vars_include = set(a["variables"]["include"])
+    vars_exclude = set(a["variables"]["exclude"])
 
-# include/exclude only variables that actually occur in variable list
+    # include/exclude only variables that actually occur in variable list
 
     vars_include = vars_all.intersection(vars_include)
     vars_exclude = vars_all.intersection(vars_exclude)
 
-# If there are variables excluded, exclude them and keep all rest.
-# If no variables are excluded, take just the included variables
-# If no variables are included or excluded, take all variables (leave
-# list of variables unchanged)
+    # If there are variables excluded, exclude them and keep all rest.
+    # If no variables are excluded, take just the included variables
+    # If no variables are included or excluded, take all variables (leave
+    # list of variables unchanged)
 
     if vars_exclude:
         vars_display = vars_all - vars_all.intersection(vars_exclude)
@@ -164,46 +171,46 @@ def add_var_atts(text, a):
     else:
         vars_display = vars_all
 
-# remove some variables we never want (if they exist)
-    Tobc = set(['Tobc_in', 'Tobc_out'])
+    # remove some variables we never want (if they exist)
+    Tobc = {"Tobc_in", "Tobc_out"}
     vars_display = vars_display - vars_display.intersection(Tobc)
     vars_display = list(vars_display)
 
-# add the variable attributes: S-grid stuff, display=T|F, ...
+    # add the variable attributes: S-grid stuff, display=T|F, ...
     for var in vars:
-        text += '\n'.format(var)
+        text += f'\n'
        try:
-            text += str_att('standard_name', cf[var])
-        except:
+            text += str_att("standard_name", cf[var])
+        except Exception:
             pass
-        text += str_att('grid', 'grid')
+        text += str_att("grid", "grid")
 
-        if 'Nbed' in ncv[var].dimensions:
-            text += str_att('coordinates', ncv[var].coordinates+' Nbed')
+        if "Nbed" in ncv[var].dimensions:
+            text += str_att("coordinates", ncv[var].coordinates + " Nbed")
 
         if var in vars_display:
-            text += str_att('display', 'True')
+            text += str_att("display", "True")
         else:
-            text += str_att('display', 'False')
+            text += str_att("display", "False")
 
-        text += str_att('coverage_content_type', 'modelResult')
+        text += str_att("coverage_content_type", "modelResult")
 
         if var in rho_vars:
-            text += str_att('location', 'face')
+            text += str_att("location", "face")
         elif var in u_vars:
-            text += str_att('location', 'edge1')
+            text += str_att("location", "edge1")
         elif var in v_vars:
-            text += str_att('location', 'edge2')
-        text += '\n\n'
+            text += str_att("location", "edge2")
+        text += "\n\n"
 
-# write standard_name for time coordinate variable
-    var = 'ocean_time'
+    # write standard_name for time coordinate variable
+    var = "ocean_time"
     if var in ncv.keys():
         try:
-            text += '\n\n'.format(var)
-            text += str_att('standard_name', cf[var])
-            text += '\n\n'
-        except:
+            text += f'\n\n'
+            text += str_att("standard_name", cf[var])
+            text += "\n\n"
+        except Exception:
             pass
 
     nc.close()
@@ -230,25 +237,28 @@ def write_grid_var(text):
 
 
 def add_aggregation_scan(text, a):
-    agg = a['aggregation']
-    text += '\n'.format(
-        agg['time_var'])
-    text += '\n\n'\
-        .format(agg['dir'], agg['pattern'])  # noqa
+    agg = a["aggregation"]
+    text += '\n'.format(agg["time_var"])
+    text += '\n\n'.format(
+        agg["dir"],
+        agg["pattern"],
+    )  # noqa
     return text
 
 
-# Map ROMS variables to CF standard_names.
-cf = dict(ocean_time='time',
-          zeta='sea_surface_height_above_geopotential_datum',
-          temp='sea_water_potential_temperature',
-          salt='sea_water_salinity',
-          u='sea_water_x_velocity',
-          v='sea_water_y_velocity',
-          ubar='barotropic_sea_water_x_velocity',
-          vbar='barotropic_sea_water_y_velocity',
-          Hwave='sea_surface_wave_significant_height',
-          bed_thickness='sediment_bed_thickness') #sediment_bed_thickness not in CF standard_names
+# Map ROMS variables to CF standard_names.
+cf = {
+    "ocean_time": "time",
+    "zeta": "sea_surface_height_above_geopotential_datum",
+    "temp": "sea_water_potential_temperature",
+    "salt": "sea_water_salinity",
+    "u": "sea_water_x_velocity",
+    "v": "sea_water_y_velocity",
+    "ubar": "barotropic_sea_water_x_velocity",
+    "vbar": "barotropic_sea_water_y_velocity",
+    "Hwave": "sea_surface_wave_significant_height",
+    "bed_thickness": "sediment_bed_thickness",
+}  # sediment_bed_thickness not in CF standard_names
 
 
 def build(yml):
@@ -263,17 +273,17 @@ def build(yml):
 
 
 def main():
-    args = docopt(__doc__, version='0.6.0')
-    fname = args.get('INFILE')
-    fout = args.get('--output', None)
+    args = docopt(__doc__, version="0.6.0")
+    fname = args.get("INFILE")
+    fout = args.get("--output", None)
 
-    with open(fname, 'r') as stream:
+    with open(fname) as stream:
         yml = yaml.load(stream, Loader=yaml.RoundTripLoader)
 
     text = build(yml)
 
     if fout:
-        with open(fout, 'w') as f:
-            f.write("{:s}".format(text))
+        with open(fout, "w") as f:
+            f.write(f"{text:s}")
     else:
         sys.stdout.write(text)