diff --git a/cmip6_downscaling/analysis/analysis.py b/cmip6_downscaling/analysis/analysis.py
index 3683b138..fe1b61eb 100644
--- a/cmip6_downscaling/analysis/analysis.py
+++ b/cmip6_downscaling/analysis/analysis.py
@@ -82,7 +82,7 @@ def load_big_cities(
     if plot:
         ax = plt.axes(projection=ccrs.PlateCarree())
         ax.stock_img()
-        for (lat, lon) in big_cities[['lat', 'lng']].values:
+        for lat, lon in big_cities[['lat', 'lng']].values:
             plt.plot(
                 lon,
                 lat,
diff --git a/cmip6_downscaling/data/cmip.py b/cmip6_downscaling/data/cmip.py
index 97c16120..94dd560a 100644
--- a/cmip6_downscaling/data/cmip.py
+++ b/cmip6_downscaling/data/cmip.py
@@ -57,7 +57,6 @@ def postprocess(ds: xr.Dataset, to_standard_calendar: bool = True) -> xr.Dataset
         ds = ds.reindex({"lat": ds.lat[::-1]})
 
     if to_standard_calendar:
-
         # checks calendar
         ds = convert_to_standard_calendar(ds)
 
@@ -110,7 +109,6 @@ def load_cmip(
         Dataset or zarr group with CMIP data
     """
     with dask.config.set(**{'array.slicing.split_large_chunks': False}):
-
        col = intake.open_esm_datastore(config.get("data_catalog.cmip.json"))
 
        col_subset = col.search(
diff --git a/cmip6_downscaling/disagg/terraclimate.py b/cmip6_downscaling/disagg/terraclimate.py
index e16b9820..448d8c80 100644
--- a/cmip6_downscaling/disagg/terraclimate.py
+++ b/cmip6_downscaling/disagg/terraclimate.py
@@ -437,7 +437,6 @@ def model(
     soil_prev = awc
 
     for i, row in df.iterrows():
-
         radiation = row['srad'] * MGM2D_PER_WM2
 
         # run snow routine
diff --git a/cmip6_downscaling/disagg/wrapper.py b/cmip6_downscaling/disagg/wrapper.py
index 83b263c2..9f479c37 100644
--- a/cmip6_downscaling/disagg/wrapper.py
+++ b/cmip6_downscaling/disagg/wrapper.py
@@ -91,7 +91,6 @@ def run_terraclimate_model(ds_in: xr.Dataset) -> xr.Dataset:
     )
 
     with dask.config.set(scheduler='single-threaded'):
-
         for index, mask_val in np.ndenumerate(ds_in['mask'].values):
             if not mask_val:
                 # skip values outside the mask
diff --git a/cmip6_downscaling/methods/common/containers.py b/cmip6_downscaling/methods/common/containers.py
index a4877094..566f056f 100644
--- a/cmip6_downscaling/methods/common/containers.py
+++ b/cmip6_downscaling/methods/common/containers.py
@@ -43,7 +43,6 @@ def __str__(self) -> str:
 
 @dataclass
 class CMIP6Experiment:
-
     model: str
     scenario: str
     member: str
diff --git a/cmip6_downscaling/methods/common/utils.py b/cmip6_downscaling/methods/common/utils.py
index f95fc9a5..26411104 100644
--- a/cmip6_downscaling/methods/common/utils.py
+++ b/cmip6_downscaling/methods/common/utils.py
@@ -288,7 +288,6 @@ def resample_wrapper(ds, freq='1MS'):
 
 
 def set_zarr_encoding(ds: xr.Dataset):
-
     for da in ds.data_vars.values():
         da.encoding = {'compressor': zarr.Blosc(clevel=1)}
 
diff --git a/cmip6_downscaling/methods/gard/tasks.py b/cmip6_downscaling/methods/gard/tasks.py
index 041a956d..9a533085 100644
--- a/cmip6_downscaling/methods/gard/tasks.py
+++ b/cmip6_downscaling/methods/gard/tasks.py
@@ -70,7 +70,6 @@ def coarsen_and_interpolate(fine_path: UPath, coarse_path: UPath) -> UPath:
 
 
 def _fit_and_predict_wrapper(xtrain, ytrain, xpred, scrf, run_parameters, dim='time'):
-
     xpred = xpred.rename({'t2': 'time'})
     scrf = scrf.rename({'t2': 'time'})
     kws = default_none_kwargs(run_parameters.bias_correction_kwargs, copy=True)
diff --git a/cmip6_downscaling/methods/maca/core.py b/cmip6_downscaling/methods/maca/core.py
index f8c69f03..db0a5f74 100644
--- a/cmip6_downscaling/methods/maca/core.py
+++ b/cmip6_downscaling/methods/maca/core.py
@@ -321,7 +321,6 @@ def _make_template(da):
     # train a linear regression model for each day in coarsen GCM dataset, where the features are each coarsened observation
     # analogs, and examples are each pixels within the coarsened domain
     for i in range(len(y)):
-
         # get data from the GCM day being downscaled
         yi = y.isel(ndays_in_gcm=i)
         # get data from the coarsened obs analogs
diff --git a/cmip6_downscaling/runtimes.py b/cmip6_downscaling/runtimes.py
index b776dd69..a301d9b3 100644
--- a/cmip6_downscaling/runtimes.py
+++ b/cmip6_downscaling/runtimes.py
@@ -87,7 +87,6 @@ def run_config(self) -> RunConfig:
 
     @cached_property
     def executor(self) -> Executor:
-
         executor = DaskExecutor(
             cluster_kwargs={
                 'resources': {'taskslots': 1},
diff --git a/cmip6_downscaling/utils.py b/cmip6_downscaling/utils.py
index 6aa4d056..512abbe2 100644
--- a/cmip6_downscaling/utils.py
+++ b/cmip6_downscaling/utils.py
@@ -13,7 +13,6 @@ def str_to_hash(s: str) -> str:
 
 
 def write(ds: xr.Dataset | datatree.DataTree, target, use_cache: bool = True) -> str:
-
     from .methods.common.utils import zmetadata_exists
 
     if use_cache and zmetadata_exists(target):
diff --git a/flows/catalogs/era5.py b/flows/catalogs/era5.py
index e0d35497..cb537470 100644
--- a/flows/catalogs/era5.py
+++ b/flows/catalogs/era5.py
@@ -61,7 +61,6 @@ def parse_era5(path):
 
 @task(log_stdout=True)
 def build_catalog(*, name: str, bucket: str) -> None:
-
     import ecgtools
 
     print(ecgtools.__version__)
diff --git a/flows/catalogs/final_catalog.py b/flows/catalogs/final_catalog.py
index 1e53a73d..11de8558 100644
--- a/flows/catalogs/final_catalog.py
+++ b/flows/catalogs/final_catalog.py
@@ -22,7 +22,6 @@
 
 
 def parse_store(store: str):
-
     from ecgtools.builder import INVALID_ASSET, TRACEBACK
 
     try:
@@ -54,13 +53,11 @@ def parse_store(store: str):
         }
 
     except Exception:
-
         return {INVALID_ASSET: path, TRACEBACK: traceback.format_exc()}
 
 
 @task(log_stdout=True)
 def generate_intake_esm_catalog(*, intake_esm_catalog_bucket: str):
-
     import ecgtools
 
     builder = ecgtools.Builder(
@@ -102,7 +99,6 @@ def generate_intake_esm_catalog(*, intake_esm_catalog_bucket: str):
 def generate_minified_web_catalog(
     *, parent_catalog: str, web_catalog: str, cdn: str = None
 ) -> None:
-
     with fsspec.open(parent_catalog) as f:
         data = json.load(f)
 
@@ -146,7 +142,6 @@ def generate_minified_web_catalog(
     storage=runtime.storage,
     run_config=runtime.run_config,
 ) as flow:
-
     parent_catalog = Parameter(
         'parent-catalog',
         default="az://scratch/results/pyramids/combined-cmip6-era5-pyramids-catalog-web.json",
diff --git a/flows/catalogs/web_catalog.py b/flows/catalogs/web_catalog.py
index 1d38456f..c780b9bc 100644
--- a/flows/catalogs/web_catalog.py
+++ b/flows/catalogs/web_catalog.py
@@ -125,13 +125,11 @@ def get_license(source_id: str, derived_product: bool = False) -> dict:
 
 
 def parse_cmip6(store: str, root_path: str) -> dict[str, str]:
-
     from ecgtools.builder import INVALID_ASSET, TRACEBACK
 
     from cmip6_downscaling.methods.common.utils import zmetadata_exists
 
     try:
-
         path = UPath(store)
         if not zmetadata_exists(path):
             raise ValueError(f'{path} not a valid zarr store')
@@ -260,7 +258,6 @@ def from_az_to_https(uri: str, root: str) -> str:
 def parse_cmip6_downscaled_pyramid(
     data, cdn: str, root_path: str, derived_product: bool = True
 ) -> list[dict]:
-
     """
     Parse metadata for given CMIP6 downscaled pyramid.
 
@@ -435,7 +432,6 @@ def parse(path: str):
 def filter_version_results(
     *, minimum_version: str, maximum_version: str, exclude_local_version: bool, results: list[str]
 ) -> list[str]:
-
     """
     Filter the results by version.
 
@@ -496,7 +492,6 @@ def get_cmip6_downscaled_pyramids(
     maximum_version: str = None,
     exclude_local_version: bool = True,
 ):
-
     """
     Get CMIP6 downscaled pyramids.
 
@@ -554,7 +549,6 @@ def create_catalog(
     cmip6_downscaled_pyramids: list[dict] = None,
     era5_pyramids: list[dict] = None,
 ):
-
     """
     Create catalog.
 
@@ -603,7 +597,6 @@ def create_catalog(
 with Flow(
     'web-catalog', executor=runtime.executor, run_config=runtime.run_config, storage=runtime.storage
 ) as flow:
-
     paths = Parameter('paths', default=['az://flow-outputs/results/cmip6-pyramids-raw'])
     web_catalog_path = Parameter(
         'web-catalog-path',
diff --git a/flows/cloud_flow_test.py b/flows/cloud_flow_test.py
index 388bb735..39b380a1 100644
--- a/flows/cloud_flow_test.py
+++ b/flows/cloud_flow_test.py
@@ -36,7 +36,6 @@ def make_grid(shape):
     run_config=runtime.run_config,
     executor=runtime.executor,
 ) as flow:
-
     nums = range(4)
     my_task.map(nums)
 
diff --git a/flows/cmip6_raw_pyramids.py b/flows/cmip6_raw_pyramids.py
index 824bc1cf..ca57b4d6 100644
--- a/flows/cmip6_raw_pyramids.py
+++ b/flows/cmip6_raw_pyramids.py
@@ -41,7 +41,6 @@ def get_assets(
     grid_label: list[str] | str = 'gn',
     member_id: list[str] | str = 'r1i1p1f1',
 ) -> list[tuple(str, str)]:
-
     import intake
 
     cat = intake.open_esm_datastore(cat_url).search(
diff --git a/flows/era5-resample-rechunk.py b/flows/era5-resample-rechunk.py
index ceca94c5..b429bd07 100644
--- a/flows/era5-resample-rechunk.py
+++ b/flows/era5-resample-rechunk.py
@@ -26,7 +26,6 @@ def get_datasets_keys(*, catalog_path: str, start: int, stop: int):
 
 @task(log_stdout=True)
 def resample_to_daily(*, catalog_path: str, key: str):
-
     import dask
     import intake
     import xarray as xr
diff --git a/flows/gcm_obs_weights.py b/flows/gcm_obs_weights.py
index a5e8dc5a..576fb1c6 100644
--- a/flows/gcm_obs_weights.py
+++ b/flows/gcm_obs_weights.py
@@ -58,7 +58,6 @@ def generate_weights(stores: list[dict[str, str]], method: str = 'bilinear') ->
     print(ds_out)
 
     for store in stores:
-
         target_prefix = (
             static_dir / store['source_id'] / store['table_id'] / store['grid_label'] / method
         )
@@ -128,7 +127,6 @@ def catalog(results):
     run_config=runtime.run_config,
     executor=runtime.executor,
 ) as flow:
-
     cat_url = Parameter(
         'cat_url', default='https://cmip6downscaling.blob.core.windows.net/cmip6/pangeo-cmip6.json'
     )
diff --git a/flows/methods/bcsd/flow.py b/flows/methods/bcsd/flow.py
index 70e2698a..fe6b9221 100644
--- a/flows/methods/bcsd/flow.py
+++ b/flows/methods/bcsd/flow.py
@@ -45,7 +45,6 @@
 with Flow(
     name="bcsd", storage=runtime.storage, run_config=runtime.run_config, executor=runtime.executor
 ) as flow:
-
     run_parameters = make_run_parameters(
         method=Parameter("method"),
         obs=Parameter("obs"),
diff --git a/flows/methods/deepsd/flow.py b/flows/methods/deepsd/flow.py
index a5fce591..1add4fe1 100644
--- a/flows/methods/deepsd/flow.py
+++ b/flows/methods/deepsd/flow.py
@@ -26,7 +26,6 @@
 with Flow(
     name="deepsd", storage=runtime.storage, run_config=runtime.run_config, executor=runtime.executor
 ) as flow:
-
     run_parameters = make_run_parameters(
         method=Parameter("method"),
         obs=Parameter("obs"),
@@ -136,7 +135,6 @@
     )
 
     if config.get('run_options.generate_pyramids'):
-
         # since pyramids require full space we now rechunk everything into full
         # space before passing into pyramid step. we probably want to add a cleanup
         # to this step in particular since otherwise we will have an exact
diff --git a/flows/methods/gard/flow.py b/flows/methods/gard/flow.py
index 32db41b3..1063b83f 100644
--- a/flows/methods/gard/flow.py
+++ b/flows/methods/gard/flow.py
@@ -41,7 +41,6 @@
 with Flow(
     name="gard", storage=runtime.storage, run_config=runtime.run_config, executor=runtime.executor
 ) as flow:
-
     run_parameters = make_run_parameters(
         method=Parameter("method"),
         obs=Parameter("obs"),
@@ -126,7 +125,6 @@
     # analysis_location = run_analyses(model_output_path, run_parameters)
 
     if config.get('run_options.generate_pyramids'):
-
         # since pyramids require full space we now rechunk everything into full
         # space before passing into pyramid step. we probably want to add a cleanup
         # to this step in particular since otherwise we will have an exact
diff --git a/flows/methods/maca/flow.py b/flows/methods/maca/flow.py
index 0548b795..ede762f1 100644
--- a/flows/methods/maca/flow.py
+++ b/flows/methods/maca/flow.py
@@ -147,7 +147,6 @@
     )
 
     if config.get('run_options.combine_regions'):
-
         p['combined_analogs_full_time_path'] = combine_regions(
             regions=p['region_numbers'],
             region_paths=p['constructed_analogs_region_paths'],
@@ -184,7 +183,6 @@
     # analysis_location = run_analyses(p['final_bias_corrected_full_time_path'], run_parameters)
 
     if config.get('run_options.generate_pyramids'):
-
         # make temporal summaries
         p['monthly_summary_full_space_path'] = rechunk(
             p['monthly_summary_path'], pattern='full_space'
diff --git a/notebooks/check_model_completeness.ipynb b/notebooks/check_model_completeness.ipynb
index 4c737771..f8b20083 100644
--- a/notebooks/check_model_completeness.ipynb
+++ b/notebooks/check_model_completeness.ipynb
@@ -70,7 +70,7 @@
    "source": [
     "models = []\n",
     "for scenario in [\"historical\", \"ssp245\", \"ssp370\", \"ssp585\"]:\n",
-    "    for (gcm, ensemble_member) in gcms:\n",
+    "    for gcm, ensemble_member in gcms:\n",
     "        models.append(\"{}-{}\".format(gcm, scenario))\n",
     "path = get_store(\n",
     "    \"carbonplan-downscaling\",\n",
@@ -89,7 +89,7 @@
    "outputs": [],
    "source": [
     "for scenario in [\"historical\", \"ssp245\", \"ssp370\", \"ssp585\"]:\n",
-    "    for (gcm, ensemble_member) in gcms:\n",
+    "    for gcm, ensemble_member in gcms:\n",
     "        path = get_store(\n",
     "            \"carbonplan-downscaling\",\n",
     "            zarr_template.format(gcm, scenario, ensemble_member),\n",
diff --git a/notebooks/deepsd_data_prep.ipynb b/notebooks/deepsd_data_prep.ipynb
index 72b9d705..22806203 100644
--- a/notebooks/deepsd_data_prep.ipynb
+++ b/notebooks/deepsd_data_prep.ipynb
@@ -849,7 +849,6 @@
    "# for plotting with lat/lon potentially use the following\n",
    "# batch.isel(input_batch=1).tf..plot.pcolormesh(strings)\n",
    "for _ in range(10):\n",
-    "\n",
    "    i = random.randint(0, len(batch.input_batch))\n",
    "    patch = batch.isel(input_batch=i)\n",
    "    print(patch.lat.min().values, patch.lat.max().values)\n",
diff --git a/notebooks/deepsd_model_train.ipynb b/notebooks/deepsd_model_train.ipynb
index bd714bc5..a89da4ce 100644
--- a/notebooks/deepsd_model_train.ipynb
+++ b/notebooks/deepsd_model_train.ipynb
@@ -211,7 +211,6 @@
    "\n",
    "\n",
    "def read_and_decode(filename_queue, input_size, input_depth, output_depth):\n",
-    "\n",
    "    reader = tf.compat.v1.TFRecordReader()\n",
    "    _, serialized_example = reader.read(filename_queue)\n",
    "    features = tf.compat.v1.parse_single_example(serialized_example, features=feature)\n",
diff --git a/notebooks/prefect_cloud_runner.ipynb b/notebooks/prefect_cloud_runner.ipynb
index 69395369..90668c6a 100644
--- a/notebooks/prefect_cloud_runner.ipynb
+++ b/notebooks/prefect_cloud_runner.ipynb
@@ -73,7 +73,6 @@
    "\n",
    "\n",
    "def create_run_params_from_json(parameter_fpath: str) -> RunParameters:\n",
-    "\n",
    "    df = pd.read_json(parameter_fpath)\n",
    "    run_parameters = RunParameters(\n",
    "        method=df.method.iloc[0],\n",
@@ -96,7 +95,6 @@
    "\n",
    "\n",
    "def run_flow(flow_id: str, param_file_path: str) -> list[str]:\n",
-    "\n",
    "    json_path = pathlib.Path(param_file_path).read_text()\n",
    "    flow_hash = str_to_hash(json_path)\n",
    "    param_dict = json.loads(json_path)\n",
diff --git a/notebooks/terraclimate_model.ipynb b/notebooks/terraclimate_model.ipynb
index 5405d121..f9b44995 100644
--- a/notebooks/terraclimate_model.ipynb
+++ b/notebooks/terraclimate_model.ipynb
@@ -117,7 +117,6 @@
    "# run our version of the terraclimate hydrology model for all points\n",
    "v2 = {}\n",
    "for k, df_point in data.items():\n",
-    "\n",
    "    df = df_point.copy(deep=True)\n",
    "\n",
    "    awc = df[\"awc\"][0]\n",
@@ -147,7 +146,6 @@
    "outputs": [],
    "source": [
     "def plot_points(v1, v2, var=\"pdsi\", tslice=slice(200, 300)):\n",
-    "\n",
     "    fig, axes = plt.subplots(nrows=2, ncols=len(v2), figsize=(24, 4), squeeze=False)\n",
     "\n",
     "    for i, k in enumerate(v2):\n",
diff --git a/tests/methods/common/test_tasks.py b/tests/methods/common/test_tasks.py
index 608cbe30..580d2952 100644
--- a/tests/methods/common/test_tasks.py
+++ b/tests/methods/common/test_tasks.py
@@ -135,7 +135,6 @@ def test_get_experiment(run_parameters):
 
 
 def test_regrid(tmp_path):
-
     pytest.importorskip('xesmf')
 
     ds = xr.tutorial.open_dataset('air_temperature').chunk({'time': 10, 'lat': -1, 'lon': -1})
diff --git a/tests/test_runtimes.py b/tests/test_runtimes.py
index 741dbd5c..19e63c69 100644
--- a/tests/test_runtimes.py
+++ b/tests/test_runtimes.py
@@ -11,7 +11,6 @@
 
 @pytest.mark.parametrize('runtime', [LocalRuntime, CIRuntime, PangeoRuntime])
 def test_runtimes(runtime):
-
     _runtime = runtime()
     assert isinstance(_runtime, BaseRuntime)
 
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 2bef0fc8..9cf88463 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -46,7 +46,6 @@ def test_lon_to_180(shift):
 
 
 def test_to_standard_calendar(da_noleap):
-
     da_std = to_standard_calendar(da_noleap)
     assert da_noleap.sizes['time'] == 365
     assert da_std.sizes['time'] == 366