
Commit 5ba8466

Merge branch 'main' into acordonez-patch-1
2 parents 2c92e21 + e133c6c commit 5ba8466

89 files changed (+1596 / -860 lines)


pcmdi_metrics/__init__.py

Lines changed: 1 addition & 3 deletions
@@ -1,5 +1,5 @@
 # put here the import calls to expose whatever we want to the user
-import logging
+import logging # noqa
 
 LOG_LEVEL = logging.DEBUG
 plog = logging.getLogger("pcmdi_metrics")
@@ -15,6 +15,4 @@
 plog.addHandler(ch)
 plog.setLevel(LOG_LEVEL)
 from . import io # noqa
-#from . import pcmdi # noqa
-#from . import mean_climate # noqa
 from .version import __git_sha1__, __git_tag_describe__, __version__ # noqa
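Side note on the "# noqa" markers in this file: flake8 normally flags imports that exist only to re-export names (F401) or that appear below executable code (E402), and "# noqa" suppresses those checks for that line. A minimal sketch of the pattern for a hypothetical package __init__.py (the package and submodule names are invented, not taken from pcmdi_metrics):

# Hypothetical __init__.py sketch illustrating the "# noqa" pattern above.
import logging  # noqa

LOG_LEVEL = logging.DEBUG
plog = logging.getLogger("example_pkg")
plog.setLevel(LOG_LEVEL)

# A re-export import placed after setup code would normally trigger flake8
# E402/F401; "# noqa" tells flake8 to skip its checks on this line.
from . import submodule  # noqa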

pcmdi_metrics/cloud_feedback/cloud_feedback_driver.py

Lines changed: 31 additions & 10 deletions
@@ -61,7 +61,10 @@
     with open(input_files_json) as f:
         ncfiles = json.load(f)
 else:
-    print('Warning: input files were not explicitly given. They will be searched from ', path)
+    print(
+        "Warning: input files were not explicitly given. They will be searched from ",
+        path,
+    )
 
 for exp in exps:
     filenames[exp] = dict()
@@ -169,15 +172,33 @@
 output_dict["RESULTS"][model] = OrderedDict()
 output_dict["RESULTS"][model][variant] = OrderedDict()
 output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"] = OrderedDict()
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["high_cloud_altitude"] = assessed_cld_fbk[0]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["tropical_marine_low_cloud"] = assessed_cld_fbk[1]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["tropical_anvil_cloud_area"] = assessed_cld_fbk[2]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["land_cloud_amount"] = assessed_cld_fbk[3]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["middle_latitude_marine_low_cloud_amount"] = assessed_cld_fbk[4]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["high_latitude_low_cloud_optical_depth"] = assessed_cld_fbk[5]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["implied_unassessed"] = assessed_cld_fbk[6]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["sum_of_assessed"] = assessed_cld_fbk[7]
-output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"]["total_cloud_feedback"] = assessed_cld_fbk[8]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "high_cloud_altitude"
+] = assessed_cld_fbk[0]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "tropical_marine_low_cloud"
+] = assessed_cld_fbk[1]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "tropical_anvil_cloud_area"
+] = assessed_cld_fbk[2]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "land_cloud_amount"
+] = assessed_cld_fbk[3]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "middle_latitude_marine_low_cloud_amount"
+] = assessed_cld_fbk[4]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "high_latitude_low_cloud_optical_depth"
+] = assessed_cld_fbk[5]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "implied_unassessed"
+] = assessed_cld_fbk[6]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "sum_of_assessed"
+] = assessed_cld_fbk[7]
+output_dict["RESULTS"][model][variant]["assessed_cloud_feedback"][
+    "total_cloud_feedback"
+] = assessed_cld_fbk[8]
 output_dict["RESULTS"][model][variant]["clim_cloud_rmse"] = climo_cld_rmse
 output_dict["RESULTS"][model][variant]["cloud_feedback_rmse"] = cld_fbk_rmse
 output_dict["RESULTS"][model][variant]["equilibrium_climate_sensitivity"] = ecs

pcmdi_metrics/cloud_feedback/lib/argparse_functions.py

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@
 
 
 def AddParserArgument():
-
     P = CDPParser(
         default_args_file=[],
         description="Cloud feedback metrics",

pcmdi_metrics/cloud_feedback/lib/cal_CloudRadKernel_xr.py

Lines changed: 8 additions & 8 deletions
@@ -7,13 +7,13 @@
 # to expert-assessed values from Sherwood et al (2020)
 # =============================================
 
+from datetime import date
+
 # IMPORT STUFF:
-import cftime
+import numpy as np
 import xarray as xr
 import xcdat as xc
-import numpy as np
 from global_land_mask import globe
-from datetime import date
 
 # =============================================
 # define necessary information
@@ -190,7 +190,7 @@ def get_CRK_data(filepath):
 ###########################################################################
 def get_kernel_regrid(ctl):
     # Read in data and map kernels to lat/lon
-
+
     f = xc.open_mfdataset(datadir + "cloud_kernels2.nc", decode_times=False)
     f = f.rename({"mo": "time", "tau_midpt": "tau", "p_midpt": "plev"})
     f["time"] = ctl["time"].copy()
@@ -370,7 +370,7 @@ def bony_sorting_part2(OKwaps, data, OKland, WTS, binedges):
 # Ensure that the area matrix has zeros rather than masked points
 CNTS = CNTS.where(~np.isnan(CNTS), 0.0)
 
-if np.allclose(0.5, np.sum(CNTS.values, 1)) == False:
+if np.allclose(0.5, np.sum(CNTS.values, 1)) is False:
     print(
         "sum of fractional counts over all wapbins does not equal 0.5 (tropical fraction)"
     )
@@ -385,7 +385,7 @@ def bony_sorting_part2(OKwaps, data, OKland, WTS, binedges):
 v2b = (data * WTS).sum(dim=["lat", "lon"]).transpose("time", "tau", "plev")
 
 # if np.allclose(v1.values,v2a.values)==False or np.allclose(v1.values,v2b.values)==False:
-if np.allclose(v1.values, v2b.values) == False:
+if np.allclose(v1.values, v2b.values) is False:
     print("Cannot reconstruct tropical average via summing regimes")
 
 return DATA, CNTS # [time,wapbin]
@@ -602,7 +602,7 @@ def obscuration_terms3(c1, c2):
 L_R_bar = L_R_bar.where(L_R_bar >= 0, 0.0)
 F_bar = F_bar.where(F_bar >= 0, 0.0)
 
-rep_L_bar = tile_uneven(L_bar, L12)
+# rep_L_bar = tile_uneven(L_bar, L12)
 rep_L_R_bar = tile_uneven(L_R_bar, L_R12)
 rep_F_bar = tile_uneven(F_bar, F12b)
 
@@ -873,7 +873,7 @@ def klein_metrics(obs_clisccp, gcm_clisccp, LWkern, SWkern, WTS):
 # take only clouds with tau>3.6
 clisccp_bias_CPM = clisccp_bias[:, 2:, :] # 4 tau bins
 obs_clisccp_CPM = obs_clisccp[:, 2:, :] # 4 tau bins
-gcm_clisccp_CPM = gcm_clisccp[:, 2:, :] # 4 tau bins
+# gcm_clisccp_CPM = gcm_clisccp[:, 2:, :] # 4 tau bins
 LWkernel_CPM = LWkern[:, 2:, :] # 4 tau bins
 SWkernel_CPM = SWkern[:, 2:, :] # 4 tau bins
 NETkernel_CPM = SWkernel_CPM + LWkernel_CPM
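A note on the "== False" changes above: np.allclose returns a plain Python bool, so comparing with "is False" behaves correctly here, although the conventional spelling of the same guard is "not np.allclose(...)". A small self-contained sketch with made-up numbers:

# Standalone illustration; the array values are invented, only the idiom matters.
import numpy as np

counts = np.array([[0.2, 0.2], [0.3, 0.3]])
# Same effect as "if np.allclose(...) is False:" but reads as ordinary Python.
if not np.allclose(0.5, counts.sum(axis=1)):
    print("fractional counts do not sum to 0.5 in every row")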

pcmdi_metrics/cloud_feedback/lib/cld_fbks_ecs_assessment_v3.py

Lines changed: 25 additions & 16 deletions
@@ -4,8 +4,8 @@
 """
 
 import json
-import string
 import os
+import string
 from datetime import date
 
 import matplotlib as mpl
@@ -17,7 +17,7 @@
 HEIGHT = 0.45
 
 # ######################################################
-# DEFINE COLORS FOR ECS COLORBAR
+# DEFINE COLORS FOR ECS COLORBAR
 # ######################################################
 cmap = plt.cm.RdBu_r # define the colormap
 # extract all colors from the .jet map
@@ -75,7 +75,6 @@
 
 # ######################################################
 def get_expert_assessed_fbks():
-
     # ######################################################
     # ############# WCRP ASSESSMENT VALUES #################
     # ######################################################
@@ -272,7 +271,6 @@ def get_fbks(cld_fbks, obsc_cld_fbks, cld_errs, ecs_dict):
 
 # ######################################################
 def get_gcm_assessed_fbks(fbk_dict, obsc_fbk_dict):
-
     # dictionary is structured: [region][sfc][sec][name]
 
     assessed = []
@@ -364,7 +362,6 @@ def get_gcm_assessed_fbks(fbk_dict, obsc_fbk_dict):
 
 # ######################################################
 def get_gcm_cld_errs(err_dict, name):
-
     # dictionary is structured: [region][sfc][sec][name]
 
     DATA = []
@@ -403,7 +400,6 @@ def get_gcm_cld_errs(err_dict, name):
 
 # ######################################################
 def get_unassessed_fbks(fbk_dict, obsc_fbk_dict):
-
     # dictionary is structured: [region][sfc][sec][name]
 
     fbk_names = []
@@ -651,7 +647,6 @@ def label_models(ax, models5, models6):
 
 # ######################################################
 def plot_expert():
-
     (expert_cld_fbks, err_expert_cld_fbks, fbk_names) = get_expert_assessed_fbks()
     LN = len(fbk_names)
 
@@ -880,10 +875,15 @@ def static_plot(assessed, ecs, models, fbk_names, gen, fig, gs):
 
 # ######################################################
 def make_all_figs(
-    cld_fbks6, obsc_cld_fbks6, cld_errs6, ecs_dict56, newmod,
-    figdir=None, datadir=None,
-    debug=False):
-
+    cld_fbks6,
+    obsc_cld_fbks6,
+    cld_errs6,
+    ecs_dict56,
+    newmod,
+    figdir=None,
+    datadir=None,
+    debug=False,
+):
     # Set a unique marker for your new model
     MARK[newmod] = "<"
 
@@ -998,7 +998,9 @@ def make_all_figs(
     # new axis for labeling all models
     ax = plt.subplot(gs[:10, 10:12])
     label_models(ax, models5, models6)
-    plt.savefig(os.path.join(figdir, "WCRP_assessed_cld_fbks_amip-p4K.png"), bbox_inches="tight")
+    plt.savefig(
+        os.path.join(figdir, "WCRP_assessed_cld_fbks_amip-p4K.png"), bbox_inches="tight"
+    )
     if debug:
         print("make_all_figs: WCRP_assessed_cld_fbks_amip-p4K.png done")
 
@@ -1030,7 +1032,10 @@ def make_all_figs(
    # new axis for labeling all models
     ax = plt.subplot(gs[:10, 10:12])
     label_models(ax, models5, models6)
-    plt.savefig(os.path.join(figdir, "WCRP_unassessed_cld_fbks_amip-p4K.png"), bbox_inches="tight")
+    plt.savefig(
+        os.path.join(figdir, "WCRP_unassessed_cld_fbks_amip-p4K.png"),
+        bbox_inches="tight",
+    )
     if debug:
         print("make_all_figs: WCRP_unassessed_cld_fbks_amip-p4K.png done")
 
@@ -1277,8 +1282,9 @@ def make_all_figs(
     )
     cb.ax.tick_params(labelsize=14)
     ax2.set_ylabel(r"$\mathrm{E_{NET}}$", size=14)
-    plt.savefig(os.path.join(
-        figdir, "WCRP_assessed_RMSE_v_cldfbk2_amip-p4K.png"), bbox_inches="tight"
+    plt.savefig(
+        os.path.join(figdir, "WCRP_assessed_RMSE_v_cldfbk2_amip-p4K.png"),
+        bbox_inches="tight",
     )
 
     # #####################################################
@@ -1402,7 +1408,10 @@ def make_all_figs(
     plt.title("b", fontsize=16, loc="left")
     plt.ylim(0.04, 0.15)
     plt.xlim(0.60, 1.65)
-    plt.savefig(os.path.join(figdir, "WCRP_totcldfbks2_v_E_NET_amip-p4K.png"), bbox_inches="tight")
+    plt.savefig(
+        os.path.join(figdir, "WCRP_totcldfbks2_v_E_NET_amip-p4K.png"),
+        bbox_inches="tight",
+    )
 
     # ######################################################
     # PRINT OUT THE TABLE OF EVERYTHING:
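Footnote to the repeated plt.savefig reformatting in this file (not something the commit introduces): the same save-with-tight-bounding-box pattern could be factored into a small helper. A hypothetical sketch:

import os

import matplotlib.pyplot as plt

# Hypothetical helper, not present in the repository: save the current figure
# into figdir with a tight bounding box, mirroring the calls reformatted above.
def save_tight(figdir, filename):
    plt.savefig(os.path.join(figdir, filename), bbox_inches="tight")

# Example usage (figdir and the file name mirror the hunks above):
# save_tight(figdir, "WCRP_totcldfbks2_v_E_NET_amip-p4K.png")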
