add the option for extra_labels
scarlehoff committed Nov 13, 2023
1 parent 7f06c4b commit b119a6e
Showing 3 changed files with 20 additions and 7 deletions.
24 changes: 18 additions & 6 deletions validphys2/src/validphys/commondataparser.py
@@ -411,12 +411,19 @@ def plotting_options(self):
         self.plotting.x_label = self.kinematics.get_label(self.plotting.plot_x)
 
         # Swap the `figure_by` and `line_by` variables by k1/k2/k3
+        # unless this is something coming from the "extra labels"
         if self.plotting.figure_by is not None:
             new_fig_by = []
             for var in self.plotting.figure_by:
-                fig_idx = self.kinematic_coverage.index(var)
-                used_idx.append(fig_idx)
-                new_fig_by.append(f"k{fig_idx + 1}")
+                if var in self.kinematic_coverage:
+                    fig_idx = self.kinematic_coverage.index(var)
+                    used_idx.append(fig_idx)
+                    new_fig_by.append(f"k{fig_idx + 1}")
+                elif self.plotting.extra_labels is not None and var in self.plotting.extra_labels:
+                    new_fig_by.append(var)
+                else:
+                    raise ValueError(f"Cannot find {var} in the kinematic coverage or extra labels")
 
             self.plotting.figure_by = new_fig_by
 
         if self.plotting.line_by is not None:
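The new branch above falls back to extra_labels whenever a figure_by variable is not part of the kinematic coverage. A minimal stand-alone sketch of that mapping, with a hypothetical helper name (not part of the commit):

def map_plot_variable(var, kinematic_coverage, extra_labels):
    # Kinematic variables are renamed to their positional k1/k2/k3 form
    if var in kinematic_coverage:
        return f"k{kinematic_coverage.index(var) + 1}"
    # Variables defined through extra_labels are kept under their own name
    if extra_labels is not None and var in extra_labels:
        return var
    raise ValueError(f"Cannot find {var} in the kinematic coverage or extra labels")

# map_plot_variable("pT", ["pT", "y", "sqrts"], None)                          -> "k1"
# map_plot_variable("spectrometer", ["pT", "y", "sqrts"], {"spectrometer": 1}) -> "spectrometer"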
@@ -524,9 +531,7 @@ def _parse_uncertainties(metadata):
         )
         # I'm guessing there will be a better way of doing this than calling dataframe twice for the same thing?
         final_df = pd.DataFrame(
-            pd.DataFrame(uncyaml["bins"]).values,
-            columns=mindex,
-            index=range(1, metadata.ndata + 1),
+            pd.DataFrame(uncyaml["bins"]).values, columns=mindex, index=range(1, metadata.ndata + 1)
         )
         final_df.index.name = _INDEX_NAME
         all_df.append(final_df)
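The in-code comment above asks whether the double DataFrame call can be avoided; one possible alternative, an untested sketch assuming uncyaml["bins"] is a list of per-bin mappings whose key order matches mindex, is:

final_df = (
    pd.DataFrame(uncyaml["bins"])
    .set_axis(mindex, axis="columns")                        # replace the raw yaml keys with the MultiIndex
    .set_axis(range(1, metadata.ndata + 1), axis="index")    # 1-based bin numbering, as in the current code
)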
@@ -665,6 +670,12 @@ def parse_commondata_new(metadata):
             100 / commondata_table["data"], axis="index"
         )
 
+    # TODO: this will be removed because the old ones will be loaded with the new names
+    # but during the implementation this is useful for the cuts (filters.py, __call__)
+    names_file = metadata.path_kinematics.parent.parent / "dataset_names.yml"
+    names_dict = yaml.YAML().load(names_file)
+    legacy_name = names_dict.get(metadata.name)
+
     return CommonData(
         setname=metadata.name,
         ndata=metadata.ndata,
@@ -674,6 +685,7 @@
         commondata_table=commondata_table,
         systype_table=systype_table,
         legacy=False,
+        legacy_name=legacy_name,
         kin_variables=metadata.kinematic_coverage,

1 change: 1 addition & 0 deletions validphys2/src/validphys/coredata.py
@@ -248,6 +248,7 @@ class CommonData:
     systype_table: pd.DataFrame = dataclasses.field(repr=False)
     systematics_table: pd.DataFrame = dataclasses.field(init=None, repr=False)
     legacy: bool
+    legacy_name: Optional[str] = None
     kin_variables: Optional[list] = None
 
     def __post_init__(self):
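Because legacy_name is declared with a default of None, construction sites that predate this commit remain valid. A reduced, hypothetical stand-in showing only the fields relevant here:

import dataclasses
from typing import Optional

@dataclasses.dataclass
class _Sketch:  # reduced stand-in for CommonData, not the real class
    legacy: bool
    legacy_name: Optional[str] = None

print(_Sketch(legacy=True))                          # legacy_name defaults to None
print(_Sketch(legacy=False, legacy_name="OLDNAME"))  # new-format data can carry its old name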
2 changes: 1 addition & 1 deletion validphys2/src/validphys/filters.py
@@ -546,7 +546,7 @@ def __call__(self, dataset, idat):
         # is different to the case where the rule does apply,
         # but the point was cut out by the rule.
         if (
-            dataset.setname != self.dataset
+            (dataset.setname != self.dataset and dataset.legacy_name != self.dataset)
            and process_name != self.process_type
            and self.process_type != "DIS_ALL"
        ):
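With the change above, a filter rule written against a given dataset name now matches either the new setname or the stored legacy_name; by De Morgan, the rule is skipped on the name criterion only when neither matches. A small sketch of that check in isolation (hypothetical names):

def rule_matches_by_name(setname, legacy_name, rule_dataset):
    # Equivalent to "not (setname != rule and legacy_name != rule)" from the diff
    return setname == rule_dataset or legacy_name == rule_dataset

print(rule_matches_by_name("NEW_NAME", "OLDNAME", "OLDNAME"))   # True: rules using legacy names still apply
print(rule_matches_by_name("NEW_NAME", "OLDNAME", "NEW_NAME"))  # True
print(rule_matches_by_name("NEW_NAME", "OLDNAME", "OTHER"))     # False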
