diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py
index e54981d5c4..d2f6ac335e 100644
--- a/validphys2/src/validphys/config.py
+++ b/validphys2/src/validphys/config.py
@@ -53,15 +53,7 @@ class Environment(Environment):
     """Container for information to be filled at run time"""
 
-    def __init__(
-        self,
-        *,
-        this_folder=None,
-        net=True,
-        upload=False,
-        dry=False,
-        **kwargs,
-    ):
+    def __init__(self, *, this_folder=None, net=True, upload=False, dry=False, **kwargs):
         if this_folder:
             self.this_folder = pathlib.Path(this_folder)
@@ -112,10 +104,7 @@ def parse_func(self, item, **kwargs):
 
         origsig = inspect.signature(f)
         parse_func = functools.wraps(f)(parse_func)
 
-        params = [
-            *list(currsig.parameters.values())[:2],
-            *list(origsig.parameters.values())[2:],
-        ]
+        params = [*list(currsig.parameters.values())[:2], *list(origsig.parameters.values())[2:]]
 
         parse_func.__signature__ = inspect.Signature(parameters=params)
@@ -143,9 +132,7 @@ def parse_pdf(self, name: str):
             pdf = self.loader.check_pdf(name)
         except PDFNotFound as e:
             raise ConfigError(
-                "Bad PDF: {} not installed".format(name),
-                name,
-                self.loader.available_pdfs,
+                "Bad PDF: {} not installed".format(name), name, self.loader.available_pdfs
             ) from e
         except LoaderError as e:
             raise ConfigError(e) from e
@@ -166,10 +153,7 @@ def parse_theoryid(self, theoryID: (str, int)):
             return self.loader.check_theoryID(theoryID)
         except LoaderError as e:
             raise ConfigError(
-                str(e),
-                theoryID,
-                self.loader.available_theories,
-                display_alternatives="all",
+                str(e), theoryID, self.loader.available_theories, display_alternatives="all"
             )
 
     def parse_use_cuts(self, use_cuts: (bool, str)):
@@ -208,9 +192,7 @@ def produce_replicas(self, nreplica: int):
         return NSList(range(1, nreplica + 1), nskey="replica")
 
     def produce_inclusive_use_scalevar_uncertainties(
-        self,
-        use_scalevar_uncertainties: bool = False,
-        point_prescription: (str, None) = None,
+        self, use_scalevar_uncertainties: bool = False, point_prescription: (str, None) = None
     ):
         """Whether to use a scale variation uncertainty theory covmat.
         Checks whether a point prescription is included in the runcard and if so
@@ -249,11 +231,7 @@ def produce_fitcontextwithcuts(self, fit, fitinputcontext):
         theoryid = fitinputcontext["theoryid"]
         data_input = fitinputcontext["data_input"]
 
-        return {
-            "dataset_inputs": data_input,
-            "theoryid": theoryid,
-            "use_cuts": CutsPolicy.FROMFIT,
-        }
+        return {"dataset_inputs": data_input, "theoryid": theoryid, "use_cuts": CutsPolicy.FROMFIT}
 
     def produce_fitenvironment(self, fit, fitinputcontext):
         """Like fitcontext, but additionally forcing various other
@@ -420,12 +398,7 @@ def parse_dataset_input(self, dataset: Mapping):
                 # Abuse ConfigError to get the suggestions.
                 log.warning(ConfigError(f"Key '{k}' in dataset_input not known.", k, known_keys))
         return DataSetInput(
-            name=name,
-            sys=sysnum,
-            cfac=cfac,
-            frac=frac,
-            weight=weight,
-            custom_group=custom_group,
+            name=name, sys=sysnum, cfac=cfac, frac=frac, weight=weight, custom_group=custom_group
         )
 
     def parse_use_fitcommondata(self, do_use: bool):
@@ -440,10 +413,7 @@ def produce_commondata(self, *, dataset_input, use_fitcommondata=False, fit=None
         sysnum = dataset_input.sys
         try:
             return self.loader.check_commondata(
-                setname=name,
-                sysnum=sysnum,
-                use_fitcommondata=use_fitcommondata,
-                fit=fit,
+                setname=name, sysnum=sysnum, use_fitcommondata=use_fitcommondata, fit=fit
             )
         except DataNotFoundError as e:
             raise ConfigError(str(e), name, self.loader.available_datasets) from e
@@ -529,13 +499,7 @@ def _produce_similarity_cuts(self, commondata):
         matched_cuts = self._produce_matched_cuts(commondata)
         inps = []
         for i, ns in enumerate(nss):
-            with self.set_context(
-                ns=self._curr_ns.new_child(
-                    {
-                        **ns,
-                    }
-                )
-            ):
+            with self.set_context(ns=self._curr_ns.new_child({**ns})):
                 # TODO: find a way to not duplicate this and use a dict
                 # instead of a linear search
                 _, dins = self.parse_from_(None, "dataset_inputs", write=False)
@@ -665,10 +629,7 @@ def produce_experiment_from_input(self, experiment_input, theoryid, use_cuts, fi
         input. NOTE: This might be deprecated in the future."""
         return {
             "experiment": self.parse_experiment(
-                experiment_input.as_dict(),
-                theoryid=theoryid,
-                use_cuts=use_cuts,
-                fit=fit,
+                experiment_input.as_dict(), theoryid=theoryid, use_cuts=use_cuts, fit=fit
             )
         }
 
@@ -683,9 +644,7 @@ def produce_sep_mult(self, separate_multiplicative=None):
 
     @configparser.explicit_node
     def produce_dataset_inputs_fitting_covmat(
-        self,
-        theory_covmat_flag=False,
-        use_thcovmat_in_fitting=False,
+        self, theory_covmat_flag=False, use_thcovmat_in_fitting=False
     ):
         """
         Produces the correct covmat to be used in fitting_data_dict according
@@ -701,10 +660,7 @@ def produce_dataset_inputs_sampling_covmat(
-        self,
-        sep_mult,
-        theory_covmat_flag=False,
-        use_thcovmat_in_sampling=False,
+        self, sep_mult, theory_covmat_flag=False, use_thcovmat_in_sampling=False
     ):
         """
         Produces the correct covmat to be used in make_replica according
@@ -760,11 +716,7 @@ def produce_loaded_theory_covmat(
             raise ValueError("More than one theory_covmat file in folder tables")
         theorypath = output_path / "tables" / generic_path
         theory_covmat = pd.read_csv(
-            theorypath,
-            index_col=[0, 1, 2],
-            header=[0, 1, 2],
-            sep="\t|,",
-            engine="python",
+            theorypath, index_col=[0, 1, 2], header=[0, 1, 2], sep="\t|,", engine="python"
         ).fillna(0)
         # change ordering according to exp_covmat (so according to runcard order)
         tmp = theory_covmat.droplevel(0, axis=0).droplevel(0, axis=1)
@@ -877,12 +829,7 @@ def produce_matched_datasets_from_dataspecs(self, dataspecs):
             inner_spec_list = inres["dataspecs"] = []
             for ispec, spec in enumerate(dataspecs):
                 # Passing spec by referene
-                d = ChainMap(
-                    {
-                        "dataset_input": all_names[ispec][k],
-                    },
-                    spec,
-                )
+                d = ChainMap({"dataset_input": all_names[ispec][k]}, spec)
                 inner_spec_list.append(d)
             res.append(inres)
         res.sort(key=lambda x: (x["process"], x["dataset_name"]))
@@ -906,12 +853,7 @@ def produce_matched_positivity_from_dataspecs(self, dataspecs):
             l = inres["dataspecs"] = []
             for ispec, spec in enumerate(dataspecs):
                 # Passing spec by referene
-                d = ChainMap(
-                    {
-                        "posdataset": all_names[ispec][k],
-                    },
-                    spec,
-                )
+                d = ChainMap({"posdataset": all_names[ispec][k]}, spec)
                 l.append(d)
             res.append(inres)
         res.sort(key=lambda x: (x["posdataset_name"]))
@@ -993,11 +935,7 @@ def parse_use_t0(self, do_use_t0: bool):
         return do_use_t0
 
     # TODO: Find a good name for this
-    def produce_t0set(
-        self,
-        t0pdfset=None,
-        use_t0=False,
-    ):
+    def produce_t0set(self, t0pdfset=None, use_t0=False):
         """Return the t0set if use_t0 is True and None otherwise.
         Raises an error if t0 is requested but no t0set is given.
         """
@@ -1408,12 +1346,8 @@ def produce_defaults(
         level and those inside a ``filter_defaults`` mapping.
         """
         from validphys.filters import default_filter_settings_input
-        if (
-            q2min is not None
-            and "q2min" in filter_defaults
-            and q2min != filter_defaults["q2min"]
-        ):
+
+        if q2min is not None and "q2min" in filter_defaults and q2min != filter_defaults["q2min"]:
             raise ConfigError("q2min defined multiple times with different values")
         if w2min is not None and "w2min" in filter_defaults and w2min != filter_defaults["w2min"]:
             raise ConfigError("w2min defined multiple times with different values")
@@ -1446,15 +1381,10 @@ def produce_defaults(
         if maxTau is not None and defaults_loaded:
             log.warning("Using maxTau from runcard")
             filter_defaults["maxTau"] = maxTau
- 
+
         return filter_defaults
 
-    def produce_data(
-        self,
-        data_input,
-        *,
-        group_name="data",
-    ):
+    def produce_data(self, data_input, *, group_name="data"):
         """A set of datasets where correlated systematics are taken
         into account
         """
@@ -1466,9 +1396,7 @@ def produce_data(
         return DataGroupSpec(name=group_name, datasets=datasets, dsinputs=data_input)
 
     def _parse_data_input_from_(
-        self,
-        parse_from_value: (str, type(None)),
-        additional_context: (dict, type(None)) = None,
+        self, parse_from_value: (str, type(None)), additional_context: (dict, type(None)) = None
     ):
         """Function which parses the ``data_input`` from a namespace.
         Usage is similar to :py:meth:`self.parse_from_` except this function bridges
@@ -1588,11 +1516,7 @@ def produce_processed_metadata_group(self, processed_data_grouping, metadata_gro
             return processed_data_grouping
         return metadata_group
 
-    def produce_group_dataset_inputs_by_metadata(
-        self,
-        data_input,
-        processed_metadata_group,
-    ):
+    def produce_group_dataset_inputs_by_metadata(self, data_input, processed_metadata_group):
         """Take the data and the processed_metadata_group key and attempt
         to group the data, returns a list where each element specifies the data_input
         for a single group and the group_name
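A note on the ``produce_loaded_theory_covmat`` hunk above: the ``sep="\t|,"`` argument to ``pd.read_csv`` is a regular expression (tab or comma), which is why ``engine="python"`` is passed, since regex separators are not supported by the default C engine. A minimal sketch of the same read, with a hypothetical file name standing in for ``output_path / "tables" / generic_path``:

    import pandas as pd

    # Hypothetical path; validphys resolves the real one inside the fit
    # output folder's "tables" directory.
    theorypath = "tables/theory_covmat.csv"

    # Three index columns and three header rows, matching the hunk above.
    # The regex separator accepts either tab- or comma-delimited tables.
    theory_covmat = pd.read_csv(
        theorypath, index_col=[0, 1, 2], header=[0, 1, 2], sep="\t|,", engine="python"
    ).fillna(0)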
diff --git a/validphys2/src/validphys/scalevariations/pointprescriptions.yaml b/validphys2/src/validphys/scalevariations/pointprescriptions.yaml
index cfe7a83f8d..df569b5573 100644
--- a/validphys2/src/validphys/scalevariations/pointprescriptions.yaml
+++ b/validphys2/src/validphys/scalevariations/pointprescriptions.yaml
@@ -13,9 +13,9 @@
 'n3lo ad covmat': ['(0, 0, 0, 0)','(1, 0, 0, 0)','(2, 0, 0, 0)','(3, 0, 0, 0)','(4, 0, 0, 0)','(5, 0, 0, 0)','(6, 0, 0, 0)','(7, 0, 0, 0)','(8, 0, 0, 0)','(9, 0, 0, 0)','(10, 0, 0, 0)','(11, 0, 0, 0)','(12, 0, 0, 0)','(13, 0, 0, 0)','(14, 0, 0, 0)','(15, 0, 0, 0)','(16, 0, 0, 0)','(17, 0, 0, 0)','(18, 0, 0, 0)','(19, 0, 0, 0)','(0, 1, 0, 0)','(0, 2, 0, 0)','(0, 3, 0, 0)','(0, 4, 0, 0)','(0, 5, 0, 0)','(0, 6, 0, 0)','(0, 7, 0, 0)','(0, 8, 0, 0)','(0, 9, 0, 0)','(0, 10, 0, 0)','(0, 11, 0, 0)','(0, 12, 0, 0)','(0, 13, 0, 0)','(0, 14, 0, 0)','(0, 15, 0, 0)','(0, 16, 0, 0)','(0, 17, 0, 0)','(0, 18, 0, 0)','(0, 19, 0, 0)','(0, 20, 0, 0)','(0, 21, 0, 0)','(0, 0, 1, 0)','(0, 0, 2, 0)','(0, 0, 3, 0)','(0, 0, 4, 0)','(0, 0, 5, 0)','(0, 0, 6, 0)','(0, 0, 7, 0)','(0, 0, 8, 0)','(0, 0, 9, 0)','(0, 0, 10, 0)','(0, 0, 11, 0)','(0, 0, 12, 0)','(0, 0, 13, 0)','(0, 0, 14, 0)','(0, 0, 15, 0)','(0, 0, 0, 1)','(0, 0, 0, 2)','(0, 0, 0, 3)','(0, 0, 0, 4)','(0, 0, 0, 5)','(0, 0, 0, 6)']
 # N3LO full IHOU: Anomalous dimension theory covmat + DIS massive coefficient functions
 'n3lo ihou': ['(0, 0, 0, 0)','(1, 0, 0, 0)','(2, 0, 0, 0)','(3, 0, 0, 0)','(4, 0, 0, 0)','(5, 0, 0, 0)','(6, 0, 0, 0)','(7, 0, 0, 0)','(8, 0, 0, 0)','(9, 0, 0, 0)','(10, 0, 0, 0)','(11, 0, 0, 0)','(12, 0, 0, 0)','(13, 0, 0, 0)','(14, 0, 0, 0)','(15, 0, 0, 0)','(16, 0, 0, 0)','(17, 0, 0, 0)','(18, 0, 0, 0)','(19, 0, 0, 0)','(0, 1, 0, 0)','(0, 2, 0, 0)','(0, 3, 0, 0)','(0, 4, 0, 0)','(0, 5, 0, 0)','(0, 6, 0, 0)','(0, 7, 0, 0)','(0, 8, 0, 0)','(0, 9, 0, 0)','(0, 10, 0, 0)','(0, 11, 0, 0)','(0, 12, 0, 0)','(0, 13, 0, 0)','(0, 14, 0, 0)','(0, 15, 0, 0)','(0, 16, 0, 0)','(0, 17, 0, 0)','(0, 18, 0, 0)','(0, 19, 0, 0)','(0, 20, 0, 0)','(0, 21, 0, 0)','(0, 0, 1, 0)','(0, 0, 2, 0)','(0, 0, 3, 0)','(0, 0, 4, 0)','(0, 0, 5, 0)','(0, 0, 6, 0)','(0, 0, 7, 0)','(0, 0, 8, 0)','(0, 0, 9, 0)','(0, 0, 10, 0)','(0, 0, 11, 0)','(0, 0, 12, 0)','(0, 0, 13, 0)','(0, 0, 14, 0)','(0, 0, 15, 0)','(0, 0, 0, 1)','(0, 0, 0, 2)','(0, 0, 0, 3)','(0, 0, 0, 4)','(0, 0, 0, 5)','(0, 0, 0, 6)','(-1, -1)','(1, 1)']
-# N3LO full IHOU + 7 point scale variations 
+# N3LO full IHOU + 7 point scale variations
 'n3lo full thcovmat': ['(0, 0, 0, 0)','(1, 0, 0, 0)','(2, 0, 0, 0)','(3, 0, 0, 0)','(4, 0, 0, 0)','(5, 0, 0, 0)','(6, 0, 0, 0)','(7, 0, 0, 0)','(8, 0, 0, 0)','(9, 0, 0, 0)','(10, 0, 0, 0)','(11, 0, 0, 0)','(12, 0, 0, 0)','(13, 0, 0, 0)','(14, 0, 0, 0)','(15, 0, 0, 0)','(16, 0, 0, 0)','(17, 0, 0, 0)','(18, 0, 0, 0)','(19, 0, 0, 0)','(0, 1, 0, 0)','(0, 2, 0, 0)','(0, 3, 0, 0)','(0, 4, 0, 0)','(0, 5, 0, 0)','(0, 6, 0, 0)','(0, 7, 0, 0)','(0, 8, 0, 0)','(0, 9, 0, 0)','(0, 10, 0, 0)','(0, 11, 0, 0)','(0, 12, 0, 0)','(0, 13, 0, 0)','(0, 14, 0, 0)','(0, 15, 0, 0)','(0, 16, 0, 0)','(0, 17, 0, 0)','(0, 18, 0, 0)','(0, 19, 0, 0)','(0, 20, 0, 0)','(0, 21, 0, 0)','(0, 0, 1, 0)','(0, 0, 2, 0)','(0, 0, 3, 0)','(0, 0, 4, 0)','(0, 0, 5, 0)','(0, 0, 6, 0)','(0, 0, 7, 0)','(0, 0, 8, 0)','(0, 0, 9, 0)','(0, 0, 10, 0)','(0, 0, 11, 0)','(0, 0, 12, 0)','(0, 0, 13, 0)','(0, 0, 14, 0)','(0, 0, 15, 0)','(0, 0, 0, 1)','(0, 0, 0, 2)','(0, 0, 0, 3)','(0, 0, 0, 4)','(0, 0, 0, 5)','(0, 0, 0, 6)','(2, 1)', '(0.5, 1)', '(1, 2)', '(1, 0.5)', '(2, 2)', '(0.5, 0.5)','(-1, -1)','(1, 1)']
-# N3LO full IHOU + 3 point scale variations for datasets with no N3LO correcttions 
+# N3LO full IHOU + 3 point scale variations for datasets with no N3LO correcttions
 'n3lo 3pt missing': ['(0, 0, 0, 0)','(1, 0, 0, 0)','(2, 0, 0, 0)','(3, 0, 0, 0)','(4, 0, 0, 0)','(5, 0, 0, 0)','(6, 0, 0, 0)','(7, 0, 0, 0)','(8, 0, 0, 0)','(9, 0, 0, 0)','(10, 0, 0, 0)','(11, 0, 0, 0)','(12, 0, 0, 0)','(13, 0, 0, 0)','(14, 0, 0, 0)','(15, 0, 0, 0)','(16, 0, 0, 0)','(17, 0, 0, 0)','(18, 0, 0, 0)','(19, 0, 0, 0)','(0, 1, 0, 0)','(0, 2, 0, 0)','(0, 3, 0, 0)','(0, 4, 0, 0)','(0, 5, 0, 0)','(0, 6, 0, 0)','(0, 7, 0, 0)','(0, 8, 0, 0)','(0, 9, 0, 0)','(0, 10, 0, 0)','(0, 11, 0, 0)','(0, 12, 0, 0)','(0, 13, 0, 0)','(0, 14, 0, 0)','(0, 15, 0, 0)','(0, 16, 0, 0)','(0, 17, 0, 0)','(0, 18, 0, 0)','(0, 19, 0, 0)','(0, 20, 0, 0)','(0, 21, 0, 0)','(0, 0, 1, 0)','(0, 0, 2, 0)','(0, 0, 3, 0)','(0, 0, 4, 0)','(0, 0, 5, 0)','(0, 0, 6, 0)','(0, 0, 7, 0)','(0, 0, 8, 0)','(0, 0, 9, 0)','(0, 0, 10, 0)','(0, 0, 11, 0)','(0, 0, 12, 0)','(0, 0, 13, 0)','(0, 0, 14, 0)','(0, 0, 15, 0)','(0, 0, 0, 1)','(0, 0, 0, 2)','(0, 0, 0, 3)','(0, 0, 0, 4)','(0, 0, 0, 5)','(0, 0, 0, 6)', '(1, 0.5 missing)', '(1, 2 missing)','(-1, -1)','(1, 1)']
 # N3LO full IHOU + 3 point scale variations for hadronic dasasets
 'n3lo 3pt hadronic': ['(0, 0, 0, 0)','(1, 0, 0, 0)','(2, 0, 0, 0)','(3, 0, 0, 0)','(4, 0, 0, 0)','(5, 0, 0, 0)','(6, 0, 0, 0)','(7, 0, 0, 0)','(8, 0, 0, 0)','(9, 0, 0, 0)','(10, 0, 0, 0)','(11, 0, 0, 0)','(12, 0, 0, 0)','(13, 0, 0, 0)','(14, 0, 0, 0)','(15, 0, 0, 0)','(16, 0, 0, 0)','(17, 0, 0, 0)','(18, 0, 0, 0)','(19, 0, 0, 0)','(0, 1, 0, 0)','(0, 2, 0, 0)','(0, 3, 0, 0)','(0, 4, 0, 0)','(0, 5, 0, 0)','(0, 6, 0, 0)','(0, 7, 0, 0)','(0, 8, 0, 0)','(0, 9, 0, 0)','(0, 10, 0, 0)','(0, 11, 0, 0)','(0, 12, 0, 0)','(0, 13, 0, 0)','(0, 14, 0, 0)','(0, 15, 0, 0)','(0, 16, 0, 0)','(0, 17, 0, 0)','(0, 18, 0, 0)','(0, 19, 0, 0)','(0, 20, 0, 0)','(0, 21, 0, 0)','(0, 0, 1, 0)','(0, 0, 2, 0)','(0, 0, 3, 0)','(0, 0, 4, 0)','(0, 0, 5, 0)','(0, 0, 6, 0)','(0, 0, 7, 0)','(0, 0, 8, 0)','(0, 0, 9, 0)','(0, 0, 10, 0)','(0, 0, 11, 0)','(0, 0, 12, 0)','(0, 0, 13, 0)','(0, 0, 14, 0)','(0, 0, 15, 0)','(0, 0, 0, 1)','(0, 0, 0, 2)','(0, 0, 0, 3)','(0, 0, 0, 4)','(0, 0, 0, 5)','(0, 0, 0, 6)', '(1, 0.5 hadronic)', '(1, 2 hadronic)','(-1, -1)','(1, 1)']
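The pointprescriptions.yaml entries above map a prescription name to the list of scale-variation points entering the theory covmat. In the N3LO entries, the 62 four-component tuples enumerate the anomalous-dimension variations: the central point plus (19, 21, 15, 6) variations for the (gg, gq, qg, qq) splitting functions, matching ``n3lo_vars_dict`` in construction.py below; the trailing two-component tuples are the additional scale-variation and IHOU points. A sketch of inspecting the file, assuming only that it ships as package data under ``validphys.scalevariations`` as the path in the diff suggests:

    from importlib.resources import files

    import yaml

    # Map of prescription name -> list of scale-variation point labels.
    path = files("validphys.scalevariations").joinpath("pointprescriptions.yaml")
    prescriptions = yaml.safe_load(path.read_text())

    for name, points in prescriptions.items():
        print(f"{name}: {len(points)} points")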
diff --git a/validphys2/src/validphys/theorycovariance/construction.py b/validphys2/src/validphys/theorycovariance/construction.py
index db1a370d26..04d9cbfe4a 100644
--- a/validphys2/src/validphys/theorycovariance/construction.py
+++ b/validphys2/src/validphys/theorycovariance/construction.py
@@ -28,12 +28,7 @@
 
 theoryids_procs_central_values_no_table = collect(procs_central_values_no_table, ("theoryids",))
 
-collected_theoryids = collect(
-    "theoryids",
-    [
-        "theoryconfig",
-    ],
-)
+collected_theoryids = collect("theoryids", ["theoryconfig"])
 
 
 def make_scale_var_covmat(predictions):
@@ -56,10 +51,7 @@ def make_scale_var_covmat(predictions):
 
 @check_correct_theory_combination
 def theory_covmat_singleprocess_no_table(
-    theoryids_procs_central_values_no_table,
-    procs_index,
-    theoryids,
-    fivetheories,
+    theoryids_procs_central_values_no_table, procs_index, theoryids, fivetheories
 ):
     """Calculates the theory covariance matrix for scale variations.
     The matrix is a dataframe indexed by procs_index."""
@@ -322,31 +314,25 @@ def covmat_9pt(name1, name2, deltas1, deltas2):
         s = 0.25 * sum(np.outer(d, d) for d in deltas1)
     else:
         s = (1 / 12) * (
-            np.outer(
-                (deltas1[0] + deltas1[4] + deltas1[6]),
-                (deltas2[0] + deltas2[4] + deltas2[6]),
-            )
+            np.outer((deltas1[0] + deltas1[4] + deltas1[6]), (deltas2[0] + deltas2[4] + deltas2[6]))
             + np.outer(
-                (deltas1[1] + deltas1[5] + deltas1[7]),
-                (deltas2[1] + deltas2[5] + deltas2[7]),
+                (deltas1[1] + deltas1[5] + deltas1[7]), (deltas2[1] + deltas2[5] + deltas2[7])
             )
         ) + (1 / 8) * (np.outer((deltas1[2] + deltas1[3]), (deltas2[2] + deltas2[3])))
     return s
 
+
 def covmat_n3lo_singlet(name1, name2, deltas1, deltas2):
     """Returns theory covariance sub-matrix for all the
     singlet splitting function variations.
     """
-    n3lo_vars_dict = {
-        "gg": 19,
-        "gq": 21,
-        "qg": 15,
-        "qq": 6,
-    }
+    n3lo_vars_dict = {"gg": 19, "gq": 21, "qg": 15, "qq": 6}
     s_singlet_ad = 0
     cnt = 0
     for n_var in n3lo_vars_dict.values():
-        s_singlet_ad += covmat_n3lo_ad(name1, name2, deltas1[cnt:cnt+n_var], deltas2[cnt:cnt+n_var])
+        s_singlet_ad += covmat_n3lo_ad(
+            name1, name2, deltas1[cnt : cnt + n_var], deltas2[cnt : cnt + n_var]
+        )
         cnt += n_var
     return s_singlet_ad
@@ -357,7 +343,7 @@ def covmat_n3lo_ad(name1, name2, deltas1, deltas2):
 
     Normalization is given by:
 
         (n_pt - 1)
- 
+
     where:
         * n_pt = number of point presctiption
     """
@@ -431,7 +417,7 @@ def covs_pt_prescrip(
             s_ad = covmat_n3lo_singlet(name1, name2, deltas1[:-2], deltas2[:-2])
             s_cf = covmat_3pt(name1, name2, deltas1[-2:], deltas2[-2:])
             s = s_ad + s_cf
-        # n3lo 3 pt MHOU see also 
+        # n3lo 3 pt MHOU see also
         # see https://github.com/NNPDF/papers/blob/e2ac1832cf4a36dab83a696564eaa75a4e55f5d2/minutes/minutes-2023-08-18.txt#L148-L157
         elif l == 66:
             s_ad = covmat_n3lo_singlet(name1, name2, deltas1[:-4], deltas2[:-4])
@@ -737,10 +723,7 @@ def experimentplustheory_normcovmat_singleprocess(
 
 @table
 def experimentplusblocktheory_normcovmat(
-    procs_covmat,
-    theory_block_diag_covmat,
-    procs_data_values,
-    experimentplustheory_normcovmat,
+    procs_covmat, theory_block_diag_covmat, procs_data_values, experimentplustheory_normcovmat
 ):
     """Calculates the experiment + theory covariance matrix for scale
     variations normalised to data, block diagonal by data set."""
@@ -750,10 +733,7 @@ def experimentplusblocktheory_normcovmat(
 
 @table
 def experimentplustheory_normcovmat_custom(
-    procs_covmat,
-    theory_covmat_custom,
-    procs_data_values,
-    experimentplustheory_normcovmat,
+    procs_covmat, theory_covmat_custom, procs_data_values, experimentplustheory_normcovmat
 ):
     """Calculates the experiment + theory covariance matrix for scale
     variations normalised to data, correlations by process type."""
@@ -853,11 +833,7 @@ def abs_chi2_data_theory_dataset(each_dataset_results, total_covmat_datasets):
         chi2s = all_chi2_theory(datresults, covmat)
         central_result = central_chi2_theory(datresults, covmat)
         chi2data_array.append(
-            Chi2Data(
-                th_result.stats_class(chi2s[:, np.newaxis]),
-                central_result,
-                len(data_result),
-            )
+            Chi2Data(th_result.stats_class(chi2s[:, np.newaxis]), central_result, len(data_result))
         )
     return chi2data_array
@@ -870,11 +846,7 @@ def abs_chi2_data_theory_proc(procs_results, total_covmat_procs):
         chi2s = all_chi2_theory(expresults, covmat)
         central_result = central_chi2_theory(expresults, covmat)
         chi2data_array.append(
-            Chi2Data(
-                th_result.stats_class(chi2s[:, np.newaxis]),
-                central_result,
-                len(data_result),
-            )
+            Chi2Data(th_result.stats_class(chi2s[:, np.newaxis]), central_result, len(data_result))
        )
     return chi2data_array
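The ``covmat_n3lo_ad`` docstring touched above states the normalization ``(n_pt - 1)``, with ``n_pt`` the number of points in the prescription. A self-contained sketch of a sub-matrix built with that normalization, for the same-process case only; the actual function in construction.py is the authoritative version and also handles the cross-process case:

    import numpy as np

    def covmat_n3lo_ad_sketch(deltas1, deltas2):
        # deltas are shift vectors (varied prediction minus central), as
        # sliced per splitting function by covmat_n3lo_singlet above.
        n_pt = len(deltas1) + 1  # variations plus the central point
        return sum(np.outer(d1, d2) for d1, d2 in zip(deltas1, deltas2)) / (n_pt - 1)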
diff --git a/validphys2/src/validphys/theorycovariance/tests.py b/validphys2/src/validphys/theorycovariance/tests.py
index 88f5460927..4a10dbca99 100644
--- a/validphys2/src/validphys/theorycovariance/tests.py
+++ b/validphys2/src/validphys/theorycovariance/tests.py
@@ -215,8 +215,7 @@ def theory_covmat_custom_dataspecs(
 
 thx_corrmat = collect(
-    "theory_corrmat_custom_dataspecs",
-    ["combined_shift_and_theory_dataspecs", "theoryconfig"],
+    "theory_corrmat_custom_dataspecs", ["combined_shift_and_theory_dataspecs", "theoryconfig"]
 )
 
 shx_corrmat = collect(
@@ -225,8 +224,7 @@ def theory_covmat_custom_dataspecs(
 )
 
 thx_covmat = collect(
-    "theory_covmat_custom_dataspecs",
-    ["combined_shift_and_theory_dataspecs", "theoryconfig"],
+    "theory_covmat_custom_dataspecs", ["combined_shift_and_theory_dataspecs", "theoryconfig"]
 )
 
 combined_dataspecs_results = collect(