ATLAS lepton rapidity 8 TeV data #2071

Merged
merged 13 commits on Jun 11, 2024
8 changes: 8 additions & 0 deletions nnpdf_data/nnpdf_data/new_commondata/ATLAS_Z0_8TEV_ZMASS/data.yaml
@@ -0,0 +1,8 @@
data_central:
- 353.152
- 345.985
- 336.195
- 322.483
- 303.973
- 273.198
- 173.171
91 changes: 91 additions & 0 deletions nnpdf_data/nnpdf_data/new_commondata/ATLAS_Z0_8TEV_ZMASS/filter.py
@@ -0,0 +1,91 @@

import yaml
import numpy as np

from filter_utils import get_kinematics, get_data_values, get_systematics


def filter_ATLAS_Z0_8TEV_data_kinetic():
"""
Writes the central data values and kinematics
to their respective .yaml files.
"""
kin = get_kinematics()
central_values = get_data_values()

data_central_yaml = {"data_central": central_values}
kinematics_yaml = {"bins": kin}

# write central values and kinematics to yaml file
with open("data.yaml", "w") as file:
yaml.dump(data_central_yaml, file, sort_keys=False)

with open("kinematics.yaml", "w") as file:
yaml.dump(kinematics_yaml, file, sort_keys=False)


def filter_ATLAS_Z0_8TEV_uncertainties():
"""
Writes the uncertainties to the corresponding .yaml file.
"""
systematics = get_systematics()

# load correlation matrix from .txt file
corr_matrix = np.loadtxt("rawdata/zy.txt")

# generate covariance matrix from correlation matrix
tot_systematics = np.array([syst[0]['value'] for syst in systematics['tot']])

# TODO: this should be done with utils.correlation_to_covariance once that is merged in master
cov_matrix_no_lumi = np.outer(tot_systematics, tot_systematics) * corr_matrix

# add lumi uncertainty
lumi_unc = np.array([syst[0]['value'] for syst in systematics['lumi']])
lumi_cov = lumi_unc[:, None] @ lumi_unc[:, None].T

# add covariances
cov_matrix = cov_matrix_no_lumi + lumi_cov

# compute decomposition of covariance matrix so as to get artificial systematics
# TODO: use utils once merged in master
lamb, mat = np.linalg.eig(cov_matrix)
art_sys = np.multiply(np.sqrt(lamb), mat)

uncertainties = []

for i, unc in enumerate(art_sys.T):

name = f"artificial_uncertainty_{i+1}"
values = [unc[i] for i in range(len(unc))]
uncertainties.append([{"name": name, "values": values}])

# error definition
error_definitions = {}
errors = []

for sys in uncertainties:

error_definitions[sys[0]['name']] = {
"description": f"{sys[0]['name']}",
"treatment": "ADD",
"type": "CORR",
}

for i in range(cov_matrix.shape[0]):
error_value = {}

for sys in uncertainties:
error_value[sys[0]['name']] = float(sys[0]['values'][i])

errors.append(error_value)

uncertainties_yaml = {"definitions": error_definitions, "bins": errors}

# write uncertainties
with open(f"uncertainties.yaml", 'w') as file:
yaml.dump(uncertainties_yaml, file, sort_keys=False)


if __name__ == "__main__":
filter_ATLAS_Z0_8TEV_data_kinetic()
filter_ATLAS_Z0_8TEV_uncertainties()
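A note on the construction above: the correlation matrix from rawdata/zy.txt is rescaled by the total systematic uncertainties to obtain a covariance matrix, a fully correlated luminosity term is added, and the eigendecomposition of the result defines artificial systematics whose outer product reproduces the covariance. Below is a minimal, self-contained sketch of those steps with toy numbers; the correlation_to_covariance helper is only an assumption about the utility mentioned in the TODO, not its actual implementation.

import numpy as np


def correlation_to_covariance(correlation, uncertainties):
    # hypothetical helper (name taken from the TODO above): rescale a
    # correlation matrix by per-bin uncertainties to obtain a covariance matrix
    return np.outer(uncertainties, uncertainties) * correlation


# toy 3-bin example, not the actual ATLAS numbers
corr = np.array([[1.0, 0.5, 0.2],
                 [0.5, 1.0, 0.5],
                 [0.2, 0.5, 1.0]])
tot = np.array([3.0, 2.0, 1.5])    # total systematic uncertainty per bin
lumi = np.array([1.8, 1.4, 1.1])   # luminosity uncertainty per bin, fully correlated

cov = correlation_to_covariance(corr, tot) + np.outer(lumi, lumi)

# eigendecomposition -> artificial systematics, as in the filter above
lamb, mat = np.linalg.eig(cov)
art_sys = np.multiply(np.sqrt(lamb), mat)

# the artificial systematics reconstruct the covariance matrix
assert np.allclose(art_sys @ art_sys.T, cov)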
108 changes: 108 additions & 0 deletions nnpdf_data/nnpdf_data/new_commondata/ATLAS_Z0_8TEV_ZMASS/filter_utils.py
@@ -0,0 +1,108 @@
import yaml


def get_kinematics():
"""
returns the relevant kinematics values.
Parameters
----------
table : list
version : int
integer read from metadata.yaml that
indicated the version of the hepdata
tables
Returns
-------
list
list containing the kinematic values for all
hepdata tables
"""
kin = []

hepdata_table = "rawdata/HEPData-ins2698794-v1-Table_9.yaml"

with open(hepdata_table, 'r') as file:
input = yaml.safe_load(file)

for yll in input["independent_variables"][0]['values']:
kin_value = {
'y': {
'min': yll['low'],
'mid': 0.5 * (yll['low'] + yll['high']),
'max': yll['high'],
},
'm_Z2': {'min': None, 'mid': 8317.44, 'max': None},
'sqrts': {'min': None, 'mid': 8000.0, 'max': None},
}

kin.append(kin_value)

return kin


def get_data_values():
"""
returns the central data.
Parameters
----------
tables : list
list that enumerates the table number
version : int
integer read from metadata.yaml that
indicated the version of the hepdata
tables
Returns
-------
list
list containing the central values for all
hepdata tables
"""

data_central = []

hepdata_table = "rawdata/HEPData-ins2698794-v1-Table_9.yaml"

with open(hepdata_table, 'r') as file:
input = yaml.safe_load(file)

values = input['dependent_variables'][0]['values']

for value in values:
data_central.append(value['value'])

return data_central


def get_systematics():
""" """
tot_uncertainties = []
lumi_uncertainties = []

hepdata_table = "rawdata/HEPData-ins2698794-v1-Table_9.yaml"

with open(hepdata_table, 'r') as file:
input = yaml.safe_load(file)

dependent_vars = input['dependent_variables'][0]

# for each bin, errors[0] is the total uncertainty and errors[1] the luminosity uncertainty
for err_values in dependent_vars['values']:

tot_uncertainties.append(
[
{
"name": err_values['errors'][0]['label'],
"value": err_values['errors'][0]['symerror'],
}
]
)
lumi_uncertainties.append(
[
{
"name": err_values['errors'][1]['label'],
"value": err_values['errors'][1]['symerror'],
}
]
)

return {'tot': tot_uncertainties, 'lumi': lumi_uncertainties}
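For reference, the helpers above assume the standard HEPData YAML layout: a single independent variable holding the |y_ll| bin edges and a single dependent variable whose per-bin errors list carries the total and the luminosity uncertainty, in that order. A rough illustration of the structure being parsed follows; the error labels and magnitudes are invented placeholders, not the actual contents of Table 9.

import yaml

# invented, truncated example of what rawdata/HEPData-ins2698794-v1-Table_9.yaml
# is assumed to look like; only the keys accessed by the helpers above are shown
example_record = yaml.safe_load(
    """
independent_variables:
- values:
  - {low: 0.4, high: 0.8}
dependent_variables:
- values:
  - value: 353.152
    errors:
    - {label: 'sys_total', symerror: 3.1}
    - {label: 'sys_lumi', symerror: 6.4}
"""
)

# the same accesses performed by get_kinematics, get_data_values and get_systematics
yll = example_record["independent_variables"][0]["values"][0]
bin_entry = example_record["dependent_variables"][0]["values"][0]
print(0.5 * (yll["low"] + yll["high"]))    # bin midpoint
print(bin_entry["value"])                  # central value
print(bin_entry["errors"][0]["symerror"])  # total uncertainty
print(bin_entry["errors"][1]["symerror"])  # luminosity uncertainty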
85 changes: 85 additions & 0 deletions nnpdf_data/nnpdf_data/new_commondata/ATLAS_Z0_8TEV_ZMASS/kinematics.yaml
@@ -0,0 +1,85 @@
bins:
- y:
min: 0.4
mid: 0.6000000000000001
max: 0.8
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 0.8
mid: 1.0
max: 1.2
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 1.2
mid: 1.4
max: 1.6
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 1.6
mid: 1.8
max: 2
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 2
mid: 2.2
max: 2.4
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 2.4
mid: 2.5999999999999996
max: 2.8
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
- y:
min: 2.8
mid: 3.2
max: 3.6
m_Z2:
min: null
mid: 8317.44
max: null
sqrts:
min: null
mid: 8000.0
max: null
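The mid values above are the 0.5 * (low + high) midpoints computed in get_kinematics, which is why double-precision artifacts such as 0.6000000000000001 and 2.5999999999999996 appear, and the fixed m_Z2 of 8317.44 GeV^2 is simply (91.2 GeV)^2. A quick check, assuming only the bin edges listed above:

from decimal import Decimal

# midpoints as computed by get_kinematics; double-precision arithmetic
# reproduces the values written above, e.g. 0.6000000000000001 for the first bin
edges = [(0.4, 0.8), (0.8, 1.2), (1.2, 1.6), (1.6, 2.0), (2.0, 2.4), (2.4, 2.8), (2.8, 3.6)]
print([0.5 * (low + high) for low, high in edges])

# the fixed m_Z2 corresponds to a Z mass of 91.2 GeV (exact decimal arithmetic)
print(Decimal("91.2") ** 2)  # 8317.44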
59 changes: 59 additions & 0 deletions nnpdf_data/nnpdf_data/new_commondata/ATLAS_Z0_8TEV_ZMASS/metadata.yaml
@@ -0,0 +1,59 @@
# Generalia
setname: "ATLAS_Z0_8TEV_ZMASS"

version: 1

version_comment: "Initial implementation"

# References
arXiv:
url: "https://arxiv.org/pdf/2309.09318"
iNSPIRE:
url: "https://inspirehep.net/literature/2698794"
hepdata:
url: "https://www.hepdata.net/record/149333"
version: 1

nnpdf_metadata:
nnpdf31_process: "DY NC"
experiment: "ATLAS"


implemented_observables:

- observable_name: "LL"
observable:
description: "ATLAS 8 TeV, Z boson rapidity distribution in full phase space of decay leptons"
label: r"$d\\sigma/d|\|y_{ll}||$"
units: "[pb]"

ndata: 7

tables: [9]
process_type: DY_Z_Y

plotting:
dataset_label: 'ATLAS 8 TeV $Z \to l^+ l^-$, absolute rapidity'
kinematics_override: identity
x_scale: linear
plot_x: y

kinematic_coverage: [y, m_Z2, sqrts]

kinematics:
variables:
y: {description: "Z > l+ l- absolute rapidity", label: '$|y_{ll}|$', units: ""}
m_Z2: {description: "Z boson mass squared", label: '$M_Z^2$', units: "GeV"}
sqrts: {description: "center of mass energy", label: '$\sqrt(s)$', units: "GeV"}
file: kinematics.yaml

# Data
data_central: data.yaml
data_uncertainties:
- uncertainties.yaml

# Theory
theory:
FK_tables:
- - ATLAS_Z0_8TEV_ZMASS
operation: 'null'