Skip to content

Commit

Permalink
Merge branch 'master' into n3lo_dis_fit
Browse files Browse the repository at this point in the history
  • Loading branch information
giacomomagni committed Jun 19, 2023
2 parents c281c9e + 1e5f78e commit 2fb3725
Show file tree
Hide file tree
Showing 5 changed files with 171 additions and 76 deletions.
2 changes: 1 addition & 1 deletion conda-recipe/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ requirements:
- sphinxcontrib-bibtex
- curio >=1.0
- pineappl >=0.5.8
- eko >=0.13.4,<0.14
- eko >=0.13.5,<0.14
- banana-hep >=0.6.8
- fiatlux

Expand Down
92 changes: 49 additions & 43 deletions n3fit/src/evolven3fit_new/eko_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
}

NFREF_DEFAULT = 5
NF0_DEFAULT = 4


def construct_eko_cards(
Expand Down Expand Up @@ -58,51 +57,77 @@ def construct_eko_cards(
theory = Loader().check_theoryID(theoryID).get_description()
theory.pop("FNS")
theory.update(theory_card_dict)
if "nfref" not in theory:
theory["nfref"] = NFREF_DEFAULT
if "nf0" not in theory:
theory["nf0"] = NF0_DEFAULT


# Prepare the thresholds according to MaxNfPdf
thresholds = {"c": theory["kcThr"], "b": theory["kbThr"], "t": theory["ktThr"]}
if theory["MaxNfPdf"] < 5:
thresholds["b"] = np.inf
if theory["MaxNfPdf"] < 6:
thresholds["t"] = np.inf


if "nfref" not in theory:
theory["nfref"] = NFREF_DEFAULT

# Set nf_0 according to the fitting scale unless set explicitly
mu0 = theory["Q0"]
if "nf0" not in theory:
if mu0 < theory["mc"] * thresholds["c"]:
theory["nf0"] = 3
elif mu0 < theory["mb"] * thresholds["b"]:
theory["nf0"] = 4
elif mu0 < theory["mt"] * thresholds["t"]:
theory["nf0"] = 5
else:
theory["nf0"] = 6

# Setting the thresholds in the theory card to inf if necessary
theory.update({"kbThr":thresholds["b"], "ktThr":thresholds["t"] })
theory.update({"kbThr": thresholds["b"], "ktThr": thresholds["t"]})

# The Legacy function is able to construct a theory card for eko starting from an NNPDF theory
legacy_class = runcards.Legacy(theory, {})
theory_card = legacy_class.new_theory

# if Qedref = Qref alphaem is running, otherwise it's fixed
if theory["QED"] > 0:
if np.isclose(theory["Qref"], theory["Qedref"]):
theory_card.couplings.em_running = True

# construct operator card
# Generate the q2grid, if q_fin and q_points are None, use `nf0` to select a default
q2_grid = utils.generate_q2grid(
theory["Q0"],
mu0,
q_fin,
q_points,
{theory["mb"]: thresholds["b"], theory["mt"]: thresholds["t"]},
{
theory["mc"]: thresholds["c"],
theory["mb"]: thresholds["b"],
theory["mt"]: thresholds["t"],
},
theory["nf0"],
)

# construct operator card
op_card = default_op_card
masses = np.array([theory["mc"], theory["mb"], theory["mt"]]) ** 2
thresholds_ratios = np.array([thresholds["c"], thresholds["b"], thresholds["t"]]) ** 2

atlas = Atlas(
matching_scales=MatchingScales(masses * thresholds_ratios),
origin=(theory["Q0"] ** 2, theory["nf0"]),
)
op_card.update(
{
"mu0": theory["Q0"],
"mugrid": [(float(np.sqrt(q2)), int(nf_default(q2, atlas))) for q2 in q2_grid],
}
origin=(mu0**2, theory["nf0"]),
)

# Create the eko operator q2grid
# This is a grid which contains information on (q, nf)
# in VFNS values at the matching scales need to be doubled so that they are considered in both sides

ep = 1e-4
mugrid = []
for q2 in q2_grid:
q = float(np.sqrt(q2))
if nf_default(q2 + ep, atlas) != nf_default(q2 - ep, atlas):
nf_l = int(nf_default(q2 - ep, atlas))
nf_u = int(nf_default(q2 + ep, atlas))
mugrid.append((q, nf_l))
mugrid.append((q, nf_u))
else:
mugrid.append((q, int(nf_default(q2, atlas))))

op_card.update({"mu0": theory["Q0"], "mugrid": mugrid})

op_card["xgrid"] = x_grid
# Specific defaults for evolven3fit evolution
if theory["ModEv"] == "TRN":
Expand All @@ -121,22 +146,3 @@ def construct_eko_cards(

op_card = runcards.OperatorCard.from_dict(op_card)
return theory_card, op_card


def split_evolgrid(evolgrid):
    """Split the evolgrid in blocks according to the number of flavors.

    The point at which nf changes is repeated, i.e. it appears both as the
    last entry of one block and as the first entry of the next, so that the
    matching scale is present on both sides of the threshold.

    Parameters
    ----------
    evolgrid : list(tuple)
        list of (q, nf) points; note that it is sorted in place.

    Returns
    -------
    list(list(tuple))
        one sub-list per nf block, with the boundary point shared between
        consecutive blocks.
    """
    if not evolgrid:
        return []
    evolgrid.sort()
    # Positions where the number of flavors changes; enumerate (instead of
    # list.index) avoids O(n^2) scans and is safe with duplicated entries.
    split_points = []
    current_nf = evolgrid[0][1]
    for position, (_, nf) in enumerate(evolgrid):
        if nf != current_nf:
            split_points.append(position)
            current_nf = nf
    blocks = []
    start = 0
    for split in split_points:
        # Include the boundary point in the closing block as well
        blocks.append(evolgrid[start : split + 1])
        start = split
    blocks.append(evolgrid[start:])
    return blocks
23 changes: 16 additions & 7 deletions n3fit/src/evolven3fit_new/evolve.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from collections import defaultdict
import logging
import pathlib
import sys
Expand Down Expand Up @@ -145,7 +146,7 @@ def load_fit(usr_path):
"""
nnfitpath = usr_path / "nnfit"
pdf_dict = {}
for yaml_file in nnfitpath.glob("replica_*/*.exportgrid"):
for yaml_file in nnfitpath.glob(f"replica_*/{usr_path.name}.exportgrid"):
data = yaml.safe_load(yaml_file.read_text(encoding="UTF-8"))
pdf_dict[yaml_file.parent.stem] = data
return pdf_dict
Expand Down Expand Up @@ -178,19 +179,27 @@ def evolve_exportgrid(exportgrid, eko, x_grid, qed):
# generate block to dump
targetgrid = eko.bases.targetgrid.tolist()

def ev_pdf(pid, x, Q2):
return x * evolved_pdf[Q2]["pdfs"][pid][targetgrid.index(x)]
# Finally separate by nf block (and order per nf/q)
by_nf = defaultdict(list)
for q, nf in sorted(eko.evolgrid, key=lambda ep: ep[1]):
by_nf[nf].append(q)
q2block_per_nf = {nf: sorted(qs) for nf, qs in by_nf.items()}

evolgrid_list = eko_utils.split_evolgrid(eko.evolgrid)
blocks = []
for evgrid in evolgrid_list:
for nf, q2grid in q2block_per_nf.items():

def pdf_xq2(pid, x, Q2):
x_idx = targetgrid.index(x)
return x * evolved_pdf[(Q2, nf)]["pdfs"][pid][x_idx]

block = genpdf.generate_block(
ev_pdf,
pdf_xq2,
xgrid=targetgrid,
evolgrid=evgrid,
sorted_q2grid=q2grid,
pids=basis_rotation.flavor_basis_pids,
)
blocks.append(block)

return blocks


Expand Down
74 changes: 69 additions & 5 deletions n3fit/src/evolven3fit_new/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from reportengine.compat import yaml
from validphys.pdfbases import PIDS_DICT

DEFAULT_Q2GRID = (
Q2GRID_Nf04 = (
np.array(
[
1.6500000e00,
Expand All @@ -22,7 +22,6 @@
3.8800751e00,
4.3584516e00,
4.9200000e00,
4.9200000e00,
5.5493622e00,
6.2897452e00,
7.1650687e00,
Expand Down Expand Up @@ -65,6 +64,62 @@
** 2
)

Q2GRID_Nf03 = (
np.array(
[
1.0000000e00,
1.0768843e00,
1.1642787e00,
1.2640247e00,
1.3783565e00,
1.5100000e00,
1.6573843e00,
1.8279487e00,
2.0263188e00,
2.2582323e00,
2.5308507e00,
2.8531703e00,
3.2365690e00,
3.6955380e00,
4.2486693e00,
4.9200000e00,
5.6571821e00,
6.5475141e00,
7.6300446e00,
8.9555329e00,
1.0590474e01,
1.2622686e01,
1.5169120e01,
1.8386905e01,
2.2489085e01,
2.7767274e01,
3.4624624e01,
4.3624282e01,
5.5561424e01,
7.1571582e01,
9.3295496e01,
1.2313315e02,
1.6464038e02,
2.2315640e02,
3.0681103e02,
4.2816505e02,
6.0692308e02,
8.7449251e02,
1.2817733e03,
1.9127020e03,
2.9082314e03,
4.5095982e03,
7.1379509e03,
1.1543948e04,
1.9094934e04,
3.2338760e04,
5.6137084e04,
1.0000000e05,
]
)
** 2
)


class LhapdfLike:
"""
Expand Down Expand Up @@ -123,15 +178,22 @@ def get_theoryID_from_runcard(usr_path):
return my_runcard["theory"]["theoryid"]


def generate_q2grid(Q0, Qfin, Q_points, match_dict):
def generate_q2grid(Q0, Qfin, Q_points, match_dict, nf0=None):
"""Generate the q2grid used in the final evolved pdfs or use the default grid if Qfin or Q_points is
not provided.
match_dict contains the couples (mass : factor) where factor is the number to be multiplied to mass
in order to obtain the relative matching scale.
"""
if Qfin is None and Q_points is None:
return DEFAULT_Q2GRID
if nf0 == 4:
return Q2GRID_Nf04
elif nf0 == 3:
return Q2GRID_Nf03
elif nf0 is None:
raise ValueError("In order to use a default grid, a value of nf0 must be provided")
else:
raise NotImplementedError(f"No default grid in Q available for {nf0=}")
elif Qfin is None or Q_points is None:
raise ValueError("q_fin and q_points must be specified either both or none of them")
else:
Expand All @@ -148,7 +210,9 @@ def generate_q2grid(Q0, Qfin, Q_points, match_dict):
frac_of_point = np.log(match_scale / Q_ini) / np.log(Qfin / Q0)
num_points = int(Q_points * frac_of_point)
num_points_list.append(num_points)
grids.append(np.geomspace(Q_ini**2, match_scale**2, num=num_points))
grids.append(
np.geomspace(Q_ini**2, match_scale**2, num=num_points, endpoint=False)
)
Q_ini = match_scale
num_points = Q_points - sum(num_points_list)
grids.append(np.geomspace(Q_ini**2, Qfin**2, num=num_points))
Expand Down
56 changes: 36 additions & 20 deletions n3fit/src/n3fit/tests/test_evolven3fit.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,6 @@ def assert_sorted(arr, title):
raise ValueError(f"The values of {title} are not sorted!")


def check_consecutive_members(grid, value):
    """Check if the first occurrence of value in grid is followed by value again"""
    as_list = list(grid)
    first_pos = as_list.index(value)
    return np.allclose(as_list[first_pos + 1], value)


def check_lhapdf_info(info_path):
"""Check the LHAPDF info file is correct"""
info = yaml.load(info_path.open("r", encoding="utf-8"))
Expand Down Expand Up @@ -77,24 +72,40 @@ def check_lhapdf_dat(dat_path, info):
# Use allclose here to avoid failing because of a difference in the 7th decimal place
np.testing.assert_allclose(q[-1], info["QMax"])


def test_generate_q2grid():
    """Tests the creation of the default grids for different values of nf
    and whether the matched grid is generating points in the desired locations
    """
    # Default grids are selected by nf0, each starting at its fitting scale
    # nf 3, q0 = 1.0
    nf3_grid = utils.generate_q2grid(None, None, None, {}, 3)
    assert nf3_grid[0] == 1.0**2
    # nf 4, q0 = 1.65
    nf4_grid = utils.generate_q2grid(None, None, None, {}, 4)
    assert nf4_grid[0] == 1.65**2

    # No default grid is available for any other value of nf
    for unsupported_nf in (1, 2, 5, 6):
        with pytest.raises(NotImplementedError):
            utils.generate_q2grid(None, None, None, {}, unsupported_nf)

    # Requesting a default grid without providing nf0 is an error
    with pytest.raises(ValueError):
        utils.generate_q2grid(None, None, None, {})

def test_utils():
# Testing the default grid
grid = utils.generate_q2grid(1.65, None, None, {})
assert_allclose(1.65**2, grid[0])
assert len(grid) == 50
# We expect the bottom mass to be repeated twice because it is intended once in 4 flavors and once in 5 flavors.
assert check_consecutive_members(grid, 4.92**2)
# Testing if the points of the matching are correctly repeated twice
matched_grid = utils.generate_q2grid(1.65, 1.0e5, 100, {4.92: 2.0, 100: 1.0})
assert len(matched_grid) == 100
t1 = 4.92 * 2.0
t2 = 100.0 * 1.0

assert_allclose((1.65) ** 2, matched_grid[0])
assert_allclose((1.0e5) ** 2, matched_grid[-1])
assert check_consecutive_members(matched_grid, (4.92 * 2.0) ** 2)
assert check_consecutive_members(matched_grid, (100.0 * 1.0) ** 2)
assert t1**2 in matched_grid
assert t2**2 in matched_grid


def test_utils():
# Testing the fake LHAPDF class
q20 = 1.65**2
x_grid = np.geomspace(1.0e-7, 1.0, 30)
fake_grids = [[x * (1.0 - x) for x in x_grid] for pid in PIDS_DICT.keys()]
fake_grids = [[x * (1.0 - x) for x in x_grid] for _ in PIDS_DICT.keys()]
pdf_grid = dict([(pid, v) for pid, v in zip(range(len(PIDS_DICT)), fake_grids)])
my_PDF = utils.LhapdfLike(pdf_grid, q20, x_grid)
assert my_PDF.hasFlavor(6)
Expand Down Expand Up @@ -133,10 +144,15 @@ def test_eko_utils(tmp_path):
t_card_dict["order"][0] == pto + 1
) # This is due to a different convention in eko orders due to QED
assert_allclose(op_card_dict["xgrid"], x_grid)
assert_allclose(op_card_dict["mugrid"][0], (1.65, 4))
# In theory 162 the charm threshold is at 1.51
# and we should find two entries, one for nf=3 and another one for nf=4
assert_allclose(op_card_dict["mugrid"][0], (1.51, 3))
assert_allclose(op_card_dict["mugrid"][1], (1.51, 4))
# Then (with the number of points we chose) it will happen at positions 2 and 3
# that we find the bottom threshold at two different nf
assert_allclose(op_card_dict["mugrid"][2], (4.92, 4))
assert_allclose(op_card_dict["mugrid"][3], (4.92, 5))
assert_allclose(op_card_dict["mugrid"][-1], (q_fin, 5))
# In this case there are not enough points to have twice the bottom matching scale
assert_allclose(op_card_dict["mugrid"][1], (4.92, 5))
# Testing computation of eko
save_path = tmp_path / "ekotest.tar"
runner.solve(t_card, op_card, save_path)
Expand Down

0 comments on commit 2fb3725

Please sign in to comment.