Make nn work
Radonirinaunimi committed May 12, 2023
1 parent 66fb744 commit af6f859
Showing 7 changed files with 42 additions and 28 deletions.
4 changes: 3 additions & 1 deletion n3fit/src/n3fit/backends/keras_backend/operations.py
@@ -368,6 +368,7 @@ def backend_function(fun_name, *args, **kwargs):
fun = getattr(K, fun_name)
return fun(*args, **kwargs)


# Physics-Related Operations
def extract_neutron_pdf(raw_pdf, mask, axis=2):
"""Take a raw (bound)-proton PDFs and extract the neutron-bound
@@ -399,4 +400,5 @@ def add_target_dependence(xgrid, a_value):
# in the input. Now, as opposed to the free-proton fit, the input
# (still called `xgrid`) is a two-dimensional array.
# TODO: Find a better way (again) to propagate the A-dependence
- return np.stack((xgrid, np.repeat(a_value, xgrid.size)), axis=-1)
+ a_value_expand = np.full(xgrid.shape, a_value)
+ return np.stack((xgrid, a_value_expand), axis=-1)
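For reference, a minimal numpy sketch of what the reworked helper now returns (the grid values below are illustrative): each x point is paired with the atomic mass number A, so a 1-D xgrid of length nx becomes an (nx, 2) array.

import numpy as np

def add_target_dependence(xgrid, a_value):
    # Pair every x point with the mass number A of the target nucleus.
    a_value_expand = np.full(xgrid.shape, a_value)
    return np.stack((xgrid, a_value_expand), axis=-1)

xgrid = np.geomspace(1e-5, 1.0, num=4)
print(add_target_dependence(xgrid, a_value=208).shape)  # -> (4, 2)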
9 changes: 5 additions & 4 deletions n3fit/src/n3fit/layers/DIS.py
@@ -23,9 +23,10 @@ def construct_pdf(pdf, target, m_mask, n_mask):
# Compute bound-neutron PDF out of the bound-proton and construct
# the nuclear PDFs to be convolved with the FK tables.
if a_value != z_value:
- neutron_pdf = op.extract_neutron_pdf(pdf_masked, n_mask)
+ neutron_pdf = op.extract_neutron_pdf(pdf, n_mask)
+ neutron_pdf_masked = op.boolean_mask(neutron_pdf, m_mask, axis=2)
# Compute nuclear/proton PDF out of the bound-neutron/proton PDFs
- pdf_masked = z_value * pdf_masked + (a_value - z_value) * neutron_pdf
+ pdf_masked = z_value * pdf_masked + (a_value - z_value) * neutron_pdf_masked
# TODO: Check the Normalization if Applicable to ALL observables
pdf_masked /= a_value

@@ -84,8 +85,8 @@ def call(self, pdf):
if self.many_masks:
# In the case of a nuclear fit, a DIS dataset might contain different x
if self.splitting:
- splitted_pdf = op.split(pdf_raw, self.splitting, axis=1)
- for mask, target, pdf, fk in zip(self.all_masks, self.target_info, splitted_pdf, self.fktables):
+ splitted_pdf = op.split(pdf, self.splitting, axis=1)
+ for mask, target, pdf, fktable in zip(self.all_masks, self.target_info, splitted_pdf, self.fktables):
pdf_masked = construct_pdf(pdf, target, mask, self.neutron_mask)
res = op.tensor_product(pdf_masked, fktable, axes=[(1, 2), (2, 1)])
results.append(res)
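As a reading aid for the construct_pdf change above, a hedged numpy sketch of the per-nucleon combination it implements (shapes and names are illustrative, not the n3fit API):

import numpy as np

def nuclear_combination(proton_pdf, neutron_pdf, z_value, a_value):
    # Per-nucleon nuclear PDF: Z bound protons plus (A - Z) bound
    # neutrons, normalized by A, as done in construct_pdf.
    combined = z_value * proton_pdf + (a_value - z_value) * neutron_pdf
    return combined / a_value

# Sanity check: for a free proton (A = Z = 1) the combination is the identity.
p = np.random.rand(1, 8, 14)  # (batch, x-grid, flavours)
assert np.allclose(nuclear_combination(p, p, z_value=1, a_value=1), p)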
5 changes: 3 additions & 2 deletions n3fit/src/n3fit/layers/observable.py
@@ -12,6 +12,7 @@ def is_unique(list_of_arrays):
return False
return True


def generate_neutron_mask(number_fl):
"""Generate the mask to compute the neutron-bound PDFs from the
proton ones. Assumming `isospin asymmetry` the relation between
@@ -77,11 +78,11 @@ def __init__(self, fktable_data, fktable_arr, target_info, operation_name, nfl=1
self.xgrids = []
self.fktables = []
for fkdata, fk, info in zip(fktable_data, fktable_arr, target_info):
- xgrids_wa = op.add_target_dependence(fkdata.xgrid, info['A'])
xgrids_shape.append(fkdata.xgrid.size)
basis.append(fkdata.luminosity_mapping)
self.fktables.append(op.numpy_to_tensor(fk))
- self.xgrids.append(np.expand_dims(xgrids_wa, axis=0))
+ xgrids_wa = op.add_target_dependence(fkdata.xgrid, info['A'])
+ self.xgrids.append(xgrids_wa)

# check how many xgrids this dataset needs
if is_unique(self.xgrids):
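generate_neutron_mask is truncated in this diff; for orientation, a plausible sketch of a flavour-swap mask under isospin symmetry. The u/d index positions below are hypothetical placeholders, not the actual n3fit flavour basis:

import numpy as np

def generate_neutron_mask(number_fl=14):
    # Permutation matrix that swaps u <-> d and ubar <-> dbar and leaves
    # every other flavour in place; applied along the flavour axis it
    # turns a bound-proton PDF into a bound-neutron one.
    swap = {3: 4, 4: 3, 9: 10, 10: 9}  # hypothetical slots for d, u, dbar, ubar
    mask = np.zeros((number_fl, number_fl))
    for i in range(number_fl):
        mask[i, swap.get(i, i)] = 1.0
    return mask

# Usage idea: neutron_pdf = proton_pdf @ generate_neutron_mask(), contracting
# over the flavour axis, as op.extract_neutron_pdf does in the backend.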
2 changes: 1 addition & 1 deletion n3fit/src/n3fit/layers/preprocessing.py
@@ -108,7 +108,7 @@ def build(self, input_shape):
super(Preprocessing, self).build(input_shape)

def call(self, inputs, **kwargs):
- x = op.unstack(inputs, axis=1)[0]
+ x = inputs
pdf_list = []
for i in range(0, self.output_dim * 2, 2):
pdf_list.append(x ** (1 - self.kernel[i][0]) * (1 - x) ** self.kernel[i + 1][0])
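The loop in Preprocessing.call above evaluates one small-x/large-x prefactor per fitted flavour; a minimal numpy sketch of that arithmetic (parameter values are made up):

import numpy as np

def preprocessing_factor(x, alphas, betas):
    # f_i(x) = x**(1 - alpha_i) * (1 - x)**beta_i, one factor per flavour,
    # mirroring the pairwise walk over self.kernel in the layer above.
    x = x[..., np.newaxis]  # broadcast x over the flavour axis
    return x ** (1 - np.asarray(alphas)) * (1 - x) ** np.asarray(betas)

x = np.array([1e-4, 1e-2, 0.5])
print(preprocessing_factor(x, alphas=[1.2, 0.8], betas=[3.0, 4.5]).shape)  # -> (3, 2)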
34 changes: 20 additions & 14 deletions n3fit/src/n3fit/model_gen.py
@@ -215,16 +215,18 @@ def observable_generator(
)

# If the observable layer found that all input grids are equal, the splitting will be None
- # otherwise the different xgrids need to be stored separately
+ # otherwise the different xgrids need to be stored separately.
+ # After introducing the `A`-dependence, the `xgrids` inputs should now be
+ # extracted from the `Observable` class.
# Note: for pineappl grids, obs_layer_tr.splitting should always be None
if obs_layer_tr.splitting is None:
xgrid = obs_layer_tr.xgrids[0]
model_inputs.append(xgrid)
- dataset_xsizes.append(xgrid.shape[1])
+ dataset_xsizes.append(xgrid.shape[0])
else:
xgrids = obs_layer_tr.xgrids
model_inputs += xgrids
- dataset_xsizes.append(sum([i.shape[1] for i in xgrids]))
+ dataset_xsizes.append(sum([i.shape[0] for i in xgrids]))

model_obs_tr.append(obs_layer_tr)
model_obs_vl.append(obs_layer_vl)
@@ -236,8 +238,8 @@
model_inputs = model_inputs[0:1]
dataset_xsizes = dataset_xsizes[0:1]

- # Reshape all inputs arrays to be (1, nx, 2)
- model_inputs = np.concatenate(model_inputs, axis=1)
+ # Reshape all input arrays to be (nx, 2)
+ model_inputs = np.concatenate(model_inputs, axis=0)

full_nx = sum(dataset_xsizes)
if spec_dict["positivity"]:
@@ -528,7 +530,7 @@ def pdfNN_layer_generator(
impose_sumrule = "All"

if scaler:
- inp = 2
+ raise ValueError("Feature Scaling is not Supported yet!")

if activations is None:
activations = ["tanh", "linear"]
@@ -630,20 +632,24 @@ def dense_me(x):
)

# Apply preprocessing and basis
- def layer_fitbasis(x):
- """The tensor x has a expected shape of (1, None, {2,4})
- where x[...,0] corresponds to the feature_scaled input and
- x[...,-1] the original input.
+ def layer_fitbasis(nn_input):
+ """The tensor nn_input has an expected shape of (1, None, {2,4}).
+ In principle, nn_input[..., :2] represents the scaled input
+ while nn_input[..., 2:] represents the original input.
"""
- x_scaled = op.op_gather_keep_dims(x, 0, axis=-1)
- x_original = op.op_gather_keep_dims(x, -1, axis=-1)
+ # Extract the x value from the original input. The resulting
+ # shape should be (1, None, 1)
+ x_input = op.op_gather_keep_dims(nn_input, 0, axis=-1)
+ # TODO: Fix case where Feature Scaling is ON
+ # import ipdb; ipdb.set_trace()

- nn_output = dense_me(x_scaled)
+ nn_output = dense_me(nn_input)
if subtract_one:
nn_at_one = dense_me(layer_x_eq_1)
nn_output = op.op_subtract([nn_output, nn_at_one])

- ret = op.op_multiply([nn_output, layer_preproc(x_original)])
+ ret = op.op_multiply([nn_output, layer_preproc(x_input)])
+ # ret = nn_output
if basis_rotation.is_identity():
# if we don't need to rotate basis we don't want spurious layers
return ret
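To make the gather in layer_fitbasis concrete, a numpy stand-in for op.op_gather_keep_dims under the new (x, A) input layout (the helper name and shapes are for illustration only):

import numpy as np

def gather_keep_dims(tensor, index, axis=-1):
    # Select one slice along `axis` without dropping the axis, so a
    # (1, nx, 2) input yields (1, nx, 1) instead of (1, nx).
    return np.take(tensor, [index], axis=axis)

nn_input = np.random.rand(1, 5, 2)       # (batch, nx, (x, A)) pairs
x_input = gather_keep_dims(nn_input, 0)  # x values only, shape (1, 5, 1)
print(x_input.shape)

The full (x, A) pair feeds the dense network, while only the extracted x enters the preprocessing factor.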
4 changes: 2 additions & 2 deletions n3fit/src/n3fit/model_trainer.py
@@ -342,15 +342,15 @@ def _xgrid_generation(self):
inputs_unique.append(igrid)

# Concatenate the unique inputs
- input_arr = np.concatenate(inputs_unique, axis=1).T
+ input_arr = np.concatenate(inputs_unique, axis=0)
if self._scaler:
# Apply feature scaling if given
input_arr = self._scaler(input_arr)
input_layer = op.numpy_to_input(input_arr)

# The PDF model will be called with a concatenation of all inputs
# now the output needs to be split so that each experiment takes its corresponding input
- sp_ar = [[i.shape[1] for i in inputs_unique]]
+ sp_ar = [[i.shape[0] for i in inputs_unique]]
sp_kw = {"axis": 1}
sp_layer = op.as_layer(
op.split, op_args=sp_ar, op_kwargs=sp_kw, name="pdf_split"
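A small numpy sketch of the concatenate-then-split bookkeeping this hunk adjusts, now that each per-dataset grid is an (nx_i, 2) array of (x, A) pairs (sizes below are made up):

import numpy as np

# Two per-dataset grids of (x, A) pairs.
inputs_unique = [np.random.rand(3, 2), np.random.rand(5, 2)]
input_arr = np.concatenate(inputs_unique, axis=0)  # (8, 2), fed to the model

# The PDF model's output on the concatenated grid is split back per
# experiment along the x axis, as the pdf_split layer does above.
sizes = [g.shape[0] for g in inputs_unique]        # [3, 5]
pdf_out = np.random.rand(1, sum(sizes), 14)        # (batch, nx, flavours)
per_dataset = np.split(pdf_out, np.cumsum(sizes)[:-1], axis=1)
print([p.shape for p in per_dataset])              # [(1, 3, 14), (1, 5, 14)]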
12 changes: 8 additions & 4 deletions n3fit/src/n3fit/msr.py
@@ -74,20 +74,24 @@ def msr_impose(nx=int(2e3), mode='All', scaler=None):
normalizer = MSR_Normalization(mode=mode)

# 5. Make the xgrid array into a backend input layer so it can be given to the normalization
- xgrid_input = op.numpy_to_input(xgrid, name="integration_grid")
+ # TODO: For the time being, only A=1 is used during the integration. This needs to be modified
+ xinput_wa = op.add_target_dependence(xgrid, a_value=1)
+ nn_input = op.numpy_to_input(xinput_wa, name="integration_grid")

# Finally prepare a function which will take as input the output of the PDF model
# and will return it appropriately normalized.
def apply_normalization(layer_pdf):
"""
layer_pdf: output of the PDF, unnormalized, ready for the fktable
"""
- x_original = op.op_gather_keep_dims(xgrid_input, -1, axis=-1)
- pdf_integrand = op.op_multiply([division_by_x(x_original), layer_pdf(xgrid_input)])
+ x_original = op.op_gather_keep_dims(nn_input, 0, axis=-1)
+ pdf_integrand = op.op_multiply([division_by_x(x_original), layer_pdf(nn_input)])
+ # import ipdb; ipdb.set_trace()
normalization = normalizer(integrator(pdf_integrand))

def ultimate_pdf(x):
return layer_pdf(x) * normalization

return ultimate_pdf

- return apply_normalization, xgrid_input
+ return apply_normalization, nn_input
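To summarize what msr_impose wires together, a numpy caricature of the normalization step, assuming a simple quadrature with precomputed weights (the real code delegates to MSR_Normalization and the backend integrator):

import numpy as np

def msr_normalization_sketch(layer_pdf, xgrid, weights):
    # Evaluate the unnormalized PDF on the A = 1 integration grid,
    # divide by x, integrate, and invert to get the sum-rule factor.
    grid_wa = np.stack((xgrid, np.ones_like(xgrid)), axis=-1)  # (nx, 2)
    pdf_vals = layer_pdf(grid_wa)                              # (nx, n_flavours)
    integral = np.sum(weights[:, None] * pdf_vals / xgrid[:, None], axis=0)
    return 1.0 / integral  # one normalization per flavour

# ultimate_pdf(x) then returns layer_pdf(x) * that normalization.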
