diff --git a/mcdc/__init__.py b/mcdc/__init__.py index d12edfcc..bc43b278 100644 --- a/mcdc/__init__.py +++ b/mcdc/__init__.py @@ -20,6 +20,7 @@ weight_roulette, IC_generator, dsm, + uq, print_card, reset_cards, ) diff --git a/mcdc/card.py b/mcdc/card.py index 88cd4056..98bd8a6c 100644 --- a/mcdc/card.py +++ b/mcdc/card.py @@ -31,6 +31,7 @@ def reset(self): "total": False, "current": False, "eddington": False, + "exit": False, "mesh": make_card_mesh(), } @@ -116,6 +117,14 @@ def reset(self): "IC_cycle_stretch": 1.0, "branchless_collision": False, "dsm_order": 1, + "uq": False, + } + + self.uq_deltas = { + "tag": "Uq", + "nuclides": [], + "materials": [], + "surfaces": [], } @@ -152,6 +161,7 @@ def make_card_nuclide(G, J): card["sensitivity"] = False card["sensitivity_ID"] = 0 card["dsm_Np"] = 1.0 + card["uq"] = False return card @@ -176,6 +186,7 @@ def make_card_material(N_nuclide, G, J): card["chi_s"] = np.zeros([G, G]) card["chi_p"] = np.zeros([G, G]) card["sensitivity"] = False + card["uq"] = False return card @@ -284,3 +295,17 @@ def make_card_mesh(): "azi": np.array([-PI, PI]), "g": np.array([-INF, INF]), } + + +def make_card_uq(): + return { + "tag": "t", + "ID": -1, + "key": "k", + "mean": 0.0, + "delta": 0.0, + "distribution": "d", + "rng_seed": 0, + "group": False, + "group_group": False, + } diff --git a/mcdc/constant.py b/mcdc/constant.py index f7d87cab..e85a13e2 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -48,3 +48,4 @@ SEED_SPLIT_SOURCE_PRECURSOR = nb.uint64(0x546F6464) SEED_SPLIT_BANK = nb.uint64(0x5279616E) SEED_SPLIT_PARTICLE = nb.uint64(0) +SEED_SPLIT_UQ = nb.uint(0x5368656261) diff --git a/mcdc/input_.py b/mcdc/input_.py index 250c1d93..1210c432 100644 --- a/mcdc/input_.py +++ b/mcdc/input_.py @@ -17,6 +17,7 @@ make_card_universe, make_card_lattice, make_card_source, + make_card_uq, ) from mcdc.constant import ( GYRATION_RADIUS_ALL, @@ -1279,6 +1280,83 @@ def dsm(order=1): card["dsm_order"] = order +def uq(**kw): + def append_card(delta_card, global_tag): + delta_card["distribution"] = dist + delta_card["flags"] = [] + for key in kw.keys(): + check_support(parameter["tag"] + " parameter", key, parameter_list, False) + delta_card["flags"].append(key) + delta_card[key] = kw[key] + mcdc.input_deck.uq_deltas[global_tag].append(delta_card) + + mcdc.input_deck.technique["uq"] = True + # Make sure N_batch > 1 + if mcdc.input_deck.setting["N_batch"] <= 1: + print_error("Must set N_batch>1 with mcdc.setting() prior to mcdc.uq() call.") + + # Check uq parameter + parameter_ = check_support( + "uq parameter", + list(kw)[0], + ["nuclide", "material", "surface", "source"], + False, + ) + parameter = kw[parameter_] + del kw[parameter_] + parameter["uq"] = True + + # Confirm supplied distribution + check_requirement("uq", kw, ["distribution"]) + dist = check_support("distribution", kw["distribution"], ["uniform"], False) + del kw["distribution"] + + # Only remaining keywords should be the parameter delta(s) + + if parameter["tag"] == "Material": + parameter_list = [ + "capture", + "scatter", + "fission", + "nu_s", + "nu_p", + "nu_d", + "chi_p", + "chi_d", + "speed", + "decay", + ] + global_tag = "materials" + if parameter["N_nuclide"] == 1: + nuc_card = make_card_nuclide(parameter["G"], parameter["J"]) + nuc_card["ID"] = parameter["nuclide_IDs"][0] + append_card(nuc_card, "nuclides") + delta_card = make_card_material( + parameter["N_nuclide"], parameter["G"], parameter["J"] + ) + for name in ["ID", "nuclide_IDs", "nuclide_densities"]: + delta_card[name] = parameter[name] + 
elif parameter["tag"] == "Nuclide": + parameter_list = [ + "capture", + "scatter", + "fission", + "nu_s", + "nu_p", + "nu_d", + "chi_p", + "chi_d", + "speed", + "decay", + ] + global_tag = "nuclides" + delta_card = make_card_nuclide(parameter["G"], parameter["J"]) + delta_card["ID"] = parameter["ID"] + append_card(delta_card, global_tag) + # elif parameter['tag'] is 'Surface': + # elif parameter['tag'] is 'Source': + + # ============================================================================== # Util # ============================================================================== diff --git a/mcdc/kernel.py b/mcdc/kernel.py index 58aaf272..795a42c6 100644 --- a/mcdc/kernel.py +++ b/mcdc/kernel.py @@ -153,6 +153,16 @@ def rng_from_seed(seed): return rng_(seed) / RNG_MOD +@njit +def rng_array(seed, shape, size): + xi = np.zeros(size) + for i in range(size): + xi_seed = split_seed(i, seed) + xi[i] = rng_from_seed(xi_seed) + xi = xi.reshape(shape) + return xi + + # ============================================================================= # Particle source operations # ============================================================================= @@ -958,6 +968,14 @@ def surface_bc(P, surface, trans): surface_reflect(P, surface, trans) +@njit +def surface_exit_evaluate(P): + if P["surface_ID"] == 0: + return 0 + else: + return 1 + + @njit def surface_reflect(P, surface, trans): ux = P["ux"] @@ -1267,6 +1285,19 @@ def score_tracklength(P, distance, mcdc): score_eddington(s, g, t, x, y, z, flux, P, tally["score"]["eddington"]) +@njit +def score_exit(P, x, mcdc): + tally = mcdc["tally"] + material = mcdc["materials"][P["material_ID"]] + + s = P["sensitivity_ID"] + mu, azi = mesh_get_angular_index(P, tally["mesh"]) + g = mesh_get_energy_index(P, tally["mesh"]) + + flux = P["w"] / abs(P["ux"]) + score_flux(s, g, 0, x, 0, 0, mu, azi, flux, tally["score"]["exit"]) + + @njit def score_flux(s, g, t, x, y, z, mu, azi, flux, score): score["bin"][s, g, t, x, y, z, mu, azi] += flux @@ -1822,6 +1853,13 @@ def surface_crossing(P, mcdc): # Record old material for sensitivity quantification material_ID_old = P["material_ID"] + # Tally particle exit + if mcdc["tally"]["exit"] and not P["alive"]: + # Reflectance if P["surface_ID"] == 0, else transmittance + exit_idx = surface_exit_evaluate(P) + # Score on tally + score_exit(P, exit_idx, mcdc) + # Check new cell? 
if P["alive"] and not surface["reflective"]: cell = mcdc["cells"][P["cell_ID"]] @@ -3407,3 +3445,253 @@ def binary_search(val, grid): else: right = mid - 1 return int(right) + + +# ============================================================================= +# Variance Deconvolution +# ============================================================================= + + +@njit +def uq_resample(mean, delta, info): + # Currently only uniform distribution + shape = mean.shape + size = mean.size + xi = rng_array(info["rng_seed"], shape, size) + + return mean + (2 * xi - 1) * delta + + +@njit +def reset_material(mcdc, idm, material_uq): + # Assumes all nuclides have already been re-sampled + # Basic XS + material = mcdc["materials"][idm] + for tag in literal_unroll(("capture", "scatter", "fission", "total")): + if material_uq["flags"][tag]: + material[tag][:] = 0.0 + for n in range(material["N_nuclide"]): + nuc1 = mcdc["nuclides"][material["nuclide_IDs"][n]] + density = material["nuclide_densities"][n] + material[tag] += nuc1[tag] * density + + # Effective speed + if material_uq["flags"]["speed"]: + material["speed"][:] = 0.0 + for n in range(material["N_nuclide"]): + nuc2 = mcdc["nuclides"][material["nuclide_IDs"][n]] + density = material["nuclide_densities"][n] + material["speed"] += nuc2["speed"] * nuc2["total"] * density + if max(material["total"]) == 0.0: + material["speed"][:] = nuc2["speed"][:] + else: + material["speed"] /= material["total"] + + # Calculate effective spectra and multiplicities of scattering and prompt fission + G = material["G"] + if max(material["scatter"]) > 0.0: + shape = material["chi_s"].shape + nuSigmaS = np.zeros(shape) + for i in range(material["N_nuclide"]): + nuc3 = mcdc["nuclides"][material["nuclide_IDs"][i]] + density = material["nuclide_densities"][i] + SigmaS = np.diag(nuc3["scatter"]) * density + nu_s = np.diag(nuc3["nu_s"]) + chi_s = np.ascontiguousarray(nuc3["chi_s"].transpose()) + nuSigmaS += chi_s.dot(nu_s.dot(SigmaS)) + chi_nu_s = nuSigmaS.dot(np.diag(1.0 / material["scatter"])) + material["nu_s"] = np.sum(chi_nu_s, axis=0) + material["chi_s"] = np.ascontiguousarray( + chi_nu_s.dot(np.diag(1.0 / material["nu_s"])).transpose() + ) + if max(material["fission"]) > 0.0: + nuSigmaF = np.zeros((G, G), dtype=float) + for n in range(material["N_nuclide"]): + nuc4 = mcdc["nuclides"][material["nuclide_IDs"][n]] + density = material["nuclide_densities"][n] + SigmaF = np.diag(nuc4["fission"]) * density + nu_p = np.diag(nuc4["nu_p"]) + chi_p = np.ascontiguousarray(np.transpose(nuc4["chi_p"])) + nuSigmaF += chi_p.dot(nu_p.dot(SigmaF)) + chi_nu_p = nuSigmaF.dot(np.diag(1.0 / material["fission"])) + material["nu_p"] = np.sum(chi_nu_p, axis=0) + # Required because the below function otherwise returns an F-contiguous array + material["chi_p"] = np.ascontiguousarray( + np.transpose(chi_nu_p.dot(np.diag(1.0 / material["nu_p"]))) + ) + + # Calculate delayed and total fission multiplicities + if max(material["fission"]) > 0.0: + material["nu_f"][:] = material["nu_p"][:] + for j in range(material["J"]): + total = np.zeros(material["G"]) + for n in range(material["N_nuclide"]): + nuc5 = mcdc["nuclides"][material["nuclide_IDs"][n]] + density = material["nuclide_densities"][n] + total += nuc5["nu_d"][:, j] * nuc5["fission"] * density + material["nu_d"][:, j] = total / material["fission"] + material["nu_f"] += material["nu_d"][:, j] + + +@njit +def reset_nuclide(nuclide, nuclide_uq): + for name in literal_unroll( + ("speed", "decay", "capture", "fission", "nu_s", "nu_p") + ): + 
if nuclide_uq["flags"][name]: + nuclide[name] = uq_resample( + nuclide_uq["mean"][name], nuclide_uq["delta"][name], nuclide_uq["info"] + ) + + if nuclide_uq["flags"]["scatter"]: + scatter = uq_resample( + nuclide_uq["mean"]["scatter"], + nuclide_uq["delta"]["scatter"], + nuclide_uq["info"], + ) + nuclide["scatter"] = np.sum(scatter, 0) + nuclide["chi_s"][:, :] = np.swapaxes(scatter, 0, 1)[:, :] + for g in range(nuclide["G"]): + if nuclide["scatter"][g] > 0.0: + nuclide["chi_s"][g, :] /= nuclide["scatter"][g] + + if nuclide_uq["flags"]["total"]: + nuclide["total"][:] = ( + nuclide["capture"] + nuclide["scatter"] + nuclide["fission"] + ) + + if nuclide_uq["flags"]["nu_d"]: + nu_d = uq_resample( + nuclide_uq["mean"]["nu_d"], nuclide_uq["delta"]["nu_d"], nuclide_uq["info"] + ) + nuclide["nu_d"][:, :] = np.swapaxes(nu_d, 0, 1)[:, :] + + if nuclide_uq["flags"]["nu_f"]: # True if either nu_p or nu_d is true + nuclide["nu_f"] = nuclide["nu_p"] + for j in range(nuclide["J"]): + nuclide["nu_f"] += nuclide["nu_d"][:, j] + + # Prompt fission spectrum (If G == 1, all ones) + if nuclide_uq["flags"]["chi_p"]: + chi_p = uq_resample( + nuclide_uq["mean"]["chi_p"], + nuclide_uq["delta"]["chi_p"], + nuclide_uq["info"], + ) + nuclide["chi_p"][:, :] = np.swapaxes(chi_p, 0, 1)[:, :] + # Normalize + for g in range(nuclide["G"]): + if np.sum(nuclide["chi_p"][g, :]) > 0.0: + nuclide["chi_p"][g, :] /= np.sum(nuclide["chi_p"][g, :]) + + # Delayed fission spectrum (matrix of size JxG) + if nuclide_uq["flags"]["chi_d"]: + chi_d = uq_resample( + nuclide_uq["mean"]["chi_d"], + nuclide_uq["delta"]["chi_d"], + nuclide_uq["info"], + ) + # Transpose: [gout, dg] -> [dg, gout] + nuclide["chi_d"][:, :] = np.swapaxes(chi_d, 0, 1)[:, :] + # Normalize + for dg in range(nuclide["J"]): + if np.sum(nuclide["chi_d"][dg, :]) > 0.0: + nuclide["chi_d"][dg, :] /= np.sum(nuclide["chi_d"][dg, :]) + + +@njit +def uq_reset(mcdc, seed): + # Types of uq parameters: materials, nuclides + N = len(mcdc["technique"]["uq_"]["nuclides"]) + for i in range(N): + mcdc["technique"]["uq_"]["nuclides"][i]["info"]["rng_seed"] = split_seed( + i, seed + ) + idn = mcdc["technique"]["uq_"]["nuclides"][i]["info"]["ID"] + reset_nuclide(mcdc["nuclides"][idn], mcdc["technique"]["uq_"]["nuclides"][i]) + + M = len(mcdc["technique"]["uq_"]["materials"]) + for i in range(M): + mcdc["technique"]["uq_"]["materials"][i]["info"]["rng_seed"] = split_seed( + i, seed + ) + idm = mcdc["technique"]["uq_"]["materials"][i]["info"]["ID"] + reset_material(mcdc, idm, mcdc["technique"]["uq_"]["materials"][i]) + + +@njit +def uq_tally_closeout_history(mcdc): + tally = mcdc["tally"] + uq_tally = mcdc["technique"]["uq_tally"] + + for name in literal_unroll(score_list): + if uq_tally[name]: + uq_score_closeout_history(tally["score"][name], uq_tally["score"][name]) + + +@njit +def uq_score_closeout_history(score, uq_score): + # Assumes N_batch > 1 + # Accumulate square of history score, but continue to accumulate bin + history_bin = score["bin"] - uq_score["batch_bin"] + uq_score["batch_var"][:] += history_bin**2 + uq_score["batch_bin"] = score["bin"] + + +@njit +def uq_tally_closeout_batch(mcdc): + uq_tally = mcdc["technique"]["uq_tally"] + + for name in literal_unroll(score_list): + if uq_tally[name]: + # Reset bin + uq_tally["score"][name]["batch_bin"].fill(0.0) + uq_reduce_bin(uq_tally["score"][name]) + + +@njit +def uq_reduce_bin(score): + # MPI Reduce + buff = np.zeros_like(score["batch_var"]) + with objmode(): + MPI.COMM_WORLD.Reduce(np.array(score["batch_var"]), buff, MPI.SUM, 0) 
+ score["batch_var"][:] = buff + + +@njit +def uq_tally_closeout(mcdc): + tally = mcdc["tally"] + uq_tally = mcdc["technique"]["uq_tally"] + + for name in literal_unroll(score_list): + # Uq_tally implies tally, but tally does not imply uq_tally + if uq_tally[name]: + uq_score_closeout(name, mcdc) + elif tally[name]: + score_closeout(tally["score"][name], mcdc) + + +@njit +def uq_score_closeout(name, mcdc): + score = mcdc["tally"]["score"][name] + uq_score = mcdc["technique"]["uq_tally"]["score"][name] + + N_history = mcdc["setting"]["N_particle"] + + # At this point, score["sdev"] is still just the sum of the squared mean from every batch + uq_score["batch_var"] = (uq_score["batch_var"] / N_history - score["sdev"]) / ( + N_history - 1 + ) + + # If we're here, N_batch > 1 + N_history = mcdc["setting"]["N_batch"] + + # Store results + score["mean"][:] = score["mean"] / N_history + uq_score["batch_var"] /= N_history + uq_score["batch_bin"] = (score["sdev"] - N_history * np.square(score["mean"])) / ( + N_history - 1 + ) + score["sdev"][:] = np.sqrt( + (score["sdev"] / N_history - np.square(score["mean"])) / (N_history - 1) + ) diff --git a/mcdc/loop.py b/mcdc/loop.py index b30528a3..8762098c 100644 --- a/mcdc/loop.py +++ b/mcdc/loop.py @@ -35,6 +35,9 @@ def loop_fixed_source(mcdc): if mcdc["setting"]["N_batch"] > 1: with objmode(): print_header_batch(mcdc) + if mcdc["technique"]["uq"]: + seed_uq = kernel.split_seed(seed_batch, SEED_SPLIT_UQ) + kernel.uq_reset(mcdc, seed_uq) # Loop over time censuses for idx_census in range(mcdc["setting"]["N_census"]): @@ -65,9 +68,15 @@ def loop_fixed_source(mcdc): # Tally history closeout kernel.tally_reduce_bin(mcdc) kernel.tally_closeout_history(mcdc) + # Uq closeout + if mcdc["technique"]["uq"]: + kernel.uq_tally_closeout_batch(mcdc) # Tally closeout - kernel.tally_closeout(mcdc) + if mcdc["technique"]["uq"]: + kernel.uq_tally_closeout(mcdc) + else: + kernel.tally_closeout(mcdc) # ========================================================================= @@ -180,6 +189,10 @@ def loop_source(seed, mcdc): if not mcdc["setting"]["mode_eigenvalue"] and mcdc["setting"]["N_batch"] == 1: kernel.tally_closeout_history(mcdc) + # Tally history closeout for multi-batch uq simulation + if mcdc["technique"]["uq"]: + kernel.uq_tally_closeout_history(mcdc) + # Progress printout percent = (idx_work + 1.0) / mcdc["mpi_work_size"] if mcdc["setting"]["progress_bar"] and int(percent * 100.0) > N_prog: diff --git a/mcdc/main.py b/mcdc/main.py index c8443312..00d3b9ee 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -168,6 +168,8 @@ def prepare(): type_.make_type_lattice(input_deck.lattices) type_.make_type_source(G) type_.make_type_tally(N_tally_scores, input_deck.tally) + type_.make_type_uq_tally(N_tally_scores, input_deck.tally) + type_.make_type_uq(input_deck.uq_deltas, G, J) type_.make_type_setting(input_deck) type_.make_type_technique(N_particle, G, input_deck.technique) type_.make_type_global(input_deck) @@ -306,6 +308,7 @@ def prepare(): "iQMC", "IC_generator", "branchless_collision", + "uq", ]: mcdc["technique"][name] = input_deck.technique[name] @@ -405,6 +408,61 @@ def prepare(): # Threshold mcdc["technique"]["dsm_order"] = input_deck.technique["dsm_order"] + # ========================================================================= + # Variance Deconvolution - UQ + # ========================================================================= + if mcdc["technique"]["uq"]: + # Assumes that all tallies will also be uq tallies + for name in type_.uq_tally.names: + if name 
!= "score": + mcdc["technique"]["uq_tally"][name] = input_deck.tally[name] + + M = len(input_deck.uq_deltas["materials"]) + for i in range(M): + idm = input_deck.uq_deltas["materials"][i]["ID"] + mcdc["technique"]["uq_"]["materials"][i]["info"]["ID"] = idm + mcdc["technique"]["uq_"]["materials"][i]["info"][ + "distribution" + ] = input_deck.uq_deltas["materials"][i]["distribution"] + for name in input_deck.uq_deltas["materials"][i]["flags"]: + mcdc["technique"]["uq_"]["materials"][i]["flags"][name] = True + mcdc["technique"]["uq_"]["materials"][i]["delta"][ + name + ] = input_deck.uq_deltas["materials"][i][name] + flags = mcdc["technique"]["uq_"]["materials"][i]["flags"] + if flags["capture"] or flags["scatter"] or flags["fission"]: + flags["total"] = True + flags["speed"] = True + if flags["nu_p"] or flags["nu_d"]: + flags["nu_f"] = True + if mcdc["materials"][idm]["N_nuclide"] > 1: + for name in type_.uq_mat.names: + mcdc["technique"]["uq_"]["materials"][i]["mean"][ + name + ] = input_deck.materials[idm][name] + + N = len(input_deck.uq_deltas["nuclides"]) + for i in range(N): + mcdc["technique"]["uq_"]["nuclides"][i]["info"][ + "distribution" + ] = input_deck.uq_deltas["nuclides"][i]["distribution"] + idn = input_deck.uq_deltas["nuclides"][i]["ID"] + mcdc["technique"]["uq_"]["nuclides"][i]["info"]["ID"] = idn + for name in type_.uq_nuc.names: + mcdc["technique"]["uq_"]["nuclides"][i]["mean"][ + name + ] = input_deck.nuclides[idn][name] + for name in input_deck.uq_deltas["nuclides"][i]["flags"]: + mcdc["technique"]["uq_"]["nuclides"][i]["flags"][name] = True + mcdc["technique"]["uq_"]["nuclides"][i]["delta"][ + name + ] = input_deck.uq_deltas["nuclides"][i][name] + flags = mcdc["technique"]["uq_"]["nuclides"][i]["flags"] + if flags["capture"] or flags["scatter"] or flags["fission"]: + flags["total"] = True + if flags["nu_p"] or flags["nu_d"]: + flags["nu_f"] = True + # ========================================================================= # MPI # ========================================================================= @@ -578,6 +636,17 @@ def generate_hdf5(mcdc): "tally/" + name_h5 + "/sdev", data=np.squeeze(T["score"][name]["sdev"]), ) + if mcdc["technique"]["uq_tally"][name]: + mc_var = mcdc["technique"]["uq_tally"]["score"][name][ + "batch_var" + ] + tot_var = mcdc["technique"]["uq_tally"]["score"][name][ + "batch_bin" + ] + f.create_dataset( + "tally/" + name_h5 + "/uq_var", + data=np.squeeze(tot_var - mc_var), + ) # Eigenvalues if mcdc["setting"]["mode_eigenvalue"]: diff --git a/mcdc/type_.py b/mcdc/type_.py index 35a83d7b..2b6a851a 100644 --- a/mcdc/type_.py +++ b/mcdc/type_.py @@ -143,6 +143,7 @@ def make_type_nuclide(G, J): ("sensitivity", bool_), ("sensitivity_ID", int64), ("dsm_Np", float64), + ("uq", bool_), ] ) @@ -169,6 +170,7 @@ def make_type_material(G, J, Nmax_nuclide): ("chi_s", float64, (G, G)), ("chi_p", float64, (G, G)), ("sensitivity", bool_), + ("uq", bool_), ] ) @@ -326,6 +328,7 @@ def make_type_source(G): "total", "current", "eddington", + "exit", ) @@ -356,6 +359,7 @@ def make_type_score(shape): ["total", (Ns, Ng, Nt, Nx, Ny, Nz, Nmu, N_azi)], ["current", (Ns, Ng, Nt, Nx, Ny, Nz, 3)], ["eddington", (Ns, Ng, Nt, Nx, Ny, Nz, 6)], + ["exit", (Ns, Ng, Nt, 2, Ny, Nz, Nmu, N_azi)], ] # Add score flags to structure @@ -443,6 +447,7 @@ def make_type_technique(N_particle, G, card): ("iQMC", bool_), ("IC_generator", bool_), ("branchless_collision", bool_), + ("uq", bool_), ] # ========================================================================= @@ -569,10 +574,131 
@@ def make_type_technique(N_particle, G, card): ("dsm_order", int64), ] + # ========================================================================= + # Variance Deconvolution + # ========================================================================= + struct += [("uq_tally", uq_tally), ("uq_", uq)] + # Finalize technique type technique = np.dtype(struct) +# UQ +def make_type_uq_tally(Ns, tally_card): + global uq_tally + + def make_type_uq_score(shape): + return np.dtype( + [ + ("batch_bin", float64, shape), + ("batch_var", float64, shape), + ] + ) + + # Tally estimator flags + struct = [] + + # Mesh, but doesn't need to be added + mesh, Nx, Ny, Nz, Nt, Nmu, N_azi, Ng = make_type_mesh(tally_card["mesh"]) + + # Scores and shapes + scores_shapes = [ + ["flux", (Ns, Ng, Nt, Nx, Ny, Nz, Nmu, N_azi)], + ["density", (Ns, Ng, Nt, Nx, Ny, Nz, Nmu, N_azi)], + ["fission", (Ns, Ng, Nt, Nx, Ny, Nz, Nmu, N_azi)], + ["total", (Ns, Ng, Nt, Nx, Ny, Nz, Nmu, N_azi)], + ["current", (Ns, Ng, Nt, Nx, Ny, Nz, 3)], + ["eddington", (Ns, Ng, Nt, Nx, Ny, Nz, 6)], + ["exit", (Ns, Ng, Nt, 2, Ny, Nz, Nmu, N_azi)], + ] + + # Add score flags to structure + for i in range(len(scores_shapes)): + name = scores_shapes[i][0] + struct += [(name, bool_)] + + # Add scores to structure + scores_struct = [] + for i in range(len(scores_shapes)): + name = scores_shapes[i][0] + shape = scores_shapes[i][1] + if not tally_card[name]: + shape = (0,) * len(shape) + scores_struct += [(name, make_type_uq_score(shape))] + scores = np.dtype(scores_struct) + struct += [("score", scores)] + + # Make tally structure + uq_tally = np.dtype(struct) + + +def make_type_uq(uq_deck, G, J): + global uq, uq_nuc, uq_mat + + # def make_type_parameter(shape): + # return np.dtype( + # [ + # ("tag", str_), # nuclides, materials, surfaces, sources + # ("ID", int64), + # ("key", str_), + # ("mean", float64, shape), + # ("delta", float64, shape), + # ("distribution", str_), + # ("rng_seed", uint64), + # ] + # ) + + def make_type_parameter(G, J, decay=False): + # Fields are things that can have deltas + struct = [ + ("speed", float64, (G,)), + ("capture", float64, (G,)), + ("scatter", float64, (G, G)), + ("fission", float64, (G,)), + ("nu_s", float64, (G,)), + ("nu_p", float64, (G,)), + ("nu_d", float64, (G, J)), + ("chi_p", float64, (G, G)), + ] + struct += [("decay", float64, (J,)), ("chi_d", float64, (J, G))] + return np.dtype(struct) + + uq_nuc = make_type_parameter(G, J, True) + uq_mat = make_type_parameter(G, J) + + flags = np.dtype( + [ + ("speed", bool_), + ("decay", bool_), + ("total", bool_), + ("capture", bool_), + ("scatter", bool_), + ("fission", bool_), + ("nu_s", bool_), + ("nu_f", bool_), + ("nu_p", bool_), + ("nu_d", bool_), + ("chi_s", bool_), + ("chi_p", bool_), + ("chi_d", bool_), + ] + ) + info = np.dtype([("distribution", str_), ("ID", int64), ("rng_seed", uint64)]) + + container = np.dtype( + [("mean", uq_nuc), ("delta", uq_mat), ("flags", flags), ("info", info)] + ) + + N_nuclide = len(uq_deck["nuclides"]) + N_material = len(uq_deck["materials"]) + uq = np.dtype( + [("nuclides", container, (N_nuclide,)), ("materials", container, (N_material,))] + ) + + +param_names = ["tag", "ID", "key", "mean", "delta", "distribution", "rng_seed"] + + # ============================================================================== # Global # ============================================================================== diff --git a/test/regression/fixed_source/variance_deconv/__init__.py b/test/regression/fixed_source/variance_deconv/__init__.py 
new file mode 100644
index 00000000..e69de29b
diff --git a/test/regression/fixed_source/variance_deconv/answer.h5 b/test/regression/fixed_source/variance_deconv/answer.h5
new file mode 100644
index 00000000..4a0b3908
Binary files /dev/null and b/test/regression/fixed_source/variance_deconv/answer.h5 differ
diff --git a/test/regression/fixed_source/variance_deconv/test.py b/test/regression/fixed_source/variance_deconv/test.py
new file mode 100644
index 00000000..08b75a3d
--- /dev/null
+++ b/test/regression/fixed_source/variance_deconv/test.py
@@ -0,0 +1,61 @@
+import numpy as np
+import h5py
+
+import mcdc
+
+
+def test():
+    # =============================================================================
+    # Set model and run
+    # =============================================================================
+
+    m1 = mcdc.material(capture=np.array([0.90]))
+    m2 = mcdc.material(capture=np.array([0.15]))
+    m3 = mcdc.material(capture=np.array([0.60]))
+
+    s1 = mcdc.surface("plane-x", x=-1.0, bc="vacuum")
+    s2 = mcdc.surface("plane-x", x=2.0)
+    s3 = mcdc.surface("plane-x", x=5.0)
+    s4 = mcdc.surface("plane-x", x=6.0, bc="vacuum")
+
+    mcdc.cell([+s1, -s2], m1)
+    mcdc.cell([+s2, -s3], m2)
+    mcdc.cell([+s3, -s4], m3)
+
+    mcdc.source(point=[0.0, 0.0, 0.0], direction=[1.0, 0.0, 0.0])
+
+    scores = ["exit"]
+    mcdc.tally(scores=scores, x=np.linspace(0.0, 6.0, 2))
+
+    mcdc.setting(N_particle=1e1, N_batch=1e1, progress_bar=False)
+
+    mcdc.uq(material=m1, distribution="uniform", capture=np.array([0.7]))
+    mcdc.uq(material=m2, distribution="uniform", capture=np.array([0.12]))
+    mcdc.uq(material=m3, distribution="uniform", capture=np.array([0.5]))
+
+    mcdc.run()
+
+    # =========================================================================
+    # Check output
+    # =========================================================================
+
+    output = h5py.File("output.h5", "r")
+    answer = h5py.File("answer.h5", "r")
+    for score in scores:
+        name = "tally/" + score + "/mean"
+        a = output[name][:]
+        b = answer[name][:]
+        assert np.isclose(a, b).all()
+
+        name = "tally/" + score + "/sdev"
+        a = output[name][:]
+        b = answer[name][:]
+        assert np.isclose(a, b).all()
+
+        name = "tally/" + score + "/uq_var"
+        a = output[name][:]
+        b = answer[name][:]
+        assert np.isclose(a, b).all()
+
+    output.close()
+    answer.close()
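
A minimal post-processing sketch, not part of the patch: it shows how the new tally/<score>/uq_var dataset written by generate_hdf5 in mcdc/main.py might be consumed next to the existing mean and sdev datasets. It assumes output.h5 was produced by the variance_deconv regression test above (so the only score is "exit"); reading uq_var as the variance contribution of the sampled input parameters, i.e. batch_bin minus batch_var from uq_score_closeout, is an inference from kernel.py and main.py, not documented behavior.

import numpy as np
import h5py

with h5py.File("output.h5", "r") as f:
    mean = f["tally/exit/mean"][:]      # batch-averaged tally mean
    sdev = f["tally/exit/sdev"][:]      # reported standard deviation of the mean
    uq_var = f["tally/exit/uq_var"][:]  # tot_var - mc_var, written by generate_hdf5

print("exit tally mean      :", mean)
print("exit tally sdev      :", sdev)
# Clip small negative values that statistical noise can produce before the sqrt
print("UQ standard deviation:", np.sqrt(np.clip(uq_var, 0.0, None)))

Because the parameter deltas are re-sampled once per batch (loop_fixed_source splits the batch seed with SEED_SPLIT_UQ and calls kernel.uq_reset), the decomposition is only defined for multi-batch runs, which is why mcdc.uq() rejects N_batch <= 1 via print_error.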