diff --git a/examples/fixed_source/cooper_combo/input.py b/examples/fixed_source/cooper_combo/input.py
index 952a6083..c84c4f4e 100644
--- a/examples/fixed_source/cooper_combo/input.py
+++ b/examples/fixed_source/cooper_combo/input.py
@@ -56,12 +56,12 @@
 mcdc.tally.mesh_tally(
     scores=["flux"],
-    x=np.linspace(0.0, 4.0, 40),
-    y=np.linspace(0.0, 4.0, 40),
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
 )
 
 # Setting
-mcdc.setting(N_particle=50)
+mcdc.setting(N_particle=1e2)
 mcdc.implicit_capture()
 
 # Run
diff --git a/examples/fixed_source/kobayashi3-TD/input.py b/examples/fixed_source/kobayashi3-TD/input.py
index 4bdea735..98bef5e1 100644
--- a/examples/fixed_source/kobayashi3-TD/input.py
+++ b/examples/fixed_source/kobayashi3-TD/input.py
@@ -60,15 +60,26 @@
     scores=["flux"],
     x=np.linspace(0.0, 60.0, 31),
     y=np.linspace(0.0, 100.0, 51),
-    t=np.linspace(0.0, 200.0, 21),
+    # t=np.linspace(0.0, 200.0, 21),
+    # g=np.array([-0.5, 3.5, 6.5])  # fast (0, 1, 2, 3) and thermal (4, 5, 6) groups
 )
 mcdc.tally.cell_tally(source_cell, scores=["flux"])
 mcdc.tally.cell_tally(void_cell, scores=["flux"])
 mcdc.tally.cell_tally(shield_cell, scores=["flux"])
+
+mcdc.tally.cs_tally(
+    N_cs_bins=[150],
+    cs_bin_size=[8.0, 8.0],
+    x=np.linspace(0.0, 60.0, 31),
+    y=np.linspace(0.0, 100.0, 51),
+    scores=["flux"],
+)
+
 # Setting
-mcdc.setting(N_particle=80)
+mcdc.setting(N_particle=1e2)
 
 # Run
 mcdc.run()
diff --git a/examples/fixed_source/kobayashi3-TD/process.py b/examples/fixed_source/kobayashi3-TD/process.py
index 6e37fc1c..211b2d89 100644
--- a/examples/fixed_source/kobayashi3-TD/process.py
+++ b/examples/fixed_source/kobayashi3-TD/process.py
@@ -11,6 +11,10 @@
 
 # Results
 with h5py.File("output.h5", "r") as f:
+    cs_recon = f["tallies/cs_tally_0/flux/reconstruction"][:]
+    plt.imshow(cs_recon)
+    plt.show()
+
     tallies = f["tallies/mesh_tally_0"]
     flux = tallies["flux"]
     grid = tallies["grid"]
@@ -30,20 +34,3 @@
     print(
         f'cell {i+1} mean = {flux_score["mean"][()]}, sdev = {flux_score["sdev"][()]}'
     )
-
-fig, ax = plt.subplots()
-cax = ax.pcolormesh(X, Y, phi[0], vmin=phi[0].min(), vmax=phi[0].max())
-text = ax.text(0.02, 1.02, "", transform=ax.transAxes)
-ax.set_aspect("equal", "box")
-ax.set_xlabel("$y$ [cm]")
-ax.set_ylabel("$x$ [cm]")
-
-
-def animate(i):
-    cax.set_array(phi[i])
-    cax.set_clim(phi[i].min(), phi[i].max())
-    text.set_text(r"$t \in [%.1f,%.1f]$ s" % (t[i], t[i + 1]))
-
-
-anim = animation.FuncAnimation(fig, animate, interval=10, frames=len(t) - 1)
-plt.show()
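The kobayashi3-TD process script above reads the new tallies/cs_tally_0/flux/reconstruction dataset directly. A minimal sketch for eyeballing that reconstruction against the conventional mesh tally (dataset names as written by this patch; with the time filter commented out, both arrays are assumed to be 2D):

    import h5py
    import matplotlib.pyplot as plt

    with h5py.File("output.h5", "r") as f:
        recon = f["tallies/cs_tally_0/flux/reconstruction"][:]
        reference = f["tallies/mesh_tally_0/flux/mean"][:]

    # Side-by-side comparison of the reference tally and the CS reconstruction
    fig, (ax0, ax1) = plt.subplots(1, 2)
    ax0.imshow(reference)
    ax0.set_title("mesh tally mean")
    ax1.imshow(recon)
    ax1.set_title("CS reconstruction")
    plt.show()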
diff --git a/examples/fixed_source/sphere_in_cube/input.py b/examples/fixed_source/sphere_in_cube/input.py
index 61157306..3a858582 100644
--- a/examples/fixed_source/sphere_in_cube/input.py
+++ b/examples/fixed_source/sphere_in_cube/input.py
@@ -7,7 +7,7 @@
 # Homogeneous pure-fission sphere inside a pure-scattering cube
 
 # Set materials
-pure_f = mcdc.material(fission=np.array([1.0]), nu_p=np.array([1.2]))
+pure_f = mcdc.material(fission=np.array([1.0]), nu_p=np.array([1.1]))
 pure_s = mcdc.material(scatter=np.array([[1.0]]))
 
 # Set surfaces
@@ -40,17 +40,24 @@
 # =============================================================================
 
 mcdc.tally.mesh_tally(
     scores=["fission"],
-    x=np.linspace(0.0, 4.0, 2),
-    y=np.linspace(0.0, 4.0, 2),
-    z=np.linspace(0.0, 4.0, 2),
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
+    # z=np.linspace(0.0, 4.0, 41),
     # t=np.linspace(0.0, 200.0, 2),
 )
-
 mcdc.tally.cell_tally(sphere_cell, scores=["fission"])
 
+mcdc.tally.cs_tally(
+    N_cs_bins=[150],
+    cs_bin_size=np.array([3.0, 3.0]),
+    x=np.linspace(0.0, 4.0, 41),
+    y=np.linspace(0.0, 4.0, 41),
+    scores=["fission"],
+)
+
 # Setting
-mcdc.setting(N_particle=100)
+mcdc.setting(N_particle=1e3)
 mcdc.implicit_capture()
 
 # Run
diff --git a/examples/fixed_source/sphere_in_cube/process.py b/examples/fixed_source/sphere_in_cube/process.py
index 3cc7373f..c77451d0 100644
--- a/examples/fixed_source/sphere_in_cube/process.py
+++ b/examples/fixed_source/sphere_in_cube/process.py
@@ -1,23 +1,62 @@
 import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.fft as spfft
+import cvxpy as cp
+
+# Note: main.py contains a few np.save(...) calls added for ease of
+# post-processing, e.g. to dump the sampled center points and the sampling
+# matrix S. None of them are required for this input file and script to run,
+# but they may be useful for debugging.
 
-# Load results
 with h5py.File("output.h5", "r") as f:
-    # print(f["tallies"].keys())
-    print(f["input_deck"]["cell_tallies"].keys())
+    S = f["tallies"]["cs_tally_0"]["S"][:]
+    recon = f["tallies"]["cs_tally_0"]["fission"]["reconstruction"]
+    plt.imshow(recon)
+    plt.title(r"Reconstruction, $\lambda$ = 0.5")  # assuming l in main.py remains at 0.5
+    plt.colorbar()
+    plt.show()
+
+    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
+
+    mesh_results = f["tallies"]["mesh_tally_0"]["fission"]["mean"][:]
+    plt.imshow(mesh_results)
+    plt.title("mesh results")
+    plt.colorbar()
+    plt.show()
+
+Nx = 40
+Ny = 40
+N_fine_cells = Nx * Ny
+
+# Can use this for post-processing
+# mesh_b = S @ mesh_results.flatten()
+# b = mesh_b
+
+# Use this for analyzing the in-situ results
+cs_b = cs_results
+b = cs_b
 
-    for i in range(len(f["input_deck"]["cell_tallies"])):
-        fission_score = f[f"tallies/cell_tally_{i}/fission"]
+# Construct T and A
+idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
+idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
 
-        print(
-            f'for sphere {i+1}, mean = {fission_score["mean"][()]}, sdev = {fission_score["sdev"][()]}'
-        )
+T_inv = np.kron(idct_basis_y, idct_basis_x)
+A = S @ T_inv
 
-        # print(fission_score["mean"][()])
-        # print(fission_score["sdev"][()])
+# Basis pursuit denoising solver - change l to get different results
+vx = cp.Variable(N_fine_cells)
+l = 10
+objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
+prob = cp.Problem(objective)
+result = prob.solve(verbose=False)
+sparse_solution = np.array(vx.value).squeeze()
 
-        # print(f"fission_score mean = {fission_score["mean"][()]}")
-        # print(f"fission_score mean = {fission_score["sdev"][()]}")
+# Obtain the reconstruction
+recon = T_inv @ sparse_solution
+recon_reshaped = recon.reshape(Ny, Nx)
 
-        # cell = f["tallies/cell_tally_0/fission"]
-        # print(f'sphere1 mean = {cell["mean"][()]}')
-        # print(f'sphere2 sdev = {cell["sdev"][()]}')
+plt.imshow(recon_reshaped)
+plt.title(rf"Reconstruction, $\lambda$ = {l}")
+plt.colorbar()
+plt.show()
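The commented-out `mesh_b = S @ mesh_results.flatten()` block above encodes the method's consistency condition: each CS measurement should approximate the overlap-weighted sum of the fine-mesh tallies. A quick check along those lines, reusing S, cs_results, and mesh_results already loaded by the script (the printed metric is illustrative, not a pass/fail criterion):

    # CS bin tallies vs. the sampling matrix applied to the fine-mesh tally
    mesh_b = S @ mesh_results.flatten()
    rel = np.linalg.norm(cs_results - mesh_b) / np.linalg.norm(mesh_b)
    print(f"relative difference between in-situ CS bins and S @ mesh: {rel:.3e}")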
diff --git a/mcdc/card.py b/mcdc/card.py
index 02f11716..91af4a9a 100644
--- a/mcdc/card.py
+++ b/mcdc/card.py
@@ -414,3 +414,16 @@
         # Set card data
         self.cell_ID = cell_ID
         self.N_bin = 1
+
+
+class CSTallyCard(TallyCard):
+    def __init__(self):
+        TallyCard.__init__(self, "CS tally")
+
+        # Set card data
+        self.x = np.array([-INF, INF])
+        self.y = np.array([-INF, INF])
+        self.z = np.array([-INF, INF])
+        self.N_bin = 1
+        self.N_cs_bins = 1
+        self.cs_bin_size = np.array([1.0, 1.0])
diff --git a/mcdc/global_.py b/mcdc/global_.py
index 6cfbf94f..6dbb7f80 100644
--- a/mcdc/global_.py
+++ b/mcdc/global_.py
@@ -36,6 +36,7 @@ def reset(self):
         self.mesh_tallies = []
         self.surface_tallies = []
         self.cell_tallies = []
+        self.cs_tallies = []
 
         self.setting = {
             "tag": "Setting",
diff --git a/mcdc/kernel.py b/mcdc/kernel.py
index b7112521..7d9b5ff9 100644
--- a/mcdc/kernel.py
+++ b/mcdc/kernel.py
@@ -2007,7 +2007,142 @@ def score_cell_tally(P_arr, distance, tally, data, mcdc):
             SigmaF = get_MacroXS(XS_FISSION, material, P_arr, mcdc)
             score = flux * SigmaF
 
-        tally_bin[TALLY_SCORE, cell_idx] += score
+        adapt.global_add(tally_bin, (TALLY_SCORE, cell_idx + i), score)
+
+
+@njit
+def score_cs_tally(P_arr, distance, tally, data, mcdc):
+    # Each time this function is called, EVERY CS bin must be checked;
+    # the particle scores into every bin that its track intersects.
+    P = P_arr[0]
+    tally_bin = data[TALLY]
+    material = mcdc["materials"][P["material_ID"]]
+    N_cs_bins = tally["filter"]["N_cs_bins"]
+
+    cs_bin_size = tally["filter"]["cs_bin_size"]
+    cs_centers = tally["filter"]["cs_centers"]
+
+    stride = tally["stride"]
+    bin_idx = stride["tally"]
+
+    # Particle 4D direction (ut is the inverse speed)
+    ux = P["ux"]
+    uy = P["uy"]
+    uz = P["uz"]
+    ut = 1.0 / physics.get_speed(P_arr, mcdc)
+
+    # Particle initial and final coordinates
+    x = P["x"]
+    y = P["y"]
+    z = P["z"]
+    t = P["t"]
+    x_final = x + ux * distance
+    y_final = y + uy * distance
+    z_final = z + uz * distance
+    t_final = t + ut * distance
+
+    # Check each coarse bin
+    for j in range(N_cs_bins):
+        center = np.array([cs_centers[0][j], cs_centers[1][j]])
+        start = np.array([x, y])
+        end = np.array([x_final, y_final])
+
+        distance_inside = calculate_distance_in_coarse_bin(
+            start, end, distance, center, cs_bin_size
+        )
+
+        # Last bin covers the whole problem
+        if j == N_cs_bins - 1:
+            cs_bin_size_full_problem = np.array([INF, INF], dtype=np.float64)
+            distance_inside = calculate_distance_in_coarse_bin(
+                start, end, distance, center, cs_bin_size_full_problem
+            )
+
+        # The clipped track length can never exceed the full track length
+        distance_in_bin = np.minimum(distance, distance_inside)
+
+        # Calculate flux and other scores
+        flux = distance_in_bin * P["w"]
+        for i in range(tally["N_score"]):
+            score_type = tally["scores"][i]
+            if score_type == SCORE_FLUX:
+                score = flux
+            elif score_type == SCORE_DENSITY:
+                score = flux / physics.get_speed(P_arr, mcdc)
+            elif score_type == SCORE_TOTAL:
+                SigmaT = get_MacroXS(XS_TOTAL, material, P_arr, mcdc)
+                score = flux * SigmaT
+            elif score_type == SCORE_FISSION:
+                SigmaF = get_MacroXS(XS_FISSION, material, P_arr, mcdc)
+                score = flux * SigmaF
+
+            adapt.global_add(
+                tally_bin,
+                (TALLY_SCORE, bin_idx + j * tally["N_score"] + i),
+                score,
+            )
+
+
+@njit
+def cs_clip(p, q, t0, t1):
+    if p < 0:
+        t = q / p
+        if t > t1:
+            return False, t0, t1
+        if t > t0:
+            t0 = t
+    elif p > 0:
+        t = q / p
+        if t < t0:
+            return False, t0, t1
+        if t < t1:
+            t1 = t
+    elif q < 0:
+        return False, t0, t1
+    return True, t0, t1
+
+
+@njit
+def cs_tracklength_in_box(start, end, x_min, x_max, y_min, y_max):
+    # Uses the Liang-Barsky algorithm to find the track length inside a box
+    t0, t1 = 0.0, 1.0
+    dx = end[0] - start[0]
+    dy = end[1] - start[1]
+
+    # Perform clipping for each boundary
+    result, t0, t1 = cs_clip(-dx, start[0] - x_min, t0, t1)
+    if not result:
+        return 0.0
+    result, t0, t1 = cs_clip(dx, x_max - start[0], t0, t1)
+    if not result:
+        return 0.0
+    result, t0, t1 = cs_clip(-dy, start[1] - y_min, t0, t1)
+    if not result:
+        return 0.0
+    result, t0, t1 = cs_clip(dy, y_max - start[1], t0, t1)
+    if not result:
+        return 0.0
+
+    # Update start and end points based on clipping results
+    if t1 < 1:
+        end = start + t1 * np.array([dx, dy])
+    if t0 > 0:
+        start = start + t0 * np.array([dx, dy])
+
+    return np.linalg.norm(end - start)
+
+
+@njit
+def calculate_distance_in_coarse_bin(start, end, distance, center, cs_bin_size):
+    # Edges of the coarse bin
+    x_min = center[0] - cs_bin_size[0] / 2
+    x_max = center[0] + cs_bin_size[0] / 2
+    y_min = center[1] - cs_bin_size[1] / 2
+    y_max = center[1] + cs_bin_size[1] / 2
+
+    distance_inside = cs_tracklength_in_box(start, end, x_min, x_max, y_min, y_max)
+
+    return distance_inside
 
 
 @njit
@@ -2030,20 +2165,12 @@ def tally_reduce(data, mcdc):
 
 @njit
 def tally_accumulate(data, mcdc):
-
-    # print(f'fixed_source_data = {data[TALLY][TALLY_SCORE][-1]}')
-
     tally_bin = data[TALLY]
-    # print(data[0][0, 30000], data[0][0, 30001])
     N_bin = tally_bin.shape[1]
 
-    # print(f'N_bin = {N_bin}')
-
     for i in range(N_bin):
         # Accumulate score and square of score into sum and sum_sq
         score = tally_bin[TALLY_SCORE, i]
-        # if (score != 0 and i > 29999):
-        #     print(f'score = {score}, i = {i}')
         tally_bin[TALLY_SUM, i] += score
         tally_bin[TALLY_SUM_SQ, i] += score * score
 
@@ -2412,6 +2539,11 @@ def move_to_event(P_arr, data, mcdc):
             ID = cell["tally_IDs"][i]
             tally = mcdc["cell_tallies"][ID]
             score_cell_tally(P_arr, distance, tally, data, mcdc)
+
+    # CS tallies
+    for tally in mcdc["cs_tallies"]:
+        score_cs_tally(P_arr, distance, tally, data, mcdc)
+
     if mcdc["setting"]["mode_eigenvalue"]:
         eigenvalue_tally(P_arr, distance, mcdc)
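cs_tracklength_in_box above is a 2D Liang-Barsky clip: the segment from start to end is parameterized as start + t * (end - start), each of the four half-plane tests shrinks the admissible interval [t0, t1], and the surviving sub-segment's length is the track length scored to that bin. A self-contained plain-Python mirror with two spot checks (illustrative only; the njit version in the patch is the one actually used):

    import numpy as np

    def tracklength_in_box(start, end, x_min, x_max, y_min, y_max):
        t0, t1 = 0.0, 1.0
        dx, dy = end[0] - start[0], end[1] - start[1]
        for p, q in ((-dx, start[0] - x_min), (dx, x_max - start[0]),
                     (-dy, start[1] - y_min), (dy, y_max - start[1])):
            if p == 0.0:
                if q < 0.0:
                    return 0.0  # parallel to this boundary and outside it
            else:
                t = q / p
                if p < 0.0:
                    if t > t1:
                        return 0.0
                    t0 = max(t0, t)
                else:
                    if t < t0:
                        return 0.0
                    t1 = min(t1, t)
        # Surviving fraction of the segment times its full length
        return float(np.hypot(dx, dy) * (t1 - t0))

    print(tracklength_in_box((0.0, 0.0), (1.0, 1.0), 0.0, 1.0, 0.0, 1.0))  # sqrt(2)
    print(tracklength_in_box((2.0, 2.0), (3.0, 2.0), 0.0, 1.0, 0.0, 1.0))  # 0.0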
diff --git a/mcdc/main.py b/mcdc/main.py
index 0226351c..f2778de3 100644
--- a/mcdc/main.py
+++ b/mcdc/main.py
@@ -1,12 +1,12 @@
 import argparse, os, sys
 import importlib.metadata
 import mcdc.config as config
-
 import matplotlib.pyplot as plt
-from matplotlib import colors as mpl_colors
-
 import numba as nb
-
+from matplotlib import colors as mpl_colors
+import scipy.fft as spfft
+from scipy.stats.qmc import Halton
+import cvxpy as cp
 
 from mcdc.card import UniverseCard
 from mcdc.print_ import (
@@ -86,6 +86,11 @@ def run():
         loop_fixed_source(data_arr, mcdc_arr)
     mcdc["runtime_simulation"] = MPI.Wtime() - simulation_start
 
+    # Compressed sensing reconstruction (skipped when no CS tally is defined)
+    if mcdc["cs_tallies"].size > 0:
+        N_cs_bins = mcdc["cs_tallies"]["filter"]["N_cs_bins"][0]
+        if N_cs_bins != 0:
+            cs_reconstruct(data, mcdc)
+
     # Output: generate hdf5 output files
     output_start = MPI.Wtime()
     generate_hdf5(data, mcdc)
@@ -99,6 +104,122 @@ def run():
     closeout(mcdc)
 
 
+def calculate_cs_A(data, mcdc):
+    x_grid = mcdc["mesh_tallies"]["filter"]["x"][0]
+    y_grid = mcdc["mesh_tallies"]["filter"]["y"][0]
+    Nx = len(x_grid) - 1
+    Ny = len(y_grid) - 1
+
+    N_cs_bins = mcdc["cs_tallies"]["filter"]["N_cs_bins"][0]
+    cs_bin_size = mcdc["cs_tallies"]["filter"]["cs_bin_size"][0]
+
+    S = [[] for _ in range(N_cs_bins)]
+
+    [x_centers, y_centers] = mcdc["cs_tallies"]["filter"]["cs_centers"][0]
+    x_centers[-1] = (x_grid[-1] + x_grid[0]) / 2
+    y_centers[-1] = (y_grid[-1] + y_grid[0]) / 2
+
+    # Calculate the overlap grid for each bin, and flatten into a row of S
+    for ibin in range(N_cs_bins):
+        if ibin == N_cs_bins - 1:
+            # Last bin spans the full problem extent (could also use -INF, INF)
+            cs_bin_size = np.array([x_grid[-1] - x_grid[0], y_grid[-1] - y_grid[0]])
+
+        bin_x_min = x_centers[ibin] - cs_bin_size[0] / 2
+        bin_x_max = x_centers[ibin] + cs_bin_size[0] / 2
+        bin_y_min = y_centers[ibin] - cs_bin_size[1] / 2
+        bin_y_max = y_centers[ibin] + cs_bin_size[1] / 2
+
+        overlap = np.zeros((len(y_grid) - 1, len(x_grid) - 1))
+
+        for i in range(len(y_grid) - 1):
+            for j in range(len(x_grid) - 1):
+                cell_x_min = x_grid[j]
+                cell_x_max = x_grid[j + 1]
+                cell_y_min = y_grid[i]
+                cell_y_max = y_grid[i + 1]
+
+                # Calculate overlap in x and y directions
+                overlap_x = np.maximum(
+                    0,
+                    np.minimum(bin_x_max, cell_x_max)
+                    - np.maximum(bin_x_min, cell_x_min),
+                )
+                overlap_y = np.maximum(
+                    0,
+                    np.minimum(bin_y_max, cell_y_max)
+                    - np.maximum(bin_y_min, cell_y_min),
+                )
+
+                # Calculate fractional overlap
+                cell_area = (cell_x_max - cell_x_min) * (cell_y_max - cell_y_min)
+                overlap[i, j] = (overlap_x * overlap_y) / cell_area
+
+        S[ibin] = overlap.flatten()
+    S = np.array(S)
+    mcdc["cs_tallies"]["filter"]["cs_S"] = S
+
+    assert np.allclose(S[-1], np.ones(Nx * Ny)), "Last row of S must be all ones"
+    assert S.shape[1] == Nx * Ny, "Size of S must match Nx * Ny."
+    assert (
+        S.shape[1] == mcdc["cs_tallies"]["N_bin"][0]
+    ), "Size of S must match number of cells in desired mesh tally"
+
+    # Construct the inverse DCT matrix T_inv
+    # TODO: consider a more direct construction of the DCT basis
+    idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
+    idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
+
+    T_inv = np.kron(idct_basis_y, idct_basis_x)
+    A = S @ T_inv
+    return A, T_inv
+
+
+def calculate_cs_sparse_solution(data, mcdc, A, b):
+    N_fine_cells = mcdc["cs_tallies"]["N_bin"][0]
+
+    # Set up the problem with CVXPY
+    vx = cp.Variable(N_fine_cells)
+
+    # Basis pursuit denoising
+    l = 0.5
+    objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
+    prob = cp.Problem(objective)
+    result = prob.solve(verbose=False)
+
+    # # Basis pursuit
+    # objective = cp.Minimize(cp.norm(vx, 1))
+    # constraints = [A @ vx == b]
+    # prob = cp.Problem(objective, constraints)
+    # result = prob.solve(verbose=True)
+    # print(f'vx.value = {vx.value}')
+
+    # Format the sparse solution
+    sparse_solution = np.array(vx.value).squeeze()
+
+    return sparse_solution
+
+
+def cs_reconstruct(data, mcdc):
+    tally_bin = data[TALLY]
+    tally = mcdc["cs_tallies"][0]
+    stride = tally["stride"]
+    bin_idx = stride["tally"]
+    N_cs_bins = tally["filter"]["N_cs_bins"]
+    Nx = len(mcdc["mesh_tallies"]["filter"]["x"][0]) - 1
+    Ny = len(mcdc["mesh_tallies"]["filter"]["y"][0]) - 1
+
+    b = tally_bin[TALLY_SUM, bin_idx : bin_idx + N_cs_bins]
+
+    A, T_inv = calculate_cs_A(data, mcdc)
+    x = calculate_cs_sparse_solution(data, mcdc, A, b)
+
+    recon = T_inv @ x
+    recon_reshaped = recon.reshape(Ny, Nx)
+
+    tally["filter"]["cs_reconstruction"] = recon_reshaped
+
+
 # =============================================================================
 # utilities for handling discrepancies between input and program types
 # =============================================================================
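calculate_cs_sparse_solution solves basis pursuit denoising, minimize 0.5 * ||A v - b||_2 + l * ||v||_1, where A = S @ T_inv maps DCT coefficients to CS measurements. A toy, self-contained version of the same CVXPY formulation on synthetic data, useful for sanity-checking the solver setup independently of MC/DC (all names and sizes here are illustrative):

    import numpy as np
    import cvxpy as cp

    rng = np.random.default_rng(0)
    n, m, k = 64, 24, 3  # unknowns, measurements, nonzeros
    A = rng.standard_normal((m, n))
    v_true = np.zeros(n)
    v_true[rng.choice(n, size=k, replace=False)] = rng.standard_normal(k)
    b = A @ v_true

    # Same objective as calculate_cs_sparse_solution, with a toy penalty weight
    vx = cp.Variable(n)
    l = 0.1
    objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
    cp.Problem(objective).solve()
    print("max abs recovery error:", np.abs(vx.value - v_true).max())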
@@ -297,6 +418,28 @@ def dd_mesh_bounds(idx):
     return mesh_xn, mesh_xp, mesh_yn, mesh_yp, mesh_zn, mesh_zp
 
 
+def generate_cs_centers(mcdc, N_dim=3, seed=123456789):
+    N_cs_bins = int(mcdc["cs_tallies"]["filter"]["N_cs_bins"])
+    # Note: the limits are stored as (max, min); the affine map below therefore
+    # mirrors the unit square but still covers the full problem extent.
+    x_lims = (
+        mcdc["cs_tallies"]["filter"]["x"][0][-1],
+        mcdc["cs_tallies"]["filter"]["x"][0][0],
+    )
+    y_lims = (
+        mcdc["cs_tallies"]["filter"]["y"][0][-1],
+        mcdc["cs_tallies"]["filter"]["y"][0][0],
+    )
+
+    # Generate Halton sequence according to the seed
+    halton_seq = Halton(d=N_dim, seed=seed)
+    points = halton_seq.random(n=N_cs_bins)
+
+    # Extract x and y coordinates as separate tuples, scaled to the problem dimensions
+    x_coords = tuple(points[:, 0] * (x_lims[1] - x_lims[0]) + x_lims[0])
+    y_coords = tuple(points[:, 1] * (y_lims[1] - y_lims[0]) + y_lims[0])
+
+    return (x_coords, y_coords)
+
+
 def prepare():
     """
     Preparing the MC transport simulation:
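generate_cs_centers draws the bin centers from a 3D Halton sequence with a fixed seed, so the sampling pattern is low-discrepancy and reproducible across runs. A standalone look at what it produces, scaled to the 4 cm x 4 cm sphere_in_cube problem (d, seed, and n match the defaults and examples above):

    import numpy as np
    from scipy.stats.qmc import Halton

    halton_seq = Halton(d=3, seed=123456789)
    points = halton_seq.random(n=150)      # N_cs_bins = 150 as in the examples
    x_coords = points[:, 0] * 4.0          # map the unit square to the problem extent
    y_coords = points[:, 1] * 4.0
    print(x_coords.min(), x_coords.max())  # quasi-uniform coverage of (0, 4)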
@@ -359,6 +502,7 @@ def prepare():
     type_.make_type_mesh_tally(input_deck)
     type_.make_type_surface_tally(input_deck)
     type_.make_type_cell_tally(input_deck)
+    type_.make_type_cs_tally(input_deck)
     type_.make_type_setting(input_deck)
     type_.make_type_uq(input_deck)
     type_.make_type_domain_decomp(input_deck)
@@ -681,6 +825,7 @@ def prepare():
     N_mesh_tally = len(input_deck.mesh_tallies)
     N_surface_tally = len(input_deck.surface_tallies)
     N_cell_tally = len(input_deck.cell_tallies)
+    N_cs_tally = len(input_deck.cs_tallies)
     tally_size = 0
 
     # Mesh tallies
@@ -880,22 +1025,69 @@ def prepare():
         # Filter strides
         stride = N_score
         if Nt > 1:
-            mcdc["mesh_tallies"][i]["stride"]["t"] = stride
+            mcdc["cell_tallies"][i]["stride"]["t"] = stride
             stride *= Nt
         if Ng > 1:
-            mcdc["mesh_tallies"][i]["stride"]["g"] = stride
+            mcdc["cell_tallies"][i]["stride"]["g"] = stride
             stride *= Ng
         if N_azi > 1:
-            mcdc["mesh_tallies"][i]["stride"]["azi"] = stride
+            mcdc["cell_tallies"][i]["stride"]["azi"] = stride
             stride *= N_azi
         if Nmu > 1:
-            mcdc["mesh_tallies"][i]["stride"]["mu"] = stride
+            mcdc["cell_tallies"][i]["stride"]["mu"] = stride
             stride *= Nmu
 
         # Set tally stride and accumulate total tally size
         mcdc["cell_tallies"][i]["stride"]["tally"] = tally_size
         tally_size += mcdc["cell_tallies"][i]["N_bin"]
 
+    # CS tallies
+    for i in range(N_cs_tally):
+        # Direct assignment
+        copy_field(mcdc["cs_tallies"][i], input_deck.cs_tallies[i], "N_bin")
+
+        mcdc["cs_tallies"][i]["filter"]["N_cs_bins"] = input_deck.cs_tallies[
+            i
+        ].N_cs_bins[0]
+        # Copy both components of the (x, y) bin size
+        mcdc["cs_tallies"][i]["filter"]["cs_bin_size"] = input_deck.cs_tallies[
+            i
+        ].cs_bin_size
+
+        # Filters (variables with possible different sizes)
+        if not input_deck.technique["domain_decomposition"]:
+            for name in ["x", "y", "z", "t", "mu", "azi", "g"]:
+                N = len(getattr(input_deck.cs_tallies[i], name))
+                mcdc["cs_tallies"][i]["filter"][name][:N] = getattr(
+                    input_deck.cs_tallies[i], name
+                )
+
+        mcdc["cs_tallies"][i]["filter"]["cs_centers"] = generate_cs_centers(mcdc)
+
+        # Set tally scores
+        N_score = len(input_deck.cs_tallies[i].scores)
+        mcdc["cs_tallies"][i]["N_score"] = N_score
+        for j in range(N_score):
+            score_name = input_deck.cs_tallies[i].scores[j]
+            score_type = None
+            if score_name == "flux":
+                score_type = SCORE_FLUX
+            elif score_name == "density":
+                score_type = SCORE_DENSITY
+            elif score_name == "total":
+                score_type = SCORE_TOTAL
+            elif score_name == "fission":
+                score_type = SCORE_FISSION
+            elif score_name == "net-current":
+                score_type = SCORE_NET_CURRENT
+            mcdc["cs_tallies"][i]["scores"][j] = score_type
+
+        # Update N_bin
+        mcdc["cs_tallies"][i]["N_bin"] *= N_score
+
+        # Set tally stride and accumulate total tally size
+        # (each CS bin stores N_score values; see score_cs_tally)
+        mcdc["cs_tallies"][i]["stride"]["tally"] = tally_size
+        tally_size += mcdc["cs_tallies"][i]["filter"]["N_cs_bins"] * N_score
+
     # Set tally data
     if not input_deck.technique["uq"]:
         tally = np.zeros((3, tally_size), dtype=type_.float64)
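In the CS block above, the flat tally index written by score_cs_tally is stride["tally"] + j * N_score + i (bin j, score i), which is why tally_size has to advance by N_cs_bins * N_score per tally. A tiny sketch of that bookkeeping with hypothetical numbers:

    # Hypothetical layout: 150 CS bins, 2 scores, tally block starting at offset 0
    N_cs_bins, N_score, offset = 150, 2, 0

    def flat_index(j, i):
        # Mirrors the indexing in score_cs_tally: scores are contiguous per bin
        return offset + j * N_score + i

    assert flat_index(0, 1) == 1
    assert flat_index(1, 0) == 2
    assert flat_index(N_cs_bins - 1, N_score - 1) == N_cs_bins * N_score - 1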
@@ -1323,7 +1515,10 @@
 def cardlist_to_h5group(dictlist, input_group, name):
-    main_group = input_group.create_group(name + "s")
+    if name[-1] != "s":
+        main_group = input_group.create_group(name + "s")
+    else:
+        main_group = input_group.create_group(name)
     for item in dictlist:
         group = main_group.create_group(name + "_%i" % getattr(item, "ID"))
         card_to_h5group(item, group)
@@ -1349,7 +1544,10 @@
 def dictlist_to_h5group(dictlist, input_group, name):
-    main_group = input_group.create_group(name + "s")
+    if name[-1] != "s":
+        main_group = input_group.create_group(name + "s")
+    else:
+        main_group = input_group.create_group(name)
     for item in dictlist:
         group = main_group.create_group(name + "_%i" % item["ID"])
         dict_to_h5group(item, group)
@@ -1444,11 +1642,12 @@ def generate_hdf5(data, mcdc):
             cardlist_to_h5group(
                 input_deck.mesh_tallies, input_group, "mesh_tallies"
             )
             cardlist_to_h5group(
-                input_deck.surface_tallies, input_group, "surface_tally"
+                input_deck.surface_tallies, input_group, "surface_tallies"
             )
             cardlist_to_h5group(
                 input_deck.cell_tallies, input_group, "cell_tallies"
             )
+            cardlist_to_h5group(input_deck.cs_tallies, input_group, "cs_tallies")
             dict_to_h5group(input_deck.setting, input_group.create_group("setting"))
             dict_to_h5group(
                 input_deck.technique, input_group.create_group("technique")
@@ -1602,34 +1801,79 @@ def generate_hdf5(data, mcdc):
                 N_bin = tally["N_bin"]
                 start = tally["stride"]["tally"]
                 tally_bin = data[TALLY][:, start : start + N_bin]
-
-                # print(f'data = {data[TALLY][2]}')
-                # if data[TALLY][1].all() == data[TALLY][2].all:
-                #     print('hell yeah')
-                # print(f'data keys = {data[TALLY].dtype.names}')
-
-                # print(f'tally_bin = {tally_bin}')
+                tally_bin = tally_bin.reshape(shape)
+
+                # Roll tally so that score is in the front
+                tally_bin = np.rollaxis(tally_bin, 1, 0)
+
+                # Iterate over scores
+                for i in range(N_score):
+                    score_type = tally["scores"][i]
+                    score_tally_bin = np.squeeze(tally_bin[i])
+                    if score_type == SCORE_FLUX:
+                        score_name = "flux"
+                    elif score_type == SCORE_NET_CURRENT:
+                        score_name = "net-current"
+                    elif score_type == SCORE_FISSION:
+                        score_name = "fission"
+                    group_name = "tallies/cell_tally_%i/%s/" % (ID, score_name)
+
+                    mean = score_tally_bin[TALLY_SUM]
+                    sdev = score_tally_bin[TALLY_SUM_SQ]
+                    f.create_dataset(group_name + "mean", data=mean)
+                    f.create_dataset(group_name + "sdev", data=sdev)
+                    if mcdc["technique"]["uq"]:
+                        mc_var = score_tally_bin[TALLY_UQ_BATCH_VAR]
+                        tot_var = score_tally_bin[TALLY_UQ_BATCH]
+                        uq_var = tot_var - mc_var
+                        f.create_dataset(group_name + "uq_var", data=uq_var)
+
+            # CS tallies
+            for ID, tally in enumerate(mcdc["cs_tallies"]):
+                if mcdc["technique"]["iQMC"]:
+                    break
+                N_cs_bins = tally["filter"]["N_cs_bins"]
+
+                # Shape
+                N_score = tally["N_score"]
+
+                if not mcdc["technique"]["uq"]:
+                    shape = (3, N_cs_bins, N_score)
+                else:
+                    shape = (5, N_cs_bins, N_score)
+
+                # Reshape tally (each CS bin stores N_score values)
+                start = tally["stride"]["tally"]
+                tally_bin = data[TALLY][:, start : start + N_cs_bins * N_score]
                 tally_bin = tally_bin.reshape(shape)
 
                 # Roll tally so that score is in the front
-                tally_bin = np.rollaxis(tally_bin, 1, 0)
+                tally_bin = np.rollaxis(tally_bin, 2, 0)
 
                 # Iterate over scores
                 for i in range(N_score):
                     score_type = tally["scores"][i]
                     score_tally_bin = np.squeeze(tally_bin[i])
-                    # print(f'score_tally_bin = {score_tally_bin}')
                     if score_type == SCORE_FLUX:
                         score_name = "flux"
                     elif score_type == SCORE_NET_CURRENT:
                         score_name = "net-current"
                     elif score_type == SCORE_FISSION:
                         score_name = "fission"
-                    group_name = "tallies/cell_tally_%i/%s/" % (ID, score_name)
+                    group_name = "tallies/cs_tally_%i/%s/" % (ID, score_name)
+
+                    center_points = tally["filter"]["cs_centers"]
+                    S = tally["filter"]["cs_S"]
+                    reconstruction = tally["filter"]["cs_reconstruction"]
+
+                    f.create_dataset(
+                        "tallies/cs_tally_%i/center_points" % (ID), data=center_points
+                    )
+                    f.create_dataset("tallies/cs_tally_%i/S" % (ID), data=S)
+                    f.create_dataset(group_name + "reconstruction", data=reconstruction)
 
                     mean = score_tally_bin[TALLY_SUM]
-                    # print(f'ID = {ID}, score_name = {score_name}, mean = {mean}')
                     sdev = score_tally_bin[TALLY_SUM_SQ]
                     f.create_dataset(group_name + "mean", data=mean)
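With these changes, each CS tally gets its own HDF5 group holding the sampled centers, the sampling matrix, and the per-score results. A hedged sketch of walking the output file (group and dataset names as created above; the "fission" score follows the sphere_in_cube example):

    import h5py

    with h5py.File("output.h5", "r") as f:
        tally = f["tallies/cs_tally_0"]
        print(tally["center_points"][:].shape)           # (2, N_cs_bins)
        print(tally["S"][:].shape)                       # (N_cs_bins, Nx * Ny)
        print(tally["fission/mean"][:].shape)            # raw CS-bin tallies
        print(tally["fission/reconstruction"][:].shape)  # (Ny, Nx)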
diff --git a/mcdc/tally.py b/mcdc/tally.py
index f798af84..e1ff98e2 100644
--- a/mcdc/tally.py
+++ b/mcdc/tally.py
@@ -9,6 +9,7 @@
     MeshTallyCard,
     SurfaceTallyCard,
     CellTallyCard,
+    CSTallyCard,
 )
 from mcdc.constant import (
     INF,
@@ -161,6 +162,21 @@
 
 
 def cell_tally(cell, scores=["flux"]):
+    """
+    Create a tally card to collect MC solutions.
+
+    Parameters
+    ----------
+    cell : CellCard
+        Cell to which the tally is attached.
+    scores : list of str {"flux", "net-current"}
+        List of physical quantities to be scored.
+
+    Returns
+    -------
+    CellTallyCard
+        The tally card.
+    """
     # Make tally card
     card = CellTallyCard(cell.ID)
 
@@ -189,3 +205,71 @@
     global_.input_deck.cell_tallies.append(card)
 
     return card
+
+
+def cs_tally(
+    N_cs_bins=10,
+    cs_bin_size=np.array([1.0, 1.0]),
+    x=np.array([-INF, INF]),
+    y=np.array([-INF, INF]),
+    z=np.array([-INF, INF]),
+    t=np.array([-INF, INF]),
+    mu=np.array([-1.0, 1.0]),
+    azi=np.array([-PI, PI]),
+    g=np.array([-INF, INF]),
+    E=np.array([0.0, INF]),
+    scores=["flux"],
+):
+    """
+    Create a compressed sensing (CS) tally card for 2D (x-y) reconstruction.
+
+    N_cs_bins is expected as a one-element list (e.g. [150]), and cs_bin_size
+    is given in units of fine-mesh cells before conversion to problem units.
+    """
+    # Make tally card
+    card = CSTallyCard()
+
+    # Set ID
+    card.ID = len(global_.input_deck.cs_tallies)
+
+    # Set mesh
+    card.x = x
+    card.y = y
+    card.z = z
+
+    # Set bin properties; convert the bin size from fine-mesh cells to problem units
+    card.N_cs_bins = N_cs_bins
+    card.cs_bin_size[0] = cs_bin_size[0] / (len(x) - 1) * (x[-1] - x[0])
+    card.cs_bin_size[1] = cs_bin_size[1] / (len(y) - 1) * (y[-1] - y[0])
+
+    # Set other filters
+    card.t = t
+    card.mu = mu
+    card.azi = azi
+
+    # Set energy group grid
+    if isinstance(g, str) and g == "all":
+        G = global_.input_deck.materials[0].G
+        card.g = np.linspace(0, G, G + 1) - 0.5
+    else:
+        card.g = g
+    if global_.input_deck.setting["mode_CE"]:
+        card.g = E
+
+    # Calculate total number of bins
+    Nx = len(card.x) - 1
+    Ny = len(card.y) - 1
+    Nz = len(card.z) - 1
+    Nt = len(card.t) - 1
+    Nmu = len(card.mu) - 1
+    N_azi = len(card.azi) - 1
+    Ng = len(card.g) - 1
+    card.N_bin = Nx * Ny * Nz * Nt * Nmu * N_azi * Ng
+
+    # Scores
+    for s in scores:
+        score_checked = check_support(
+            "score type",
+            s,
+            ["flux", "total", "fission", "density"],
+        )
+        card.scores.append(score_checked)
+
+    # Add to deck
+    global_.input_deck.cs_tallies.append(card)
+
+    return card
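Typical usage of the new card, following the sphere_in_cube example earlier in this patch (N_cs_bins is passed as a one-element list, and cs_bin_size is given in fine-mesh cells before the card converts it to problem units):

    import numpy as np
    import mcdc

    mcdc.tally.cs_tally(
        N_cs_bins=[150],
        cs_bin_size=np.array([3.0, 3.0]),  # 3 x 3 fine cells per CS bin
        x=np.linspace(0.0, 4.0, 41),
        y=np.linspace(0.0, 4.0, 41),
        scores=["fission"],
    )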
diff --git a/mcdc/type_.py b/mcdc/type_.py
index 29141425..cc81f4ee 100644
--- a/mcdc/type_.py
+++ b/mcdc/type_.py
@@ -44,6 +44,7 @@
 mesh_tally = None
 surface_tally = None
 cell_tally = None
+cs_tally = None
 technique = None
 global_ = None
 
@@ -912,6 +913,89 @@
     cell_tally = into_dtype(struct)
 
 
+def make_type_cs_tally(input_deck):
+    global cs_tally
+    struct = []
+
+    # Maximum numbers of mesh and filter grids and scores
+    Nmax_x = 2
+    Nmax_y = 2
+    Nmax_z = 2
+    Nmax_t = 2
+    Nmax_mu = 2
+    Nmax_azi = 2
+    Nmax_g = 2
+    Nmax_score = 1
+    N_cs_centers = 1
+    for card in input_deck.cs_tallies:
+        Nmax_x = max(Nmax_x, len(card.x))
+        Nmax_y = max(Nmax_y, len(card.y))
+        Nmax_z = max(Nmax_z, len(card.z))
+        Nmax_t = max(Nmax_t, len(card.t))
+        Nmax_mu = max(Nmax_mu, len(card.mu))
+        Nmax_azi = max(Nmax_azi, len(card.azi))
+        Nmax_g = max(Nmax_g, len(card.g))
+        Nmax_score = max(Nmax_score, len(card.scores))
+        N_cs_centers = card.N_cs_bins[0]
+
+    # # Reduce tally sizes for subdomains
+    # if input_deck.technique["domain_decomposition"]:
+    #     Nmax_x, Nmax_y, Nmax_z = dd_meshtally(input_deck)
+
+    # Set the filter
+    filter_ = [
+        ("N_cs_bins", int),
+        ("cs_bin_size", float64, (2,)),
+        (
+            "cs_centers",
+            float64,
+            (
+                2,
+                N_cs_centers,
+            ),
+        ),
+        ("cs_S", float64, (N_cs_centers, (Nmax_x - 1) * (Nmax_y - 1))),
+        ("cs_reconstruction", float64, ((Nmax_y - 1), (Nmax_x - 1))),
+        ("x", float64, (Nmax_x,)),
+        ("y", float64, (Nmax_y,)),
+        ("z", float64, (Nmax_z,)),
+        ("t", float64, (Nmax_t,)),
+        ("mu", float64, (Nmax_mu,)),
+        ("azi", float64, (Nmax_azi,)),
+        ("g", float64, (Nmax_g,)),
+    ]
+
+    struct += [("filter", filter_)]
+
+    # Tally strides
+    stride = [
+        ("tally", int64),
+        ("sensitivity", int64),
+        ("mu", int64),
+        ("azi", int64),
+        ("g", int64),
+        ("t", int64),
+        ("x", int64),
+        ("y", int64),
+        ("z", int64),
+    ]
+    struct += [("stride", stride)]
+
+    # Total number of fine-mesh bins (used for the reconstruction)
+    # TODO: might be able to drop this and take N_bin from the mesh tally
+    struct += [("N_bin", int64)]
+
+    # Scores
+    struct += [("N_score", int64), ("scores", int64, (Nmax_score,))]
+
+    # Make tally structure
+    cs_tally = into_dtype(struct)
+
+
 # ==============================================================================
 # Setting
 # ==============================================================================
@@ -1335,6 +1419,7 @@ def make_type_global(input_deck):
     N_mesh_tally = len(input_deck.mesh_tallies)
     N_surface_tally = len(input_deck.surface_tallies)
     N_cell_tally = len(input_deck.cell_tallies)
+    N_cs_tally = len(input_deck.cs_tallies)
 
     # Cell data sizes
     N_cell_surface = sum([len(x.surface_IDs) for x in input_deck.cells])
@@ -1409,6 +1494,7 @@
             ("mesh_tallies", mesh_tally, (N_mesh_tally,)),
             ("surface_tallies", surface_tally, (N_surface_tally,)),
             ("cell_tallies", cell_tally, (N_cell_tally,)),
+            ("cs_tallies", cs_tally, (N_cs_tally,)),
             ("setting", setting),
             ("technique", technique),
             ("domain_decomp", domain_decomp),
diff --git a/pyproject.toml b/pyproject.toml
index 85c16f5f..5afecf3c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,6 +59,7 @@ dependencies = [
     "black",
     "sympy",
     "pre-commit",
+    "cvxpy",
 ]
 
 [project.optional-dependencies]
 docs = ["sphinx==7.2.6", "furo", "sphinx_toolbox"]