diff --git a/examples/fixed_source/sphere_in_cube/center_points.npy b/examples/fixed_source/sphere_in_cube/center_points.npy
deleted file mode 100644
index 0fef328c..00000000
Binary files a/examples/fixed_source/sphere_in_cube/center_points.npy and /dev/null differ
diff --git a/examples/fixed_source/sphere_in_cube/input.py b/examples/fixed_source/sphere_in_cube/input.py
index d53a4e0d..3a858582 100644
--- a/examples/fixed_source/sphere_in_cube/input.py
+++ b/examples/fixed_source/sphere_in_cube/input.py
@@ -49,8 +49,8 @@ mcdc.tally.cell_tally(sphere_cell, scores=["fission"])
 
 mcdc.tally.cs_tally(
-    N_cs_bins=[1601],
-    cs_bin_size=np.array([1.0, 1.0]),
+    N_cs_bins=[150],
+    cs_bin_size=np.array([3.0, 3.0]),
     x=np.linspace(0.0, 4.0, 41),
     y=np.linspace(0.0, 4.0, 41),
     scores=["fission"],
diff --git a/examples/fixed_source/sphere_in_cube/process.py b/examples/fixed_source/sphere_in_cube/process.py
index fdf3e1cd..d7896a54 100644
--- a/examples/fixed_source/sphere_in_cube/process.py
+++ b/examples/fixed_source/sphere_in_cube/process.py
@@ -4,56 +4,58 @@ import scipy.fft as spfft
 import cvxpy as cp
 
-# Load results
-with h5py.File("output.h5", "r") as f:
-    # trying to compare cs results and mesh results
-    center_points = np.load("center_points.npy")
-    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
-    cs_alphas = cs_results[:-1] / np.max(cs_results[:-1])
-    # plt.scatter(center_points[0][:-1], center_points[1][:-1], alpha=cs_alphas)
-    # plt.show()
+# Note: main.py now stores the sampling matrix S, the CS bin center points,
+# and the reconstruction in the output HDF5 file (under tallies/cs_tally_0),
+# so no extra .npy files are needed; this script reads everything it uses
+# directly from output.h5
 
-    mesh_results = f["tallies"]["mesh_tally_0"]["fission"]["mean"][:]
-    plt.imshow(cs_results[:-1].reshape(mesh_results.shape))
-    plt.title("cs")
-    plt.show()
-    plt.imshow(mesh_results)
-    plt.title("mesh")
+with h5py.File("output.h5", "r") as f:
+    S = f["tallies"]["cs_tally_0"]["S"][:]
+    recon = f["tallies"]["cs_tally_0"]["fission"]["reconstruction"][:]
+    plt.imshow(recon)
+    plt.title(r"Reconstruction, $\lambda$ = 0.5")  # assuming l in main.py remains at 0.5
+    plt.colorbar()
     plt.show()
-    print(f"cs_results = {cs_results}")
-    print(f"sh_results = {mesh_results.flatten()}")
+    cs_results = f["tallies"]["cs_tally_0"]["fission"]["mean"][:]
+    # mesh_results = f["tallies"]["mesh_tally_0"]["fission"]["mean"][:]
+    # plt.imshow(mesh_results)
+    # plt.title("mesh results")
+    # plt.show()
 
 Nx = 40
 Ny = 40
-S = np.load("sphere_S.npy")
-mesh_b = S @ mesh_results.flatten()
-cs_b = cs_results
-
+N_fine_cells = Nx * Ny
+# For post-processing against a mesh tally, uncomment mesh_results above and use
+# mesh_b = S @ mesh_results.flatten()
 # b = mesh_b
-# print(f'shape of mesh b = {mesh_b.shape}')
-# print(f'shape of cs b = {cs_b.shape}')
-
-# idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
-# idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
-
-# T_inv = np.kron(idct_basis_y, idct_basis_x)
-# A = S @ T_inv
-# N_fine_cells = 1600
-
-# vx = cp.Variable(N_fine_cells)
-# l = 0
-# objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
-# prob = cp.Problem(objective)
-# result = prob.solve(verbose=False)
-# sparse_solution = np.array(vx.value).squeeze()
-
-# recon = T_inv @ sparse_solution
-# recon_reshaped = recon.reshape(Ny, Nx)
-
-# plt.imshow(recon_reshaped)
-# plt.title('reconstruction')
-# plt.show()
+# Use the in-situ CS tally results as the measurement vector b
+cs_b = cs_results
+b = cs_b
+
+# Constructing T and A
+idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
+idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
+
+T_inv = np.kron(idct_basis_y, idct_basis_x)
+A = S @ T_inv
+
+# Basis pursuit denoising solver; increase the penalty l for a sparser, smoother reconstruction
+vx = cp.Variable(N_fine_cells)
+l = 10
+objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
+prob = cp.Problem(objective)
+result = prob.solve(verbose=False)
+sparse_solution = np.array(vx.value).squeeze()
+
+# Obtaining the reconstruction
+recon = T_inv @ sparse_solution
+recon_reshaped = recon.reshape(Ny, Nx)
+
+plt.imshow(recon_reshaped)
+plt.title(rf"Reconstruction, $\lambda$ = {l}")
+plt.colorbar()
+plt.show()
diff --git a/examples/fixed_source/sphere_in_cube/sparse_solution.npy b/examples/fixed_source/sphere_in_cube/sparse_solution.npy
deleted file mode 100644
index a8121470..00000000
Binary files a/examples/fixed_source/sphere_in_cube/sparse_solution.npy and /dev/null differ
diff --git a/examples/fixed_source/sphere_in_cube/sphere_S.npy b/examples/fixed_source/sphere_in_cube/sphere_S.npy
deleted file mode 100644
index 1d4ef27b..00000000
Binary files a/examples/fixed_source/sphere_in_cube/sphere_S.npy and /dev/null differ
diff --git a/mcdc/main.py b/mcdc/main.py
index 8882a396..bdc5a274 100644
--- a/mcdc/main.py
+++ b/mcdc/main.py
@@ -93,7 +93,7 @@ def run():
         loop_fixed_source(data_arr, mcdc_arr)
     mcdc["runtime_simulation"] = MPI.Wtime() - simulation_start
 
-    # Compressed sensing reconstruction - after the sim runs and gets results
+    # Compressed sensing reconstruction
     N_cs_bins = mcdc["cs_tallies"]["filter"]["N_cs_bins"][0]
     if N_cs_bins != 0:
         cs_reconstruct(data, mcdc)
@@ -151,7 +151,6 @@ def calculate_cs_A(data, mcdc):
     [x_centers, y_centers] = mcdc["cs_tallies"]["filter"]["cs_centers"][0]
     x_centers[-1] = (x_grid[-1] + x_grid[0]) / 2
     y_centers[-1] = (y_grid[-1] + y_grid[0]) / 2
-    np.save("center_points.npy", mcdc["cs_tallies"]["filter"]["cs_centers"][0])
 
     # Calculate the overlap grid for each bin, and flatten into a row of S
     for ibin in range(N_cs_bins):
@@ -190,19 +189,13 @@ def calculate_cs_A(data, mcdc):
         S[ibin] = overlap.flatten()
 
     S = np.array(S)
-
-    S_summed = np.sum(S, axis=0).reshape(Ny, Nx)
-    plt.imshow(S_summed)
-    plt.colorbar()
-    plt.title("S_Summed")
-    plt.show()
+    mcdc["cs_tallies"]["filter"]["cs_S"] = S
 
     assert np.allclose(S[-1], np.ones(Nx * Ny)), "Last row of S must be all ones"
     assert S.shape[1] == Nx * Ny, "Size of S must match Nx * Ny."
     assert (
         S.shape[1] == mcdc["cs_tallies"]["N_bin"][0]
     ), "Size of S must match number of cells in desired mesh tally"
-    np.save("sphere_S.npy", S)  # TODO: can this be done in a different way? idk
 
     # Construct the DCT matrix T
 
@@ -221,7 +214,7 @@ def calculate_cs_sparse_solution(data, mcdc, A, b):
     vx = cp.Variable(N_fine_cells)
 
     # Basis pursuit denoising
-    l = 0
+    l = 0.5
     objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
     prob = cp.Problem(objective)
     result = prob.solve(verbose=False)
@@ -241,9 +234,6 @@
 
 def cs_reconstruct(data, mcdc):
     tally_bin = data[TALLY]
-
-    print(tally_bin.shape)
-
     tally = mcdc["cs_tallies"][0]
     stride = tally["stride"]
     bin_idx = stride["tally"]
@@ -253,38 +243,13 @@ def cs_reconstruct(data, mcdc):
 
     b = tally_bin[TALLY_SUM, bin_idx : bin_idx + N_cs_bins]
 
-    # print(f"measurements b = {b}")
-
     A, T_inv = calculate_cs_A(data, mcdc)
     x = calculate_cs_sparse_solution(data, mcdc, A, b)
-    np.save("sparse_solution.npy", x)
-    # print(f"sparse solution shape = {x.shape}")
-    # print(x)
-
     recon = T_inv @ x
-    # print(f"recon.shape = {recon.shape}")
-    # print(f"recon = {recon}")
-
     recon_reshaped = recon.reshape(Ny, Nx)
-    plt.imshow(recon_reshaped)
-    plt.title("Reconstruction")
-    plt.colorbar()
-    plt.show()
-
-    return recon
-    # reconstruction
-
-    # # Find the sparse solution, and then find the reconstruction
-    # sparse_solution[res] = finding_sparse_solution(A[res], fluxes[res].size, b, bpdn_lambda)
-    # reconstruction[res] = np.reshape(un_matrix[res] @ sparse_solution[res], fluxes[res].shape)
-    # rescaled_recons[res] = downsample_to_resolution(reconstruction[res], input_flux.shape)
-
-    # if return_centers:
-    #     return reconstruction, input_flux, rescaled_recons, bin_centers[0]
-    # else:
-    #     return reconstruction, input_flux, rescaled_recons
+    tally["filter"]["cs_reconstruction"] = recon_reshaped
 
 
 # =============================================================================
@@ -486,42 +451,25 @@ def dd_mesh_bounds(idx):
 
 
 def generate_cs_centers(mcdc, N_dim=3, seed=123456789):
-    # N_cs_bins = int(mcdc["cs_tallies"]["filter"]["N_cs_bins"])
-    # x_lims = (
-    #     mcdc["cs_tallies"]["filter"]["x"][0][-1],
-    #     mcdc["cs_tallies"]["filter"]["x"][0][0],
-    # )
-    # y_lims = (
-    #     mcdc["cs_tallies"]["filter"]["y"][0][-1],
-    #     mcdc["cs_tallies"]["filter"]["y"][0][0],
-    # )
-
-    # # Generate Halton sequence according to the seed
-    # halton_seq = Halton(d=N_dim, seed=seed)
-    # points = halton_seq.random(n=N_cs_bins)
-
-    # # Extract x and y coordinates as tuples separately, scaled to the problem dimensions
-    # x_coords = tuple(points[:, 0] * (x_lims[1] - x_lims[0]) + x_lims[0])
-    # y_coords = tuple(points[:, 1] * (y_lims[1] - y_lims[0]) + y_lims[0])
-
-    # Generate the centers of the cells
-    x_centers = np.linspace(0.05, 3.95, 40)
-    y_centers = np.linspace(0.05, 3.95, 40)
-
-    # Create the meshgrid for all cell centers
-    X, Y = np.meshgrid(x_centers, y_centers)
-
-    # Flatten the arrays to get the list of coordinates
-    x_coords = X.flatten()
-    y_coords = Y.flatten()
+    N_cs_bins = int(mcdc["cs_tallies"]["filter"]["N_cs_bins"])
+    x_lims = (
+        mcdc["cs_tallies"]["filter"]["x"][0][-1],
+        mcdc["cs_tallies"]["filter"]["x"][0][0],
+    )
+    y_lims = (
+        mcdc["cs_tallies"]["filter"]["y"][0][-1],
+        mcdc["cs_tallies"]["filter"]["y"][0][0],
+    )
 
-    x_coords_list = x_coords.tolist()
-    y_coords_list = y_coords.tolist()
+    # Generate Halton sequence according to the seed
+    halton_seq = Halton(d=N_dim, seed=seed)
+    points = halton_seq.random(n=N_cs_bins)
 
-    x_coords_list.append(2.0)
-    y_coords_list.append(2.0)
+    # Extract x and y coordinates as tuples separately, scaled to the problem dimensions
+    x_coords = tuple(points[:, 0] * (x_lims[1] - x_lims[0]) + x_lims[0])
+    y_coords = tuple(points[:, 1] * (y_lims[1] - y_lims[0]) + y_lims[0])
 
-    return (x_coords_list, y_coords_list)
+    return (x_coords, y_coords)
 
 
 def prepare():
@@ -1948,7 +1896,14 @@ def generate_hdf5(data, mcdc):
 
                     group_name = "tallies/cs_tally_%i/%s/" % (ID, score_name)
                     center_points = tally["filter"]["cs_centers"]
-                    f.create_dataset(group_name + "center_points", data=center_points)
+                    S = tally["filter"]["cs_S"]
+                    reconstruction = tally["filter"]["cs_reconstruction"]
+
+                    f.create_dataset(
+                        "tallies/cs_tally_%i/center_points" % (ID), data=center_points
+                    )
+                    f.create_dataset("tallies/cs_tally_%i/S" % (ID), data=S)
+                    f.create_dataset(group_name + "reconstruction", data=reconstruction)
 
                     mean = score_tally_bin[TALLY_SUM]
                     sdev = score_tally_bin[TALLY_SUM_SQ]
diff --git a/mcdc/tally.py b/mcdc/tally.py
index cd61a77d..e1ff98e2 100644
--- a/mcdc/tally.py
+++ b/mcdc/tally.py
@@ -233,14 +233,9 @@ def cs_tally(
 
     # Set bin properties, convert bin size to problem units
    card.N_cs_bins = N_cs_bins
-    print(f"tally.py, {card.cs_bin_size[0]}")
-    print(type(card.cs_bin_size), card.cs_bin_size.shape)
     card.cs_bin_size[0] = cs_bin_size[0] / (len(x) - 1) * (x[-1] - x[0])
     card.cs_bin_size[1] = cs_bin_size[1] / (len(y) - 1) * (y[-1] - y[0])
-    print(f"tally.py, {card.cs_bin_size[0]}")
-    print(type(card.cs_bin_size), card.cs_bin_size.shape)
-
     # Set other filters
     card.t = t
     card.mu = mu
diff --git a/mcdc/type_.py b/mcdc/type_.py
index af421d3d..be513907 100644
--- a/mcdc/type_.py
+++ b/mcdc/type_.py
@@ -954,6 +954,8 @@ def make_type_cs_tally(input_deck):
                 N_cs_centers,
             ),
         ),
+        ("cs_S", float64, (N_cs_centers, (Nmax_x - 1) * (Nmax_y - 1))),
+        ("cs_reconstruction", float64, ((Nmax_x - 1), (Nmax_y - 1))),
         ("x", float64, (Nmax_x,)),
         ("y", float64, (Nmax_y,)),
         ("z", float64, (Nmax_z,)),
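
Reviewer note: for anyone unfamiliar with the reconstruction step this PR moves in-situ, the sketch below is a minimal, self-contained version of the same pipeline: measure the fine-mesh flux through a sampling matrix S, expand it on a 2-D inverse-DCT basis, and recover sparse coefficients by basis pursuit denoising. The tiny grid, the random 0/1 S, and the synthetic test image are illustrative assumptions for the sketch only, not code or data from this PR (the PR builds S from geometric bin overlaps in calculate_cs_A).

# Minimal sketch of the CS reconstruction pipeline on synthetic data.
# Assumed/illustrative: grid sizes, the random sampling matrix S, and
# the test image. The objective matches calculate_cs_sparse_solution.
import numpy as np
import scipy.fft as spfft
import cvxpy as cp

rng = np.random.default_rng(123456789)
Nx, Ny = 8, 8                # fine-mesh size, kept tiny so the solve is fast
N_fine_cells = Nx * Ny
N_cs_bins = 24               # number of coarse CS measurements (< N_fine_cells)

# Synthetic "true" flux: a bright block on a flat background
f_true = np.ones((Ny, Nx))
f_true[2:5, 2:5] = 5.0

# Random 0/1 sampling matrix standing in for the bin-overlap matrix
S = (rng.random((N_cs_bins, N_fine_cells)) < 0.3).astype(float)
S[-1] = 1.0                  # mimic the all-ones last row asserted in main.py

b = S @ f_true.flatten()     # the "measurements" (CS tally sums in the PR)

# 2-D inverse DCT basis via a Kronecker product, as in the PR
idct_basis_x = spfft.idct(np.identity(Nx), axis=0)
idct_basis_y = spfft.idct(np.identity(Ny), axis=0)
T_inv = np.kron(idct_basis_y, idct_basis_x)
A = S @ T_inv

# Basis pursuit denoising with the same objective as the PR
vx = cp.Variable(N_fine_cells)
l = 0.5
objective = cp.Minimize(0.5 * cp.norm(A @ vx - b, 2) + l * cp.norm(vx, 1))
cp.Problem(objective).solve(verbose=False)

recon = (T_inv @ vx.value).reshape(Ny, Nx)
print("max abs reconstruction error:", np.abs(recon - f_true).max())

As in calculate_cs_sparse_solution, raising l trades data fidelity for sparsity in the DCT coefficients, while l = 0 reduces the objective to a plain least-squares residual.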