Skip to content

Commit

Permalink
Responding reviews
Browse files Browse the repository at this point in the history
  • Loading branch information
hidekb committed Jan 17, 2025
1 parent e26d439 commit a91eaf5
Show file tree
Hide file tree
Showing 11 changed files with 28 additions and 197 deletions.
16 changes: 9 additions & 7 deletions maintainer/benchmarks/lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@
parser.add_argument("--blocks_per_mpi_rank", action="store", nargs=3,
type=int, default=[1, 1, 1], required=False,
help="blocks per mpi rank")
parser.add_argument("--weak_scaling", action="store_true", required=False,
help="The measurement of weak scaling")

args = parser.parse_args()

Expand Down Expand Up @@ -104,15 +106,15 @@
lb_grid = 3 * [lb_grid]
box_l = 3 * [box_l]

print(f"box length: {box_l}")
print(f"LB shape: {lb_grid}")
print(f"LB agrid: {agrid:.3f}")

blocks_per_mpi_rank = args.blocks_per_mpi_rank

# System
#############################################################
system.box_l = box_l
if args.weak_scaling:
system.box_l = box_l * system.cell_system.node_grid
print(f"box length: {system.box_l}")
print(f"LB shape: {lb_grid}")
print(f"LB agrid: {agrid:.3f}")


# Integration parameters
#############################################################
Expand Down Expand Up @@ -150,7 +152,7 @@
if args.multi_gpu:
system.cuda_init_handle.call_method("set_device_id_per_rank")
lbf = lb_class(agrid=agrid, tau=system.time_step, kinematic_viscosity=1.,
density=1., single_precision=args.single_precision, blocks_per_mpi_rank=blocks_per_mpi_rank)
density=1., single_precision=args.single_precision, blocks_per_mpi_rank=args.blocks_per_mpi_rank)
system.lb = lbf
if n_part:
system.thermostat.set_lb(LB_fluid=lbf, gamma=1., seed=42)
Expand Down
166 changes: 0 additions & 166 deletions maintainer/benchmarks/lb_weakscaling.py

This file was deleted.

2 changes: 1 addition & 1 deletion src/python/espressomd/detail/walberla.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def required_keys(self):
return self.valid_keys()

def default_params(self):
return {}
return {"blocks_per_mpi_rank": [1, 1, 1]}

def get_node_indices_inside_shape(self, shape):
if not isinstance(shape, espressomd.shapes.Shape):
Expand Down
4 changes: 2 additions & 2 deletions src/python/espressomd/lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ class LBFluidWalberla(HydrodynamicInteraction,
single_precision : :obj:`bool`, optional
Use single-precision floating-point arithmetic.
blocks_per_mpi_rank : (3,) array_like of :obj:`int`, optional
Ditribute more than one block to each CPU.
Distribute more than one block to each CPU.
Methods
-------
Expand Down Expand Up @@ -242,7 +242,7 @@ def validate_params(self, params):
if "agrid" not in params:
raise ValueError("missing argument 'lattice' or 'agrid'")
params["lattice"] = LatticeWalberla(
agrid=params.pop("agrid"), n_ghost_layers=1, blocks_per_mpi_rank=params.get("blocks_per_mpi_rank"))
agrid=params.pop("agrid"), n_ghost_layers=1, blocks_per_mpi_rank=params.pop("blocks_per_mpi_rank"))
elif "agrid" in params:
raise ValueError("cannot provide both 'lattice' and 'agrid'")

Expand Down
3 changes: 1 addition & 2 deletions src/script_interface/walberla/LBFluid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -139,8 +139,7 @@ void LBFluidGPU::make_instance(VariantMap const &params) {
auto const visc = get_value<double>(params, "kinematic_viscosity");
auto const dens = get_value<double>(params, "density");
auto const precision = get_value<bool>(params, "single_precision");
auto const blocks_per_mpi_rank = get_value_or<Utils::Vector3i>(
params, "blocks_per_mpi_rank", Utils::Vector3i{{1, 1, 1}});
auto const blocks_per_mpi_rank = get_value<Utils::Vector3i>(m_lattice->get_parameter("blocks_per_mpi_rank"));
if (blocks_per_mpi_rank != Utils::Vector3i{{1, 1, 1}}) {
throw std::runtime_error(
"Using more than one block per MPI rank is not supported for GPU LB");
Expand Down
13 changes: 3 additions & 10 deletions src/script_interface/walberla/LatticeWalberla.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,17 +63,10 @@ class LatticeWalberla : public AutoParameters<LatticeWalberla> {
auto const &box_geo = *::System::get_system().box_geo;
m_agrid = get_value<double>(args, "agrid");
m_box_l = get_value_or<Utils::Vector3d>(args, "_box_l", box_geo.length());
m_blocks_per_mpi_rank = get_value_or<Utils::Vector3i>(
args, "blocks_per_mpi_rank", Utils::Vector3i{{1, 1, 1}});
m_blocks_per_mpi_rank = get_value<Utils::Vector3i>(args, "blocks_per_mpi_rank");
auto const n_ghost_layers = get_value<int>(args, "n_ghost_layers");
auto const block_grid =
Utils::Vector3i{{static_cast<int>(::communicator.node_grid[0] *
m_blocks_per_mpi_rank[0]),
static_cast<int>(::communicator.node_grid[1] *
m_blocks_per_mpi_rank[1]),
static_cast<int>(::communicator.node_grid[2] *
m_blocks_per_mpi_rank[2])}};

auto const block_grid = Utils::hadamard_product(::communicator.node_grid,
m_blocks_per_mpi_rank);
context()->parallel_try_catch([&]() {
if (m_agrid <= 0.) {
throw std::domain_error("Parameter 'agrid' must be > 0");
Expand Down
5 changes: 5 additions & 0 deletions src/walberla_bridge/src/utils/types_conversion.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,11 @@ inline Utils::VectorXd<9> to_vector9d(Matrix3<float> const &m) {
double_c(m[6]), double_c(m[7]), double_c(m[8])};
}
/// Convert a waLBerla float vector to an integer vector by truncation.
/// In debug builds, assert that each component is already (near-)integral,
/// so the narrowing cast does not silently lose information.
inline Utils::Vector3i to_vector3i(Vector3<float> const &v) {
#ifndef NDEBUG
  for (auto const i : {0u, 1u, 2u}) {
    // |v[i] - trunc(v[i])| must be negligible; compare the *difference*,
    // not a boolean (the original applied std::abs to a comparison and
    // had unbalanced parentheses).
    assert(std::abs(static_cast<double>(v[i]) -
                    static_cast<double>(static_cast<int>(v[i]))) < 1e-5);
  }
#endif
  return Utils::Vector3i{
      {static_cast<int>(v[0]), static_cast<int>(v[1]), static_cast<int>(v[2])}};
}
Expand Down
6 changes: 2 additions & 4 deletions testsuite/python/lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -517,11 +517,9 @@ def test_agrid_rounding(self):
phi = 0.05
lj_sig = 1.0
l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3 / phi)**(1. / 3.)
system.box_l = l * np.array(system.cell_system.node_grid)
if hasattr(self, 'blocks_per_mpi_rank'):
system.box_l = [
l] * 3 * np.array(system.cell_system.node_grid) * np.array(self.blocks_per_mpi_rank)
else:
system.box_l = [l] * 3 * np.array(system.cell_system.node_grid)
system.box_l = system.box_l * np.array(self.blocks_per_mpi_rank)
lbf = self.lb_class(agrid=l / 31, density=1, kinematic_viscosity=1, kT=0,
tau=system.time_step, **self.lb_params)
system.lb = lbf
Expand Down
2 changes: 1 addition & 1 deletion testsuite/python/lb_couette_xy.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#
# Copyright (C) 2021-2023 The ESPResSo project
# Copyright (C) 2021-2025 The ESPResSo project
#
# This file is part of ESPResSo.
#
Expand Down
4 changes: 2 additions & 2 deletions testsuite/python/lb_mass_conservation.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class LBMassCommon:

"""Check the lattice-Boltzmann mass conservation."""

system = espressomd.System(box_l=[6.0, 6.0, 6.0])
system = espressomd.System(box_l=[4.0, 4.0, 4.0])
system.time_step = TIME_STEP
system.cell_system.skin = 0.4 * AGRID

Expand Down Expand Up @@ -99,7 +99,7 @@ class LBMassWalberlaSinglePrecisionGPU(LBMassCommon, ut.TestCase):
@utx.skipIfMissingFeatures(["WALBERLA"])
class LBMassWalberlaDoublePrecisionBlocksCPU(LBMassCommon, ut.TestCase):
lb_class = espressomd.lb.LBFluidWalberla
blocks_per_mpi_rank = [2, 2, 2]
blocks_per_mpi_rank = [1, 1, 2]
lb_params = {"single_precision": False,
"blocks_per_mpi_rank": blocks_per_mpi_rank}
atol = 1e-10
Expand Down
4 changes: 2 additions & 2 deletions testsuite/python/lb_shear.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@
DENS = 2.3
TIME_STEP = 0.02
# Box size will be H +2 AGRID to make room for walls.
# The number of grid cells should be divisible by four and 3 in all directions
# for testing on multiple mpi nodes.
# The number of grid cells should be divisible by four and 2 in all directions
# for testing on multiple MPI nodes and multiple blocks per MPI rank.
H = 10 * AGRID
W = 6 * AGRID
SHEAR_VELOCITY = 0.3
Expand Down

0 comments on commit a91eaf5

Please sign in to comment.