Refactor Communication Classes #338

Open
wants to merge 16 commits into base: master
30 changes: 16 additions & 14 deletions src/parallel/ALLLoadBalancer.cpp
@@ -5,33 +5,35 @@
*/

#include "ALLLoadBalancer.h"
ALLLoadBalancer::ALLLoadBalancer(std::array<double, 3> boxMin, std::array<double, 3> boxMax, double gamma,
MPI_Comm comm, std::array<size_t, 3> globalSize,
std::array<size_t, 3> localCoordinates, std::array<double, 3> minimalPartitionSize)
: _all(3 /*dim*/, gamma) {
std::vector<Point> points;
points.emplace_back(3, boxMin.data());
points.emplace_back(3, boxMax.data());
ALLLoadBalancer::ALLLoadBalancer(const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, double gamma,
MPI_Comm comm, const std::array<size_t, 3> &globalSize,
const std::array<size_t, 3> &localCoordinates,
const std::array<double, 3> &minimalPartitionSize)
: _all(3 /*dim*/, gamma), _minimalPartitionSize(minimalPartitionSize) {
// convert input into non-const vector because that is what ALL expects
std::vector<Point> points {
{3, boxMin.data()},
{3, boxMax.data()},
};
_all.set_vertices(points);
std::array<int, 3> global_size{static_cast<int>(globalSize[0]), static_cast<int>(globalSize[1]),
// convert input into non-const int arrays because that is what ALL expects
std::array<int, 3> globalSizeIntArray{static_cast<int>(globalSize[0]), static_cast<int>(globalSize[1]),
static_cast<int>(globalSize[2])};
std::array<int, 3> coords{static_cast<int>(localCoordinates[0]), static_cast<int>(localCoordinates[1]),
static_cast<int>(localCoordinates[2])};
_all.set_proc_grid_params(coords.data(), global_size.data());
_all.set_proc_grid_params(coords.data(), globalSizeIntArray.data());
_all.set_communicator(comm);

_coversWholeDomain = {globalSize[0] == 1, global_size[1] == 1, global_size[2] == 1};

_minimalPartitionSize = minimalPartitionSize;
_coversWholeDomain = {globalSizeIntArray[0] == 1, globalSizeIntArray[1] == 1, globalSizeIntArray[2] == 1};
}
std::tuple<std::array<double, 3>, std::array<double, 3>> ALLLoadBalancer::rebalance(double work) {
_all.set_work(work);
_all.setup(ALL_LB_t::STAGGERED);
_all.set_min_domain_size(ALL_LB_t::STAGGERED, _minimalPartitionSize.data());
_all.balance(ALL_LB_t::STAGGERED);
auto resultVertices = _all.get_result_vertices();
std::array<double, 3> boxMin{resultVertices[0].x(0), resultVertices[0].x(1), resultVertices[0].x(2)};
std::array<double, 3> boxMax{resultVertices[1].x(0), resultVertices[1].x(1), resultVertices[1].x(2)};
_all.set_vertices(resultVertices);
const std::array<double, 3> boxMin{resultVertices[0].x(0), resultVertices[0].x(1), resultVertices[0].x(2)};
const std::array<double, 3> boxMax{resultVertices[1].x(0), resultVertices[1].x(1), resultVertices[1].x(2)};
return std::make_tuple(boxMin, boxMax);
}
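Note on the constructor change above (illustrative sketch, not part of the diff): the parameters are now taken by const reference, and mutable copies are created only where the ALL interface requires non-const data. A minimal standalone example of that pattern, using a hypothetical `legacySetGrid` function in place of the real ALL calls:

```cpp
#include <array>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for a legacy API that wants mutable int pointers.
void legacySetGrid(int* coords, int* gridSize) {
    std::cout << "coords: " << coords[0] << "," << coords[1] << "," << coords[2]
              << " grid: " << gridSize[0] << "," << gridSize[1] << "," << gridSize[2] << '\n';
}

// Caller-facing wrapper: take const refs, convert to the types the legacy API expects.
void setGrid(const std::array<std::size_t, 3>& localCoordinates,
             const std::array<std::size_t, 3>& globalSize) {
    std::array<int, 3> coords{static_cast<int>(localCoordinates[0]),
                              static_cast<int>(localCoordinates[1]),
                              static_cast<int>(localCoordinates[2])};
    std::array<int, 3> globalSizeIntArray{static_cast<int>(globalSize[0]),
                                          static_cast<int>(globalSize[1]),
                                          static_cast<int>(globalSize[2])};
    legacySetGrid(coords.data(), globalSizeIntArray.data());
}

int main() {
    setGrid({0, 1, 2}, {4, 4, 2});
}
```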
8 changes: 4 additions & 4 deletions src/parallel/ALLLoadBalancer.h
@@ -11,17 +11,17 @@

class ALLLoadBalancer : public LoadBalancer {
public:
ALLLoadBalancer(std::array<double, 3> boxMin, std::array<double, 3> boxMax, double gamma, MPI_Comm comm,
std::array<size_t, 3> globalSize, std::array<size_t, 3> localCoordinates,
std::array<double, 3> minimalPartitionSize);
ALLLoadBalancer(const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, double gamma, MPI_Comm comm,
const std::array<size_t, 3>& globalSize, const std::array<size_t, 3>& localCoordinates,
const std::array<double, 3>& minimalPartitionSize);

~ALLLoadBalancer() override = default;
std::tuple<std::array<double, 3>, std::array<double, 3>> rebalance(double work) override;
void readXML(XMLfileUnits& xmlconfig) override {
// nothing yet.
}

std::array<bool, 3> getCoversWholeDomain() override { return _coversWholeDomain; }
const std::array<bool, 3>& getCoversWholeDomain() const override { return _coversWholeDomain; }

private:
ALL<double, double> _all;
6 changes: 3 additions & 3 deletions src/parallel/DomainDecomposition.cpp
@@ -87,15 +87,15 @@ bool DomainDecomposition::queryBalanceAndExchangeNonBlocking(bool /*forceRebalan
void DomainDecomposition::balanceAndExchange(double /*lastTraversalTime*/, bool /*forceRebalancing*/, ParticleContainer* moleculeContainer,
Domain* domain) {
if (sendLeavingWithCopies()) {
Log::global_log->debug() << "DD: Sending Leaving and Halos." << std::endl;
Log::global_log->debug() << "DD: Sending Leaving and Halos.\n";
DomainDecompMPIBase::exchangeMoleculesMPI(moleculeContainer, domain, LEAVING_AND_HALO_COPIES);
} else {
Log::global_log->debug() << "DD: Sending Leaving." << std::endl;
Log::global_log->debug() << "DD: Sending Leaving.\n";
DomainDecompMPIBase::exchangeMoleculesMPI(moleculeContainer, domain, LEAVING_ONLY);
#ifndef MARDYN_AUTOPAS
moleculeContainer->deleteOuterParticles();
#endif
Log::global_log->debug() << "DD: Sending Halos." << std::endl;
Log::global_log->debug() << "DD: Sending Halos.\n";
DomainDecompMPIBase::exchangeMoleculesMPI(moleculeContainer, domain, HALO_COPIES);
}
}
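Side note on the `std::endl` to `"\n"` swap (illustration only, not from the PR): `std::endl` writes a newline and also flushes the stream, which can add overhead in frequently hit logging paths, while `'\n'` only writes the newline. A tiny sketch:

```cpp
#include <iostream>

int main() {
    std::cout << "with endl" << std::endl;  // newline + explicit flush
    std::cout << "with plain newline" << "\n";  // newline only; flushed when the buffer fills or at exit
}
```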
29 changes: 15 additions & 14 deletions src/parallel/GeneralDomainDecomposition.cpp
@@ -36,22 +36,23 @@ void GeneralDomainDecomposition::initializeALL() {
Log::global_log->info() << "gridSize:" << gridSize[0] << ", " << gridSize[1] << ", " << gridSize[2] << std::endl;
Log::global_log->info() << "gridCoords:" << gridCoords[0] << ", " << gridCoords[1] << ", " << gridCoords[2] << std::endl;
std::tie(_boxMin, _boxMax) = initializeRegularGrid(_domainLength, gridSize, gridCoords);
if (_forceLatchingToLinkedCellsGrid and not _gridSize.has_value()) {
if (_forceLatchingToLinkedCellsGrid and not _latchGridSize.has_value()) {
std::array<double, 3> forcedGridSize{};
for(size_t dim = 0; dim < 3; ++dim){
size_t numCells = _domainLength[dim] / _interactionLength;
// if we calculate 3.5 cells per dim there is only space for 3 -> floor
const auto numCells = std::floor(_domainLength[dim] / _interactionLength);
forcedGridSize[dim] = _domainLength[dim] / numCells;
}
_gridSize = forcedGridSize;
_latchGridSize = forcedGridSize;
}
if (_gridSize.has_value()) {
if (_latchGridSize.has_value()) {
std::tie(_boxMin, _boxMax) = latchToGridSize(_boxMin, _boxMax);
}
#ifdef ENABLE_ALLLBL
// Increased slightly to prevent rounding errors.
const double safetyFactor = 1. + 1.e-10;
const std::array<double, 3> minimalDomainSize =
_gridSize.has_value() ? *_gridSize
_latchGridSize.has_value() ? *_latchGridSize
: std::array{_interactionLength * safetyFactor, _interactionLength * safetyFactor,
_interactionLength * safetyFactor};

Expand Down Expand Up @@ -96,17 +97,17 @@ void GeneralDomainDecomposition::balanceAndExchange(double lastTraversalTime, bo
moleculeContainer->deleteOuterParticles();

// rebalance
Log::global_log->info() << "rebalancing..." << std::endl;
Log::global_log->debug() << "rebalancing..." << std::endl;

Log::global_log->set_mpi_output_all();
Log::global_log->debug() << "work:" << lastTraversalTime << std::endl;
Log::global_log->set_mpi_output_root(0);
auto [newBoxMin, newBoxMax] = _loadBalancer->rebalance(lastTraversalTime);
if (_gridSize.has_value()) {
if (_latchGridSize.has_value()) {
std::tie(newBoxMin, newBoxMax) = latchToGridSize(newBoxMin, newBoxMax);
}
// migrate the particles, this will rebuild the moleculeContainer!
Log::global_log->info() << "migrating particles" << std::endl;
Log::global_log->debug() << "migrating particles" << std::endl;
migrateParticles(domain, moleculeContainer, newBoxMin, newBoxMax);

#ifndef MARDYN_AUTOPAS
@@ -119,9 +120,9 @@ void GeneralDomainDecomposition::balanceAndExchange(double lastTraversalTime, bo
_boxMax = newBoxMax;

// init communication partners
Log::global_log->info() << "updating communication partners" << std::endl;
Log::global_log->debug() << "updating communication partners" << std::endl;
initCommPartners(moleculeContainer, domain);
Log::global_log->info() << "rebalancing finished" << std::endl;
Log::global_log->debug() << "rebalancing finished" << std::endl;
DomainDecompMPIBase::exchangeMoleculesMPI(moleculeContainer, domain, HALO_COPIES);
} else {
if (sendLeavingWithCopies()) {
@@ -250,7 +251,7 @@ void GeneralDomainDecomposition::migrateParticles(Domain* domain, ParticleContai

void GeneralDomainDecomposition::initCommPartners(ParticleContainer* moleculeContainer,
Domain* domain) { // init communication partners
auto coversWholeDomain = _loadBalancer->getCoversWholeDomain();
const auto coversWholeDomain = _loadBalancer->getCoversWholeDomain();
for (int d = 0; d < DIMgeom; ++d) {
// this needs to be updated for proper initialization of the neighbours
_neighbourCommunicationScheme->setCoverWholeDomain(d, coversWholeDomain[d]);
@@ -292,12 +293,12 @@ void GeneralDomainDecomposition::readXML(XMLfileUnits& xmlconfig) {
<< strings.size() << "!" << std::endl;
mardyn_exit(8134);
}
_gridSize = {std::stod(strings[0]), std::stod(strings[1]), std::stod(strings[2])};
_latchGridSize = {std::stod(strings[0]), std::stod(strings[1]), std::stod(strings[2])};
} else {
double gridSize = std::stod(gridSizeString);
_gridSize = {gridSize, gridSize, gridSize};
_latchGridSize = {gridSize, gridSize, gridSize};
}
for (auto gridSize : *_gridSize) {
for (auto gridSize : *_latchGridSize) {
if (gridSize < _interactionLength) {
Log::global_log->error() << "GeneralDomainDecomposition's gridSize (" << gridSize
<< ") is smaller than the interactionLength (" << _interactionLength
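Illustrative arithmetic for the forced grid size in `initializeALL` above (example numbers are mine, not from the PR): with a domain length of 17.5 and an interaction length of 5.0, only three whole cells fit, so the latched cell size becomes 17.5 / 3 ≈ 5.83.

```cpp
#include <array>
#include <cmath>
#include <cstddef>
#include <iostream>

int main() {
    // Example values only; the real code takes these from the domain and configuration.
    const std::array<double, 3> domainLength{17.5, 20.0, 10.0};
    const double interactionLength = 5.0;

    std::array<double, 3> forcedGridSize{};
    for (std::size_t dim = 0; dim < 3; ++dim) {
        // 3.5 cells of length 5.0 fit into 17.5, but only 3 whole cells -> floor
        const double numCells = std::floor(domainLength[dim] / interactionLength);
        forcedGridSize[dim] = domainLength[dim] / numCells;
    }
    std::cout << forcedGridSize[0] << " " << forcedGridSize[1] << " " << forcedGridSize[2] << '\n';
    // prints approximately: 5.83333 5 5
}
```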
6 changes: 3 additions & 3 deletions src/parallel/GeneralDomainDecomposition.h
@@ -172,7 +172,7 @@ class GeneralDomainDecomposition : public DomainDecompMPIBase {
std::pair<std::array<double, 3>, std::array<double, 3>> latchToGridSize(std::array<double, 3> boxMin,
std::array<double, 3> boxMax) {
for (size_t ind = 0; ind < 3; ++ind) {
double currentGridSize = (*_gridSize)[ind];
const double currentGridSize = (*_latchGridSize)[ind];
// For boxmin, the lower domain boundary is 0, so that's always fine!
boxMin[ind] = std::round(boxMin[ind] / currentGridSize) * currentGridSize;
// update boxmax only if it isn't at the very top of the domain!
@@ -197,10 +197,10 @@ class GeneralDomainDecomposition : public DomainDecompMPIBase {
size_t _initFrequency{500};

/**
* Optionally safe a given grid size on which the process boundaries are bound/latched.
* Optionally, give a grid size (= size of one grid cell) on which the process boundaries are bound/latched.
* If no value is given, it is not used.
*/
std::optional<std::array<double, 3>> _gridSize{};
std::optional<std::array<double, 3>> _latchGridSize{};

/**
* Bool that indicates whether a grid should be forced even if no gridSize is set.
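A self-contained sketch of the latching idea in `latchToGridSize` (simplified: the upper-domain-boundary special case is omitted and the values are made up): each box boundary is rounded to the nearest multiple of the grid size.

```cpp
#include <array>
#include <cmath>
#include <cstddef>
#include <iostream>

// Round each boundary coordinate to the nearest multiple of the grid size.
std::array<double, 3> latch(std::array<double, 3> box, const std::array<double, 3>& gridSize) {
    for (std::size_t ind = 0; ind < 3; ++ind) {
        box[ind] = std::round(box[ind] / gridSize[ind]) * gridSize[ind];
    }
    return box;
}

int main() {
    const std::array<double, 3> gridSize{2.5, 2.5, 2.5};
    const auto latched = latch({3.4, 6.3, 1.1}, gridSize);
    std::cout << latched[0] << " " << latched[1] << " " << latched[2] << '\n';  // prints: 2.5 7.5 0
}
```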
2 changes: 1 addition & 1 deletion src/parallel/LoadBalancer.h
@@ -40,5 +40,5 @@ class LoadBalancer {
* Indicates if the current process / MPI rank spans the full length of a dimension.
* @return Array of bools, for each dimension one value: true, iff the process spans the entire domain along this dimension.
*/
virtual std::array<bool, 3> getCoversWholeDomain() = 0;
virtual const std::array<bool, 3>& getCoversWholeDomain() const = 0;
};
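Illustration of the interface change above (a sketch with a made-up implementer, not the actual MarDyn classes): the getter now returns a const reference to a member and is itself const, so it can be called on const balancer objects without copying the array.

```cpp
#include <array>
#include <iostream>

class LoadBalancerExample {  // hypothetical minimal interface
public:
    virtual ~LoadBalancerExample() = default;
    virtual const std::array<bool, 3>& getCoversWholeDomain() const = 0;
};

class TrivialBalancer : public LoadBalancerExample {
public:
    const std::array<bool, 3>& getCoversWholeDomain() const override { return _coversWholeDomain; }

private:
    std::array<bool, 3> _coversWholeDomain{true, false, false};
};

int main() {
    const TrivialBalancer balancer{};
    const auto& covers = balancer.getCoversWholeDomain();  // no copy, works on a const object
    std::cout << covers[0] << covers[1] << covers[2] << '\n';  // prints: 100
}
```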