From 2f8772b9aa27c09f3420553b21ec58afb20fd910 Mon Sep 17 00:00:00 2001
From: Hugh Carson
Date: Mon, 21 Aug 2023 14:40:01 -0400
Subject: [PATCH] Minor cleanup

---
 palace/drivers/basesolver.hpp          |   2 +-
 palace/drivers/drivensolver.cpp        |  23 +++---
 palace/drivers/drivensolver.hpp        |   2 +-
 palace/drivers/eigensolver.cpp         |   8 +-
 palace/drivers/eigensolver.hpp         |   2 +-
 palace/drivers/electrostaticsolver.cpp |  18 ++---
 palace/drivers/electrostaticsolver.hpp |   2 +-
 palace/drivers/magnetostaticsolver.cpp |  17 ++---
 palace/drivers/magnetostaticsolver.hpp |   2 +-
 palace/drivers/transientsolver.cpp     |   1 -
 palace/drivers/transientsolver.hpp     |   2 +-
 palace/fem/coefficient.hpp             |   1 -
 palace/fem/multigrid.hpp               |  18 ++---
 palace/linalg/errorestimator.cpp       | 101 +++++++++++--------------
 palace/linalg/fluxprojector.cpp        |  15 ++--
 palace/linalg/fluxprojector.hpp        |   6 +-
 palace/models/postoperator.cpp         |   1 -
 palace/models/postoperator.hpp         |   2 +-
 palace/utils/errorindicators.cpp       |   5 +-
 palace/utils/errorindicators.hpp       |   5 +-
 20 files changed, 103 insertions(+), 130 deletions(-)

diff --git a/palace/drivers/basesolver.hpp b/palace/drivers/basesolver.hpp
index fa6f0e438..2aa043499 100644
--- a/palace/drivers/basesolver.hpp
+++ b/palace/drivers/basesolver.hpp
@@ -18,7 +18,7 @@ class ParMesh;
 namespace palace
 {
 
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class PostOperator;
 class Timer;
diff --git a/palace/drivers/drivensolver.cpp b/palace/drivers/drivensolver.cpp
index 16c0b232e..fc6cea873 100644
--- a/palace/drivers/drivensolver.cpp
+++ b/palace/drivers/drivensolver.cpp
@@ -119,7 +119,6 @@ ErrorIndicators DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator
   // simply by setting diagonal entries of the system matrix for the corresponding dofs.
   // Because the Dirichlet BC is always homogeneous, no special elimination is required on
   // the RHS. Assemble the linear system for the initial frequency (so we can call
-  // KspSolver::SetOperators). Compute everything at the first frequency step.
   auto K = spaceop.GetStiffnessMatrix<ComplexOperator>(Operator::DIAG_ONE);
   auto C = spaceop.GetDampingMatrix<ComplexOperator>(Operator::DIAG_ZERO);
@@ -149,7 +148,7 @@ ErrorIndicators DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator
   // Initialize structures for storing and reducing the results of error estimation.
   ErrorIndicators indicators(spaceop.GlobalTrueVSize());
   const auto error_reducer = ErrorReductionOperator();
-  auto update_error_indicators =
+  auto UpdateErrorIndicators =
       [&timer, &estimator, &indicators, &error_reducer, &postop](const auto &E)
   {
     auto ind = estimator(E);
@@ -163,7 +162,7 @@ ErrorIndicators DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator
   // Main frequency sweep loop.
   double omega = omega0;
   auto t0 = timer.Now();
-  for (int step = step0; step < nstep; ++step)
+  for (int step = step0; step < nstep; step++)
   {
     const double freq = iodata.DimensionalizeValue(IoData::ValueType::FREQUENCY, omega);
     Mpi::Print("\nIt {:d}/{:d}: ω/2π = {:.3e} GHz (elapsed time = {:.2e} s)\n", step + 1,
@@ -187,12 +186,10 @@ ErrorIndicators DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator
     Mpi::Print("\n");
     ksp.Mult(RHS, E);
     timer.solve_time += timer.Lap();
-
-    update_error_indicators(E);
+    UpdateErrorIndicators(E);
 
     // Compute B = -1/(iω) ∇ x E on the true dofs, and set the internal GridFunctions in
     // PostOperator for all postprocessing operations.
-    double E_elec = 0.0, E_mag = 0.0;
     Curl->Mult(E, B);
     B *= -1.0 / (1i * omega);
@@ -220,7 +217,6 @@ ErrorIndicators DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator
     // Increment frequency.
     omega += delta_omega;
   }
-
   SaveMetadata(ksp);
   return indicators;
 }
@@ -286,7 +282,7 @@ ErrorIndicators DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator
   // the online stage.
   ErrorIndicators indicators(spaceop.GlobalTrueVSize());
   ErrorReductionOperator error_reducer;
-  auto update_error_indicators =
+  auto UpdateErrorIndicators =
       [&local_timer, &estimator, &indicators, &error_reducer](const auto &E)
   {
     error_reducer(indicators, estimator(E));
@@ -296,12 +292,12 @@ ErrorIndicators DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator
   prom.SolveHDM(omega0, E);  // Print matrix stats at first HDM solve
   local_timer.solve_time += local_timer.Lap();
-  update_error_indicators(E);
+  UpdateErrorIndicators(E);
   prom.AddHDMSample(omega0, E);
   local_timer.construct_time += local_timer.Lap();
   prom.SolveHDM(omega0 + (nstep - step0 - 1) * delta_omega, E);
   local_timer.solve_time += local_timer.Lap();
-  update_error_indicators(E);
+  UpdateErrorIndicators(E);
   prom.AddHDMSample(omega0 + (nstep - step0 - 1) * delta_omega, E);
   local_timer.construct_time += local_timer.Lap();
@@ -327,10 +323,10 @@ ErrorIndicators DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator
                max_error);
     prom.SolveHDM(omega_star, E);
     local_timer.solve_time += local_timer.Lap();
-    update_error_indicators(E);
+    UpdateErrorIndicators(E);
     prom.AddHDMSample(omega_star, E);
     local_timer.construct_time += local_timer.Lap();
-    ++iter;
+    iter++;
   }
   Mpi::Print("\nAdaptive sampling{} {:d} frequency samples:\n"
              " n = {:d}, error = {:.3e}, tol = {:.3e}\n",
@@ -397,10 +393,9 @@ ErrorIndicators DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator
     timer.postpro_time += timer.Lap() - (timer.io_time - io_time_prev);
 
     // Increment frequency.
-    ++step;
+    step++;
     omega += delta_omega;
   }
-
   return indicators;
 }
diff --git a/palace/drivers/drivensolver.hpp b/palace/drivers/drivensolver.hpp
index 855321272..c3264d8bd 100644
--- a/palace/drivers/drivensolver.hpp
+++ b/palace/drivers/drivensolver.hpp
@@ -19,7 +19,7 @@ namespace palace
 {
 
 class CurlFluxErrorEstimator;
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class LumpedPortOperator;
 class PostOperator;
diff --git a/palace/drivers/eigensolver.cpp b/palace/drivers/eigensolver.cpp
index 0d6d9ee28..7e3110a34 100644
--- a/palace/drivers/eigensolver.cpp
+++ b/palace/drivers/eigensolver.cpp
@@ -265,7 +265,7 @@ ErrorIndicators EigenSolver::Solve(const std::vector<std::unique_ptr<mfem::ParM
     eigen->GetEigenvector(i, E);
-
     Curl->Mult(E, B);
     B *= -1.0 / (1i * omega);
+
     timer.postpro_time += timer.Lap();
     if (i < iodata.solver.eigenmode.n)
     {
       // Only update the error indicator for targeted modes.
-      update_error_indicators(E);
+      UpdateErrorIndicators(E);
     }
     postop.SetEGridFunction(E);
     postop.SetBGridFunction(B);
-
     postop.UpdatePorts(spaceop.GetLumpedPortOp(), omega.real());
 
     // Postprocess the mode.
@@ -321,7 +320,6 @@ ErrorIndicators EigenSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
diff --git a/palace/drivers/electrostaticsolver.cpp b/palace/drivers/electrostaticsolver.cpp
--- a/palace/drivers/electrostaticsolver.cpp
+++ b/palace/drivers/electrostaticsolver.cpp
   // handled eliminating the rows and columns of the system matrix for the corresponding
   // dofs. The eliminated matrix is stored in order to construct the RHS vector for nonzero
   // prescribed BC values.
-  timer.Lap();
   LaplaceOperator laplaceop(iodata, mesh);
   auto K = laplaceop.GetStiffnessMatrix();
@@ -72,16 +71,15 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
     timer.postpro_time += timer.Lap();
 
     // Next terminal.
-    ++step;
+    step++;
   }
 
+  // Construct estimator and reducer for error indicators.
   GradFluxErrorEstimator estimator(iodata, laplaceop.GetMaterialOp(), mesh,
                                    laplaceop.GetH1Space());
-
-  // Evaluate error estimator and reduce over all
   auto indicators = ErrorIndicators(laplaceop.GlobalTrueVSize());
   ErrorReductionOperator error_reducer;
-  auto update_error_indicators =
+  auto UpdateErrorIndicators =
       [&timer, &estimator, &indicators, &error_reducer](const auto &V)
   {
     error_reducer(indicators, estimator(V));
@@ -90,7 +88,7 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
   timer.est_construction_time += timer.Lap();
 
   // Compute and reduce the error indicators for each solution.
-  std::for_each(V.begin(), V.end(), update_error_indicators);
+  std::for_each(V.begin(), V.end(), UpdateErrorIndicators);
 
   // Register the indicator field used to drive the overall adaptation.
   postop.SetIndicatorGridFunction(indicators.local_error_indicators);
@@ -100,7 +98,6 @@ ElectrostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
   SaveMetadata(ksp);
   Postprocess(laplaceop, postop, V, timer);
   timer.postpro_time += timer.Lap() - (timer.io_time - io_time_prev);
-
   return indicators;
 }
@@ -129,7 +126,6 @@ void ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop, PostOperator &
     // for all postprocessing operations.
     E = 0.0;
     Grad->AddMult(V[i], E, -1.0);
-
     postop.SetEGridFunction(E);
     postop.SetVGridFunction(V[i]);
     double Ue = postop.GetEFieldEnergy();
@@ -146,13 +142,13 @@ void ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop, PostOperator &
     // Diagonal: C_ii = 2 U_e(V_i) / V_i².
     C(i, i) = Cm(i, i) = 2.0 * Ue;
-    ++i;
+    i++;
   }
 
   // Off-diagonals: C_ij = U_e(V_i + V_j) / (V_i V_j) - 1/2 (V_i/V_j C_ii + V_j/V_i C_jj).
-  for (i = 0; i < C.Height(); ++i)
+  for (i = 0; i < C.Height(); i++)
   {
-    for (int j = 0; j < C.Width(); ++j)
+    for (int j = 0; j < C.Width(); j++)
     {
       if (j < i)
       {
diff --git a/palace/drivers/electrostaticsolver.hpp b/palace/drivers/electrostaticsolver.hpp
index b5c07db51..1e9112819 100644
--- a/palace/drivers/electrostaticsolver.hpp
+++ b/palace/drivers/electrostaticsolver.hpp
@@ -23,7 +23,7 @@ class ParMesh;
 namespace palace
 {
 
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class LaplaceOperator;
 class PostOperator;
diff --git a/palace/drivers/magnetostaticsolver.cpp b/palace/drivers/magnetostaticsolver.cpp
index f0ece3bc0..4ac1f3307 100644
--- a/palace/drivers/magnetostaticsolver.cpp
+++ b/palace/drivers/magnetostaticsolver.cpp
@@ -71,7 +71,7 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
     timer.postpro_time += timer.Lap();
 
     // Next source.
-    ++step;
+    step++;
   }
 
   CurlFluxErrorEstimator estimator(iodata, curlcurlop.GetMaterialOp(), mesh,
@@ -80,7 +80,7 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
   // Initialize structures for storing and reducing the results of error estimation.
   ErrorIndicators indicators(curlcurlop.GlobalTrueVSize());
   ErrorReductionOperator error_reducer;
-  auto update_error_indicators =
+  auto UpdateErrorIndicators =
       [&timer, &estimator, &indicators, &error_reducer](const auto &A)
   {
     error_reducer(indicators, estimator(A));
@@ -89,7 +89,7 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
   timer.est_construction_time += timer.Lap();
 
   // Compute and reduce the error indicators for each solution.
-  std::for_each(A.begin(), A.end(), update_error_indicators);
+  std::for_each(A.begin(), A.end(), UpdateErrorIndicators);
 
   // Register the indicator field used to drive the overall adaptation.
   postop.SetIndicatorGridFunction(indicators.local_error_indicators);
@@ -99,7 +99,6 @@ MagnetostaticSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &me
   SaveMetadata(ksp);
   Postprocess(curlcurlop, postop, A, timer);
   timer.postpro_time += timer.Lap() - (timer.io_time - io_time_prev);
-
   return indicators;
 }
@@ -150,13 +149,13 @@ void MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop, PostOperator
     // Diagonal: M_ii = 2 U_m(A_i) / I_i².
     M(i, i) = Mm(i, i) = 2.0 * Um / (Iinc(i) * Iinc(i));
-    ++i;
+    i++;
   }
 
   // Off-diagonals: M_ij = U_m(A_i + A_j) / (I_i I_j) - 1/2 (I_i/I_j M_ii + I_j/I_i M_jj).
-  for (i = 0; i < M.Height(); ++i)
+  for (i = 0; i < M.Height(); i++)
   {
-    for (int j = 0; j < M.Width(); ++j)
+    for (int j = 0; j < M.Width(); j++)
     {
       if (j < i)
       {
@@ -223,10 +222,10 @@ void MagnetostaticSolver::PostprocessTerminals(const SurfaceCurrentOperator &sur
                      mat(i, j) * scale, table.w, table.p,
                      (idx2 == surf_j_op.rbegin()->first) ? "" : ",");
         // clang-format on
-        ++j;
+        j++;
       }
       output.print("\n");
-      ++i;
+      i++;
     }
   };
   const double H = iodata.DimensionalizeValue(IoData::ValueType::INDUCTANCE, 1.0);
diff --git a/palace/drivers/magnetostaticsolver.hpp b/palace/drivers/magnetostaticsolver.hpp
index 38af65987..812ecfee0 100644
--- a/palace/drivers/magnetostaticsolver.hpp
+++ b/palace/drivers/magnetostaticsolver.hpp
@@ -21,7 +21,7 @@ namespace palace
 {
 
 class CurlCurlOperator;
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class PostOperator;
 class SurfaceCurrentOperator;
diff --git a/palace/drivers/transientsolver.cpp b/palace/drivers/transientsolver.cpp
index d24ff0bfd..265840b87 100644
--- a/palace/drivers/transientsolver.cpp
+++ b/palace/drivers/transientsolver.cpp
@@ -127,7 +127,6 @@ TransientSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &mesh,
 
     // Increment time step.
     step++;
   }
-
   SaveMetadata(timeop.GetLinearSolver());
   return ErrorIndicators(spaceop.GlobalTrueVSize());
 }
diff --git a/palace/drivers/transientsolver.hpp b/palace/drivers/transientsolver.hpp
index 2593ad578..ee20d76b7 100644
--- a/palace/drivers/transientsolver.hpp
+++ b/palace/drivers/transientsolver.hpp
@@ -19,7 +19,7 @@ class ParMesh;
 namespace palace
 {
 
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class LumpedPortOperator;
 class PostOperator;
diff --git a/palace/fem/coefficient.hpp b/palace/fem/coefficient.hpp
index 63c8d801b..2e903db32 100644
--- a/palace/fem/coefficient.hpp
+++ b/palace/fem/coefficient.hpp
@@ -499,7 +499,6 @@ class CurlFluxCoefficient : public mfem::VectorCoefficient
                     const mfem::IntegrationPoint &ip) override
   {
     V.SetSize(3);
-
     coef.Eval(muinv, T, ip);
     X.GetCurl(T, curl);
     muinv.Mult(curl, V);
diff --git a/palace/fem/multigrid.hpp b/palace/fem/multigrid.hpp
index ac7fcb919..3db6e308e 100644
--- a/palace/fem/multigrid.hpp
+++ b/palace/fem/multigrid.hpp
@@ -127,22 +127,21 @@ template <typename FECollection>
 inline mfem::ParFiniteElementSpaceHierarchy ConstructFiniteElementSpaceHierarchy(
     int mg_max_levels, bool mg_legacy_transfer, int pa_order_threshold,
     const std::vector<std::unique_ptr<mfem::ParMesh>> &mesh,
-    const std::vector<std::unique_ptr<FECollection>> &fecs, int dim = 1,
-    int ordering = mfem::Ordering::byNODES)
+    const std::vector<std::unique_ptr<FECollection>> &fecs, int dim = 1)
 {
   MFEM_VERIFY(!mesh.empty() && !fecs.empty(),
               "Empty mesh or FE collection for FE space construction!");
   int coarse_mesh_l =
       std::max(0, static_cast<int>(mesh.size() + fecs.size()) - 1 - mg_max_levels);
-  auto *fespace = new mfem::ParFiniteElementSpace(mesh[coarse_mesh_l].get(), fecs[0].get(),
-                                                  dim, ordering);
+  auto *fespace =
+      new mfem::ParFiniteElementSpace(mesh[coarse_mesh_l].get(), fecs[0].get(), dim);
   mfem::ParFiniteElementSpaceHierarchy fespaces(mesh[coarse_mesh_l].get(), fespace, false,
                                                 true);
 
   // h-refinement
   for (std::size_t l = coarse_mesh_l + 1; l < mesh.size(); l++)
   {
-    fespace = new mfem::ParFiniteElementSpace(mesh[l].get(), fecs[0].get(), dim, ordering);
+    fespace = new mfem::ParFiniteElementSpace(mesh[l].get(), fecs[0].get(), dim);
 
     auto *P = new ParOperator(
         std::make_unique<mfem::TransferOperator>(fespaces.GetFinestFESpace(), *fespace),
@@ -153,8 +152,7 @@ inline mfem::ParFiniteElementSpaceHierarchy ConstructFiniteElementSpaceHierarchy
   // p-refinement
   for (std::size_t l = 1; l < fecs.size(); l++)
   {
-    fespace =
-        new mfem::ParFiniteElementSpace(mesh.back().get(), fecs[l].get(), dim, ordering);
+    fespace = new mfem::ParFiniteElementSpace(mesh.back().get(), fecs[l].get(), dim);
 
     ParOperator *P;
     if (!mg_legacy_transfer && mfem::DeviceCanUseCeed())
@@ -185,11 +183,11 @@ mfem::ParFiniteElementSpaceHierarchy ConstructFiniteElementSpaceHierarchy(
     int mg_max_levels, bool mg_legacy_transfer, int pa_order_threshold,
     const Container &mesh, const std::vector<std::unique_ptr<FECollection>> &fecs,
     const mfem::Array<int> &dbc_marker, std::vector<mfem::Array<int>> &dbc_tdof_lists,
-    int dim = 1, int ordering = mfem::Ordering::byNODES)
+    int dim = 1)
 {
   dbc_tdof_lists.clear();
-  auto fespaces = ConstructFiniteElementSpaceHierarchy(
-      mg_max_levels, mg_legacy_transfer, pa_order_threshold, mesh, fecs, dim, ordering);
+  auto fespaces = ConstructFiniteElementSpaceHierarchy(mg_max_levels, mg_legacy_transfer,
+                                                       pa_order_threshold, mesh, fecs, dim);
   for (int l = 0; l < fespaces.GetNumLevels(); l++)
   {
     fespaces.GetFESpaceAtLevel(l).GetEssentialTrueDofs(dbc_marker,
diff --git a/palace/linalg/errorestimator.cpp b/palace/linalg/errorestimator.cpp
index afdfcd0ff..a09c656d2 100644
--- a/palace/linalg/errorestimator.cpp
+++ b/palace/linalg/errorestimator.cpp
@@ -23,18 +23,19 @@ CurlFluxErrorEstimator::CurlFluxErrorEstimator(
   : mat_op(mat_op), fes(fes),
     smooth_flux_fecs(ConstructFECollections(
         iodata.solver.order, mesh.back()->Dimension(), iodata.solver.linear.mg_max_levels,
-        iodata.solver.linear.mg_coarsen_type, false)),  // TODO: pc_lor?
+        iodata.solver.linear.mg_coarsen_type, iodata.solver.linear.pc_mat_lor)),
     smooth_flux_fes(ConstructFiniteElementSpaceHierarchy(
         iodata.solver.linear.mg_max_levels, iodata.solver.linear.mg_legacy_transfer,
         iodata.solver.pa_order_threshold, mesh, smooth_flux_fecs)),
-    smooth_projector(smooth_flux_fes, iodata.solver.linear.tol, 200, 0, iodata.solver.pa_order_threshold),
+    smooth_projector(smooth_flux_fes, iodata.solver.linear.tol, 200, 0,
+                     iodata.solver.pa_order_threshold),
     coarse_flux_fec(iodata.solver.order, mesh.back()->Dimension(),
                     mfem::BasisType::GaussLobatto),
     coarse_flux_fes(mesh.back().get(), &coarse_flux_fec, mesh.back()->Dimension()),
     scalar_mass_matrices(fes.GetNE()), smooth_to_coarse_embed(fes.GetNE())
 {
   mfem::MassIntegrator mass_integrator;
-  for (int e = 0; e < fes.GetNE(); ++e)
+  for (int e = 0; e < fes.GetNE(); e++)
   {
     // Loop over each element, and save an elemental mass matrix.
     // Will exploit the fact that vector L2 mass matrix components are independent.
@@ -74,11 +75,10 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
   const auto smooth_flux_rhs =
       ComplexVector(rhs_from_coef(smooth_flux_fes.GetFinestFESpace(), real_coef),
                     rhs_from_coef(smooth_flux_fes.GetFinestFESpace(), imag_coef));
-
   local_timer.construct_time += local_timer.Lap();
 
-  // Given the RHS vector of non-smooth flux, construct a flux projector and
-  // perform mass matrix inversion in the appropriate space, giving f = M⁻¹ f̂.
+  // Given the RHS vector of non-smooth flux, construct a flux projector and perform mass
+  // matrix inversion in the appropriate space, giving f = M⁻¹ f̂.
   auto build_flux = [](const FluxProjector &proj, const ComplexVector &flux_coef)
   {
     // Use a copy construction to match appropriate size.
@@ -87,11 +87,10 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
     return flux;
   };
   auto smooth_flux = build_flux(smooth_projector, smooth_flux_rhs);
-
   local_timer.solve_time += local_timer.Lap();
 
-  // Given a complex solution represented with a PetscParVector, build a
-  // ComplexGridFunction for evaluation.
+  // Given a complex solution represented with a PetscParVector, build a ComplexGridFunction
+  // for evaluation.
   auto build_func = [](const ComplexVector &f, mfem::ParFiniteElementSpace &fes)
   {
     mfem::ParComplexGridFunction flux(&fes);
@@ -105,20 +104,18 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
   auto smooth_flux_func = build_func(smooth_flux, smooth_flux_fes.GetFinestFESpace());
 
   mfem::ParComplexGridFunction coarse_flux_func(&coarse_flux_fes);
-  local_timer.construct_time += local_timer.Lap();
+
   coarse_flux_func.real().ProjectCoefficient(real_coef);
   coarse_flux_func.imag().ProjectCoefficient(imag_coef);
-  local_timer.solve_time += local_timer.Lap();
 
-  // Loop over elements, embed the smooth flux into the coarse flux space, then
-  // compute squared integral using a component-wise mass matrix.
+  // Loop over elements, embed the smooth flux into the coarse flux space, then compute
+  // squared integral using a component-wise mass matrix.
   Vector smooth_vec, coarse_vec, sub_vec, estimates(nelem);
   estimates = 0.0;
   double normalization = 0.0;
-
-  for (int e = 0; e < fes.GetNE(); ++e)
+  for (int e = 0; e < fes.GetNE(); e++)
   {
     // real
     smooth_flux_func.real().GetElementDofValues(e, smooth_vec);
@@ -126,14 +123,14 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
     const int ndof = coarse_vec.Size() / 3;
     sub_vec.SetSize(ndof);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       normalization += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
     }
     smooth_to_coarse_embed[e].AddMult(smooth_vec, coarse_vec, -1.0);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       estimates[e] += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
@@ -143,14 +140,14 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
     smooth_flux_func.imag().GetElementDofValues(e, smooth_vec);
     coarse_flux_func.imag().GetElementDofValues(e, coarse_vec);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
    {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       normalization += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
     }
     smooth_to_coarse_embed[e].AddMult(smooth_vec, coarse_vec, -1.0);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       estimates[e] += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
@@ -165,7 +162,6 @@ Vector CurlFluxErrorEstimator::operator()(const ComplexVector &v) const
   std::for_each(estimates.begin(), estimates.end(),
                 [&normalization](auto &x) { x /= normalization; });
-
   local_timer.postpro_time += local_timer.Lap();
 
   if constexpr (false)
@@ -226,8 +222,8 @@ Vector CurlFluxErrorEstimator::operator()(const Vector &v) const
   const auto smooth_flux_rhs = rhs_from_coef(smooth_flux_fes.GetFinestFESpace(), coef);
   local_timer.construct_time += local_timer.Lap();
 
-  // Given the RHS vector of non-smooth flux, construct a flux projector and
-  // perform mass matrix inversion in the appropriate space, giving f = M⁻¹ f̂.
+  // Given the RHS vector of non-smooth flux, construct a flux projector and perform mass
+  // matrix inversion in the appropriate space, giving f = M⁻¹ f̂.
   auto build_flux = [](const FluxProjector &proj, const Vector &flux_coef)
   {
     // Use a copy construction to match appropriate size.
@@ -236,11 +232,10 @@ Vector CurlFluxErrorEstimator::operator()(const Vector &v) const
     return flux;
   };
   auto smooth_flux = build_flux(smooth_projector, smooth_flux_rhs);
-
   local_timer.solve_time += local_timer.Lap();
 
-  // Given a complex solution represented with a PetscParVector, build a
-  // ComplexGridFunction for evaluation.
+  // Given a complex solution represented with a PetscParVector, build a ComplexGridFunction
+  // for evaluation.
   auto build_func = [](const Vector &f, mfem::ParFiniteElementSpace &fes)
   {
     mfem::ParGridFunction flux(&fes);
@@ -252,33 +247,31 @@ Vector CurlFluxErrorEstimator::operator()(const Vector &v) const
   auto smooth_flux_func = build_func(smooth_flux, smooth_flux_fes.GetFinestFESpace());
 
   mfem::ParGridFunction coarse_flux_func(&coarse_flux_fes);
-  local_timer.construct_time += local_timer.Lap();
-  coarse_flux_func.ProjectCoefficient(coef);
+  coarse_flux_func.ProjectCoefficient(coef);
   local_timer.solve_time += local_timer.Lap();
 
-  // Loop over elements, embed the smooth flux into the coarse flux space, then
-  // compute squared integral using a component-wise mass matrix.
+  // Loop over elements, embed the smooth flux into the coarse flux space, then compute
+  // squared integral using a component-wise mass matrix.
   Vector smooth_vec, coarse_vec, sub_vec, estimates(nelem);
   estimates = 0.0;
   double normalization = 0.0;
-
-  for (int e = 0; e < fes.GetNE(); ++e)
+  for (int e = 0; e < fes.GetNE(); e++)
   {
     smooth_flux_func.GetElementDofValues(e, smooth_vec);
     coarse_flux_func.GetElementDofValues(e, coarse_vec);
     const int ndof = coarse_vec.Size() / 3;
     sub_vec.SetSize(ndof);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       normalization += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
     }
     smooth_to_coarse_embed[e].AddMult(smooth_vec, coarse_vec, -1.0);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       sub_vec.MakeRef(coarse_vec, c * ndof);
       estimates[e] += scalar_mass_matrices[e].InnerProduct(sub_vec, sub_vec);
@@ -293,7 +286,6 @@ Vector CurlFluxErrorEstimator::operator()(const Vector &v) const
   std::for_each(estimates.begin(), estimates.end(),
                 [&normalization](auto &x) { x /= normalization; });
-
   local_timer.postpro_time += local_timer.Lap();
 
   if constexpr (false)
@@ -334,13 +326,16 @@ GradFluxErrorEstimator::GradFluxErrorEstimator(
     mfem::ParFiniteElementSpace &fes)
   : mat_op(mat_op), fes(fes),
     smooth_flux_fecs(ConstructFECollections(
-        iodata.solver.order, mesh.back()->Dimension(), iodata.solver.linear.mg_max_levels,
+        iodata.solver.order, mesh.back()->Dimension(), iodata.solver.linear.mg_max_levels,
         iodata.solver.linear.mg_coarsen_type, false)),
-    smooth_flux_component_fes(utils::ConstructFiniteElementSpaceHierarchy(
-        iodata.solver.linear.mg_max_levels, iodata.solver.linear.mg_legacy_transfer,
-        iodata.solver.pa_order_threshold, mesh, smooth_flux_fecs)),
-    smooth_flux_fes(mesh.back().get(), smooth_flux_fecs.back().get(), mesh.back()->Dimension()),
-    smooth_projector(smooth_flux_component_fes, iodata.solver.linear.tol, 200, 0, iodata.solver.pa_order_threshold),
+    smooth_flux_component_fes(
+        utils::ConstructFiniteElementSpaceHierarchy(
+            iodata.solver.linear.mg_max_levels, iodata.solver.linear.mg_legacy_transfer,
+            iodata.solver.pa_order_threshold, mesh, smooth_flux_fecs)),
+    smooth_flux_fes(mesh.back().get(), smooth_flux_fecs.back().get(),
+                    mesh.back()->Dimension()),
+    smooth_projector(smooth_flux_component_fes, iodata.solver.linear.tol, 200, 0,
+                     iodata.solver.pa_order_threshold),
     coarse_flux_fec(iodata.solver.order, mesh.back()->Dimension(),
                     mfem::BasisType::GaussLobatto),
     coarse_flux_fes(mesh.back().get(), &coarse_flux_fec, mesh.back()->Dimension()),
@@ -348,7 +343,7 @@ GradFluxErrorEstimator::GradFluxErrorEstimator(
     scalar_mass_matrices(fes.GetNE()), smooth_to_coarse_embed(fes.GetNE())
 {
   mfem::MassIntegrator mass_integrator;
-  for (int e = 0; e < fes.GetNE(); ++e)
+  for (int e = 0; e < fes.GetNE(); e++)
   {
     // Loop over each element, and save an elemental mass matrix.
     // Will exploit the fact that vector L2 mass matrix components are independent.
@@ -387,9 +382,8 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
   auto smooth_flux_rhs = rhs_from_coef(smooth_flux_fes, coef);
   local_timer.construct_time += local_timer.Lap();
 
-  // Given the RHS vector of non-smooth flux, construct a flux projector and
-  // perform component wise mass matrix inversion in the appropriate space,
-  // giving fᵢ = M⁻¹ f̂ᵢ.
+  // Given the RHS vector of non-smooth flux, construct a flux projector and perform
+  // component wise mass matrix inversion in the appropriate space, giving fᵢ = M⁻¹ f̂ᵢ.
   auto build_flux = [](const FluxProjector &proj, Vector &rhs)
   {
     // Use a copy construction to match appropriate size.
@@ -402,7 +396,7 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
     MFEM_ASSERT(ndof % 3 == 0, "!");
     Vector flux_comp, rhs_comp;
-    for (std::size_t i = 0; i < 3; ++i)
+    for (int i = 0; i < 3; i++)
     {
       flux_comp.MakeRef(flux, i * stride, stride);
       rhs_comp.MakeRef(rhs, i * stride, stride);
@@ -413,7 +407,6 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
   };
   auto smooth_flux = build_flux(smooth_projector, smooth_flux_rhs);
-
   local_timer.solve_time += local_timer.Lap();
 
   // Given a solution represented with a Vector, build a GridFunction for evaluation.
@@ -431,14 +424,12 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
   local_timer.construct_time += local_timer.Lap();
 
   coarse_flux.ProjectCoefficient(coef);
-
   local_timer.solve_time += local_timer.Lap();
 
   Vector coarse_vec, smooth_vec, coarse_sub_vec, smooth_sub_vec, estimates(nelem);
   estimates = 0.0;
-
   double normalization = 0.0;
-  for (int e = 0; e < fes.GetNE(); ++e)
+  for (int e = 0; e < fes.GetNE(); e++)
   {
     coarse_flux.GetElementDofValues(e, coarse_vec);
     smooth_flux_func.GetElementDofValues(e, smooth_vec);
@@ -452,18 +443,15 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
     coarse_sub_vec.SetSize(ndof);
     smooth_sub_vec.SetSize(ndof);
-    for (int c = 0; c < 3; ++c)
+    for (int c = 0; c < 3; c++)
     {
       coarse_sub_vec.MakeRef(coarse_vec, c * ndof);
       smooth_sub_vec.MakeRef(smooth_vec, c * ndof);
       normalization += scalar_mass_matrices[e].InnerProduct(coarse_sub_vec, coarse_sub_vec);
-
-      // Embed
-      smooth_to_coarse_embed[e].AddMult(smooth_sub_vec, coarse_sub_vec, -1.0);
-
-      // Integrate
-      estimates[e] += scalar_mass_matrices[e].InnerProduct(coarse_sub_vec, coarse_sub_vec);
+      smooth_to_coarse_embed[e].AddMult(smooth_sub_vec, coarse_sub_vec, -1.0);  // Embed
+      estimates[e] += scalar_mass_matrices[e].InnerProduct(coarse_sub_vec,
+                                                           coarse_sub_vec);  // Integrate
     }
 
     estimates[e] = std::sqrt(estimates[e]);
@@ -474,7 +462,6 @@ Vector GradFluxErrorEstimator::operator()(const Vector &v) const
   std::for_each(estimates.begin(), estimates.end(),
                 [&normalization](auto &x) { x /= normalization; });
-
   local_timer.postpro_time += local_timer.Lap();
 
   if constexpr (false)
diff --git a/palace/linalg/fluxprojector.cpp b/palace/linalg/fluxprojector.cpp
index 2101abb6d..6e1c6a217 100644
--- a/palace/linalg/fluxprojector.cpp
+++ b/palace/linalg/fluxprojector.cpp
@@ -17,7 +17,8 @@ namespace palace
 
 // Given a finite element space hierarchy, construct a vector of mass matrix
 // operators corresponding to each level.
-std::unique_ptr<Operator> BuildMassMatrixOperator(mfem::ParFiniteElementSpaceHierarchy &h, int pa_order_threshold)
+std::unique_ptr<Operator> BuildMassMatrixOperator(mfem::ParFiniteElementSpaceHierarchy &h,
+                                                  int pa_order_threshold)
 {
   constexpr int skip_zeros = 0;
   const bool is_scalar_FE_space =
@@ -25,7 +26,7 @@ std::unique_ptr<Operator> BuildMassMatrixOperator(mfem::ParFiniteElementSpaceHie
 
   // Assemble the bilinear form operator
   auto M = std::make_unique<MultigridOperator>(h.GetNumLevels());
-  for (int l = 0; l < h.GetNumLevels(); ++l)
+  for (int l = 0; l < h.GetNumLevels(); l++)
   {
     auto &h_l = h.GetFESpaceAtLevel(l);
     auto m = std::make_unique(&h_l);
@@ -42,7 +43,9 @@ std::unique_ptr<Operator> BuildMassMatrixOperator(mfem::ParFiniteElementSpaceHie
     }
 
     auto M_l = std::make_unique<ParOperator>(
-        utils::AssembleOperator(std::move(m), true, (l > 0) ? pa_order_threshold : 100, skip_zeros), h_l);
+        utils::AssembleOperator(std::move(m), true, (l > 0) ? pa_order_threshold : 100,
+                                skip_zeros),
+        h_l);
 
     // Set the essential dofs (none).
     M->AddOperator(std::move(M_l));
@@ -51,12 +54,14 @@ std::unique_ptr<Operator> BuildMassMatrixOperator(mfem::ParFiniteElementSpaceHie
 }
 
 FluxProjector::FluxProjector(mfem::ParFiniteElementSpaceHierarchy &smooth_flux_fes,
-                             double tol, int max_it, int print_level, int pa_order_threshold)
+                             double tol, int max_it, int print_level,
+                             int pa_order_threshold)
   : M(BuildMassMatrixOperator(smooth_flux_fes, pa_order_threshold))
 {
   // The system matrix for the projection is real and SPD. For the coarse-level AMG solve,
   // we don't use an exact solve on the coarsest level.
-  auto amg = std::make_unique<WrapperSolver<Operator>>(std::make_unique<BoomerAmgSolver>(1, 1, 0));
+  auto amg =
+      std::make_unique<WrapperSolver<Operator>>(std::make_unique<BoomerAmgSolver>(1, 1, 0));
   auto gmg = std::make_unique<GeometricMultigridSolver<Operator>>(
       std::move(amg), smooth_flux_fes, nullptr, 1, 1, 2, 1.0, 0.0, true, pa_order_threshold);
diff --git a/palace/linalg/fluxprojector.hpp b/palace/linalg/fluxprojector.hpp
index d3fb43ecc..eccda9fc0 100644
--- a/palace/linalg/fluxprojector.hpp
+++ b/palace/linalg/fluxprojector.hpp
@@ -33,8 +33,8 @@ class FluxProjector
   mutable Vector tmp;
 
 public:
-  FluxProjector(mfem::ParFiniteElementSpaceHierarchy &smooth_flux_fes,
-                double tol = 1e-12, int max_it = 200, int print_level = 1, int pa_order_threshold = 1);
+  FluxProjector(mfem::ParFiniteElementSpaceHierarchy &smooth_flux_fes, double tol = 1e-12,
+                int max_it = 200, int print_level = 1, int pa_order_threshold = 1);
 
   // Given a vector of dof defining the flux, compute the smooth flux IN PLACE.
   void Mult(Vector &x) const
@@ -53,8 +53,6 @@ class FluxProjector
     y = x;
     Mult(y);
   }
-
-  // TODO: Add in an ArrayMult optimization
 };
 
 }  // namespace palace
diff --git a/palace/models/postoperator.cpp b/palace/models/postoperator.cpp
index 9f8d1c871..487419a31 100644
--- a/palace/models/postoperator.cpp
+++ b/palace/models/postoperator.cpp
@@ -655,7 +655,6 @@ void PostOperator::WriteFields(int step, double time) const
   paraview.SetTime(time);
   paraview_bdr.SetCycle(step);
   paraview_bdr.SetTime(time);
-
   if (first_save)
   {
     mfem::ParMesh &mesh =
diff --git a/palace/models/postoperator.hpp b/palace/models/postoperator.hpp
index 8a10ed064..943a1dfa2 100644
--- a/palace/models/postoperator.hpp
+++ b/palace/models/postoperator.hpp
@@ -21,7 +21,7 @@ namespace palace
 {
 
 class CurlCurlOperator;
-struct ErrorIndicators;
+class ErrorIndicators;
 class IoData;
 class LaplaceOperator;
 class LumpedPortOperator;
diff --git a/palace/utils/errorindicators.cpp b/palace/utils/errorindicators.cpp
index 67ba34631..ca7eec892 100644
--- a/palace/utils/errorindicators.cpp
+++ b/palace/utils/errorindicators.cpp
@@ -11,8 +11,7 @@ namespace palace
 void ErrorReductionOperator::operator()(ErrorIndicators &ebar, const Vector &ind,
                                         double p) const
 {
-
-  // Compute the global indicator across all processors
+  // Compute the global indicator across all processors.
   auto comm = Mpi::World();
   double candidate_global_error_indicator =
       std::transform_reduce(ind.begin(), ind.end(), 0.0, std::plus(),
@@ -45,7 +44,7 @@ void ErrorReductionOperator::operator()(ErrorIndicators &ebar, const Vector &ind
   }
 
   // Another sample has been added, increment for the running average lambda.
-  ++n;
+  n++;
 }
 
 }  // namespace palace
diff --git a/palace/utils/errorindicators.hpp b/palace/utils/errorindicators.hpp
index ca8782172..b6d551f25 100644
--- a/palace/utils/errorindicators.hpp
+++ b/palace/utils/errorindicators.hpp
@@ -10,9 +10,10 @@ namespace palace
 {
 
 // Storage for error estimation results from the solve. Required in the AMR loop. An error
-// indicator is non-negative, whilst an error estimate is signed.
-struct ErrorIndicators
+// indicator is non-negative, while an error estimate is signed.
+class ErrorIndicators
 {
+public:
   ErrorIndicators(int ndof) : ndof(ndof) {}
   ErrorIndicators() = delete;
   ErrorIndicators(const ErrorIndicators &) = default;