Improve field energy integration for electrostatic and magnetostatic simulations #211

Merged (4 commits) on Mar 14, 2024
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -38,6 +38,8 @@ The format of this changelog is based on
- Changed the smooth flux space for the electrostatic error estimator to fix performance
on problems with material interfaces.
- Fixed a bug related to mesh cleaning for unspecified domains and mesh partitioning.
- Changed the computation of domain energy postprocessing for electrostatic and
  magnetostatic simulation types to improve performance.

## [0.12.0] - 2023-12-21

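For context on the changelog entry above: for the static simulation types, the domain energy can be integrated directly from the solved potential instead of a recovered field. Below is a minimal, serial MFEM-style sketch of that idea for the electrostatic case; it is not code from this PR, and the function name and use of a plain BilinearForm are illustrative assumptions. It evaluates 1/2 ∫ ε ∇V · ∇V dV with a diffusion form rather than first forming E = -∇V and integrating an ND mass matrix against it.

#include "mfem.hpp"

// Sketch (not from this PR): electrostatic field energy taken directly from the scalar
// potential V via the stiffness form K_eps, i.e. 1/2 Vᵀ K_eps V = 1/2 ∫ ε |∇V|² dV.
double ElectricEnergyFromPotential(mfem::FiniteElementSpace &h1_fespace,
                                   mfem::Coefficient &epsilon,
                                   const mfem::GridFunction &V)
{
  mfem::BilinearForm k(&h1_fespace);
  k.AddDomainIntegrator(new mfem::DiffusionIntegrator(epsilon));  // ∫ ε ∇u · ∇v dV
  k.Assemble();
  k.Finalize();
  mfem::Vector KV(V.Size());
  k.Mult(V, KV);          // KV = K_eps V
  return 0.5 * (KV * V);  // rank-local in parallel; a global reduction would follow
}

The same idea applies to the magnetostatic case with a curl-curl form on the vector potential A, which is the pattern visible in the domainpostoperator.cpp diff further down.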
5 changes: 3 additions & 2 deletions palace/drivers/eigensolver.cpp
@@ -261,7 +261,7 @@ EigenSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const
int num_conv = eigen->Solve();
{
std::complex<double> lambda = (num_conv > 0) ? eigen->GetEigenvalue(0) : 0.0;
Mpi::Print(" Found {:d} converged eigenvalue{}{}\n\n", num_conv,
Mpi::Print(" Found {:d} converged eigenvalue{}{}\n", num_conv,
(num_conv > 1) ? "s" : "",
(num_conv > 0)
? fmt::format(" (first = {:.3e}{:+.3e}i)", lambda.real(), lambda.imag())
@@ -271,7 +271,7 @@ EigenSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const
SaveMetadata(*ksp);

// Calculate and record the error indicators.
Mpi::Print("Computing solution error estimates\n\n");
Mpi::Print("\nComputing solution error estimates\n");
CurlFluxErrorEstimator<ComplexVector> estimator(
spaceop.GetMaterialOp(), spaceop.GetNDSpace(), iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0);
@@ -291,6 +291,7 @@ EigenSolver::Solve(const std::vector<std::unique_ptr<Mesh>> &mesh) const
}

// Postprocess the results.
Mpi::Print("\n");
for (int i = 0; i < num_conv; i++)
{
// Get the eigenvalue and relative error.
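For reference, the converged-eigenvalue summary printed above is plain conditional fmt formatting; a standalone sketch follows (hypothetical helper name, direct fmt rather than the Palace Mpi::Print wrapper):

#include <complex>
#include <string>
#include <fmt/format.h>

// Sketch (assumed helper, not from the PR): pluralize "eigenvalue" and append the first
// eigenvalue only when at least one mode converged.
std::string EigenvalueSummary(int num_conv, std::complex<double> lambda)
{
  return fmt::format(" Found {:d} converged eigenvalue{}{}\n", num_conv,
                     (num_conv > 1) ? "s" : "",
                     (num_conv > 0) ? fmt::format(" (first = {:.3e}{:+.3e}i)",
                                                  lambda.real(), lambda.imag())
                                    : "");
}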
14 changes: 4 additions & 10 deletions palace/drivers/electrostaticsolver.cpp
@@ -90,13 +90,9 @@ ErrorIndicator ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop,
int nstep = static_cast<int>(terminal_sources.size());
mfem::DenseMatrix C(nstep), Cm(nstep);
Vector E(Grad.Height()), Vij(Grad.Width());
if (iodata.solver.electrostatic.n_post > 0)
{
Mpi::Print("\n");
}

// Calculate and record the error indicators.
Mpi::Print("Computing solution error estimates\n\n");
Mpi::Print("\nComputing solution error estimates\n");
GradFluxErrorEstimator estimator(laplaceop.GetMaterialOp(), laplaceop.GetH1Space(),
iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0);
@@ -113,16 +109,16 @@ ErrorIndicator ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop,
// for all postprocessing operations.
E = 0.0;
Grad.AddMult(V[i], E, -1.0);
postop.SetEGridFunction(E);
postop.SetVGridFunction(V[i]);
postop.SetEGridFunction(E);
double Ue = postop.GetEFieldEnergy();
PostprocessDomains(postop, "i", i, idx, Ue, 0.0, 0.0, 0.0);
PostprocessSurfaces(postop, "i", i, idx, Ue, 0.0, 1.0, 0.0);
PostprocessProbes(postop, "i", i, idx);
if (i < iodata.solver.electrostatic.n_post)
{
PostprocessFields(postop, i, idx, (i == 0) ? &indicator : nullptr);
Mpi::Print("Wrote fields to disk for terminal {:d}\n", idx);
Mpi::Print("{}Wrote fields to disk for terminal {:d}\n", (i == 0) ? "\n" : "", idx);
}
if (i == 0)
{
@@ -149,9 +145,7 @@ ErrorIndicator ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop,
else if (j > i)
{
linalg::AXPBYPCZ(1.0, V[i], 1.0, V[j], 0.0, Vij);
E = 0.0;
Grad.AddMult(Vij, E, -1.0);
postop.SetEGridFunction(E);
postop.SetVGridFunction(Vij);
double Ue = postop.GetEFieldEnergy();
C(i, j) = Ue - 0.5 * (C(i, i) + C(j, j));
Cm(i, j) = -C(i, j);
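The off-diagonal capacitance terms above use the superposition identity C(i,j) = U(V_i + V_j) - (C(i,i) + C(j,j))/2, with C(i,i) = 2 U(V_i) for unit terminal voltages, where U is the electrostatic field energy. A minimal sketch of that bookkeeping, with a hypothetical Energy callback standing in for the PostOperator energy call (not the Palace API):

#include <functional>
#include <vector>
#include "mfem.hpp"

// Sketch (illustrative, not from this PR): Maxwell capacitance matrix assembled from
// pairwise field energies of superposed terminal solutions V[i].
mfem::DenseMatrix CapacitanceFromEnergies(const std::vector<mfem::Vector> &V,
                                          std::function<double(const mfem::Vector &)> Energy)
{
  const int n = static_cast<int>(V.size());
  mfem::DenseMatrix C(n);
  for (int i = 0; i < n; i++)
  {
    C(i, i) = 2.0 * Energy(V[i]);  // with unit terminal voltage, Energy = 1/2 C(i,i)
  }
  for (int i = 0; i < n; i++)
  {
    for (int j = i + 1; j < n; j++)
    {
      mfem::Vector Vij(V[i]);
      Vij += V[j];  // superpose the two terminal solutions
      C(i, j) = Energy(Vij) - 0.5 * (C(i, i) + C(j, j));
      C(j, i) = C(i, j);  // symmetry
    }
  }
  return C;
}

Only the diagonal pass still needs the recovered E field (for surface, probe, and field output), which is why the j > i branch in the diff above no longer calls Grad.AddMult.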
13 changes: 4 additions & 9 deletions palace/drivers/magnetostaticsolver.cpp
@@ -94,13 +94,9 @@ ErrorIndicator MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop,
mfem::DenseMatrix M(nstep), Mm(nstep);
Vector B(Curl.Height()), Aij(Curl.Width());
Vector Iinc(nstep);
if (iodata.solver.magnetostatic.n_post > 0)
{
Mpi::Print("\n");
}

// Calculate and record the error indicators.
Mpi::Print("Computing solution error estimates\n\n");
Mpi::Print("\nComputing solution error estimates\n");
CurlFluxErrorEstimator<Vector> estimator(
curlcurlop.GetMaterialOp(), curlcurlop.GetNDSpace(),
iodata.solver.linear.estimator_tol, iodata.solver.linear.estimator_max_it, 0);
@@ -121,16 +117,16 @@ ErrorIndicator MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop,
// Compute B = ∇ x A on the true dofs, and set the internal GridFunctions in
// PostOperator for all postprocessing operations.
Curl.Mult(A[i], B);
postop.SetBGridFunction(B);
postop.SetAGridFunction(A[i]);
postop.SetBGridFunction(B);
double Um = postop.GetHFieldEnergy();
PostprocessDomains(postop, "i", i, idx, 0.0, Um, 0.0, 0.0);
PostprocessSurfaces(postop, "i", i, idx, 0.0, Um, 0.0, Iinc(i));
PostprocessProbes(postop, "i", i, idx);
if (i < iodata.solver.magnetostatic.n_post)
{
PostprocessFields(postop, i, idx, (i == 0) ? &indicator : nullptr);
Mpi::Print("Wrote fields to disk for terminal {:d}\n", idx);
Mpi::Print("{}Wrote fields to disk for source {:d}\n", (i == 0) ? "\n" : "", idx);
}
if (i == 0)
{
@@ -157,8 +153,7 @@ ErrorIndicator MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop,
else if (j > i)
{
linalg::AXPBYPCZ(1.0, A[i], 1.0, A[j], 0.0, Aij);
Curl.Mult(Aij, B);
postop.SetBGridFunction(B);
postop.SetAGridFunction(Aij);
double Um = postop.GetHFieldEnergy();
M(i, j) = Um / (Iinc(i) * Iinc(j)) -
0.5 * (M(i, i) * Iinc(i) / Iinc(j) + M(j, j) * Iinc(j) / Iinc(i));
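The inductance extraction follows the same superposition pattern with the excitation currents I: M(i,i) = 2 U(A_i) / I_i² and M(i,j) = U(A_i + A_j)/(I_i I_j) - (M(i,i) I_i/I_j + M(j,j) I_j/I_i)/2, matching the branch above. A sketch with a hypothetical Energy callback (not the Palace API):

#include <functional>
#include <vector>
#include "mfem.hpp"

// Sketch (illustrative, not from this PR): inductance matrix from pairwise magnetic
// energies of superposed source solutions A[i] with excitation currents I(i).
mfem::DenseMatrix InductanceFromEnergies(const std::vector<mfem::Vector> &A,
                                         const mfem::Vector &I,
                                         std::function<double(const mfem::Vector &)> Energy)
{
  const int n = static_cast<int>(A.size());
  mfem::DenseMatrix M(n);
  for (int i = 0; i < n; i++)
  {
    M(i, i) = 2.0 * Energy(A[i]) / (I(i) * I(i));  // U_i = 1/2 M(i,i) I_i²
  }
  for (int i = 0; i < n; i++)
  {
    for (int j = i + 1; j < n; j++)
    {
      mfem::Vector Aij(A[i]);
      Aij += A[j];  // superpose the two source solutions
      M(i, j) = Energy(Aij) / (I(i) * I(j)) -
                0.5 * (M(i, i) * I(i) / I(j) + M(j, j) * I(j) / I(i));
      M(j, i) = M(i, j);
    }
  }
  return M;
}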
8 changes: 5 additions & 3 deletions palace/models/curlcurloperator.cpp
@@ -130,16 +130,18 @@ void PrintHeader(const mfem::ParFiniteElementSpace &h1_fespace,
: "Full");

const auto &mesh = *nd_fespace.GetParMesh();
const auto geom_types = mesh::CheckElements(mesh).GetGeomTypes();
Mpi::Print(" Mesh geometries:\n");
for (auto geom : mesh::CheckElements(mesh).GetGeomTypes())
for (auto geom : geom_types)
{
const auto *fe = nd_fespace.FEColl()->FiniteElementForGeometry(geom);
MFEM_VERIFY(fe, "MFEM does not support ND spaces on geometry = "
<< mfem::Geometry::Name[geom] << "!");
const int q_order = fem::DefaultIntegrationOrder::Get(mesh, geom);
Mpi::Print(" {}: P = {:d}, Q = {:d} (quadrature order = {:d})\n",
Mpi::Print(" {}: P = {:d}, Q = {:d} (quadrature order = {:d}){}\n",
mfem::Geometry::Name[geom], fe->GetDof(),
mfem::IntRules.Get(geom, q_order).GetNPoints(), q_order);
mfem::IntRules.Get(geom, q_order).GetNPoints(), q_order,
(geom == geom_types.back()) ? "" : ",");
}

Mpi::Print("\nAssembling multigrid hierarchy:\n");
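The PrintHeader change above just gathers the geometry list once so the final entry can omit its trailing comma; a standalone sketch of the pattern (plain fmt, illustrative names and arguments):

#include <cstddef>
#include <string>
#include <vector>
#include <fmt/core.h>

// Sketch (assumed, simplified): comma-separate all entries except the last, mirroring
// the (geom == geom_types.back()) ? "" : "," logic in the diff above.
void PrintGeometrySummary(const std::vector<std::string> &geom_names,
                          const std::vector<int> &num_dofs,
                          const std::vector<int> &num_qpts, int q_order)
{
  fmt::print(" Mesh geometries:\n");
  for (std::size_t i = 0; i < geom_names.size(); i++)
  {
    fmt::print("  {}: P = {:d}, Q = {:d} (quadrature order = {:d}){}\n", geom_names[i],
               num_dofs[i], num_qpts[i], q_order,
               (i + 1 == geom_names.size()) ? "" : ",");
  }
}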
124 changes: 94 additions & 30 deletions palace/models/domainpostoperator.cpp
@@ -16,74 +16,138 @@ namespace palace
{

DomainPostOperator::DomainPostOperator(const IoData &iodata, const MaterialOperator &mat_op,
const FiniteElementSpace *nd_fespace,
const FiniteElementSpace *rt_fespace)
const FiniteElementSpace &nd_fespace,
const FiniteElementSpace &rt_fespace)
{
// Mass operators are always partially assembled.
if (nd_fespace)
MFEM_VERIFY(nd_fespace.GetFEColl().GetMapType(nd_fespace.Dimension()) ==
mfem::FiniteElement::H_CURL &&
rt_fespace.GetFEColl().GetMapType(nd_fespace.Dimension()) ==
mfem::FiniteElement::H_DIV,
"Unexpected finite element space types for domain energy postprocessing!");
{
// Construct ND mass matrix to compute the electric field energy integral as:
// E_elec = 1/2 Re{∫_Ω Dᴴ E dV} as (M_eps * e)ᴴ e.
// Only the real part of the permittivity contributes to the energy (imaginary part
// cancels out in the inner product due to symmetry).
MaterialPropertyCoefficient epsilon_func(mat_op.GetAttributeToMaterial(),
mat_op.GetPermittivityReal());
BilinearForm m_nd(*nd_fespace);
m_nd.AddDomainIntegrator<VectorFEMassIntegrator>(epsilon_func);
M_ND = m_nd.PartialAssemble();
D.SetSize(M_ND->Height());
BilinearForm m(nd_fespace);
m.AddDomainIntegrator<VectorFEMassIntegrator>(epsilon_func);
M_elec = m.PartialAssemble();
D.SetSize(M_elec->Height());
D.UseDevice(true);
}

if (rt_fespace)
{
// Construct RT mass matrix to compute the magnetic field energy integral as:
// E_mag = 1/2 Re{∫_Ω Bᴴ H dV} as (M_muinv * b)ᴴ b.
MaterialPropertyCoefficient muinv_func(mat_op.GetAttributeToMaterial(),
mat_op.GetInvPermeability());
BilinearForm m_rt(*rt_fespace);
m_rt.AddDomainIntegrator<VectorFEMassIntegrator>(muinv_func);
M_RT = m_rt.PartialAssemble();
H.SetSize(M_RT->Height());
BilinearForm m(rt_fespace);
m.AddDomainIntegrator<VectorFEMassIntegrator>(muinv_func);
M_mag = m.PartialAssemble();
H.SetSize(M_mag->Height());
H.UseDevice(true);
}

// Use the provided domain postprocessing indices for postprocessing the electric and
// magnetic field energy in specific regions of the domain.
for (const auto &[idx, data] : iodata.domains.postpro.energy)
{
std::unique_ptr<Operator> M_ND_i, M_RT_i;
if (nd_fespace)
std::unique_ptr<Operator> M_elec_i, M_mag_i;
{
MaterialPropertyCoefficient epsilon_func(mat_op.GetAttributeToMaterial(),
mat_op.GetPermittivityReal());
epsilon_func.RestrictCoefficient(mat_op.GetCeedAttributes(data.attributes));
BilinearForm m_nd_i(*nd_fespace);
m_nd_i.AddDomainIntegrator<VectorFEMassIntegrator>(epsilon_func);
M_ND_i = m_nd_i.PartialAssemble();
BilinearForm m(nd_fespace);
m.AddDomainIntegrator<VectorFEMassIntegrator>(epsilon_func);
M_elec_i = m.PartialAssemble();
}
if (rt_fespace)
{
MaterialPropertyCoefficient muinv_func(mat_op.GetAttributeToMaterial(),
mat_op.GetInvPermeability());
muinv_func.RestrictCoefficient(mat_op.GetCeedAttributes(data.attributes));
BilinearForm m_rt_i(*rt_fespace);
m_rt_i.AddDomainIntegrator<VectorFEMassIntegrator>(muinv_func);
M_RT_i = m_rt_i.PartialAssemble();
BilinearForm m(rt_fespace);
m.AddDomainIntegrator<VectorFEMassIntegrator>(muinv_func);
M_mag_i = m.PartialAssemble();
}
M_i.emplace(idx, std::make_pair(std::move(M_elec_i), std::move(M_mag_i)));
}
}

DomainPostOperator::DomainPostOperator(const IoData &iodata, const MaterialOperator &mat_op,
const FiniteElementSpace &fespace)
{
const auto map_type = fespace.GetFEColl().GetMapType(fespace.Dimension());
if (map_type == mfem::FiniteElement::VALUE)
{
// H1 space for voltage and electric field energy.
{
MaterialPropertyCoefficient epsilon_func(mat_op.GetAttributeToMaterial(),
mat_op.GetPermittivityReal());
BilinearForm m(fespace);
m.AddDomainIntegrator<DiffusionIntegrator>(epsilon_func);
M_elec = m.PartialAssemble();
D.SetSize(M_elec->Height());
D.UseDevice(true);
}

for (const auto &[idx, data] : iodata.domains.postpro.energy)
{
std::unique_ptr<Operator> M_elec_i;
{
MaterialPropertyCoefficient epsilon_func(mat_op.GetAttributeToMaterial(),
mat_op.GetPermittivityReal());
epsilon_func.RestrictCoefficient(mat_op.GetCeedAttributes(data.attributes));
BilinearForm m(fespace);
m.AddDomainIntegrator<DiffusionIntegrator>(epsilon_func);
M_elec_i = m.PartialAssemble();
}
M_i.emplace(idx, std::make_pair(std::move(M_elec_i), nullptr));
}
M_i.emplace(idx, std::make_pair(std::move(M_ND_i), std::move(M_RT_i)));
}
else if (map_type == mfem::FiniteElement::H_CURL)
{
// H(curl) space for magnetic vector potential and magnetic field energy.
{
MaterialPropertyCoefficient muinv_func(mat_op.GetAttributeToMaterial(),
mat_op.GetInvPermeability());
BilinearForm m(fespace);
m.AddDomainIntegrator<CurlCurlIntegrator>(muinv_func);
M_mag = m.PartialAssemble();
H.SetSize(M_mag->Height());
H.UseDevice(true);
}

for (const auto &[idx, data] : iodata.domains.postpro.energy)
{
std::unique_ptr<Operator> M_mag_i;
{
MaterialPropertyCoefficient muinv_func(mat_op.GetAttributeToMaterial(),
mat_op.GetInvPermeability());
muinv_func.RestrictCoefficient(mat_op.GetCeedAttributes(data.attributes));
BilinearForm m(fespace);
m.AddDomainIntegrator<CurlCurlIntegrator>(muinv_func);
M_mag_i = m.PartialAssemble();
}
M_i.emplace(idx, std::make_pair(nullptr, std::move(M_mag_i)));
}
}
else
{
MFEM_ABORT("Unexpected finite element space type for domain energy postprocessing!");
}
}

double DomainPostOperator::GetElectricFieldEnergy(const GridFunction &E) const
{
if (M_ND)
if (M_elec)
{
M_ND->Mult(E.Real(), D);
M_elec->Mult(E.Real(), D);
double dot = linalg::LocalDot(E.Real(), D);
if (E.HasImag())
{
M_ND->Mult(E.Imag(), D);
M_elec->Mult(E.Imag(), D);
dot += linalg::LocalDot(E.Imag(), D);
}
Mpi::GlobalSum(1, &dot, E.GetComm());
@@ -96,13 +160,13 @@ double DomainPostOperator::GetElectricFieldEnergy(const GridFunction &E) const

double DomainPostOperator::GetMagneticFieldEnergy(const GridFunction &B) const
{
if (M_RT)
if (M_mag)
{
M_RT->Mult(B.Real(), H);
M_mag->Mult(B.Real(), H);
double dot = linalg::LocalDot(B.Real(), H);
if (B.HasImag())
{
M_RT->Mult(B.Imag(), H);
M_mag->Mult(B.Imag(), H);
dot += linalg::LocalDot(B.Imag(), H);
}
Mpi::GlobalSum(1, &dot, B.GetComm());
@@ -118,7 +182,7 @@ double DomainPostOperator::GetDomainElectricFieldEnergy(int idx,
{
// Compute the electric field energy integral for only a portion of the domain.
auto it = M_i.find(idx);
MFEM_VERIFY(it != M_i.end() && it->second.first,
MFEM_VERIFY(it != M_i.end(),
"Invalid domain index when postprocessing domain electric field energy!");
if (!it->second.first)
{
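All of the energy accessors above reduce to the quadratic form 1/2 xᴴ M x with a partially assembled operator: a local mat-vec, a rank-local dot product, then a reduction over the communicator. A sketch for a real-valued field with a generic mfem::Operator and raw MPI (the Palace linalg::LocalDot and Mpi::GlobalSum wrappers are not shown):

#include <mpi.h>
#include "mfem.hpp"

// Sketch (assumed, mirrors GetElectricFieldEnergy above for a real field): work = M x,
// local dot product, global sum, and the leading 1/2 from the energy definition.
double FieldEnergy(const mfem::Operator &M, const mfem::Vector &x, mfem::Vector &work,
                   MPI_Comm comm)
{
  M.Mult(x, work);        // partially assembled operator, no global sparse matrix needed
  double dot = x * work;  // rank-local dot product
  MPI_Allreduce(MPI_IN_PLACE, &dot, 1, MPI_DOUBLE, MPI_SUM, comm);
  return 0.5 * dot;
}

For complex-valued fields the same operator is applied to the real and imaginary parts separately and the two dot products are summed, as in the diff above.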
13 changes: 9 additions & 4 deletions palace/models/domainpostoperator.hpp
@@ -19,22 +19,27 @@ class IoData;
class MaterialOperator;

//
// A class handling domain postprocessing.
// Class to handle domain energy postprocessing. We use a leading factor of 1/2 instead of
// 1/4 even though the eigenmodes are peak phasors and not RMS normalized because the same
// peak phasors are used to compute the voltages/currents which are 2x the time-averaged
// values. This correctly yields an EPR of 1 in cases where expected.
//
class DomainPostOperator
{
private:
// Bilinear forms for computing field energy integrals over domains.
std::unique_ptr<Operator> M_ND, M_RT;
std::unique_ptr<Operator> M_elec, M_mag;
std::map<int, std::pair<std::unique_ptr<Operator>, std::unique_ptr<Operator>>> M_i;

// Temporary vectors for inner product calculations.
mutable Vector D, H;

public:
DomainPostOperator(const IoData &iodata, const MaterialOperator &mat_op,
const FiniteElementSpace *nd_fespace,
const FiniteElementSpace *rt_fespace);
const FiniteElementSpace &nd_fespace,
const FiniteElementSpace &rt_fespace);
DomainPostOperator(const IoData &iodata, const MaterialOperator &mat_op,
const FiniteElementSpace &fespace);

// Access data structures for postprocessing domains.
const auto &GetDomains() const { return M_i; }
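The 1/2-versus-1/4 remark in the class comment above can be made concrete with a short worked equation (standard time-harmonic energy relations, not text from the PR): for peak phasors the time-averaged stored energy carries a factor 1/4, while the code's 1/2 convention and the lumped 1/2 C|V|² built from the same peak voltage are each twice their time-averaged values, so energy ratios such as the EPR are unaffected.

\overline{W}_e = \frac{1}{4}\int_\Omega \operatorname{Re}\{\mathbf{D}^{\mathsf{H}}\mathbf{E}\}\,dV,
\qquad
W_e = \frac{1}{2}\int_\Omega \operatorname{Re}\{\mathbf{D}^{\mathsf{H}}\mathbf{E}\}\,dV = 2\,\overline{W}_e,
\qquad
\frac{W_e}{\tfrac{1}{2}\,C\,|V|^2} = \frac{2\,\overline{W}_e}{2\cdot\tfrac{1}{4}\,C\,|V|^2} = \frac{\overline{W}_e}{\tfrac{1}{4}\,C\,|V|^2}.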
12 changes: 7 additions & 5 deletions palace/models/laplaceoperator.cpp
@@ -149,16 +149,18 @@ void PrintHeader(const mfem::ParFiniteElementSpace &h1_fespace,
: "Full");

const auto &mesh = *h1_fespace.GetParMesh();
const auto geom_types = mesh::CheckElements(mesh).GetGeomTypes();
Mpi::Print(" Mesh geometries:\n");
for (auto geom : mesh::CheckElements(mesh).GetGeomTypes())
for (auto geom : geom_types)
{
const auto *fe = h1_fespace.FEColl()->FiniteElementForGeometry(geom);
MFEM_VERIFY(fe, "MFEM does not support H1 spaces on geometry = "
const auto *fe = nd_fespace.FEColl()->FiniteElementForGeometry(geom);
MFEM_VERIFY(fe, "MFEM does not support ND spaces on geometry = "
<< mfem::Geometry::Name[geom] << "!");
const int q_order = fem::DefaultIntegrationOrder::Get(mesh, geom);
Mpi::Print(" {}: P = {:d}, Q = {:d} (quadrature order = {:d})\n",
Mpi::Print(" {}: P = {:d}, Q = {:d} (quadrature order = {:d}){}\n",
mfem::Geometry::Name[geom], fe->GetDof(),
mfem::IntRules.Get(geom, q_order).GetNPoints(), q_order);
mfem::IntRules.Get(geom, q_order).GetNPoints(), q_order,
(geom == geom_types.back()) ? "" : ",");
}

Mpi::Print("\nAssembling multigrid hierarchy:\n");