From 6d146aa831950fa4fb1efefa87632e745fcd20ee Mon Sep 17 00:00:00 2001 From: PhilipDeegan Date: Fri, 1 Oct 2021 22:29:56 +0200 Subject: [PATCH] SAMRAI: Algorithm/schedule per quantity component (#589) --- pyphare/pyphare/core/gridlayout.py | 18 +- pyphare/pyphare/core/phare_utilities.py | 16 + pyphare/pyphare/pharesee/geometry.py | 12 +- pyphare/pyphare/pharesee/hierarchy.py | 21 +- .../test_pharesee/test_geometry_2d.py | 9 +- readme.md | 2 +- .../data/field/field_variable_fill_pattern.h | 8 +- .../field_linear_time_interpolate.h | 56 +--- src/amr/messengers/communicators.h | 274 ++++++++---------- .../hybrid_hybrid_messenger_strategy.h | 12 +- src/amr/messengers/quantity_communicator.h | 178 ++++-------- src/amr/multiphysics_integrator.h | 2 +- src/simulator/simulator.h | 8 +- tests/functional/alfven_wave/alfven_wave1d.py | 8 +- tests/functional/harris/CMakeLists.txt | 5 +- tests/functional/tdtagged/td1dtagged.py | 12 +- tests/simulator/CMakeLists.txt | 2 + tests/simulator/__init__.py | 35 +++ .../advance/test_fields_advance_2d.py | 4 +- tests/simulator/refinement/CMakeLists.txt | 16 + tests/simulator/refinement/test_2d_10_core.py | 187 ++++++++++++ tests/simulator/refinement/test_2d_2_core.py | 184 ++++++++++++ tests/simulator/test_advance.py | 122 +++++--- 23 files changed, 791 insertions(+), 400 deletions(-) create mode 100644 tests/simulator/refinement/CMakeLists.txt create mode 100644 tests/simulator/refinement/test_2d_10_core.py create mode 100644 tests/simulator/refinement/test_2d_2_core.py diff --git a/pyphare/pyphare/core/gridlayout.py b/pyphare/pyphare/core/gridlayout.py index e6432a59c..857fdbab5 100644 --- a/pyphare/pyphare/core/gridlayout.py +++ b/pyphare/pyphare/core/gridlayout.py @@ -3,7 +3,7 @@ import math import numpy as np -from .phare_utilities import listify +from .phare_utilities import is_scalar, listify from .box import Box directions = ["x", "y", "z"] @@ -204,7 +204,23 @@ def allocSizeDerived(self, interpOrder, centering, nbrCells): return size + def AMRPointToLocal(self, point): + return point - self.box.lower + self.physicalStartIndex(self.interp_order, "dual") + + def AMRBoxToLocal(self, box): + local = box.copy() + local.lower = self.AMRPointToLocal(local.lower) + local.upper = self.AMRPointToLocal(local.upper) + return local + + def AMRToLocal(self, toLocal): + assert not is_scalar(toLocal) + if isinstance(toLocal, Box): + return self.AMRBoxToLocal(toLocal) + return self.AMRPointToLocal(toLocal) + def AMRIndexToLocal(self, dim, index): + assert is_scalar(index) dualStart = self.physicalStartIndex(self.interp_order, "dual") return index - self.box.lower[dim] + dualStart diff --git a/pyphare/pyphare/core/phare_utilities.py b/pyphare/pyphare/core/phare_utilities.py index ac00300cf..78961c364 100644 --- a/pyphare/pyphare/core/phare_utilities.py +++ b/pyphare/pyphare/core/phare_utilities.py @@ -143,3 +143,19 @@ def print_trace(): import sys, traceback _, _, tb = sys.exc_info() traceback.print_tb(tb) + + +def deep_copy(item, memo, excludes=[]): + from copy import deepcopy + + clazz = item.__class__ + that = clazz.__new__(clazz) + memo[id(item)] = that + for key, value in item.__dict__.items(): + # some objects are not picklable so cannot be deepcopied eg. 
h5py datasets + if key in excludes: + setattr(that, key, value) + else: + setattr(that, key, deepcopy(value, memo)) + return that + diff --git a/pyphare/pyphare/pharesee/geometry.py b/pyphare/pyphare/pharesee/geometry.py index 0397949f0..d259fa8a6 100644 --- a/pyphare/pyphare/pharesee/geometry.py +++ b/pyphare/pyphare/pharesee/geometry.py @@ -412,18 +412,18 @@ def level_ghost_boxes(hierarchy, quantities, levelNbrs=[], time=None): for gabox in ghostAreaBoxes: - remaining = boxm.remove(gabox, check_patches[0].box) + remaining = gabox - check_patches[0].box for patch in check_patches[1:]: - tmp = remaining + tmp = [] remove = [] for i, rem in enumerate(remaining): if rem * patch.box is not None: - tmp += boxm.remove(rem, patch.box) remove.append(i) - for rm in remove: - del tmp[rm] - remaining = tmp + tmp += rem - patch.box + for rm in reversed(remove): + del remaining[rm] + remaining += tmp if ilvl not in lvl_gaboxes: lvl_gaboxes[ilvl] = {} diff --git a/pyphare/pyphare/pharesee/hierarchy.py b/pyphare/pyphare/pharesee/hierarchy.py index 481197662..cfaf1dcb4 100644 --- a/pyphare/pyphare/pharesee/hierarchy.py +++ b/pyphare/pyphare/pharesee/hierarchy.py @@ -9,7 +9,7 @@ from ..core.box import Box from ..core.gridlayout import GridLayout import matplotlib.pyplot as plt -from ..core.phare_utilities import refinement_ratio +from ..core.phare_utilities import refinement_ratio, deep_copy @@ -31,6 +31,10 @@ def __init__(self, layout, quantity): self.layout = layout + def __deepcopy__(self, memo): + no_copy_keys = ["dataset"] # do not copy these things + return deep_copy(self, memo, no_copy_keys) + class FieldData(PatchData): @@ -80,8 +84,9 @@ def select(self, box): overlap = box * gbox if overlap is not None: - lower = self.layout.AMRIndexToLocal(dim=box.ndim - 1, index=overlap.lower) - upper = self.layout.AMRIndexToLocal(dim=box.ndim - 1, index=overlap.upper) + lower = self.layout.AMRToLocal(overlap.lower) + upper = self.layout.AMRToLocal(overlap.upper) + if box.ndim == 1: return self.dataset[lower[0] : upper[0] + 1] if box.ndim == 2: @@ -198,6 +203,16 @@ def __repr__(self): return self.__str__() + def copy(self): + """does not copy patchdatas.datasets (see class PatchData)""" + from copy import deepcopy + return deepcopy(self) + + def __copy__(self): + return self.copy() + + + class PatchLevel: """is a collection of patches """ diff --git a/pyphare/pyphare_tests/test_pharesee/test_geometry_2d.py b/pyphare/pyphare_tests/test_pharesee/test_geometry_2d.py index fbafa8e0b..6325322df 100644 --- a/pyphare/pyphare_tests/test_pharesee/test_geometry_2d.py +++ b/pyphare/pyphare_tests/test_pharesee/test_geometry_2d.py @@ -366,14 +366,10 @@ class ParticleLevelGhostGeometryTest(AGeometryTest): Box([10, -1], [10, 3]), Box([10, 14], [10, 23]), Box([10, 34], [10, 40]), - Box([0, -1], [9, -1]), - Box([0, 40], [9, 40]), Box([19, -1], [19, 3]), Box([19, 14], [19, 23]), Box([19, 34], [19, 40]), Box([30, -1], [30, 40]), - Box([20, -1], [29, -1]), - Box([20, 40], [29, 40]), Box([10, 3], [19, 3]), Box([10, 14], [19, 14]), Box([10, 23], [19, 23]), @@ -400,8 +396,6 @@ def test_level_ghost_boxes(self, refinement_boxes, expected): [actual["boxes"] for actual in lvl_gaboxes[ilvl][key]], [] ) - self.assertEqual(expected[ilvl], ghost_area_box_list) - fig = hierarchy.plot_2d_patches( ilvl, collections=[ @@ -419,6 +413,9 @@ def test_level_ghost_boxes(self, refinement_boxes, expected): ), ) fig.savefig(f"{type(self).__name__}_lvl_{ilvl}_{self._testMethodName}.png") + self.assertEqual(expected[ilvl], ghost_area_box_list) + + @data( ( 
diff --git a/readme.md b/readme.md index e3f587944..5a69c1a38 100644 --- a/readme.md +++ b/readme.md @@ -66,7 +66,7 @@ PhD, PostDoc, [contact us](mailto:phare@lpp.polytechnique.fr). ### Active core team - [Nicolas Aunai](https://github.com/nicolasaunai) -- [Philip Deegan](https://github.com/Dekken) +- [Philip Deegan](https://github.com/PhilipDeegan) - [Roch Smets](https://github.com/rochsmets) - [Andrea Ciardi](https://sites.google.com/site/andreaciardihomepage/home) diff --git a/src/amr/data/field/field_variable_fill_pattern.h b/src/amr/data/field/field_variable_fill_pattern.h index 3eadb92cf..4e1fc1ef1 100644 --- a/src/amr/data/field/field_variable_fill_pattern.h +++ b/src/amr/data/field/field_variable_fill_pattern.h @@ -29,22 +29,20 @@ namespace PHARE::amr // This class is mostly a copy of BoxGeometryVariableFillPattern class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern { -protected: +public: FieldFillPattern(std::optional overwrite_interior) : opt_overwrite_interior_{overwrite_interior} { } -public: - template static auto make_shared(std::shared_ptr const& samrai_op) { auto const& op = dynamic_cast(*samrai_op); if (op.node_only) - return std::make_shared(std::nullopt); + return std::make_shared(std::nullopt); - return std::make_shared(false); + return std::make_shared(false); } diff --git a/src/amr/data/field/time_interpolate/field_linear_time_interpolate.h b/src/amr/data/field/time_interpolate/field_linear_time_interpolate.h index ba5fe00fc..93f4d2c6d 100644 --- a/src/amr/data/field/time_interpolate/field_linear_time_interpolate.h +++ b/src/amr/data/field/time_interpolate/field_linear_time_interpolate.h @@ -21,6 +21,12 @@ using core::dirZ; template class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator { + static std::size_t constexpr dim = GridLayoutT::dimension; + static_assert(dim > 0 && dim <= 3); + + using PhysicalQuantity = decltype(std::declval().physicalQuantity()); + using FieldDataT = FieldData; + public: using GridLayoutImpl = typename GridLayoutT::implT; @@ -30,51 +36,34 @@ class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator } - - virtual ~FieldLinearTimeInterpolate() = default; - - void timeInterpolate(SAMRAI::hier::PatchData& destData, SAMRAI::hier::Box const& where, SAMRAI::hier::BoxOverlap const& /*overlap*/, SAMRAI::hier::PatchData const& srcDataOld, SAMRAI::hier::PatchData const& srcDataNew) const override { - // - auto& fieldDataDest = dynamic_cast(destData); auto const& fieldDataSrcOld = dynamic_cast(srcDataOld); auto const& fieldDataSrcNew = dynamic_cast(srcDataNew); double const interpTime = fieldDataDest.getTime(); - - double const oldTime = fieldDataSrcOld.getTime(); - double const newTime = fieldDataSrcNew.getTime(); - - - double const alpha = (interpTime - oldTime) / (newTime - oldTime); - - auto const& layout = fieldDataDest.gridLayout; - - - - auto& fieldDest = fieldDataDest.field; + double const oldTime = fieldDataSrcOld.getTime(); + double const newTime = fieldDataSrcNew.getTime(); + double const alpha = (interpTime - oldTime) / (newTime - oldTime); auto const& fieldSrcOld = fieldDataSrcOld.field; auto const& fieldSrcNew = fieldDataSrcNew.field; + auto& fieldDest = fieldDataDest.field; - - - auto qty = fieldDest.physicalQuantity(); - - + auto const& layout = fieldDataDest.gridLayout; auto const whereLayout = FieldGeometry::layoutFromBox(where, layout); bool const withGhost{true}; + auto qty = fieldDest.physicalQuantity(); auto const interpolateBox = FieldGeometry::toFieldBox( where, qty, 
whereLayout, !withGhost); @@ -86,12 +75,8 @@ class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator auto srcGhostBox = FieldGeometry::toFieldBox( fieldDataSrcNew.getBox(), qty, fieldDataSrcNew.gridLayout, withGhost); - auto const localDestBox - = AMRToLocal(static_cast>(finalBox), ghostBox); - - auto const localSrcBox - = AMRToLocal(static_cast>(finalBox), srcGhostBox); - + auto const localDestBox = AMRToLocal(finalBox, ghostBox); + auto const localSrcBox = AMRToLocal(finalBox, srcGhostBox); if constexpr (dim == 1) { @@ -149,20 +134,7 @@ class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator } } } - else - { - static_assert(dim > 0 && dim <= 3); - } } - - - - -private: - static std::size_t constexpr dim = GridLayoutImpl::dimension; - - using PhysicalQuantity = decltype(std::declval().physicalQuantity()); - using FieldDataT = FieldData; }; } // namespace PHARE::amr diff --git a/src/amr/messengers/communicators.h b/src/amr/messengers/communicators.h index 16d9c111e..b55d08783 100644 --- a/src/amr/messengers/communicators.h +++ b/src/amr/messengers/communicators.h @@ -20,7 +20,8 @@ namespace amr InitField, InitInteriorPart, LevelBorderParticles, - InteriorGhostParticles + InteriorGhostParticles, + SharedBorder }; @@ -94,79 +95,95 @@ namespace amr { for (auto& [_, refiner] : refiners_) { - auto& algo = refiner.algo; auto levelNumber = level->getLevelNumber(); - - // for GhostField we need schedules that take on the level where there is an overlap - // (there is always for patches lying inside the level) - // and goes to coarser level where there is not (patch lying on the level border) - // Create a communication schedule that communicates data within a single level and - // interpolates data from coarser hierarchy levels where needed. - - // Data will be communicated from the interiors of the source data on the given - // level to the interiors and ghosts of destination data on the same level where - // those sources and destinations overlap. Where they do not overlap, - // data will be interpolated from source data on coarser levels in the patch - // hierarchy. Data is time interpolated between old and new sources on coarser - // levels when and where time interpolation is needed and copied from the source - // components on the patch level into the destination components otherwise. Note - // that the next coarser level number must correspond to a level in the hierarchy - // that represents a region of coarser index space than the destination level. Note - // that the schedule remains valid as long as the levels involved in its creation do - // not change; thus, it can be used for multiple data communication cycles. - if constexpr (Type == RefinerType::GhostField) - { - auto schedule = algo->createSchedule( - level, level->getNextCoarserHierarchyLevelNumber(), hierarchy); - refiner.add(schedule, levelNumber); - } - - // this createSchedule overload is used to initialize fields. - // note that here we must take that createsSchedule() overload and put nullptr as - // src since we want to take from coarser level everywhere. using the createSchedule - // overload that takes level, next_coarser_level only would result in interior ghost - // nodes to be filled with interior of neighbor patches but there is nothing there. 
- else if constexpr (Type == RefinerType::InitField) - { - refiner.add(algo->createSchedule(level, nullptr, levelNumber - 1, hierarchy), - levelNumber); - } - - - // here we create the schedule that will intialize the particles that lie within the - // interior of the patches (no ghost, no coarse to fine). We take almost the same - // overload as for fields above but the version that takes a PatchLevelFillPattern. - // Here the PatchLevelInteriorFillPattern is used because we want to fill particles - // only within the interior of the patches of the level. The reason is that filling - // the their ghost regions with refined particles would not ensure the ghosts to be - // clones of neighbor patches particles if the splitting from coarser levels is not - // deterministic. - else if constexpr (Type == RefinerType::InitInteriorPart) - { - refiner.add(algo->createSchedule( - std::make_shared(), - level, nullptr, levelNumber - 1, hierarchy), - levelNumber); - } - - // here we create a schedule that will refine particles from coarser level and put - // them into the level coarse to fine boundary. These are the levelGhostParticlesOld - // particles. we thus take the same createSchedule overload as above but pass it a - // PatchLevelBorderFillPattern. - else if constexpr (Type == RefinerType::LevelBorderParticles) - { - refiner.add(algo->createSchedule( - std::make_shared(), - level, nullptr, levelNumber - 1, hierarchy), - levelNumber); - } - - // this branch is used to create a schedule that will transfer particles into the - // patches' ghost zones. - else if constexpr (Type == RefinerType::InteriorGhostParticles) + for (auto& algo : refiner.algos) { - refiner.add(algo->createSchedule(level), levelNumber); + // for GhostField we need schedules that take on the level where there is an + // overlap (there is always for patches lying inside the level) and goes to + // coarser level where there is not (patch lying on the level border) Create a + // communication schedule that communicates data within a single level and + // interpolates data from coarser hierarchy levels where needed. + + // Data will be communicated from the interiors of the source data on the given + // level to the interiors and ghosts of destination data on the same level where + // those sources and destinations overlap. Where they do not overlap, + // data will be interpolated from source data on coarser levels in the patch + // hierarchy. Data is time interpolated between old and new sources on coarser + // levels when and where time interpolation is needed and copied from the source + // components on the patch level into the destination components otherwise. Note + // that the next coarser level number must correspond to a level in the + // hierarchy that represents a region of coarser index space than the + // destination level. Note that the schedule remains valid as long as the levels + // involved in its creation do not change; thus, it can be used for multiple + // data communication cycles. + if constexpr (Type == RefinerType::GhostField) + { + refiner.add( + algo, + algo->createSchedule(level, level->getNextCoarserHierarchyLevelNumber(), + hierarchy), + levelNumber); + } + + // this createSchedule overload is used to initialize fields. + // note that here we must take that createsSchedule() overload and put nullptr + // as src since we want to take from coarser level everywhere. 
using the + // createSchedule overload that takes level, next_coarser_level only would + // result in interior ghost nodes to be filled with interior of neighbor patches + // but there is nothing there. + else if constexpr (Type == RefinerType::InitField) + { + refiner.add( + algo, algo->createSchedule(level, nullptr, levelNumber - 1, hierarchy), + levelNumber); + } + + + // here we create the schedule that will intialize the particles that lie within + // the interior of the patches (no ghost, no coarse to fine). We take almost the + // same overload as for fields above but the version that takes a + // PatchLevelFillPattern. Here the PatchLevelInteriorFillPattern is used because + // we want to fill particles only within the interior of the patches of the + // level. The reason is that filling the their ghost regions with refined + // particles would not ensure the ghosts to be clones of neighbor patches + // particles if the splitting from coarser levels is not deterministic. + else if constexpr (Type == RefinerType::InitInteriorPart) + { + refiner.add( + algo, + algo->createSchedule( + std::make_shared(), + level, nullptr, levelNumber - 1, hierarchy), + levelNumber); + } + + // here we create a schedule that will refine particles from coarser level and + // put them into the level coarse to fine boundary. These are the + // levelGhostParticlesOld particles. we thus take the same createSchedule + // overload as above but pass it a PatchLevelBorderFillPattern. + else if constexpr (Type == RefinerType::LevelBorderParticles) + { + refiner.add( + algo, + algo->createSchedule( + std::make_shared(), + level, nullptr, levelNumber - 1, hierarchy), + levelNumber); + } + + // this branch is used to create a schedule that will transfer particles into + // the patches' ghost zones. 
+ else if constexpr (Type == RefinerType::InteriorGhostParticles) + { + refiner.add(algo, algo->createSchedule(level), levelNumber); + } + + // schedule to synchronize shared border values, and not include refinement + else if constexpr (Type == RefinerType::SharedBorder) + { + refiner.add(algo, algo->createSchedule(level), levelNumber); + } } } } @@ -188,20 +205,11 @@ namespace amr { for (auto& [key, communicator] : refiners_) { - if (communicator.algo == nullptr) - { - throw std::runtime_error("Algorithm is nullptr"); - } + if (communicator.algos.size() == 0) + throw std::runtime_error("Algorithms are not configured"); - auto schedule = communicator.findSchedule(levelNumber); - if (schedule) - { - (*schedule)->fillData(initDataTime); - } - else - { - throw std::runtime_error("Error - schedule cannot be found for this level"); - } + for (auto const& algo : communicator.algos) + communicator.findSchedule(algo, levelNumber)->fillData(initDataTime); } } @@ -218,25 +226,28 @@ namespace amr { for (auto& [key, refiner] : refiners_) { - auto& algo = refiner.algo; - - auto const& level = hierarchy->getPatchLevel(levelNumber); - - if constexpr (Type == RefinerType::LevelBorderParticles) - { - std::cout << "regriding adding levelghostparticles on level " << levelNumber - << " " << key << "\n"; - auto schedule = algo->createSchedule( - std::make_shared(), level, - oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy); - schedule->fillData(initDataTime); - } - else + for (auto& algo : refiner.algos) { - std::cout << "regriding adding " << key << " on level " << levelNumber << " \n"; - auto schedule = algo->createSchedule( - level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy); - schedule->fillData(initDataTime); + auto const& level = hierarchy->getPatchLevel(levelNumber); + + if constexpr (Type == RefinerType::LevelBorderParticles) + { + std::cout << "regriding adding levelghostparticles on level " << levelNumber + << " " << key << "\n"; + auto schedule = algo->createSchedule( + std::make_shared(), level, + oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy); + schedule->fillData(initDataTime); + } + else + { + std::cout << "regriding adding " << key << " on level " << levelNumber + << " \n"; + auto schedule = algo->createSchedule( + level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), + hierarchy); + schedule->fillData(initDataTime); + } } } } @@ -254,35 +265,18 @@ namespace amr template void fill(VecFieldT& vec, int const levelNumber, double const fillTime) { - auto schedule = findSchedule_(vec.name(), levelNumber); - if (schedule) - { - (*schedule)->fillData(fillTime); - } - else - { - throw std::runtime_error("no schedule for " + vec.name()); - } - } - + if (refiners_.count(vec.name()) == 0) + throw std::runtime_error("no refiner for " + vec.name()); + auto& refiner = refiners_[vec.name()]; - private: - std::optional> - findSchedule_(std::string const& name, int levelNumber) - { - if (auto mapIter = refiners_.find(name); mapIter != std::end(refiners_)) - { - return mapIter->second.findSchedule(levelNumber); - } - else - { - return std::nullopt; - } + for (auto const& algo : refiner.algos) + refiner.findSchedule(algo, levelNumber)->fillData(fillTime); } + private: std::map> refiners_; }; @@ -321,15 +315,12 @@ namespace amr void registerLevel(std::shared_ptr const& hierarchy, std::shared_ptr const& level) { - for (auto& [_, synchronizer] : synchronizers_) - { - auto& algo = synchronizer.algo; - auto levelNumber = level->getLevelNumber(); - auto 
const& coarseLevel = hierarchy->getPatchLevel(levelNumber - 1); + auto levelNumber = level->getLevelNumber(); + auto const& coarseLevel = hierarchy->getPatchLevel(levelNumber - 1); - auto schedule = algo->createSchedule(coarseLevel, level); - synchronizer.add(std::move(schedule), levelNumber); - } + for (auto& [_, synchronizer] : synchronizers_) + for (auto& algo : synchronizer.algos) + synchronizer.add(algo, algo->createSchedule(coarseLevel, level), levelNumber); } @@ -338,20 +329,11 @@ namespace amr { for (auto& [key, synchronizer] : synchronizers_) { - if (synchronizer.algo == nullptr) - { - throw std::runtime_error("Algorithm is nullptr"); - } + if (synchronizer.algos.size() == 0) + throw std::runtime_error("Algorithms are not configured"); - auto schedule = synchronizer.findSchedule(levelNumber); - if (schedule) - { - (*schedule)->coarsenData(); - } - else - { - throw std::runtime_error("Error - schedule cannot be found for this level"); - } + for (auto const& algo : synchronizer.algos) + synchronizer.findSchedule(algo, levelNumber)->coarsenData(); } } diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.h b/src/amr/messengers/hybrid_hybrid_messenger_strategy.h index 4ca7e075c..cc992bc2c 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.h +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.h @@ -644,10 +644,10 @@ namespace amr * needed, at t_coarse. These are typically internal variables of the messenger, like * Eold or Bold. */ + template void fillRefiners_(std::vector const& ghostVecs, VecFieldDescriptor const& modelVec, - VecFieldDescriptor const& oldModelVec, - RefinerPool& refiners, + VecFieldDescriptor const& oldModelVec, RefinerPool& refiners, std::shared_ptr& refineOp) { for (auto const& ghostVec : ghostVecs) @@ -658,10 +658,10 @@ namespace amr } + template void fillRefiners_(std::vector const& ghostVecs, VecFieldDescriptor const& modelVec, - VecFieldDescriptor const& oldModelVec, - RefinerPool& refiners) + VecFieldDescriptor const& oldModelVec, RefinerPool& refiners) { fillRefiners_(ghostVecs, modelVec, oldModelVec, refiners, fieldRefineOp_); } @@ -734,14 +734,14 @@ namespace amr //! store communicators for magnetic fields that need ghosts to be filled - RefinerPool magneticSharedNodes_; + RefinerPool magneticSharedNodes_; RefinerPool magneticGhosts_; //! store communicators for magnetic fields that need to be initialized RefinerPool magneticInit_; //! store refiners for electric fields that need ghosts to be filled - RefinerPool electricSharedNodes_; + RefinerPool electricSharedNodes_; RefinerPool electricGhosts_; //! store communicators for electric fields that need to be initializes diff --git a/src/amr/messengers/quantity_communicator.h b/src/amr/messengers/quantity_communicator.h index 175ac1952..6ff4b56ef 100644 --- a/src/amr/messengers/quantity_communicator.h +++ b/src/amr/messengers/quantity_communicator.h @@ -30,53 +30,6 @@ namespace PHARE { namespace amr { - /* When determining which DataFactory to use to create the correct geometry - * Samrai compares existing items within the RefinerPool - * See: SAMRAI::xfer::RefineClasses::getEquivalenceClassIndex" - * If we do not specify a separate VariableFillPattern for each item sent to - * Algo->registerRefine : we run the risk of all items only using the first registered - * DataFactory, thus all items will receive the Geometry of that item. 
- * We "hack" this as there is a typeid()== check on the variablefill pattern types - */ - class XVariableFillPattern : public SAMRAI::xfer::BoxGeometryVariableFillPattern - { - }; - - class YVariableFillPattern : public SAMRAI::xfer::BoxGeometryVariableFillPattern - { - }; - - class ZVariableFillPattern : public SAMRAI::xfer::BoxGeometryVariableFillPattern - { - }; - - class XFieldFillPattern : public FieldFillPattern - { - public: - XFieldFillPattern(std::optional overwrite_interior) - : FieldFillPattern{overwrite_interior} - { - } - }; - - class YFieldFillPattern : public FieldFillPattern - { - public: - YFieldFillPattern(std::optional overwrite_interior) - : FieldFillPattern{overwrite_interior} - { - } - }; - - class ZFieldFillPattern : public FieldFillPattern - { - public: - ZFieldFillPattern(std::optional overwrite_interior) - : FieldFillPattern{overwrite_interior} - { - } - }; - struct Refiner { using schedule_type = SAMRAI::xfer::RefineSchedule; @@ -90,10 +43,10 @@ namespace amr }; template - using is_refiner = std::enable_if_t>; + static constexpr auto is_refiner = std::is_same_v; template - using is_synchronizer = std::enable_if_t>; + static constexpr auto is_synchronizer = std::is_same_v; @@ -104,46 +57,36 @@ namespace amr private: using Schedule = typename ComType::schedule_type; using Algorithm = typename ComType::algorithm_type; - std::map> schedules_; + std::map>> schedules_; public: - std::unique_ptr algo; - + std::vector> algos; - - template> - Communicator() - : algo{std::make_unique()} + auto& add_algorithm() { - } - - - - template, typename = Dummy> - Communicator() - : algo{std::make_unique( - SAMRAI::tbox::Dimension{dimension})} + if constexpr (is_refiner) + return algos.emplace_back(std::make_unique()); - { + if constexpr (is_synchronizer) + return algos.emplace_back(std::make_unique( + SAMRAI::tbox::Dimension{dimension})); } - /** * @brief findSchedule returns the schedule at a given level number if there is one * (optional). */ - std::optional> findSchedule(int levelNumber) const + auto& findSchedule(std::unique_ptr const& algo, int levelNumber) const { - if (auto mapIter = schedules_.find(levelNumber); mapIter != std::end(schedules_)) - { - return mapIter->second; - } - else - { - return std::nullopt; - } + if (!schedules_.count(levelNumber)) + throw std::runtime_error("no schedule for level " + std::to_string(levelNumber)); + + if (!schedules_.at(levelNumber).count(algo.get())) + throw std::runtime_error("Algorithm has not been registered with Communicator"); + + return schedules_.at(levelNumber).at(algo.get()); } @@ -152,12 +95,19 @@ namespace amr * @brief add is used to add a refine schedule for the given level number. * Note that already existing schedules at this level number are overwritten. 
*/ - void add(std::shared_ptr schedule, int levelNumber) + void add(std::unique_ptr const& algo, std::shared_ptr& schedule, + int levelNumber) { // for shared border node value sync schedule->setDeterministicUnpackOrderingFlag(true); - schedules_[levelNumber] = std::move(schedule); + schedules_[levelNumber][algo.get()] = schedule; + } + + void add(std::unique_ptr const& algo, std::shared_ptr&& schedule, + int levelNumber) + { + add(algo, schedule, levelNumber); } }; @@ -191,12 +141,7 @@ namespace amr std::shared_ptr refineOp, std::shared_ptr timeOp) { - std::shared_ptr xVariableFillPattern - = FieldFillPattern::make_shared(refineOp); - std::shared_ptr yVariableFillPattern - = FieldFillPattern::make_shared(refineOp); - std::shared_ptr zVariableFillPattern - = FieldFillPattern::make_shared(refineOp); + auto variableFillPattern = FieldFillPattern::make_shared(refineOp); Communicator com; @@ -210,20 +155,19 @@ namespace amr if (src_id && dest_id && old_id) { - // dest, src, old, new, scratch - com.algo->registerRefine(*dest_id, // dest - *src_id, // source at same time - *old_id, // source at past time (for time interp) - *new_id, // source at future time (for time interp) - *dest_id, // scratch - refineOp, timeOp, fillPattern); + com.add_algorithm()->registerRefine( + *dest_id, // dest + *src_id, // source at same time + *old_id, // source at past time (for time interp) + *new_id, // source at future time (for time interp) + *dest_id, // scratch + refineOp, timeOp, fillPattern); } }; - // register refine operators for each component of the vecfield - registerRefine(ghost.xName, model.xName, oldModel.xName, xVariableFillPattern); - registerRefine(ghost.yName, model.yName, oldModel.yName, yVariableFillPattern); - registerRefine(ghost.zName, model.zName, oldModel.zName, zVariableFillPattern); + registerRefine(ghost.xName, model.xName, oldModel.xName, variableFillPattern); + registerRefine(ghost.yName, model.yName, oldModel.yName, variableFillPattern); + registerRefine(ghost.zName, model.zName, oldModel.zName, variableFillPattern); return com; } @@ -240,24 +184,18 @@ namespace amr std::shared_ptr const& rm, std::shared_ptr refineOp) { - std::shared_ptr xVariableFillPattern - = std::make_shared(); - std::shared_ptr yVariableFillPattern - = std::make_shared(); - std::shared_ptr zVariableFillPattern - = std::make_shared(); Communicator com; - auto registerRefine = [&com, &rm, &refineOp](std::string name, auto& fillPattern) { + auto registerRefine = [&com, &rm, &refineOp](std::string name) { auto id = rm->getID(name); if (id) { - com.algo->registerRefine(*id, *id, *id, refineOp, fillPattern); + com.add_algorithm()->registerRefine(*id, *id, *id, refineOp); } }; - registerRefine(descriptor.xName, xVariableFillPattern); - registerRefine(descriptor.yName, yVariableFillPattern); - registerRefine(descriptor.zName, zVariableFillPattern); + registerRefine(descriptor.xName); + registerRefine(descriptor.yName); + registerRefine(descriptor.zName); return com; } @@ -274,15 +212,14 @@ namespace amr std::shared_ptr const& rm, std::shared_ptr refineOp) { - Communicator communicator; + Communicator com; auto id = rm->getID(name); if (id) { - communicator.algo->registerRefine(*id, *id, *id, refineOp); + com.add_algorithm()->registerRefine(*id, *id, *id, refineOp); } - - return communicator; + return com; } @@ -297,27 +234,19 @@ namespace amr std::shared_ptr const& rm, std::shared_ptr coarsenOp) { - std::shared_ptr xVariableFillPattern - = std::make_shared(); - std::shared_ptr yVariableFillPattern - = 
std::make_shared(); - std::shared_ptr zVariableFillPattern - = std::make_shared(); - Communicator com; - auto registerCoarsen = [&com, &rm, &coarsenOp](std::string name, auto& fillpattern) // - { + auto registerCoarsen = [&com, &rm, &coarsenOp](std::string name) { auto id = rm->getID(name); if (id) { - com.algo->registerCoarsen(*id, *id, coarsenOp, fillpattern); + com.add_algorithm()->registerCoarsen(*id, *id, coarsenOp); } }; - registerCoarsen(descriptor.xName, xVariableFillPattern); - registerCoarsen(descriptor.yName, yVariableFillPattern); - registerCoarsen(descriptor.zName, zVariableFillPattern); + registerCoarsen(descriptor.xName); + registerCoarsen(descriptor.yName); + registerCoarsen(descriptor.zName); return com; } @@ -334,8 +263,9 @@ namespace amr auto id = rm->getID(name); if (id) - com.algo->registerCoarsen(*id, *id, coarsenOp); - + { + com.add_algorithm()->registerCoarsen(*id, *id, coarsenOp); + } return com; } diff --git a/src/amr/multiphysics_integrator.h b/src/amr/multiphysics_integrator.h index 088836d0d..397b3c53d 100644 --- a/src/amr/multiphysics_integrator.h +++ b/src/amr/multiphysics_integrator.h @@ -314,7 +314,7 @@ namespace solver } } - if (oldLevel != nullptr) + if (isRegridding) { // regriding the current level has broken schedules for which // this level is the source or destination diff --git a/src/simulator/simulator.h b/src/simulator/simulator.h index ecf17dec0..a4405a2cd 100644 --- a/src/simulator/simulator.h +++ b/src/simulator/simulator.h @@ -68,7 +68,11 @@ class Simulator : public ISimulator Simulator(PHARE::initializer::PHAREDict const& dict, std::shared_ptr const& hierarchy); - + ~Simulator() + { + if (coutbuf != nullptr) + std::cout.rdbuf(coutbuf); + } static constexpr std::size_t dimension = _dimension; static constexpr std::size_t interp_order = _interp_order; @@ -97,7 +101,7 @@ class Simulator : public ISimulator auto find_model(std::string name); std::ofstream log_out{".log/" + std::to_string(core::mpi::rank()) + ".out"}; - std::streambuf* coutbuf; + std::streambuf* coutbuf = nullptr; std::shared_ptr hierarchy_; std::unique_ptr integrator_; diff --git a/tests/functional/alfven_wave/alfven_wave1d.py b/tests/functional/alfven_wave/alfven_wave1d.py index 5b04e03e3..1e874f3f3 100644 --- a/tests/functional/alfven_wave/alfven_wave1d.py +++ b/tests/functional/alfven_wave/alfven_wave1d.py @@ -155,15 +155,17 @@ def phase_speed(run_path, ampl, xmax): def main(): - from pybindlibs.cpp import mpi_rank + from pyphare.cpp import cpp_lib + cpp = cpp_lib() + from pyphare.pharesee.run import Run from pyphare.pharesee.hierarchy import flat_finest_field config() - Simulator(gv.sim).initialize().run() + Simulator(gv.sim).run() - if mpi_rank() == 0: + if cpp.mpi_rank() == 0: vphi, t, phi, a, k = phase_speed(".", 0.01, 1000) diff --git a/tests/functional/harris/CMakeLists.txt b/tests/functional/harris/CMakeLists.txt index ec501e1ef..67ce171db 100644 --- a/tests/functional/harris/CMakeLists.txt +++ b/tests/functional/harris/CMakeLists.txt @@ -11,6 +11,7 @@ if(HighFive AND testMPI) ## These test use dump diagnostics so require HighFive! 
# exec level 11 # mpirun -n 10 - phare_mpi_python3_exec(11 10 harris_2d harris_2d.py ${CMAKE_CURRENT_BINARY_DIR}) - + if(testMPI) + phare_mpi_python3_exec(11 10 harris_2d harris_2d.py ${CMAKE_CURRENT_BINARY_DIR}) + endif(testMPI) endif() diff --git a/tests/functional/tdtagged/td1dtagged.py b/tests/functional/tdtagged/td1dtagged.py index 22265a2c2..e97cf194a 100644 --- a/tests/functional/tdtagged/td1dtagged.py +++ b/tests/functional/tdtagged/td1dtagged.py @@ -266,19 +266,19 @@ def by_fit(x, x0,L): def main(): - from pybindlibs.cpp import mpi_rank + from pyphare.cpp import cpp_lib + cpp = cpp_lib() + withTagging(diagdir="withTagging") - simulator = Simulator(gv.sim) - simulator.initialize().run() + Simulator(gv.sim).run() gv.sim = None noRefinement(diagdir="noRefinement") - simulator = Simulator(gv.sim) - simulator.initialize().run() + Simulator(gv.sim).run() gv.sim = None - if mpi_rank() == 0: + if cpp.mpi_rank() == 0: make_figure() diff --git a/tests/simulator/CMakeLists.txt b/tests/simulator/CMakeLists.txt index c82102ad7..ee2371833 100644 --- a/tests/simulator/CMakeLists.txt +++ b/tests/simulator/CMakeLists.txt @@ -14,6 +14,7 @@ phare_python3_exec(9 sim-refineParticlNbr refined_particle_nbr.py ${CMAKE_CURREN if(HighFive) ## These test use dump diagnostics so require HighFive! phare_python3_exec(9 diagnostics test_diagnostics.py ${CMAKE_CURRENT_BINARY_DIR}) # serial or n = 2 + if(testMPI) phare_mpi_python3_exec(9 3 diagnostics test_diagnostics.py ${CMAKE_CURRENT_BINARY_DIR}) phare_mpi_python3_exec(9 4 diagnostics test_diagnostics.py ${CMAKE_CURRENT_BINARY_DIR}) @@ -25,5 +26,6 @@ endif() configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.py ${CMAKE_CURRENT_BINARY_DIR}/config.py @ONLY) add_subdirectory(initialize) +add_subdirectory(refinement) add_subdirectory(advance) diff --git a/tests/simulator/__init__.py b/tests/simulator/__init__.py index e8f3f7004..dad4fe1d7 100644 --- a/tests/simulator/__init__.py +++ b/tests/simulator/__init__.py @@ -143,3 +143,38 @@ def populate_simulation(dim, interp, **input): ElectronModel(closure="isothermal", Te=0.12) return simulation + + + + +def diff_boxes(self, slice1, slice2, box, atol=None): + if atol is not None: + ignore = np.isclose(slice1, slice2, atol=atol, rtol=0) + def _diff(slice0): + slice0[ignore] = 0 # set values which are within atol range to 0 + return slice0 + diff = np.abs(_diff(slice1.copy()) - _diff(slice2.copy())) + else: + diff = np.abs(slice1 - slice2) + + boxes = [] + if box.ndim == 1: + x1 = np.where(diff != 0) + for x in zip(x1): + x = x+box.lower[0] + boxes += [Box([x], [x])] + elif box.ndim == 2: + x1, y1 = np.where(diff != 0) + for x, y in zip(x1, y1): + x = x+box.lower[0] + y = y+box.lower[1] + boxes += [Box([x, y], [x, y])] + elif box.ndim == 3: + x1, y1, z1 = np.where(diff != 0) + for x, y, z in zip(x1, y1, z1): + x = x+box.lower[0] + y = y+box.lower[1] + z = z+box.lower[2] + boxes += [Box([x, y, z], [x, y, z])] + return boxes + diff --git a/tests/simulator/advance/test_fields_advance_2d.py b/tests/simulator/advance/test_fields_advance_2d.py index 3e58b0121..7ccfbc387 100644 --- a/tests/simulator/advance/test_fields_advance_2d.py +++ b/tests/simulator/advance/test_fields_advance_2d.py @@ -71,15 +71,15 @@ def test_field_coarsening_via_subcycles(self, interp_order, refinement_boxes): @data( # only supports a hierarchy with 2 levels - # *per_interp(({"L0": [Box2D(0, 4)]})), # fails? 
+ *per_interp(({"L0": [Box2D(0, 4)]})), *per_interp(({"L0": [Box2D(10, 14)]})), *per_interp(({"L0": [Box2D(0, 4), Box2D(10, 14)]})), *per_interp(({"L0": [Box2D(0, 4), Box2D(5, 9), Box2D(10, 14)]})), *per_interp(({"L0": [Box2D(20, 24)]})), - # *per_interp(({"L0": [Box2D(30, 34)]})), # fails? ) @unpack def test_field_level_ghosts_via_subcycles_and_coarser_interpolation(self, interp_order, refinement_boxes): + print(f"{self._testMethodName}_{ndim}d") self._test_field_level_ghosts_via_subcycles_and_coarser_interpolation(ndim, interp_order, refinement_boxes) diff --git a/tests/simulator/refinement/CMakeLists.txt b/tests/simulator/refinement/CMakeLists.txt new file mode 100644 index 000000000..f9bf0d99c --- /dev/null +++ b/tests/simulator/refinement/CMakeLists.txt @@ -0,0 +1,16 @@ +cmake_minimum_required (VERSION 3.9) + +project(test-simulator-refinement) + +if(NOT ${PHARE_PROJECT_DIR} STREQUAL ${CMAKE_BINARY_DIR}) + file(GLOB PYFILES "*.py") + file(COPY ${PYFILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) +endif() + +if(HighFive) + ## These test use dump diagnostics so require HighFive! + if(testMPI) + phare_mpi_python3_exec(9 2 simple_2d_refinement test_2d_2_core.py ${CMAKE_CURRENT_BINARY_DIR}) + phare_mpi_python3_exec(11 10 complex_2d_refinement test_2d_10_core.py ${CMAKE_CURRENT_BINARY_DIR}) + endif(testMPI) +endif() diff --git a/tests/simulator/refinement/test_2d_10_core.py b/tests/simulator/refinement/test_2d_10_core.py new file mode 100644 index 000000000..ff6fb4152 --- /dev/null +++ b/tests/simulator/refinement/test_2d_10_core.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 + +""" + Complex test to force Yee Lattice centering discrepancies due to SAMRAI Schedule/Algorithm + grouping of quantity components with distinct geometries. + + Should be run with 10 cores + mpirun -n 10 tests/simulator/refinement/test_2d_10_core.py +""" + + +import pyphare.pharein as ph #lgtm [py/import-and-import-from] +from pyphare.pharein import Simulation +from pyphare.pharein import MaxwellianFluidModel +from pyphare.pharein import ElectromagDiagnostics,FluidDiagnostics, ParticleDiagnostics +from pyphare.pharein import ElectronModel +from pyphare.core.box import Box +import pyphare.core.box as boxm +from pyphare.simulator.simulator import Simulator, startMPI +from pyphare.pharein import global_vars as gv + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib as mpl +mpl.use('Agg') + + +def config(diag_outputs, model_init={}, refinement_boxes=None): + ph.global_vars.sim = None + + Simulation( + smallest_patch_size=10, + largest_patch_size=20, + time_step_nbr= 1, + final_time= 0.001, + #boundary_types="periodic", + cells=(50, 100), + dl=(0.40, 0.40), + #refinement="tagging", + #max_nbr_levels = 3, + refinement_boxes=refinement_boxes, + hyper_resistivity=0.0050, + resistivity=0.001, + diag_options={"format": "phareh5", + "options": {"dir": diag_outputs, + "mode":"overwrite", "fine_dump_lvl_max": 10}} + ) + def density(x, y): + from pyphare.pharein.global_vars import sim + Lx = sim.simulation_domain()[0] + return 1. + + def bx(x, y): + return 0.1 + + def by(x, y): + from pyphare.pharein.global_vars import sim + Lx = sim.simulation_domain()[0] + Ly = sim.simulation_domain()[1] + return 0.2 + + def bz(x, y): + return 1. + + def T(x, y): + return 1. + + def vx(x, y): + return 1.0 + + def vy(x, y): + return 0. + + def vz(x, y): + return 0. 
+ + def vthx(x, y): + return np.sqrt(T(x, y)) + + def vthy(x, y): + return np.sqrt(T(x, y)) + + def vthz(x, y): + return np.sqrt(T(x, y)) + + vvv = { + "vbulkx": vx, "vbulky": vy, "vbulkz": vz, + "vthx": vthx, "vthy": vthy, "vthz": vthz, + "nbr_part_per_cell":100, + "init": model_init, + } + + MaxwellianFluidModel( + bx=bx, by=by, bz=bz, + protons={"charge": 1, "density": density, **vvv} + ) + + ElectronModel(closure="isothermal", Te=0.0) + sim = ph.global_vars.sim + dt = 1*sim.time_step + nt = sim.final_time/dt+1 + timestamps = dt * np.arange(nt) + + for quantity in ["E", "B"]: + ElectromagDiagnostics( + quantity=quantity, + write_timestamps=timestamps, + compute_timestamps=timestamps, + ) + + for quantity in ["density", "bulkVelocity"]: + FluidDiagnostics( + quantity=quantity, + write_timestamps=timestamps, + compute_timestamps=timestamps, + ) + + return sim + +def get_time(path, time=None, datahier = None): + if time is not None: + time = "{:.10f}".format(time) + from pyphare.pharesee.hierarchy import hierarchy_from + datahier = hierarchy_from(h5_filename=path+"/EM_E.h5", time=time, hier=datahier) + datahier = hierarchy_from(h5_filename=path+"/EM_B.h5", time=time, hier=datahier) + return datahier + +def get_hier(path): + return get_time(path) + +from tests.simulator.test_advance import AdvanceTestBase +from pyphare.cpp import cpp_lib +cpp = cpp_lib() +test = AdvanceTestBase(rethrow=True) # change to False for debugging images +L0_diags = "phare_outputs/test_x_homo_0" +L0L1_diags = "phare_outputs/test_x_homo_1" + + +def make_fig(hier, fig_name, ilvl, extra_collections=[]): + if cpp.mpi_rank() == 0: + l0_in_l1 = [boxm.refine(p.box, 2) for p in hier.level(0).patches] + collections=[{ + "boxes": l0_in_l1, + "facecolor": "grey", + }] + if 1 in hier.levels(): + l1_over_l0 = [p.box for p in hier.level(1).patches] + collections += [{ + "boxes": l1_over_l0, + "facecolor": "yellow", + }] + hier.plot_2d_patches(1, + collections=collections + extra_collections, + ).savefig(fig_name+".png") + +def check_hier(hier): + for ilvl, lvl in hier.levels().items(): + for patch in lvl.patches: + assert all(patch.box.shape > 5) + return hier + +def post_advance(new_time): + if cpp.mpi_rank() == 0: + L0_datahier = check_hier(get_hier(L0_diags)) + L0L1_datahier = check_hier(get_hier(L0L1_diags)) + extra_collections = [] + errors = test.base_test_overlaped_fields_are_equal(L0L1_datahier, new_time) + errors = test.base_test_field_level_ghosts_via_subcycles_and_coarser_interpolation(L0_datahier, L0L1_datahier) + print(f"errors {type(errors)}") + if isinstance(errors, list): + extra_collections += [{ + "boxes": errors, + "facecolor": "black", + }] + make_fig(L0L1_datahier, L0L1_diags.split("/")[-1], 1, extra_collections) + +def main(): + import random + startMPI() + rando = random.randint(0, 1e10) + Simulator(config(L0_diags, {"seed": rando})).run().reset() + refinement_boxes={"L0": {"B0": [( 7, 40), ( 20, 60)]}} + sim = config(L0L1_diags, {"seed": rando}, refinement_boxes) + Simulator(sim, post_advance=post_advance).run() + +if __name__=="__main__": + main() diff --git a/tests/simulator/refinement/test_2d_2_core.py b/tests/simulator/refinement/test_2d_2_core.py new file mode 100644 index 000000000..56d276a2d --- /dev/null +++ b/tests/simulator/refinement/test_2d_2_core.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 + +""" + Simple test to force Yee Lattice centering discrepancies due to SAMRAI Schedule/Algorithm + grouping of quantity components with distinct geometries. 
+ + Should be run with 2 cores + mpirun -n 2 tests/simulator/refinement/test_2d_2_core.py +""" + + +import pyphare.pharein as ph #lgtm [py/import-and-import-from] +from pyphare.pharein import Simulation +from pyphare.pharein import MaxwellianFluidModel +from pyphare.pharein import ElectromagDiagnostics,FluidDiagnostics, ParticleDiagnostics +from pyphare.pharein import ElectronModel +from pyphare.core.box import Box +import pyphare.core.box as boxm +from pyphare.simulator.simulator import Simulator, startMPI +from pyphare.pharein import global_vars as gv + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib as mpl +mpl.use('Agg') + + +def config(diag_outputs, model_init={}, refinement_boxes=None): + ph.global_vars.sim = None + + Simulation( + # smallest_patch_size=6, + # largest_patch_size=(30, 15), + time_step_nbr= 1, + final_time= 0.001, + #boundary_types="periodic", + cells=(30, 30), + dl=(0.1, 0.1), + #refinement="tagging", + #max_nbr_levels = 3, + refinement_boxes=refinement_boxes, + #refinement_boxes={"L0": {"B0": [( 10, 11), ( 15, 16)]}}, + hyper_resistivity=0,# 0.0050, + resistivity=0,# 0.001, + diag_options={"format": "phareh5", + "options": {"dir": diag_outputs, + "mode":"overwrite", "fine_dump_lvl_max": 10}} + ) + + def density(x, y): + return 1. + + def bx(x, y): + return 0.1 + + def by(x, y): + return 0.2 + + def bz(x, y): + return 1. + + def T(x, y): + return 1. + + def vx(x, y): + return 1.0 + + def vy(x, y): + return 0. + + def vz(x, y): + return 0. + + def vthx(x, y): + return np.sqrt(T(x, y)) + + def vthy(x, y): + return np.sqrt(T(x, y)) + + def vthz(x, y): + return np.sqrt(T(x, y)) + + vvv = { + "vbulkx": vx, "vbulky": vy, "vbulkz": vz, + "vthx": vthx, "vthy": vthy, "vthz": vthz, + "nbr_part_per_cell":100, + "init": model_init, + } + + MaxwellianFluidModel( + bx=bx, by=by, bz=bz, + protons={"charge": 1, "density": density, **vvv} + ) + + ElectronModel(closure="isothermal", Te=0.0) + sim = ph.global_vars.sim + dt = 1*sim.time_step + nt = sim.final_time/dt+1 + timestamps = dt * np.arange(nt) + + for quantity in ["E", "B"]: + ElectromagDiagnostics( + quantity=quantity, + write_timestamps=timestamps, + compute_timestamps=timestamps, + ) + + for quantity in ["density", "bulkVelocity"]: + FluidDiagnostics( + quantity=quantity, + write_timestamps=timestamps, + compute_timestamps=timestamps, + ) + + return sim + + +def make_fig(hier, fig_name, ilvl, extra_collections=[]): + if cpp.mpi_rank() == 0: + l0_in_l1 = [boxm.refine(p.box, 2) for p in hier.level(0).patches] + collections=[{ + "boxes": l0_in_l1, + "facecolor": "grey", + }] + if 1 in hier.levels(): + l1_over_l0 = [p.box for p in hier.level(1).patches] + collections += [{ + "boxes": l1_over_l0, + "facecolor": "yellow", + }] + hier.plot_2d_patches(1, + collections=collections + extra_collections, + ).savefig(fig_name+".png") + +def get_time(path, time=None, datahier = None): + if time is not None: + time = "{:.10f}".format(time) + from pyphare.pharesee.hierarchy import hierarchy_from + datahier = hierarchy_from(h5_filename=path+"/EM_E.h5", time=time, hier=datahier) + datahier = hierarchy_from(h5_filename=path+"/EM_B.h5", time=time, hier=datahier) + return datahier + +def get_hier(path): + return get_time(path) + +from tests.simulator.test_advance import AdvanceTestBase +from pyphare.cpp import cpp_lib +cpp = cpp_lib() +test = AdvanceTestBase(rethrow=True) # change to False for debugging images +L0_diags = "phare_outputs/test_homo_0" +L0L1_diags = "phare_outputs/test_homo_1" + +def post_advance_0(new_time): 
+ if cpp.mpi_rank() == 0: + pass + +def post_advance_1(new_time): + if cpp.mpi_rank() == 0: + L0_datahier = get_hier(L0_diags) + L0L1_datahier = get_hier(L0L1_diags) + extra_collections = [] + errors = test.base_test_overlaped_fields_are_equal(L0L1_datahier, new_time) + errors = test.base_test_field_level_ghosts_via_subcycles_and_coarser_interpolation(L0_datahier, L0L1_datahier) + if isinstance(errors, list): + extra_collections += [{ + "boxes": errors, + "facecolor": "black", + }] + make_fig(L0L1_datahier, L0L1_diags.split("/")[-1], 1, extra_collections) + +def main(): + import random + startMPI() + rando = random.randint(0, 1e10) + + refinement_boxes={"L0": {"B0": [( 10, 10), ( 14, 14)]}} + + Simulator(config(L0_diags, {"seed": rando}), post_advance=post_advance_0).run().reset() + sim = config(L0L1_diags, {"seed": rando}, refinement_boxes) + Simulator(sim, post_advance=post_advance_1).run() + +if __name__=="__main__": + main() + diff --git a/tests/simulator/test_advance.py b/tests/simulator/test_advance.py index 1fde4c599..7f1c42293 100644 --- a/tests/simulator/test_advance.py +++ b/tests/simulator/test_advance.py @@ -17,9 +17,22 @@ import unittest from ddt import ddt, data, unpack from tests.diagnostic import all_timestamps +from tests.simulator import diff_boxes @ddt class AdvanceTestBase(unittest.TestCase): + def pop(kwargs): + keys = ["rethrow"] + for key in keys: + if key in kwargs: + kwargs.pop(key) + return kwargs + + def __init__(self, *args, **kwargs): + super(AdvanceTestBase, self).__init__(*args, **AdvanceTestBase.pop(kwargs.copy())) + self.rethrow_ = True + if "rethrow" in kwargs: + self.rethrow_ = kwargs["rethrow"] def ddt_test_id(self): return self._testMethodName.split("_")[-1] @@ -194,11 +207,8 @@ def vthz(*xyz): return mom_hier - - def base_test_overlaped_fields_are_equal(self, datahier, coarsest_time): checks = 0 - for ilvl, overlaps in hierarchy_overlaps(datahier, coarsest_time).items(): for overlap in overlaps: pd1, pd2 = overlap["pdatas"] @@ -233,9 +243,15 @@ def base_test_overlaped_fields_are_equal(self, datahier, coarsest_time): slice1 = data1[loc_b1.lower[0]:loc_b1.upper[0] + 1, loc_b1.lower[1]:loc_b1.upper[1] + 1] slice2 = data2[loc_b2.lower[0]:loc_b2.upper[0] + 1, loc_b2.lower[1]:loc_b2.upper[1] + 1] - assert slice1.dtype == np.float64 - np.testing.assert_equal(slice1, slice2) - checks += 1 + try: + assert slice1.dtype == np.float64 + np.testing.assert_equal(slice1, slice2) + checks += 1 + except AssertionError as e: + print("AssertionError", pd1.name, e) + if self.rethrow_: + raise e + return diff_boxes(slice1, slice2, box) return checks @@ -395,6 +411,7 @@ def _test_field_coarsening_via_subcycles(self, dim, interp_order, refinement_box afterCoarse = np.copy(coarse_pdDataset) # change values that should be updated to make failure obvious + assert(dim < 3) # update if dim == 1: afterCoarse[dataBox.lower[0] : dataBox.upper[0] + 1] = -144123 if dim == 2: @@ -407,42 +424,20 @@ def _test_field_coarsening_via_subcycles(self, dim, interp_order, refinement_box + def base_test_field_level_ghosts_via_subcycles_and_coarser_interpolation(self, L0_datahier, L0L1_datahier, quantities=None): - def _test_field_level_ghosts_via_subcycles_and_coarser_interpolation(self, ndim, interp_order, refinement_boxes): - """ - This test runs two virtually identical simulations for one step. 
- L0_datahier has no refined levels - L0L1_datahier has one refined level - - This is done to compare L0 values that haven't received the coarsened values of L1 because there is no L1, - to the level field ghost of L1 of L0L1_datahier - - The simulations are no longer comparable after the first advance, so this test cannot work beyond that. - """ - print("test_field_coarsening_via_subcycles for dim/interp : {}/{}".format(ndim, interp_order)) + if quantities is None: + quantities = [f"{EM}{xyz}" for EM in ["E", "B"] for xyz in ["x", "y", "z"]] from tests.amr.data.field.refine.test_refine_field import refine_time_interpolate from pyphare.pharein import global_vars - import random - rando = random.randint(0, 1e10) - - def _getHier(diag_dir, boxes=[]): - return self.getHierarchy(interp_order, boxes, "eb", cells=60, - time_step_nbr=1, largest_patch_size=15, - diag_outputs=diag_dir, extra_diag_options={"fine_dump_lvl_max": 10}, time_step=0.001, - model_init={"seed": rando}, ndim=ndim - ) - def assert_time_in_hier(*ts): for t in ts: self.assertIn(L0L1_datahier.format_timestamp(t), L0L1_datahier.times()) - L0_datahier = _getHier(f"phare_lvl_ghost_interpolation_L0_diags/{ndim}/{interp_order}/{self.ddt_test_id()}") - L0L1_datahier = _getHier( - f"phare_lvl_ghost_interpolation_L0L1_diags/{ndim}/{interp_order}/{self.ddt_test_id()}", refinement_boxes - ) - + checks = 0 + ndim = global_vars.sim.ndim lvl_steps = global_vars.sim.level_time_steps assert len(lvl_steps) == 2, "this test is only configured for L0 -> L1 refinement comparisons" @@ -454,21 +449,22 @@ def assert_time_in_hier(*ts): fine_subcycle_times = [] for fine_subcycle in range(global_vars.sim.level_step_nbr[fine_ilvl] + 1): - fine_subcycle_time = coarsest_time_before + (lvl_steps[fine_ilvl] * fine_subcycle) + fine_subcycle_time = coarsest_time_before + (lvl_steps[fine_ilvl] * fine_subcycle) assert_time_in_hier(fine_subcycle_time) fine_subcycle_times += [fine_subcycle_time] - quantities = [f"{EM}{xyz}" for EM in ["E", "B"] for xyz in ["x", "y", "z"]] interpolated_fields = refine_time_interpolate( L0_datahier, quantities, coarse_ilvl, coarsest_time_before, coarsest_time_after, fine_subcycle_times ) + error_boxes = [] checks = 0 for fine_subcycle_time in fine_subcycle_times: fine_level_qty_ghost_boxes = level_ghost_boxes(L0L1_datahier, quantities, fine_ilvl, fine_subcycle_time) for qty in quantities: for fine_level_ghost_box_data in fine_level_qty_ghost_boxes[qty]: + fine_subcycle_pd = fine_level_ghost_box_data["pdata"] for fine_level_ghost_box_info in fine_level_ghost_box_data["boxes"]: @@ -485,13 +481,12 @@ def assert_time_in_hier(*ts): upper_dims = fine_level_ghost_box.lower > fine_subcycle_pd.box.upper for refinedInterpolatedField in interpolated_fields[qty][fine_subcycle_time]: - # level_ghost_boxes() includes periodic shifts, which we want to ignore here I think - is_periodic_shifted = refinedInterpolatedField.box * fine_subcycle_pd.box is None - lvlOverlap = refinedInterpolatedField.ghost_box * fine_level_ghost_box - if lvlOverlap is not None and not is_periodic_shifted: - - fine_ghostbox_data = fine_subcycle_pd[fine_level_ghost_box] - refinedInterpGhostBox_data = refinedInterpolatedField[fine_level_ghost_box] + lvlOverlap = refinedInterpolatedField.box * fine_level_ghost_box + if lvlOverlap is not None: + box = lvlOverlap + fine_ghostbox_data = fine_subcycle_pd[box] + refinedInterpGhostBox_data = refinedInterpolatedField[box] + assert fine_ghostbox_data.shape == refinedInterpGhostBox_data.shape fine_ds = fine_subcycle_pd.dataset if ndim 
== 1: # verify selecting start/end of L1 dataset from ghost box @@ -502,10 +497,49 @@ def assert_time_in_hier(*ts): assert refinedInterpGhostBox_data.shape == fine_subcycle_pd.ghosts_nbr assert fine_ghostbox_data.shape == fine_subcycle_pd.ghosts_nbr - - np.testing.assert_allclose(fine_ghostbox_data, refinedInterpGhostBox_data, atol=1e-15, rtol=0) + try: + np.testing.assert_allclose(fine_ghostbox_data, refinedInterpGhostBox_data, atol=1e-15, rtol=0) + except AssertionError as e: + print(f"FAIL level ghost subcycle_coarsening qty {qty}", e) + if self.rethrow_: + raise e + error_boxes += diff_boxes(fine_ghostbox_data, refinedInterpGhostBox_data, box, atol=1e-15) checks += 1 + if len(error_boxes): return error_boxes + return checks + + + + + def _test_field_level_ghosts_via_subcycles_and_coarser_interpolation(self, ndim, interp_order, refinement_boxes): + """ + This test runs two virtually identical simulations for one step. + L0_datahier has no refined levels + L0L1_datahier has one refined level + + This is done to compare L0 values that haven't received the coarsened values of L1 because there is no L1, + to the level field ghost of L1 of L0L1_datahier + + The simulations are no longer comparable after the first advance, so this test cannot work beyond that. + """ + print("test_field_coarsening_via_subcycles for dim/interp : {}/{}".format(ndim, interp_order)) + + import random + rando = random.randint(0, 1e10) + + def _getHier(diag_dir, boxes=[]): + return self.getHierarchy(interp_order, boxes, "eb", cells=30, + time_step_nbr=1, largest_patch_size=15, + diag_outputs=diag_dir, extra_diag_options={"fine_dump_lvl_max": 10}, time_step=0.001, + model_init={"seed": rando}, ndim=ndim + ) + L0_datahier = _getHier(f"phare_lvl_ghost_interpolation_L0_diags/{ndim}/{interp_order}/{self.ddt_test_id()}") + L0L1_datahier = _getHier( + f"phare_lvl_ghost_interpolation_L0L1_diags/{ndim}/{interp_order}/{self.ddt_test_id()}", refinement_boxes + ) + quantities = [f"{EM}{xyz}" for EM in ["E", "B"] for xyz in ["x", "y", "z"]] + checks = self.base_test_field_level_ghosts_via_subcycles_and_coarser_interpolation(L0_datahier, L0L1_datahier, quantities) self.assertGreater(checks, len(refinement_boxes["L0"]) * len(quantities))
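
Editor's note — a minimal, SAMRAI-free sketch of the bookkeeping this patch introduces. The core change is that a Communicator now owns one refine/coarsen algorithm per registered quantity component (x, y, z) instead of a single shared algorithm, and schedules are looked up by (level number, algorithm) so each component keeps a schedule built from its own Yee-lattice geometry. The Algorithm and Schedule types below are stand-ins, not the SAMRAI xfer classes; only the member names (algos, add_algorithm, add, findSchedule) mirror the Communicator in src/amr/messengers/quantity_communicator.h, and the driver loop mirrors registerLevel()/fill() in src/amr/messengers/communicators.h.

// Toy model of the per-component algorithm/schedule pattern from this patch.
// Algorithm and Schedule are placeholders for SAMRAI::xfer::RefineAlgorithm and
// SAMRAI::xfer::RefineSchedule; everything else is standard C++17.
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct Schedule { void fillData(double /*time*/) const {} };
struct Algorithm {
    std::shared_ptr<Schedule> createSchedule() const { return std::make_shared<Schedule>(); }
};

class Communicator
{
    // schedules are keyed by level number, then by the algorithm that built them
    std::map<int, std::map<Algorithm*, std::shared_ptr<Schedule>>> schedules_;

public:
    std::vector<std::unique_ptr<Algorithm>> algos; // one per quantity component

    auto& add_algorithm() { return algos.emplace_back(std::make_unique<Algorithm>()); }

    void add(std::unique_ptr<Algorithm> const& algo, std::shared_ptr<Schedule> schedule,
             int levelNumber)
    {
        schedules_[levelNumber][algo.get()] = std::move(schedule);
    }

    auto& findSchedule(std::unique_ptr<Algorithm> const& algo, int levelNumber) const
    {
        if (!schedules_.count(levelNumber))
            throw std::runtime_error("no schedule for level " + std::to_string(levelNumber));
        if (!schedules_.at(levelNumber).count(algo.get()))
            throw std::runtime_error("algorithm not registered with Communicator");
        return schedules_.at(levelNumber).at(algo.get());
    }
};

int main()
{
    Communicator com;

    // registration: one algorithm per vector-field component (x, y, z),
    // as makeGhostRefiners/makeInitRefiners do per xName/yName/zName in the patch
    for (int component = 0; component < 3; ++component)
        com.add_algorithm();

    int const levelNumber = 1;

    // registerLevel(): build and store a schedule per algorithm on this level
    for (auto& algo : com.algos)
        com.add(algo, algo->createSchedule(), levelNumber);

    // fill()/initialize(): every component fills through its own schedule
    for (auto const& algo : com.algos)
        com.findSchedule(algo, levelNumber)->fillData(0.0);
}

Because each component now has its own algorithm, SAMRAI's equivalence-class grouping (RefineClasses::getEquivalenceClassIndex) can no longer collapse components with distinct centerings onto the geometry of the first registered item — which appears to be why the per-component X/Y/Z*VariableFillPattern and X/Y/Z*FieldFillPattern classes are dropped above in favor of a single shared FieldFillPattern per register call.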