From ad7454e382f79a9933cb3aab5b0feb08999098eb Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Wed, 28 Aug 2024 16:38:07 -0600 Subject: [PATCH 01/21] IOSS: Clean up formatting (clang-format) --- .../libraries/ioss/src/Ioss_DynamicTopology.h | 88 +- .../libraries/ioss/src/Ioss_StructuredBlock.C | 34 +- .../libraries/ioss/src/adios/AdiosWrapper.C | 9 +- .../ioss/src/adios/Ioad_DatabaseIO.h | 25 +- .../src/catalyst/Iocatalyst_CatalystLogging.C | 2 +- .../ioss/src/catalyst/Iocatalyst_DatabaseIO.h | 54 +- .../libraries/ioss/src/cgns/Iocgns_Utils.C | 10 +- .../libraries/ioss/src/cgns/Iocgns_Utils.h | 2 +- .../ioss/src/exodus/Ioex_BaseDatabaseIO.C | 61 +- .../ioss/src/exodus/Ioex_BaseDatabaseIO.h | 10 +- .../ioss/src/exodus/Ioex_DatabaseIO.C | 106 +- .../ioss/src/exodus/Ioex_DecompositionData.C | 19 +- .../ioss/src/exodus/Ioex_ParallelDatabaseIO.C | 8 +- .../ioss/src/exodus/Ioex_ParallelDatabaseIO.h | 3 +- .../libraries/ioss/src/exodus/Ioex_Utils.C | 2 +- .../text_mesh/Iotm_TextMeshAdjacencyGraph.h | 2 +- .../src/unit_tests/UnitTestDynamicTopology.C | 1611 ++++++++--------- 17 files changed, 1024 insertions(+), 1022 deletions(-) diff --git a/packages/seacas/libraries/ioss/src/Ioss_DynamicTopology.h b/packages/seacas/libraries/ioss/src/Ioss_DynamicTopology.h index 76de048fd4..3921d8e3a6 100644 --- a/packages/seacas/libraries/ioss/src/Ioss_DynamicTopology.h +++ b/packages/seacas/libraries/ioss/src/Ioss_DynamicTopology.h @@ -20,7 +20,7 @@ #include // for ostream #include -#include // for string, operator< +#include // for string, operator< #include namespace Ioss { @@ -44,16 +44,18 @@ namespace Ioss { * - TOPOLOGY_UNKNOWN: Something else, catchall option. */ enum TopologyModified { - TOPOLOGY_SAME = ( 0), //!< No change, also used for initialization - TOPOLOGY_REORDER = (1U << 0), //!< Data structures reordered on processor, no change between procs. - TOPOLOGY_SHUFFLE = (1U << 1), //!< Globally the same, data moved among processors. 
- TOPOLOGY_HADAPT = (1U << 2), //!< Elements split/combined; not moved cross-proc - TOPOLOGY_GEOMETRY = (1U << 3), //!< Geometry (mesh coordinates) modified. Restart needs to know this. - TOPOLOGY_CREATEFACE = (1U << 4), //!< Face/Edge are created/deleted. - TOPOLOGY_CREATEELEM = (1U << 5), //!< Elements are created/deleted. - TOPOLOGY_CREATENODE = (1U << 6), //!< Nodes are created/deleted. - TOPOLOGY_CREATEASSEMBLY = (1U << 7), //!< Assemblies are created/deleted. - TOPOLOGY_UNKNOWN = (1U << 8), //!< Unknown change, recreate from scratch. + TOPOLOGY_SAME = (0), //!< No change, also used for initialization + TOPOLOGY_REORDER = + (1U << 0), //!< Data structures reordered on processor, no change between procs. + TOPOLOGY_SHUFFLE = (1U << 1), //!< Globally the same, data moved among processors. + TOPOLOGY_HADAPT = (1U << 2), //!< Elements split/combined; not moved cross-proc + TOPOLOGY_GEOMETRY = + (1U << 3), //!< Geometry (mesh coordinates) modified. Restart needs to know this. + TOPOLOGY_CREATEFACE = (1U << 4), //!< Face/Edge are created/deleted. + TOPOLOGY_CREATEELEM = (1U << 5), //!< Elements are created/deleted. + TOPOLOGY_CREATENODE = (1U << 6), //!< Nodes are created/deleted. + TOPOLOGY_CREATEASSEMBLY = (1U << 7), //!< Assemblies are created/deleted. + TOPOLOGY_UNKNOWN = (1U << 8), //!< Unknown change, recreate from scratch. 
}; enum class FileControlOption { CONTROL_NONE, CONTROL_AUTO_MULTI_FILE, CONTROL_AUTO_GROUP_FILE }; @@ -61,14 +63,13 @@ namespace Ioss { class IOSS_EXPORT DynamicTopologyObserver { public: - DynamicTopologyObserver(Region *region) - : m_region(region) {} + DynamicTopologyObserver(Region *region) : m_region(region) {} virtual ~DynamicTopologyObserver() {} - virtual void reset_topology_modification_all(); - virtual void reset_topology_modification(); - virtual void set_topology_modification(unsigned int type); + virtual void reset_topology_modification_all(); + virtual void reset_topology_modification(); + virtual void set_topology_modification(unsigned int type); virtual unsigned int get_topology_modification() const; virtual unsigned int get_cumulative_topology_modification() const; @@ -88,8 +89,8 @@ namespace Ioss { void register_region(Region *region); Region *get_region() const { return m_region; } - void register_notifier(DynamicTopologyNotifier *notifier); - DynamicTopologyNotifier* get_notifier() const { return m_notifier; } + void register_notifier(DynamicTopologyNotifier *notifier); + DynamicTopologyNotifier *get_notifier() const { return m_notifier; } virtual void define_model(); virtual void write_model(); @@ -107,9 +108,9 @@ namespace Ioss { DynamicTopologyNotifier *m_notifier{nullptr}; - void check_region() const; + void check_region() const; IOSS_NODISCARD const ParallelUtils &util() const; - void synchronize_topology_modified_flags(); + void synchronize_topology_modified_flags(); void set_topology_modification_nl(unsigned int type); @@ -117,18 +118,19 @@ namespace Ioss { DynamicTopologyObserver(); }; - class IOSS_EXPORT DynamicTopologyNotifier { public: - DynamicTopologyNotifier(const std::string& model_name) - : m_modelName(model_name) {} + DynamicTopologyNotifier(const std::string &model_name) : m_modelName(model_name) {} virtual ~DynamicTopologyNotifier() = default; std::string name() const { return m_modelName; } - std::vector> get_observers() 
const { return m_observers; } + std::vector> get_observers() const + { + return m_observers; + } void register_observer(std::shared_ptr observer); @@ -138,13 +140,12 @@ namespace Ioss { void set_topology_modification(unsigned int type); - template - bool has_observer_type() const + template bool has_observer_type() const { bool found = false; - for(const std::shared_ptr& observer : m_observers) { - if (dynamic_cast(observer.get()) != nullptr) { + for (const std::shared_ptr &observer : m_observers) { + if (dynamic_cast(observer.get()) != nullptr) { found = true; break; } @@ -152,13 +153,13 @@ namespace Ioss { return found; } - template + template std::vector> get_observer_type() const { std::vector> typed_observers; - for(const std::shared_ptr &observer : m_observers) { - ObserverType* typed_observer = dynamic_cast(observer.get()); + for (const std::shared_ptr &observer : m_observers) { + ObserverType *typed_observer = dynamic_cast(observer.get()); if (typed_observer != nullptr) { typed_observers.push_back(std::dynamic_pointer_cast(observer)); } @@ -168,37 +169,38 @@ namespace Ioss { } private: - const std::string m_modelName; + const std::string m_modelName; std::vector> m_observers; }; - class IOSS_EXPORT DynamicTopologyBroker { public: static DynamicTopologyBroker *broker(); - void register_model(const std::string& model_name); - void remove_model(const std::string& model_name); + void register_model(const std::string &model_name); + void remove_model(const std::string &model_name); void clear_models(); - std::shared_ptr get_notifier(const std::string& model_name) const; - std::vector> get_observers(const std::string& model_name) const; + std::shared_ptr get_notifier(const std::string &model_name) const; + std::vector> + get_observers(const std::string &model_name) const; - void register_observer(const std::string& model_name, std::shared_ptr observer); - void register_observer(const std::string& model_name, std::shared_ptr observer, Region& region); + void 
register_observer(const std::string &model_name, + std::shared_ptr observer); + void register_observer(const std::string &model_name, + std::shared_ptr observer, Region ®ion); - void reset_topology_modification(const std::string& model_name); - void set_topology_modification(const std::string& model_name, unsigned int type); + void reset_topology_modification(const std::string &model_name); + void set_topology_modification(const std::string &model_name, unsigned int type); private: DynamicTopologyBroker() {}; - DynamicTopologyBroker(DynamicTopologyBroker&); + DynamicTopologyBroker(DynamicTopologyBroker &); std::map> m_notifiers; }; - class IOSS_EXPORT DynamicTopologyFileControl { public: diff --git a/packages/seacas/libraries/ioss/src/Ioss_StructuredBlock.C b/packages/seacas/libraries/ioss/src/Ioss_StructuredBlock.C index 7c8891124a..de43f983ca 100644 --- a/packages/seacas/libraries/ioss/src/Ioss_StructuredBlock.C +++ b/packages/seacas/libraries/ioss/src/Ioss_StructuredBlock.C @@ -540,14 +540,18 @@ namespace Ioss { { auto lhzc = this->m_zoneConnectivity; auto rhzc = rhs.m_zoneConnectivity; - Ioss::sort(lhzc.begin(), lhzc.end(), [](const ZoneConnectivity &l, const ZoneConnectivity &r) { - return l.m_connectionName < r.m_connectionName;}); - Ioss::sort(rhzc.begin(), rhzc.end(), [](const ZoneConnectivity &l, const ZoneConnectivity &r) { - return l.m_connectionName < r.m_connectionName;}); + Ioss::sort(lhzc.begin(), lhzc.end(), + [](const ZoneConnectivity &l, const ZoneConnectivity &r) { + return l.m_connectionName < r.m_connectionName; + }); + Ioss::sort(rhzc.begin(), rhzc.end(), + [](const ZoneConnectivity &l, const ZoneConnectivity &r) { + return l.m_connectionName < r.m_connectionName; + }); if (!vec_equal(lhzc, rhzc)) { - fmt::print(Ioss::OUTPUT(), "StructuredBlock: Zone Connectivity mismatch (size {} vs {})\n", - this->m_zoneConnectivity.size(), rhs.m_zoneConnectivity.size()); - same = false; + fmt::print(Ioss::OUTPUT(), "StructuredBlock: Zone Connectivity 
mismatch (size {} vs {})\n", + this->m_zoneConnectivity.size(), rhs.m_zoneConnectivity.size()); + same = false; } } @@ -558,13 +562,17 @@ namespace Ioss { { auto lhbc = this->m_boundaryConditions; auto rhbc = rhs.m_boundaryConditions; - Ioss::sort(lhbc.begin(), lhbc.end(), [](const BoundaryCondition &l, const BoundaryCondition &r) { - return l.m_bcName < r.m_bcName;}); - Ioss::sort(rhbc.begin(), rhbc.end(), [](const BoundaryCondition &l, const BoundaryCondition &r) { - return l.m_bcName < r.m_bcName;}); + Ioss::sort(lhbc.begin(), lhbc.end(), + [](const BoundaryCondition &l, const BoundaryCondition &r) { + return l.m_bcName < r.m_bcName; + }); + Ioss::sort(rhbc.begin(), rhbc.end(), + [](const BoundaryCondition &l, const BoundaryCondition &r) { + return l.m_bcName < r.m_bcName; + }); if (!vec_equal(lhbc, rhbc)) { - fmt::print(Ioss::OUTPUT(), "StructuredBlock: Boundary Conditions mismatch\n"); - same = false; + fmt::print(Ioss::OUTPUT(), "StructuredBlock: Boundary Conditions mismatch\n"); + same = false; } } diff --git a/packages/seacas/libraries/ioss/src/adios/AdiosWrapper.C b/packages/seacas/libraries/ioss/src/adios/AdiosWrapper.C index 6efb3353df..aace3d30a6 100644 --- a/packages/seacas/libraries/ioss/src/adios/AdiosWrapper.C +++ b/packages/seacas/libraries/ioss/src/adios/AdiosWrapper.C @@ -13,13 +13,12 @@ namespace Ioad { AdiosWrapper::AdiosWrapper(Ioss_MPI_Comm comm, const std::string &filename, bool is_input, unsigned long rank, const Ioss::PropertyManager &properties) #if ADIOS2_USE_MPI - : adios2::ADIOS(comm), + : adios2::ADIOS(comm), #else - : adios2::ADIOS(), + : adios2::ADIOS(), #endif - adios2::IO(IOInit(properties, is_input)), - adios2::Engine(EngineInit(filename, is_input)), m_Rank(rank), m_Communicator(comm), - m_OpenStep(false) + adios2::IO(IOInit(properties, is_input)), adios2::Engine(EngineInit(filename, is_input)), + m_Rank(rank), m_Communicator(comm), m_OpenStep(false) { } diff --git a/packages/seacas/libraries/ioss/src/adios/Ioad_DatabaseIO.h 
b/packages/seacas/libraries/ioss/src/adios/Ioad_DatabaseIO.h index 54b0caef8b..b21c65d3b3 100644 --- a/packages/seacas/libraries/ioss/src/adios/Ioad_DatabaseIO.h +++ b/packages/seacas/libraries/ioss/src/adios/Ioad_DatabaseIO.h @@ -60,18 +60,19 @@ namespace Ioad { size_t data_size) const override; int64_t get_field_internal(const Ioss::ElementBlock *eb, const Ioss::Field &field, void *data, size_t data_size) const override; - int64_t get_field_internal(const Ioss::StructuredBlock */* sb */, const Ioss::Field &/*field*/, - void */*data*/, size_t /*data_size*/) const override + int64_t get_field_internal(const Ioss::StructuredBlock * /* sb */, + const Ioss::Field & /*field*/, void * /*data*/, + size_t /*data_size*/) const override { return -1; } - int64_t get_field_internal(const Ioss::Assembly */*sb*/, const Ioss::Field &/*field*/, void */*data*/, - size_t /*data_size*/) const override + int64_t get_field_internal(const Ioss::Assembly * /*sb*/, const Ioss::Field & /*field*/, + void * /*data*/, size_t /*data_size*/) const override { return -1; } - int64_t get_field_internal(const Ioss::Blob */*sb*/, const Ioss::Field &/*field*/, void */*data*/, - size_t /*data_size*/) const override + int64_t get_field_internal(const Ioss::Blob * /*sb*/, const Ioss::Field & /*field*/, + void * /*data*/, size_t /*data_size*/) const override { return -1; } @@ -120,18 +121,18 @@ namespace Ioad { size_t data_size) const override; int64_t put_field_internal(const Ioss::CommSet *cs, const Ioss::Field &field, void *data, size_t data_size) const override; - int64_t put_field_internal(const Ioss::StructuredBlock */*sb*/, const Ioss::Field &/*field*/, - void */*data*/, size_t /*data_size*/) const override + int64_t put_field_internal(const Ioss::StructuredBlock * /*sb*/, const Ioss::Field & /*field*/, + void * /*data*/, size_t /*data_size*/) const override { return -1; } - int64_t put_field_internal(const Ioss::Assembly */*sb*/, const Ioss::Field &/*field*/, void */*data*/, - size_t 
/*data_size*/) const override + int64_t put_field_internal(const Ioss::Assembly * /*sb*/, const Ioss::Field & /*field*/, + void * /*data*/, size_t /*data_size*/) const override { return -1; } - int64_t put_field_internal(const Ioss::Blob */*sb*/, const Ioss::Field &/*field*/, void */*data*/, - size_t /*data_size*/) const override + int64_t put_field_internal(const Ioss::Blob * /*sb*/, const Ioss::Field & /*field*/, + void * /*data*/, size_t /*data_size*/) const override { return -1; } diff --git a/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_CatalystLogging.C b/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_CatalystLogging.C index a426ffe4c8..f2dd02216f 100644 --- a/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_CatalystLogging.C +++ b/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_CatalystLogging.C @@ -5,10 +5,10 @@ // See packages/seacas/LICENSE for details #include +#include #include #include #include -#include namespace Iocatalyst { diff --git a/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_DatabaseIO.h b/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_DatabaseIO.h index 743cebe6fa..d28f6a7d80 100644 --- a/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_DatabaseIO.h +++ b/packages/seacas/libraries/ioss/src/catalyst/Iocatalyst_DatabaseIO.h @@ -14,8 +14,8 @@ #include "Ioss_Region.h" // for Region, SideSetContainer, etc #include "Ioss_SideSet.h" // for SideBlockContainer, SideSet -#include "Ioss_Field.h" // for Field, etc #include "Iocatalyst_CatalystManager.h" +#include "Ioss_Field.h" // for Field, etc #include // for std::unique_ptr @@ -135,8 +135,8 @@ namespace Iocatalyst { size_t data_size) const override; int64_t get_field_internal(const Ioss::Assembly *as, const Ioss::Field &field, void *data, size_t data_size) const override; - int64_t get_field_internal(const Ioss::Blob *bl, const Ioss::Field &field, - void *data, size_t data_size) const override; + int64_t get_field_internal(const Ioss::Blob *bl, 
const Ioss::Field &field, void *data, + size_t data_size) const override; int64_t get_field_internal(const Ioss::StructuredBlock *sb, const Ioss::Field &field, void *data, size_t data_size) const override; @@ -144,30 +144,30 @@ namespace Iocatalyst { size_t *data_size) const override; int64_t get_zc_field_internal(const Ioss::NodeBlock *nb, const Ioss::Field &field, void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::EdgeBlock *eb, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::FaceBlock *fb, const Ioss::Field &field, - void **data, size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::EdgeBlock *eb, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::FaceBlock *fb, const Ioss::Field &field, void **data, + size_t *data_size) const override; int64_t get_zc_field_internal(const Ioss::ElementBlock *eb, const Ioss::Field &field, void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::SideBlock *sb, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::NodeSet *ns, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::EdgeSet *es, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::FaceSet *fs, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::ElementSet *es, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::SideSet *ss, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::CommSet *cs, const Ioss::Field &field, - void **data, size_t 
*data_size) const override; - int64_t get_zc_field_internal(const Ioss::Assembly *as, const Ioss::Field &field, - void **data, size_t *data_size) const override; - int64_t get_zc_field_internal(const Ioss::Blob *bl, const Ioss::Field &field, - void **data, size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::SideBlock *sb, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::NodeSet *ns, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::EdgeSet *es, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::FaceSet *fs, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::ElementSet *es, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::SideSet *ss, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::CommSet *cs, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::Assembly *as, const Ioss::Field &field, void **data, + size_t *data_size) const override; + int64_t get_zc_field_internal(const Ioss::Blob *bl, const Ioss::Field &field, void **data, + size_t *data_size) const override; int64_t get_zc_field_internal(const Ioss::StructuredBlock *sb, const Ioss::Field &field, void **data, size_t *data_size) const override; @@ -197,8 +197,8 @@ namespace Iocatalyst { size_t data_size) const override; int64_t put_field_internal(const Ioss::Assembly *as, const Ioss::Field &field, void *data, size_t data_size) const override; - int64_t put_field_internal(const Ioss::Blob *bl, const Ioss::Field &field, - void *data, size_t data_size) const override; + int64_t put_field_internal(const 
Ioss::Blob *bl, const Ioss::Field &field, void *data, + size_t data_size) const override; int64_t put_field_internal(const Ioss::StructuredBlock *sb, const Ioss::Field &field, void *data, size_t data_size) const override; diff --git a/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.C b/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.C index 84bbd54c37..92ce1d1a6b 100644 --- a/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.C +++ b/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.C @@ -1938,7 +1938,7 @@ Iocgns::Utils::resolve_processor_shared_nodes(Ioss::Region ®ion, int my_proce std::vector>> shared_nodes(blocks.size() + 1); for (auto &owner_block : blocks) { - int owner_zone = owner_block->get_property("zone").get_int(); + int owner_zone = owner_block->get_property("zone").get_int(); for (const auto &zgc : owner_block->m_zoneConnectivity) { assert(zgc.m_donorProcessor >= 0); assert(zgc.m_ownerProcessor >= 0); @@ -1950,10 +1950,10 @@ Iocgns::Utils::resolve_processor_shared_nodes(Ioss::Region ®ion, int my_proce // don't store or access any "bulk" data on it. 
auto donor_block = region.get_structured_block(zgc.m_donorName); assert(donor_block != nullptr); - int donor_zone = donor_block->get_property("zone").get_int(); - std::vector i_range = zgc.get_range(1); - std::vector j_range = zgc.get_range(2); - std::vector k_range = zgc.get_range(3); + int donor_zone = donor_block->get_property("zone").get_int(); + std::vector i_range = zgc.get_range(1); + std::vector j_range = zgc.get_range(2); + std::vector k_range = zgc.get_range(3); for (auto &k : k_range) { for (auto &j : j_range) { for (auto &i : i_range) { diff --git a/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.h b/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.h index 874f0dd79f..e8438dc1ec 100644 --- a/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.h +++ b/packages/seacas/libraries/ioss/src/cgns/Iocgns_Utils.h @@ -301,7 +301,7 @@ namespace Iocgns { static void write_state_meta_data(int file_ptr, const Ioss::Region ®ion, bool is_parallel_io); static size_t common_write_metadata(int file_ptr, const Ioss::Region ®ion, - std::vector &zone_offset, bool is_parallel); + std::vector &zone_offset, bool is_parallel); static size_t resolve_nodes(Ioss::Region ®ion, int my_processor, bool is_parallel); IOSS_NODISCARD static std::vector>> resolve_processor_shared_nodes(Ioss::Region ®ion, int my_processor); diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.C b/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.C index 5cb2caec64..d0a36c9fe0 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.C +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.C @@ -4,6 +4,7 @@ // // See packages/seacas/LICENSE for details +#include #include #include #include @@ -18,7 +19,6 @@ #include #include #include -#include #include "Ioex_Utils.h" #include "Ioss_Assembly.h" @@ -88,7 +88,7 @@ namespace { template void write_attribute_names(int exoid, ex_entity_type type, const std::vector &entities); - void 
query_groups(int exoid, Ioss::NameList& names, bool return_full_names); + void query_groups(int exoid, Ioss::NameList &names, bool return_full_names); class AssemblyTreeFilter { @@ -508,23 +508,23 @@ namespace Ioex { bool success = false; Ioss::SerializeIO serializeIO_(this); - int exoid = get_file_pointer(); + int exoid = get_file_pointer(); - int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); - std::vector group_name(group_name_length+1, '\0'); + int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); + std::vector group_name(group_name_length + 1, '\0'); // Get name of this group... int idum; float rdum; - int ierr = ex_inquire(exoid, EX_INQ_GROUP_NAME, &idum, &rdum, group_name.data()); + int ierr = ex_inquire(exoid, EX_INQ_GROUP_NAME, &idum, &rdum, group_name.data()); if (ierr < 0) { std::ostringstream errmsg; - fmt::print(errmsg, "ERROR: Could not open root group of group named '{}' in file '{}'.\n", m_groupName, - get_filename()); + fmt::print(errmsg, "ERROR: Could not open root group of group named '{}' in file '{}'.\n", + m_groupName, get_filename()); IOSS_ERROR(errmsg); } - m_groupName = std::string(group_name.data()); + m_groupName = std::string(group_name.data()); m_exodusFilePtr = ex_inquire_int(exoid, EX_INQ_GROUP_ROOT); if (m_exodusFilePtr < 0) { @@ -543,7 +543,7 @@ namespace Ioex { bool success = false; Ioss::SerializeIO serializeIO_(this); - int exoid = get_file_pointer(); + int exoid = get_file_pointer(); m_groupName = group_name; ex_get_group_id(exoid, m_groupName.c_str(), &m_exodusFilePtr); @@ -564,7 +564,7 @@ namespace Ioex { if (!is_input()) { // Get existing file pointer... 
Ioss::SerializeIO serializeIO_(this); - int exoid = get_file_pointer(); + int exoid = get_file_pointer(); // Check name for '/' which is not allowed since it is the // separator character in a full group path @@ -3221,13 +3221,12 @@ namespace Ioex { write_coordinate_frames(get_file_pointer(), get_region()->get_coordinate_frames()); } - Ioss::NameList BaseDatabaseIO::groups_describe_nl(bool return_full_names) { Ioss::SerializeIO serializeIO_(this); Ioss::NameList names; - int group_root = ex_inquire_int(get_file_pointer(), EX_INQ_GROUP_ROOT); + int group_root = ex_inquire_int(get_file_pointer(), EX_INQ_GROUP_ROOT); query_groups(group_root, names, return_full_names); return names; @@ -3259,21 +3258,24 @@ namespace Ioex { int BaseDatabaseIO::num_child_group_nl() { Ioss::SerializeIO serializeIO_(this); - int exoid = get_file_pointer(); - exoid = ex_inquire_int(exoid, EX_INQ_GROUP_ROOT); - int num_children = ex_inquire_int(exoid, EX_INQ_NUM_CHILD_GROUPS); + int exoid = get_file_pointer(); + exoid = ex_inquire_int(exoid, EX_INQ_GROUP_ROOT); + int num_children = ex_inquire_int(exoid, EX_INQ_NUM_CHILD_GROUPS); return num_children; } bool BaseDatabaseIO::open_child_group_nl(int index) { - if(index < 0) return false; + if (index < 0) + return false; Ioss::SerializeIO serializeIO_(this); - int exoid = get_file_pointer(); - int num_children = ex_inquire_int(exoid, EX_INQ_NUM_CHILD_GROUPS); - if(num_children == 0) return true; + int exoid = get_file_pointer(); + int num_children = ex_inquire_int(exoid, EX_INQ_NUM_CHILD_GROUPS); + if (num_children == 0) + return true; - if(index >= num_children) return false; + if (index >= num_children) + return false; std::vector children(num_children); @@ -3284,8 +3286,8 @@ namespace Ioex { exoid = children[index]; - int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); - std::vector group_name(group_name_length+1, '\0'); + int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); + std::vector 
group_name(group_name_length + 1, '\0'); // Get name of this group... int idum; @@ -3296,7 +3298,7 @@ namespace Ioex { } m_exodusFilePtr = exoid; - m_groupName = std::string(group_name.data()); + m_groupName = std::string(group_name.data()); return true; } @@ -3603,13 +3605,13 @@ namespace { #endif } - void query_groups(int exoid, Ioss::NameList& names, bool return_full_names) + void query_groups(int exoid, Ioss::NameList &names, bool return_full_names) { int idum; float rdum; - int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); - std::vector group_name(group_name_length+1, '\0'); + int group_name_length = ex_inquire_int(exoid, EX_INQ_GROUP_NAME_LEN); + std::vector group_name(group_name_length + 1, '\0'); // Get name of this group... int ierr = ex_inquire(exoid, EX_INQ_GROUP_NAME, &idum, &rdum, group_name.data()); @@ -3617,14 +3619,15 @@ namespace { Ioex::exodus_error(exoid, __LINE__, __func__, __FILE__); } - if(return_full_names) { + if (return_full_names) { std::fill(group_name.begin(), group_name.end(), '\0'); ierr = ex_inquire(exoid, EX_INQ_FULL_GROUP_NAME, &idum, &rdum, group_name.data()); if (ierr < 0) { Ioex::exodus_error(exoid, __LINE__, __func__, __FILE__); } names.push_back(std::string(group_name.data())); - } else { + } + else { names.push_back(std::string(group_name.data())); } diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.h b/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.h index b6f18fc756..3f69655746 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.h +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_BaseDatabaseIO.h @@ -101,11 +101,11 @@ namespace Ioex { void release_memory_nl() override; - int num_child_group_nl() override; - bool open_child_group_nl(int index) override; - bool open_root_group_nl() override; - bool open_group_nl(const std::string &group_name) override; - bool create_subgroup_nl(const std::string &group_name) override; + int 
num_child_group_nl() override; + bool open_child_group_nl(int index) override; + bool open_root_group_nl() override; + bool open_group_nl(const std::string &group_name) override; + bool create_subgroup_nl(const std::string &group_name) override; Ioss::NameList groups_describe_nl(bool return_full_names) override; bool begin_nl(Ioss::State state) override; diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_DatabaseIO.C b/packages/seacas/libraries/ioss/src/exodus/Ioex_DatabaseIO.C index d84be09252..66658f7374 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_DatabaseIO.C +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_DatabaseIO.C @@ -643,8 +643,8 @@ namespace Ioex { void DatabaseIO::get_step_times_nl() { - bool exists = false; - double last_time = DBL_MAX; + bool exists = false; + double last_time = DBL_MAX; std::vector tsteps(0); if (dbUsage == Ioss::WRITE_HISTORY) { @@ -685,60 +685,62 @@ namespace Ioex { Ioss::SerializeIO serializeIO_(this); m_timestepCount = ex_inquire_int(get_file_pointer(), EX_INQ_TIME); } - // Need to sync timestep count across ranks if parallel... - if (isParallel) { - auto min_timestep_count = util().global_minmax(m_timestepCount, Ioss::ParallelUtils::DO_MIN); - if (min_timestep_count == 0) { - auto max_timestep_count = util().global_minmax(m_timestepCount, Ioss::ParallelUtils::DO_MAX); - if (max_timestep_count != 0) { - if (myProcessor == 0) { - // NOTE: Don't want to warn on all processors if the - // timestep count is zero on some, but not all ranks. - fmt::print(Ioss::WarnOut(), - "At least one database has no timesteps. No times will be read on ANY" - " database for consistency.\n"); - } - } - } - m_timestepCount = min_timestep_count; - } - - if (m_timestepCount <= 0) { - return; + // Need to sync timestep count across ranks if parallel... 
+ if (isParallel) { + auto min_timestep_count = + util().global_minmax(m_timestepCount, Ioss::ParallelUtils::DO_MIN); + if (min_timestep_count == 0) { + auto max_timestep_count = + util().global_minmax(m_timestepCount, Ioss::ParallelUtils::DO_MAX); + if (max_timestep_count != 0) { + if (myProcessor == 0) { + // NOTE: Don't want to warn on all processors if the + // timestep count is zero on some, but not all ranks. + fmt::print(Ioss::WarnOut(), + "At least one database has no timesteps. No times will be read on ANY" + " database for consistency.\n"); + } + } } + m_timestepCount = min_timestep_count; + } - // For an exodus file, timesteps are global and are stored in the region. - // Read the timesteps and add to the region - tsteps.resize(m_timestepCount, -std::numeric_limits::max()); - - // The `EXODUS_CALL_GET_ALL_TIMES=NO` is typically only used in - // isSerialParallel mode and the client is responsible for - // making sure that the step times are handled correctly. All - // databases will know about the number of timesteps, but if - // this is skipped, then the times will all be zero. Use case - // is that in isSerialParallel, each call to - // `ex_get_all_times` for all files is performed sequentially, - // so if you have hundreds to thousands of files, the time for - // the call is additive and since timesteps are record - // variables in netCDF, accessing the data for all timesteps - // involves lseeks throughout the file. - bool call_ex_get_all_times = true; - Ioss::Utils::check_set_bool_property(properties, "EXODUS_CALL_GET_ALL_TIMES", - call_ex_get_all_times); - if (call_ex_get_all_times) { - Ioss::SerializeIO serializeIO_(this); - int error = ex_get_all_times(get_file_pointer(), Data(tsteps)); - if (error < 0) { - Ioex::exodus_error(get_file_pointer(), __LINE__, __func__, __FILE__); - } + if (m_timestepCount <= 0) { + return; + } + + // For an exodus file, timesteps are global and are stored in the region. 
+ // Read the timesteps and add to the region + tsteps.resize(m_timestepCount, -std::numeric_limits::max()); + + // The `EXODUS_CALL_GET_ALL_TIMES=NO` is typically only used in + // isSerialParallel mode and the client is responsible for + // making sure that the step times are handled correctly. All + // databases will know about the number of timesteps, but if + // this is skipped, then the times will all be zero. Use case + // is that in isSerialParallel, each call to + // `ex_get_all_times` for all files is performed sequentially, + // so if you have hundreds to thousands of files, the time for + // the call is additive and since timesteps are record + // variables in netCDF, accessing the data for all timesteps + // involves lseeks throughout the file. + bool call_ex_get_all_times = true; + Ioss::Utils::check_set_bool_property(properties, "EXODUS_CALL_GET_ALL_TIMES", + call_ex_get_all_times); + if (call_ex_get_all_times) { + Ioss::SerializeIO serializeIO_(this); + int error = ex_get_all_times(get_file_pointer(), Data(tsteps)); + if (error < 0) { + Ioex::exodus_error(get_file_pointer(), __LINE__, __func__, __FILE__); } + } - // See if the "last_written_time" attribute exists and if it - // does, check that it matches the largest time in 'tsteps'. - { - Ioss::SerializeIO serializeIO_(this); - exists = Ioex::read_last_time_attribute(get_file_pointer(), &last_time); - } + // See if the "last_written_time" attribute exists and if it + // does, check that it matches the largest time in 'tsteps'. 
+ { + Ioss::SerializeIO serializeIO_(this); + exists = Ioex::read_last_time_attribute(get_file_pointer(), &last_time); + } if (exists && isParallel) { // Assume that if it exists on 1 processor, it exists on diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_DecompositionData.C b/packages/seacas/libraries/ioss/src/exodus/Ioex_DecompositionData.C index ee3fc6c16d..f2d09484a5 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_DecompositionData.C +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_DecompositionData.C @@ -291,14 +291,15 @@ namespace Ioex { "exodus", filename, Ioss::READ_RESTART, Ioss::ParallelUtils::comm_self(), properties); Ioss::Region region(dbi, "line_decomp_region"); - Ioss::DecompUtils::line_decompose( - region, m_processorCount, m_decomposition.m_method, m_decomposition.m_decompExtra, - element_to_proc_global, INT(0)); - - if (m_decomposition.m_showHWM || m_decomposition.m_showProgress) { - auto work_per_rank = Ioss::DecompUtils::get_work_per_rank(element_to_proc_global, m_processorCount); - Ioss::DecompUtils::output_decomposition_statistics(work_per_rank); - } + Ioss::DecompUtils::line_decompose(region, m_processorCount, m_decomposition.m_method, + m_decomposition.m_decompExtra, element_to_proc_global, + INT(0)); + + if (m_decomposition.m_showHWM || m_decomposition.m_showProgress) { + auto work_per_rank = + Ioss::DecompUtils::get_work_per_rank(element_to_proc_global, m_processorCount); + Ioss::DecompUtils::output_decomposition_statistics(work_per_rank); + } } // Now broadcast the parts of the `element_to_proc_global` // vector to the owning ranks in the initial linear @@ -328,7 +329,7 @@ namespace Ioex { } if (m_decomposition.m_lineDecomp) { - // Do not combine into previous if block since we want to release memory for + // Do not combine into previous if block since we want to release memory for // the local vectors in that block before allocating the large adjacency vector. 
generate_adjacency_list(filePtr, m_decomposition); } diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.C b/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.C index 8ce9dd59b7..3467a1c25a 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.C +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.C @@ -920,7 +920,7 @@ namespace Ioex { std::vector tsteps(0); { - timestep_count = ex_inquire_int(get_file_pointer(), EX_INQ_TIME); + timestep_count = ex_inquire_int(get_file_pointer(), EX_INQ_TIME); m_timestepCount = timestep_count; if (timestep_count <= 0) { return; @@ -4809,7 +4809,7 @@ namespace Ioex { void ParallelDatabaseIO::output_processor_id_map(Ioss::Region *region, INT /*dummy*/) { std::vector proc_id(elementCount, myProcessor); - const auto &blocks = region->get_element_blocks(); + const auto &blocks = region->get_element_blocks(); for (const auto &block : blocks) { put_field_internal(block, block->get_field("proc_id"), Data(proc_id), -1); } @@ -4870,10 +4870,10 @@ namespace Ioex { add_processor_id_map(region); output_other_metadata(); if (int_byte_size_api() == 8) { - output_processor_id_map(region, int64_t(0)); + output_processor_id_map(region, int64_t(0)); } else { - output_processor_id_map(region, int(0)); + output_processor_id_map(region, int(0)); } } } diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.h b/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.h index 2ab679de54..4bf66f5895 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.h +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_ParallelDatabaseIO.h @@ -197,8 +197,7 @@ namespace Ioex { void write_entity_transient_field(const Ioss::Field &field, const Ioss::GroupingEntity *ge, int64_t count, void *variables) const; void write_meta_data(Ioss::IfDatabaseExistsBehavior behavior) override; - template - void 
output_processor_id_map(Ioss::Region *region, INT /*dummy*/); + template void output_processor_id_map(Ioss::Region *region, INT /*dummy*/); // Read related metadata and store it in the region... void read_region(); diff --git a/packages/seacas/libraries/ioss/src/exodus/Ioex_Utils.C b/packages/seacas/libraries/ioss/src/exodus/Ioex_Utils.C index 12ac92d9e6..8cf3cf2b3f 100644 --- a/packages/seacas/libraries/ioss/src/exodus/Ioex_Utils.C +++ b/packages/seacas/libraries/ioss/src/exodus/Ioex_Utils.C @@ -16,8 +16,8 @@ #include #include #include -#include #include +#include #include "Ioss_BasisVariableType.h" #include "Ioss_CoordinateFrame.h" diff --git a/packages/seacas/libraries/ioss/src/text_mesh/Iotm_TextMeshAdjacencyGraph.h b/packages/seacas/libraries/ioss/src/text_mesh/Iotm_TextMeshAdjacencyGraph.h index 253d9f47ac..093cea5f08 100644 --- a/packages/seacas/libraries/ioss/src/text_mesh/Iotm_TextMeshAdjacencyGraph.h +++ b/packages/seacas/libraries/ioss/src/text_mesh/Iotm_TextMeshAdjacencyGraph.h @@ -94,7 +94,7 @@ namespace Iotm { struct FaceConnection { FaceConnection() - : thisSide(INVALID_SIDE), thatElement(INVALID_INDEX), thatSide(INVALID_SIDE){}; + : thisSide(INVALID_SIDE), thatElement(INVALID_INDEX), thatSide(INVALID_SIDE) {}; FaceConnection(int thisSide_, IndexType otherElement_, int otherSide_) : thisSide(thisSide_), thatElement(otherElement_), thatSide(otherSide_) diff --git a/packages/seacas/libraries/ioss/src/unit_tests/UnitTestDynamicTopology.C b/packages/seacas/libraries/ioss/src/unit_tests/UnitTestDynamicTopology.C index 7d18b1e14b..6177c0c9a4 100644 --- a/packages/seacas/libraries/ioss/src/unit_tests/UnitTestDynamicTopology.C +++ b/packages/seacas/libraries/ioss/src/unit_tests/UnitTestDynamicTopology.C @@ -21,10 +21,10 @@ #include // for unlink #include "Ionit_Initializer.h" -#include "Ioss_DatabaseIO.h" // for DatabaseIO #include "Ioss_DBUsage.h" +#include "Ioss_DatabaseIO.h" // for DatabaseIO #include "Ioss_ElementBlock.h" -#include "Ioss_Field.h" // for 
Field, etc +#include "Ioss_Field.h" // for Field, etc #include "Ioss_FileInfo.h" #include "Ioss_IOFactory.h" #include "Ioss_NodeBlock.h" @@ -36,1076 +36,1063 @@ #include "exodus/Ioex_DatabaseIO.h" namespace { -std::string get_many_block_mesh_desc(unsigned numBlocks) -{ - std::ostringstream oss; - std::vector elementIds(numBlocks); - std::iota(elementIds.begin(), elementIds.end(), 1); - - unsigned proc = 0; - for (unsigned i = 0; i < numBlocks; ++i) { - unsigned elemId = elementIds[i]; - unsigned firstNodeId = i * 4 + 1; - oss << proc << "," << elemId << ",HEX_8,"; - for (unsigned node = firstNodeId; node < firstNodeId + 8; ++node) { - oss << node << ","; - } - unsigned blockId = i + 1; - oss << "block_" << blockId; + std::string get_many_block_mesh_desc(unsigned numBlocks) + { + std::ostringstream oss; + std::vector elementIds(numBlocks); + std::iota(elementIds.begin(), elementIds.end(), 1); + + unsigned proc = 0; + for (unsigned i = 0; i < numBlocks; ++i) { + unsigned elemId = elementIds[i]; + unsigned firstNodeId = i * 4 + 1; + oss << proc << "," << elemId << ",HEX_8,"; + for (unsigned node = firstNodeId; node < firstNodeId + 8; ++node) { + oss << node << ","; + } + unsigned blockId = i + 1; + oss << "block_" << blockId; + + if (i < numBlocks - 1) { + oss << "\n"; + } - if (i < numBlocks - 1) { - oss << "\n"; + proc++; } - proc++; - } + oss << "|coordinates:"; - oss << "|coordinates:"; + std::vector planeCoords = {0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0}; - std::vector planeCoords = {0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0}; + for (double coord : planeCoords) { + oss << coord << ","; + } - for (double coord : planeCoords) { - oss << coord << ","; - } + for (unsigned i = 1; i <= numBlocks; ++i) { + for (unsigned point = 0; point < 4; ++point) { + planeCoords[3 * point + 2] += 1; + } - for (unsigned i = 1; i <= numBlocks; ++i) { - for (unsigned point = 0; point < 4; ++point) { - planeCoords[3 * point + 2] += 1; + for (double coord : planeCoords) { + oss << coord << ","; + } 
} - for (double coord : planeCoords) { - oss << coord << ","; - } + return oss.str(); } - return oss.str(); -} + void define_model(const Ioss::Region &i_region, Ioss::Region &o_region) + { + Ioss::DatabaseIO *o_database = o_region.get_database(); -void define_model(const Ioss::Region &i_region, Ioss::Region &o_region) -{ - Ioss::DatabaseIO *o_database = o_region.get_database(); + o_region.begin_mode(Ioss::STATE_DEFINE_MODEL); - o_region.begin_mode(Ioss::STATE_DEFINE_MODEL); + auto &nodeblocks = o_region.get_node_blocks(); - auto& nodeblocks = o_region.get_node_blocks(); + Ioss::NodeBlock *i_nb = i_region.get_node_blocks()[0]; + int64_t spatial_dim = 3; + int64_t num_nodes = i_nb->entity_count(); + Ioss::NodeBlock *o_nb = new Ioss::NodeBlock(o_database, "nodeblock_1", num_nodes, spatial_dim); + o_region.add(o_nb); - Ioss::NodeBlock *i_nb = i_region.get_node_blocks()[0]; - int64_t spatial_dim = 3; - int64_t num_nodes = i_nb->entity_count(); - Ioss::NodeBlock *o_nb = new Ioss::NodeBlock(o_database, "nodeblock_1", num_nodes, spatial_dim); - o_region.add(o_nb); + for (Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { + Ioss::ElementBlock *o_eb = new Ioss::ElementBlock( + o_database, i_eb->name(), i_eb->topology()->name(), i_eb->entity_count()); + o_eb->property_add(i_eb->get_property("id")); + o_region.add(o_eb); + } - for (Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { - Ioss::ElementBlock *o_eb = new Ioss::ElementBlock( - o_database, i_eb->name(), i_eb->topology()->name(), i_eb->entity_count()); - o_eb->property_add(i_eb->get_property("id")); - o_region.add(o_eb); + o_region.end_mode(Ioss::STATE_DEFINE_MODEL); } - o_region.end_mode(Ioss::STATE_DEFINE_MODEL); -} + void write_model(const Ioss::Region &i_region, Ioss::Region &o_region) + { + Ioss::NodeBlock *i_nb = i_region.get_node_blocks()[0]; + Ioss::NodeBlock *o_nb = o_region.get_node_blocks()[0]; -void write_model(const Ioss::Region &i_region, Ioss::Region &o_region) -{ - Ioss::NodeBlock *i_nb 
= i_region.get_node_blocks()[0]; - Ioss::NodeBlock *o_nb = o_region.get_node_blocks()[0]; + o_region.begin_mode(Ioss::STATE_MODEL); + std::vector coordinates; + std::vector node_ids; + i_nb->get_field_data("ids", node_ids); + i_nb->get_field_data("mesh_model_coordinates", coordinates); - o_region.begin_mode(Ioss::STATE_MODEL); - std::vector coordinates; - std::vector node_ids; - i_nb->get_field_data("ids", node_ids); - i_nb->get_field_data("mesh_model_coordinates", coordinates); + o_nb->put_field_data("ids", node_ids); + o_nb->put_field_data("mesh_model_coordinates", coordinates); - o_nb->put_field_data("ids", node_ids); - o_nb->put_field_data("mesh_model_coordinates", coordinates); + for (Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { + Ioss::ElementBlock *o_eb = o_region.get_element_block(i_eb->name()); + std::vector elem_ids; + std::vector connectivity; - for (Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { - Ioss::ElementBlock *o_eb = o_region.get_element_block(i_eb->name()); - std::vector elem_ids; - std::vector connectivity; + i_eb->get_field_data("ids", elem_ids); + i_eb->get_field_data("connectivity", connectivity); - i_eb->get_field_data("ids", elem_ids); - i_eb->get_field_data("connectivity", connectivity); + o_eb->put_field_data("ids", elem_ids); + o_eb->put_field_data("connectivity", connectivity); + } - o_eb->put_field_data("ids", elem_ids); - o_eb->put_field_data("connectivity", connectivity); + o_region.end_mode(Ioss::STATE_MODEL); } - o_region.end_mode(Ioss::STATE_MODEL); -} - -void define_transient(const Ioss::Region &i_region, Ioss::Region &o_region, - const std::string &elemFieldName) -{ - o_region.begin_mode(Ioss::STATE_DEFINE_TRANSIENT); + void define_transient(const Ioss::Region &i_region, Ioss::Region &o_region, + const std::string &elemFieldName) + { + o_region.begin_mode(Ioss::STATE_DEFINE_TRANSIENT); - for (Ioss::ElementBlock *o_eb : o_region.get_element_blocks()) { - size_t num_elem = 
o_eb->get_property("entity_count").get_int(); - std::string storage = "scalar"; + for (Ioss::ElementBlock *o_eb : o_region.get_element_blocks()) { + size_t num_elem = o_eb->get_property("entity_count").get_int(); + std::string storage = "scalar"; - Ioss::Field field(elemFieldName, Ioss::Field::REAL, storage, 1, Ioss::Field::Field::TRANSIENT, - num_elem); - o_eb->field_add(field); + Ioss::Field field(elemFieldName, Ioss::Field::REAL, storage, 1, Ioss::Field::Field::TRANSIENT, + num_elem); + o_eb->field_add(field); + } + o_region.end_mode(Ioss::STATE_DEFINE_TRANSIENT); } - o_region.end_mode(Ioss::STATE_DEFINE_TRANSIENT); -} -int write_transient(Ioss::Region &o_region, const std::string &elemFieldName, const double time) -{ - o_region.begin_mode(Ioss::STATE_TRANSIENT); - int step = o_region.add_state(time); - o_region.begin_state(step); + int write_transient(Ioss::Region &o_region, const std::string &elemFieldName, const double time) + { + o_region.begin_mode(Ioss::STATE_TRANSIENT); + int step = o_region.add_state(time); + o_region.begin_state(step); + + for (Ioss::ElementBlock *o_eb : o_region.get_element_blocks()) { + size_t num_elem = o_eb->get_property("entity_count").get_int(); - for (Ioss::ElementBlock *o_eb : o_region.get_element_blocks()) { - size_t num_elem = o_eb->get_property("entity_count").get_int(); + std::vector field_data(num_elem); + std::vector elem_ids; - std::vector field_data(num_elem); - std::vector elem_ids; + o_eb->get_field_data("ids", elem_ids); + for (size_t i = 0; i < elem_ids.size(); i++) { + field_data[i] = (double)elem_ids[i] + 100 * time; + } - o_eb->get_field_data("ids", elem_ids); - for (size_t i = 0; i < elem_ids.size(); i++) { - field_data[i] = (double)elem_ids[i] + 100*time; + o_eb->put_field_data(elemFieldName, field_data); } - o_eb->put_field_data(elemFieldName, field_data); + o_region.end_state(step); + o_region.end_mode(Ioss::STATE_TRANSIENT); + + return step; } - o_region.end_state(step); - 
o_region.end_mode(Ioss::STATE_TRANSIENT); + class Observer : public Ioss::DynamicTopologyObserver + { + public: + Observer(Ioss::Region &inputRegion_, const std::string &elemFieldName_, + const Ioss::FileControlOption fileControlOption_) + : Ioss::DynamicTopologyObserver(nullptr), inputRegion(inputRegion_), + elemFieldName(elemFieldName_), fileControlOption(fileControlOption_) + { + } - return step; -} + virtual ~Observer() {} -class Observer : public Ioss::DynamicTopologyObserver -{ -public: - Observer(Ioss::Region& inputRegion_, - const std::string &elemFieldName_, - const Ioss::FileControlOption fileControlOption_) - : Ioss::DynamicTopologyObserver(nullptr) - , inputRegion(inputRegion_) - , elemFieldName(elemFieldName_) - , fileControlOption(fileControlOption_) - {} + void define_model() override { ::define_model(inputRegion, *get_region()); } - virtual ~Observer() {} + void write_model() override { ::write_model(inputRegion, *get_region()); } - void define_model() override - { - ::define_model(inputRegion, *get_region()); - } + void define_transient() override + { + ::define_transient(inputRegion, *get_region(), elemFieldName); + } - void write_model() override - { - ::write_model(inputRegion, *get_region()); - } + Ioss::FileControlOption get_control_option() const override { return fileControlOption; } - void define_transient() override - { - ::define_transient(inputRegion, *get_region(), elemFieldName); - } + private: + Observer(); + + Ioss::Region &inputRegion; + const std::string elemFieldName; + Ioss::FileControlOption fileControlOption; + }; - Ioss::FileControlOption get_control_option() const override + struct OutputParams { - return fileControlOption; - } + OutputParams(const std::string &outFile_) : outFile(outFile_) {} -private: - Observer(); + OutputParams(const std::string &outFile_, const std::string &elemFieldName_) + : outFile(outFile_), elemFieldName(elemFieldName_) + { + } - Ioss::Region& inputRegion; - const std::string elemFieldName; - 
Ioss::FileControlOption fileControlOption; -}; + void set_data(const std::vector &output_times_, const std::vector &output_steps_, + const std::vector &modification_steps_) + { + ASSERT_EQ(output_times_.size(), output_steps_.size()); + ASSERT_EQ(output_times_.size(), modification_steps_.size()); -struct OutputParams { - OutputParams(const std::string& outFile_) - : outFile(outFile_) {} + size_t numSteps = output_times_.size(); + for (auto i = 1; i < numSteps; i++) { + // Monotone increasing + ASSERT_TRUE(output_times_[i] > output_times_[i - 1]); + } - OutputParams(const std::string& outFile_, const std::string& elemFieldName_) - : outFile(outFile_) - , elemFieldName(elemFieldName_) {} + output_times = output_times_; + output_steps = output_steps_; + modification_steps = modification_steps_; + } - void set_data(const std::vector& output_times_, - const std::vector& output_steps_, - const std::vector& modification_steps_) - { - ASSERT_EQ(output_times_.size(), output_steps_.size()); - ASSERT_EQ(output_times_.size(), modification_steps_.size()); + void set_data(const std::vector &output_steps_, + const std::vector &modification_steps_) + { + ASSERT_EQ(output_steps_.size(), modification_steps_.size()); - size_t numSteps = output_times_.size(); - for(auto i=1; i output_times_[i-1]); + size_t numSteps = output_steps_.size(); + for (size_t i = 0; i < numSteps; i++) { + output_times.push_back((double)i); + } + output_steps = output_steps_; + modification_steps = modification_steps_; } - output_times = output_times_; - output_steps = output_steps_; - modification_steps = modification_steps_; - } + struct OutputParams &add(const double time, const bool do_output, const bool do_modification) + { + size_t numSteps = output_times.size(); + if (numSteps > 0) { + // Monotone increasing + EXPECT_TRUE(time > output_times[numSteps - 1]); + } - void set_data(const std::vector& output_steps_, - const std::vector& modification_steps_) - { - ASSERT_EQ(output_steps_.size(), 
modification_steps_.size()); + output_times.push_back(time); + output_steps.push_back(do_output); + modification_steps.push_back(do_modification); - size_t numSteps = output_steps_.size(); - for(size_t i=0; i 0) { - // Monotone increasing - EXPECT_TRUE(time > output_times[numSteps-1]); + void clear() + { + output_times.clear(); + output_steps.clear(); + modification_steps.clear(); } - output_times.push_back(time); - output_steps.push_back(do_output); - modification_steps.push_back(do_modification); - - return *this; - } + std::string outFile{"file.g"}; + std::string elemFieldName{"elem_field"}; + std::vector output_times; + std::vector output_steps; + std::vector modification_steps; + }; - void clear() + void do_output(Ioss::Region &o_region, const OutputParams ¶ms, size_t step, double &minTime, + int &maxStep, bool &doneOutputAfterModification) { - output_times.clear(); - output_steps.clear(); - modification_steps.clear(); - } - - std::string outFile{"file.g"}; - std::string elemFieldName{"elem_field"}; - std::vector output_times; - std::vector output_steps; - std::vector modification_steps; -}; - -void do_output(Ioss::Region &o_region, - const OutputParams& params, - size_t step, - double& minTime, - int& maxStep, - bool& doneOutputAfterModification) -{ - if(params.output_steps[step]) { - if(!doneOutputAfterModification) { - minTime = params.output_times[step]; - } + if (params.output_steps[step]) { + if (!doneOutputAfterModification) { + minTime = params.output_times[step]; + } - write_transient(o_region, params.elemFieldName, params.output_times[step]); + write_transient(o_region, params.elemFieldName, params.output_times[step]); - auto min_result = o_region.get_min_time(); - EXPECT_EQ(1, min_result.first); - EXPECT_NEAR(minTime, min_result.second, 1.0e-6); + auto min_result = o_region.get_min_time(); + EXPECT_EQ(1, min_result.first); + EXPECT_NEAR(minTime, min_result.second, 1.0e-6); - auto max_result = o_region.get_max_time(); - EXPECT_EQ(maxStep, 
max_result.first); - EXPECT_NEAR(params.output_times[step], max_result.second, 1.0e-6); + auto max_result = o_region.get_max_time(); + EXPECT_EQ(maxStep, max_result.first); + EXPECT_NEAR(params.output_times[step], max_result.second, 1.0e-6); - maxStep++; - doneOutputAfterModification = true; + maxStep++; + doneOutputAfterModification = true; + } } -} -void run_topology_change(const Ioss::Region& i_region, - Ioss::Region &o_region, - const OutputParams& params) -{ - auto observer = o_region.get_mesh_modification_observer(); + void run_topology_change(const Ioss::Region &i_region, Ioss::Region &o_region, + const OutputParams ¶ms) + { + auto observer = o_region.get_mesh_modification_observer(); - define_model(i_region, o_region); - write_model(i_region, o_region); + define_model(i_region, o_region); + write_model(i_region, o_region); - define_transient(i_region, o_region, params.elemFieldName); + define_transient(i_region, o_region, params.elemFieldName); - auto numSteps = params.output_steps.size(); + auto numSteps = params.output_steps.size(); - int maxStep = 1; + int maxStep = 1; - double minTime = numSteps > 0 ? params.output_times[0] : 0.0; - double maxTime = numSteps > 0 ? params.output_times[0] : 0.0; + double minTime = numSteps > 0 ? params.output_times[0] : 0.0; + double maxTime = numSteps > 0 ? 
params.output_times[0] : 0.0; - bool doneOutputAfterModification = true; + bool doneOutputAfterModification = true; - for(size_t i=0; iset_topology_modification(Ioss::TOPOLOGY_UNKNOWN); - maxStep = 1; - doneOutputAfterModification = false; - } + for (size_t i = 0; i < numSteps; i++) { + if (params.modification_steps[i]) { + observer->set_topology_modification(Ioss::TOPOLOGY_UNKNOWN); + maxStep = 1; + doneOutputAfterModification = false; + } - do_output(o_region, params, i, minTime, maxStep, doneOutputAfterModification); + do_output(o_region, params, i, minTime, maxStep, doneOutputAfterModification); + } } -} -void cleanup_simple_multi_files(const std::string &outFile) -{ - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void cleanup_simple_multi_files(const std::string &outFile) + { + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); + unlink(file1.c_str()); - std::string file2 = Ioss::Utils::decode_filename(outFile + "-s0002", util.parallel_rank(), util.parallel_size()); - unlink(file2.c_str()); + std::string file2 = Ioss::Utils::decode_filename(outFile + "-s0002", util.parallel_rank(), + util.parallel_size()); + unlink(file2.c_str()); - std::string file3 = Ioss::Utils::decode_filename(outFile + "-s0003", util.parallel_rank(), util.parallel_size()); - unlink(file3.c_str()); + std::string file3 = Ioss::Utils::decode_filename(outFile + "-s0003", util.parallel_rank(), + util.parallel_size()); + unlink(file3.c_str()); - std::string file4 = Ioss::Utils::decode_filename(outFile + "-s0004", util.parallel_rank(), util.parallel_size()); - unlink(file4.c_str()); -} + std::string file4 = Ioss::Utils::decode_filename(outFile + "-s0004", util.parallel_rank(), + util.parallel_size()); + unlink(file4.c_str()); + } -void 
run_multi_file_simple_topology_change(const OutputParams& params) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void run_multi_file_simple_topology_change(const OutputParams ¶ms) + { + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - int numBlocks = util.parallel_size(); + int numBlocks = util.parallel_size(); - std::string meshDesc = get_many_block_mesh_desc(numBlocks); + std::string meshDesc = get_many_block_mesh_desc(numBlocks); - Ioss::PropertyManager propertyManager; + Ioss::PropertyManager propertyManager; - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", params.outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", params.outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_MULTI_FILE; - auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); - o_region.register_mesh_modification_observer(observer); + auto fileControlOption 
= Ioss::FileControlOption::CONTROL_AUTO_MULTI_FILE; + auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); + o_region.register_mesh_modification_observer(observer); - run_topology_change(i_region, o_region, params); -} + run_topology_change(i_region, o_region, params); + } -TEST(TestDynamicWrite, multi_file_simple_topology_modification) -{ - std::string outFile("multiFileManyBlocks.g"); - std::string elemFieldName = "elem_field"; + TEST(TestDynamicWrite, multi_file_simple_topology_modification) + { + std::string outFile("multiFileManyBlocks.g"); + std::string elemFieldName = "elem_field"; - OutputParams params(outFile, elemFieldName); + OutputParams params(outFile, elemFieldName); - std::vector output_steps{true , true, true, true, true, true}; - std::vector modification_steps{false, true, false, true, true, false}; + std::vector output_steps{true, true, true, true, true, true}; + std::vector modification_steps{false, true, false, true, true, false}; - params.set_data(output_steps, modification_steps); + params.set_data(output_steps, modification_steps); - cleanup_simple_multi_files(outFile); - run_multi_file_simple_topology_change(params); - cleanup_simple_multi_files(outFile); -} + cleanup_simple_multi_files(outFile); + run_multi_file_simple_topology_change(params); + cleanup_simple_multi_files(outFile); + } -void cleanup_cyclic_multi_files(const std::string &outFile) -{ - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void cleanup_cyclic_multi_files(const std::string &outFile) + { + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); + unlink(file1.c_str()); - std::string file2 = Ioss::Utils::decode_filename(outFile + "-A", util.parallel_rank(), util.parallel_size()); - 
unlink(file2.c_str()); + std::string file2 = + Ioss::Utils::decode_filename(outFile + "-A", util.parallel_rank(), util.parallel_size()); + unlink(file2.c_str()); - std::string file3 = Ioss::Utils::decode_filename(outFile + "-B", util.parallel_rank(), util.parallel_size()); - unlink(file3.c_str()); + std::string file3 = + Ioss::Utils::decode_filename(outFile + "-B", util.parallel_rank(), util.parallel_size()); + unlink(file3.c_str()); - std::string file4 = Ioss::Utils::decode_filename(outFile + "-C", util.parallel_rank(), util.parallel_size()); - unlink(file4.c_str()); -} + std::string file4 = + Ioss::Utils::decode_filename(outFile + "-C", util.parallel_rank(), util.parallel_size()); + unlink(file4.c_str()); + } -void run_multi_file_cyclic_topology_change(const OutputParams& params) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void run_multi_file_cyclic_topology_change(const OutputParams ¶ms) + { + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - int numBlocks = util.parallel_size(); + int numBlocks = util.parallel_size(); - std::string meshDesc = get_many_block_mesh_desc(numBlocks); + std::string meshDesc = get_many_block_mesh_desc(numBlocks); - Ioss::PropertyManager propertyManager; + Ioss::PropertyManager propertyManager; - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", params.outFile, 
Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", params.outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_MULTI_FILE; - auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); - o_region.register_mesh_modification_observer(observer); + auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_MULTI_FILE; + auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); + o_region.register_mesh_modification_observer(observer); - o_region.set_file_cyclic_count(3); - run_topology_change(i_region, o_region, params); -} + o_region.set_file_cyclic_count(3); + run_topology_change(i_region, o_region, params); + } -TEST(TestDynamicWrite, multi_file_cyclic_topology_modification) -{ - std::string outFile("cyclicMultiFileManyBlocks.g"); - std::string elemFieldName = "elem_field"; + TEST(TestDynamicWrite, multi_file_cyclic_topology_modification) + { + std::string outFile("cyclicMultiFileManyBlocks.g"); + std::string elemFieldName = "elem_field"; - OutputParams params(outFile, elemFieldName); + OutputParams params(outFile, elemFieldName); - std::vector output_times{0.0 , 0.5 , 1.5 , 1.75, 2.0 , 3.0}; - std::vector output_steps{true , true, true , true, true, true }; - std::vector modification_steps{false, true, false, true, true, false}; + std::vector output_times{0.0, 0.5, 1.5, 1.75, 2.0, 3.0}; + std::vector output_steps{true, true, true, true, true, true}; + std::vector modification_steps{false, true, false, true, true, false}; - params.set_data(output_times, output_steps, 
modification_steps); + params.set_data(output_times, output_steps, modification_steps); - cleanup_cyclic_multi_files(outFile); - run_multi_file_cyclic_topology_change(params); - cleanup_cyclic_multi_files(outFile); -} + cleanup_cyclic_multi_files(outFile); + run_multi_file_cyclic_topology_change(params); + cleanup_cyclic_multi_files(outFile); + } -void fill_group_gold_names(const int numFileGroups, - std::vector& gold_names, - std::vector& gold_full_names) -{ - gold_names.clear(); - gold_full_names.clear(); + void fill_group_gold_names(const int numFileGroups, std::vector &gold_names, + std::vector &gold_full_names) + { + gold_names.clear(); + gold_full_names.clear(); - gold_names.push_back("/"); - gold_full_names.push_back("/"); + gold_names.push_back("/"); + gold_full_names.push_back("/"); - for(int i=1; i<=numFileGroups; i++) { - std::ostringstream oss; - oss << Ioss::DynamicTopologyFileControl::group_prefix(); - oss << i; + for (int i = 1; i <= numFileGroups; i++) { + std::ostringstream oss; + oss << Ioss::DynamicTopologyFileControl::group_prefix(); + oss << i; - gold_names.push_back(oss.str()); - gold_full_names.push_back("/" + oss.str()); + gold_names.push_back(oss.str()); + gold_full_names.push_back("/" + oss.str()); + } } -} -void test_group_names(Ioss::DatabaseIO *database) -{ - Ioss::NameList names = database->groups_describe(false); - Ioss::NameList full_names = database->groups_describe(true); + void test_group_names(Ioss::DatabaseIO *database) + { + Ioss::NameList names = database->groups_describe(false); + Ioss::NameList full_names = database->groups_describe(true); - std::vector gold_names; - std::vector gold_full_names; + std::vector gold_names; + std::vector gold_full_names; - fill_group_gold_names(database->num_child_group(), gold_names, gold_full_names); + fill_group_gold_names(database->num_child_group(), gold_names, gold_full_names); - EXPECT_EQ(gold_names, names); - EXPECT_EQ(gold_full_names, full_names); -} + EXPECT_EQ(gold_names, names); + 
EXPECT_EQ(gold_full_names, full_names); + } -void cleanup_single_file(const std::string &outFile) -{ - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void cleanup_single_file(const std::string &outFile) + { + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); -} + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); + unlink(file1.c_str()); + } -void run_single_file_simple_topology_change(const OutputParams& params) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + void run_single_file_simple_topology_change(const OutputParams ¶ms) + { + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - int numBlocks = util.parallel_size(); + int numBlocks = util.parallel_size(); - std::string meshDesc = get_many_block_mesh_desc(numBlocks); + std::string meshDesc = get_many_block_mesh_desc(numBlocks); - Ioss::PropertyManager propertyManager; + Ioss::PropertyManager propertyManager; - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", params.outFile, Ioss::WRITE_RESULTS, - 
Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", params.outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; - auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); - o_region.register_mesh_modification_observer(observer); + auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; + auto observer = std::make_shared(i_region, params.elemFieldName, fileControlOption); + o_region.register_mesh_modification_observer(observer); - run_topology_change(i_region, o_region, params); - test_group_names(o_database); -} + run_topology_change(i_region, o_region, params); + test_group_names(o_database); + } -TEST(TestDynamicWrite, single_file_simple_topology_modification) -{ - std::string outFile("singleFileManyBlocks.g"); - std::string elemFieldName = "elem_field"; + TEST(TestDynamicWrite, single_file_simple_topology_modification) + { + std::string outFile("singleFileManyBlocks.g"); + std::string elemFieldName = "elem_field"; - OutputParams params(outFile, elemFieldName); + OutputParams params(outFile, elemFieldName); - params.add(0.0, true, false) + params.add(0.0, true, false) .add(1.0, true, true) .add(2.0, true, false) .add(3.0, true, true) .add(4.0, true, true) .add(5.0, true, false); - cleanup_single_file(outFile); - run_single_file_simple_topology_change(params); - cleanup_single_file(outFile); -} - -TEST(TestDynamicWrite, 
single_file_groups_not_enabled) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - - int numBlocks = util.parallel_size(); - if(numBlocks > 1) GTEST_SKIP(); - - std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" - "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; - - Ioss::PropertyManager propertyManager; - - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); - - std::string outFile("singleFileGroupsNotEnabled.g"); - std::string elemFieldName = "elem_field"; - cleanup_single_file(outFile); - - // Need the line below to allow this to pass - // propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); - - auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; - auto observer = std::make_shared(i_region, elemFieldName, fileControlOption); - EXPECT_THROW(o_region.register_mesh_modification_observer(observer), std::runtime_error); - cleanup_single_file(outFile); -} - -TEST(TestDynamicWrite, create_subgroup_with_file_reopen) -{ - std::string outFile("subgroupManyBlocks.g"); - std::string elemFieldName = "elem_field"; - - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); - - int numBlocks = util.parallel_size(); - if(numBlocks > 1) 
GTEST_SKIP(); - - std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" - "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; - - Ioss::PropertyManager propertyManager; - - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + cleanup_single_file(outFile); + run_single_file_simple_topology_change(params); + cleanup_single_file(outFile); + } + TEST(TestDynamicWrite, single_file_groups_not_enabled) { - propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + + int numBlocks = util.parallel_size(); + if (numBlocks > 1) + GTEST_SKIP(); + + std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" + "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; + + Ioss::PropertyManager propertyManager; + + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); + + std::string outFile("singleFileGroupsNotEnabled.g"); + std::string elemFieldName = "elem_field"; + cleanup_single_file(outFile); + + // Need the line below to allow this to pass + // propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); + Ioss::DatabaseIO *o_database = Ioss::IOFactory::create( + "exodus", outFile, Ioss::WRITE_RESULTS, Ioss::ParallelUtils::comm_world(), propertyManager); Ioss::Region 
o_region(o_database, "output_model"); EXPECT_TRUE(o_database != nullptr); EXPECT_TRUE(o_database->ok(true)); - o_database->create_subgroup("GROUP_1"); + + auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; + auto observer = std::make_shared(i_region, elemFieldName, fileControlOption); + EXPECT_THROW(o_region.register_mesh_modification_observer(observer), std::runtime_error); + cleanup_single_file(outFile); } + TEST(TestDynamicWrite, create_subgroup_with_file_reopen) { - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + std::string outFile("subgroupManyBlocks.g"); + std::string elemFieldName = "elem_field"; + + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); + unlink(file1.c_str()); + + int numBlocks = util.parallel_size(); + if (numBlocks > 1) + GTEST_SKIP(); + + std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" + "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; + + Ioss::PropertyManager propertyManager; + + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); + + { + propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + 
EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); + o_database->create_subgroup("GROUP_1"); + } - // Group pointer is automatically at first child - o_database->create_subgroup("GROUP_2"); + { + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - Ioss::NameList names = o_database->groups_describe(false); - Ioss::NameList full_names = o_database->groups_describe(true); + // Group pointer is automatically at first child + o_database->create_subgroup("GROUP_2"); - std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; - std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_1/GROUP_2"}; + Ioss::NameList names = o_database->groups_describe(false); + Ioss::NameList full_names = o_database->groups_describe(true); - EXPECT_EQ(gold_names, names); - EXPECT_EQ(gold_full_names, full_names); + std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; + std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_1/GROUP_2"}; + + EXPECT_EQ(gold_names, names); + EXPECT_EQ(gold_full_names, full_names); + } + + unlink(file1.c_str()); } - unlink(file1.c_str()); -} + TEST(TestDynamicWrite, create_subgroup_with_file_persistence_and_child_group) + { + std::string outFile("subgroupManyBlocks.g"); + std::string elemFieldName = "elem_field"; -TEST(TestDynamicWrite, create_subgroup_with_file_persistence_and_child_group) -{ - std::string outFile("subgroupManyBlocks.g"); - std::string elemFieldName = "elem_field"; + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), 
util.parallel_size()); + unlink(file1.c_str()); - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); + int numBlocks = util.parallel_size(); + if (numBlocks > 1) + GTEST_SKIP(); - int numBlocks = util.parallel_size(); - if(numBlocks > 1) GTEST_SKIP(); + std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" + "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; - std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" - "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; + Ioss::PropertyManager propertyManager; - Ioss::PropertyManager propertyManager; + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + { + propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - { - propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, 
"output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + o_database->create_subgroup("GROUP_1"); - o_database->create_subgroup("GROUP_1"); + // Group pointer is at "GROUP_1" ... "GROUP_2" is a child + o_database->create_subgroup("GROUP_2"); - // Group pointer is at "GROUP_1" ... "GROUP_2" is a child - o_database->create_subgroup("GROUP_2"); + Ioss::NameList names = o_database->groups_describe(false); + Ioss::NameList full_names = o_database->groups_describe(true); - Ioss::NameList names = o_database->groups_describe(false); - Ioss::NameList full_names = o_database->groups_describe(true); + std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; + std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_1/GROUP_2"}; - std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; - std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_1/GROUP_2"}; + EXPECT_EQ(gold_names, names); + EXPECT_EQ(gold_full_names, full_names); + } - EXPECT_EQ(gold_names, names); - EXPECT_EQ(gold_full_names, full_names); + unlink(file1.c_str()); } - unlink(file1.c_str()); -} + TEST(TestDynamicWrite, create_subgroup_with_file_persistence_and_no_child_group) + { + std::string outFile("subgroupManyBlocks.g"); + std::string elemFieldName = "elem_field"; -TEST(TestDynamicWrite, create_subgroup_with_file_persistence_and_no_child_group) -{ - std::string outFile("subgroupManyBlocks.g"); - std::string elemFieldName = "elem_field"; + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); + std::string file1 = + Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); + unlink(file1.c_str()); - std::string file1 = Ioss::Utils::decode_filename(outFile, util.parallel_rank(), util.parallel_size()); - unlink(file1.c_str()); + int numBlocks = util.parallel_size(); + if (numBlocks > 1) + GTEST_SKIP(); - int numBlocks = util.parallel_size(); 
- if(numBlocks > 1) GTEST_SKIP(); + std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" + "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; - std::string meshDesc = "0,1,HEX_8,1,2,3,4,5,6,7,8,block_1" - "|coordinates:0,0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,1,1,1,1,0,1,1"; + Ioss::PropertyManager propertyManager; - Ioss::PropertyManager propertyManager; + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + { + propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); + Ioss::DatabaseIO *o_database = + Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region(o_database, "output_model"); + EXPECT_TRUE(o_database != nullptr); + EXPECT_TRUE(o_database->ok(true)); - { - propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database = Ioss::IOFactory::create("exodus", outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region(o_database, "output_model"); - EXPECT_TRUE(o_database != nullptr); - EXPECT_TRUE(o_database->ok(true)); + o_database->create_subgroup("GROUP_1"); - o_database->create_subgroup("GROUP_1"); + // Group pointer is reset to root group + EXPECT_TRUE(o_database->open_root_group()); + 
o_database->create_subgroup("GROUP_2"); - // Group pointer is reset to root group - EXPECT_TRUE(o_database->open_root_group()); - o_database->create_subgroup("GROUP_2"); + Ioss::NameList names = o_database->groups_describe(false); + Ioss::NameList full_names = o_database->groups_describe(true); - Ioss::NameList names = o_database->groups_describe(false); - Ioss::NameList full_names = o_database->groups_describe(true); + std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; + std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_2"}; - std::vector gold_names{"/", "GROUP_1", "GROUP_2"}; - std::vector gold_full_names{"/", "/GROUP_1", "/GROUP_2"}; + EXPECT_EQ(gold_names, names); + EXPECT_EQ(gold_full_names, full_names); + } - EXPECT_EQ(gold_names, names); - EXPECT_EQ(gold_full_names, full_names); + unlink(file1.c_str()); } - unlink(file1.c_str()); -} - + void run_topology_change_with_multiple_output(const Ioss::Region &i_region, + Ioss::Region &o_region1, Ioss::Region &o_region2, + const OutputParams ¶ms1, + const OutputParams ¶ms2) + { + ASSERT_EQ(params1.modification_steps, params2.modification_steps); -void run_topology_change_with_multiple_output(const Ioss::Region& i_region, - Ioss::Region &o_region1, - Ioss::Region &o_region2, - const OutputParams& params1, - const OutputParams& params2) -{ - ASSERT_EQ(params1.modification_steps, params2.modification_steps); + auto observer1 = o_region1.get_mesh_modification_observer(); + auto observer2 = o_region2.get_mesh_modification_observer(); - auto observer1 = o_region1.get_mesh_modification_observer(); - auto observer2 = o_region2.get_mesh_modification_observer(); + define_model(i_region, o_region1); + write_model(i_region, o_region1); + define_transient(i_region, o_region1, params1.elemFieldName); - define_model(i_region, o_region1); - write_model(i_region, o_region1); - define_transient(i_region, o_region1, params1.elemFieldName); + define_model(i_region, o_region2); + write_model(i_region, o_region2); + 
define_transient(i_region, o_region2, params2.elemFieldName); - define_model(i_region, o_region2); - write_model(i_region, o_region2); - define_transient(i_region, o_region2, params2.elemFieldName); + auto numSteps = params1.output_steps.size(); - auto numSteps = params1.output_steps.size(); + int maxStep1 = 1; + int maxStep2 = 1; - int maxStep1 = 1; - int maxStep2 = 1; + double minTime1 = numSteps > 0 ? params1.output_times[0] : 0.0; + double minTime2 = numSteps > 0 ? params2.output_times[0] : 0.0; - double minTime1 = numSteps > 0 ? params1.output_times[0] : 0.0; - double minTime2 = numSteps > 0 ? params2.output_times[0] : 0.0; + bool doneOutputAfterModification1 = true; + bool doneOutputAfterModification2 = true; - bool doneOutputAfterModification1 = true; - bool doneOutputAfterModification2 = true; + for (size_t i = 0; i < numSteps; i++) { + if (params1.modification_steps[i]) { + EXPECT_TRUE(params2.modification_steps[i]); - for(size_t i=0; iset_topology_modification(Ioss::TOPOLOGY_UNKNOWN); + maxStep1 = 1; + maxStep2 = 1; - observer1->set_topology_modification(Ioss::TOPOLOGY_UNKNOWN); - maxStep1 = 1; - maxStep2 = 1; + EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer1->get_topology_modification()); + EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer2->get_topology_modification()); - EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer1->get_topology_modification()); - EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer2->get_topology_modification()); + doneOutputAfterModification1 = false; + doneOutputAfterModification2 = false; + } - doneOutputAfterModification1 = false; - doneOutputAfterModification2 = false; + do_output(o_region1, params1, i, minTime1, maxStep1, doneOutputAfterModification1); + do_output(o_region2, params2, i, minTime2, maxStep2, doneOutputAfterModification2); } - - do_output(o_region1, params1, i, minTime1, maxStep1, doneOutputAfterModification1); - do_output(o_region2, params2, i, minTime2, maxStep2, doneOutputAfterModification2); } -} - -void 
run_single_file_simple_topology_change_with_multiple_output(const std::string& model, - const OutputParams& params1, - const OutputParams& params2) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - - auto broker = Ioss::DynamicTopologyBroker::broker(); - broker->register_model(model); - - int numBlocks = util.parallel_size(); - std::string meshDesc = get_many_block_mesh_desc(numBlocks); - - Ioss::PropertyManager propertyManager; - - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); - - propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); - propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - - Ioss::DatabaseIO *o_database1 = Ioss::IOFactory::create("exodus", params1.outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region1(o_database1, "region1"); - EXPECT_TRUE(o_database1 != nullptr); - EXPECT_TRUE(o_database1->ok(true)); - - auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; - auto observer1 = std::make_shared(i_region, params1.elemFieldName, fileControlOption); - broker->register_observer(model, observer1, o_region1); - - Ioss::DatabaseIO *o_database2 = Ioss::IOFactory::create("exodus", params2.outFile, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region o_region2(o_database2, "region2"); - EXPECT_TRUE(o_database2 != nullptr); - EXPECT_TRUE(o_database2->ok(true)); - - auto observer2 = std::make_shared(i_region, params2.elemFieldName, fileControlOption); - broker->register_observer(model, observer2, o_region2); - - run_topology_change_with_multiple_output(i_region, o_region1, o_region2, params1, params2); - - test_group_names(o_database1); 
- test_group_names(o_database2); -} - -TEST(TestDynamicWrite, single_file_simple_topology_modification_with_multiple_output) -{ - std::string outFile1("singleFileManyBlocks1.g"); - std::string outFile2("singleFileManyBlocks2.g"); - std::string elemFieldName = "elem_field"; - std::string model = "multiple-output"; - - OutputParams params1(outFile1, elemFieldName); - - params1.add(0.0, true , false) - .add(1.0, true , true) - .add(2.0, false, false) - .add(3.0, true , true) - .add(4.0, true , false) - .add(5.0, true , true); - - OutputParams params2(outFile2, elemFieldName); - - params2.add(0.0, true , false) - .add(1.0, true , true) - .add(2.0, true , false) - .add(3.0, false, true) - .add(4.0, true , false) - .add(5.0, true , true); - - cleanup_single_file(outFile1); - cleanup_single_file(outFile2); - run_single_file_simple_topology_change_with_multiple_output(model, params1, params2); - cleanup_single_file(outFile1); - cleanup_single_file(outFile2); -} - -TEST(TestDynamicWrite, same_model_triggers_same_modification_for_all_observers) -{ - Ioss::Init::Initializer io; - Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - - std::string outFile1("sameModelManyBlocks1.g"); - std::string outFile2("sameModelManyBlocks2.g"); - std::string elemFieldName("elem_field"); - std::string model("same-model"); - - auto broker = Ioss::DynamicTopologyBroker::broker(); - broker->register_model(model); - - std::string file1 = Ioss::Utils::decode_filename(outFile1, util.parallel_rank(), util.parallel_size()); - std::string file2 = Ioss::Utils::decode_filename(outFile2, util.parallel_rank(), util.parallel_size()); - - unlink(file1.c_str()); - unlink(file2.c_str()); + void run_single_file_simple_topology_change_with_multiple_output(const std::string &model, + const OutputParams ¶ms1, + const OutputParams ¶ms2) + { + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - int numBlocks = util.parallel_size(); - if(numBlocks > 1) 
GTEST_SKIP(); + auto broker = Ioss::DynamicTopologyBroker::broker(); + broker->register_model(model); - std::string meshDesc = get_many_block_mesh_desc(numBlocks); + int numBlocks = util.parallel_size(); - Ioss::PropertyManager propertyManager; - - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("textmesh", meshDesc, Ioss::READ_MODEL, - Ioss::ParallelUtils::comm_world(), - propertyManager); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + std::string meshDesc = get_many_block_mesh_desc(numBlocks); + + Ioss::PropertyManager propertyManager; + + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - { propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - Ioss::DatabaseIO *o_database1 = Ioss::IOFactory::create("exodus", outFile1, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); + Ioss::DatabaseIO *o_database1 = + Ioss::IOFactory::create("exodus", params1.outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); Ioss::Region o_region1(o_database1, "region1"); EXPECT_TRUE(o_database1 != nullptr); EXPECT_TRUE(o_database1->ok(true)); auto fileControlOption = Ioss::FileControlOption::CONTROL_AUTO_GROUP_FILE; - auto observer1 = std::make_shared(i_region, elemFieldName, fileControlOption); + auto observer1 = std::make_shared(i_region, params1.elemFieldName, fileControlOption); broker->register_observer(model, observer1, o_region1); - Ioss::DatabaseIO *o_database2 = Ioss::IOFactory::create("exodus", outFile2, Ioss::WRITE_RESULTS, - Ioss::ParallelUtils::comm_world(), - propertyManager); + Ioss::DatabaseIO *o_database2 = + 
Ioss::IOFactory::create("exodus", params2.outFile, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); Ioss::Region o_region2(o_database2, "region2"); EXPECT_TRUE(o_database2 != nullptr); EXPECT_TRUE(o_database2->ok(true)); - auto observer2 = std::make_shared(i_region, elemFieldName, fileControlOption); + auto observer2 = std::make_shared(i_region, params2.elemFieldName, fileControlOption); broker->register_observer(model, observer2, o_region2); - EXPECT_EQ(Ioss::TOPOLOGY_SAME, observer1->get_topology_modification()); - EXPECT_EQ(Ioss::TOPOLOGY_SAME, observer2->get_topology_modification()); - - observer1->set_topology_modification(Ioss::TOPOLOGY_UNKNOWN); + run_topology_change_with_multiple_output(i_region, o_region1, o_region2, params1, params2); - EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer1->get_topology_modification()); - EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer2->get_topology_modification()); + test_group_names(o_database1); + test_group_names(o_database2); } - unlink(file1.c_str()); - unlink(file2.c_str()); -} + TEST(TestDynamicWrite, single_file_simple_topology_modification_with_multiple_output) + { + std::string outFile1("singleFileManyBlocks1.g"); + std::string outFile2("singleFileManyBlocks2.g"); + std::string elemFieldName = "elem_field"; + std::string model = "multiple-output"; -void test_single_file_simple_topology_change_data(Ioss::Region& i_region, const std::string& elemFieldName, - int gold_step, double gold_time) -{ - i_region.begin_state(gold_step); - for (Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { - size_t num_elem = i_eb->get_property("entity_count").get_int(); + OutputParams params1(outFile1, elemFieldName); - std::vector field_data(num_elem); - std::vector elem_ids; + params1.add(0.0, true, false) + .add(1.0, true, true) + .add(2.0, false, false) + .add(3.0, true, true) + .add(4.0, true, false) + .add(5.0, true, true); - i_eb->get_field_data(elemFieldName, field_data); - i_eb->get_field_data("ids", 
elem_ids); + OutputParams params2(outFile2, elemFieldName); - for (size_t i = 0; i < elem_ids.size(); i++) { - double gold_value = (double)elem_ids[i] + 100*gold_time; - EXPECT_NEAR(gold_value, field_data[i], 1.0e-6); - } + params2.add(0.0, true, false) + .add(1.0, true, true) + .add(2.0, true, false) + .add(3.0, false, true) + .add(4.0, true, false) + .add(5.0, true, true); + + cleanup_single_file(outFile1); + cleanup_single_file(outFile2); + run_single_file_simple_topology_change_with_multiple_output(model, params1, params2); + cleanup_single_file(outFile1); + cleanup_single_file(outFile2); } -} -void read_and_test_single_file_simple_topology_change(const OutputParams& params) -{ - Ioss::PropertyManager propertyManager; + TEST(TestDynamicWrite, same_model_triggers_same_modification_for_all_observers) + { + Ioss::Init::Initializer io; + Ioss::ParallelUtils util(Ioss::ParallelUtils::comm_world()); - Ioss::DatabaseIO *i_database = Ioss::IOFactory::create("exodus", params.outFile, Ioss::READ_RESTART, - Ioss::ParallelUtils::comm_world(), - propertyManager); + std::string outFile1("sameModelManyBlocks1.g"); + std::string outFile2("sameModelManyBlocks2.g"); + std::string elemFieldName("elem_field"); + std::string model("same-model"); - test_group_names(i_database); + auto broker = Ioss::DynamicTopologyBroker::broker(); + broker->register_model(model); - Ioss::Region i_region(i_database, "input_model"); - EXPECT_TRUE(i_database != nullptr); - EXPECT_TRUE(i_database->ok(true)); + std::string file1 = + Ioss::Utils::decode_filename(outFile1, util.parallel_rank(), util.parallel_size()); + std::string file2 = + Ioss::Utils::decode_filename(outFile2, util.parallel_rank(), util.parallel_size()); - auto numSteps = params.output_steps.size(); + unlink(file1.c_str()); + unlink(file2.c_str()); - int numMods = 0; + int numBlocks = util.parallel_size(); + if (numBlocks > 1) + GTEST_SKIP(); - int maxStep = 1; + std::string meshDesc = get_many_block_mesh_desc(numBlocks); - double 
minTime = numSteps > 0 ? params.output_times[0] : 0.0; - double maxTime = numSteps > 0 ? params.output_times[0] : 0.0; + Ioss::PropertyManager propertyManager; - bool doneOutputAfterModification = true; + Ioss::DatabaseIO *i_database = Ioss::IOFactory::create( + "textmesh", meshDesc, Ioss::READ_MODEL, Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); - Ioss::NameList names = i_database->groups_describe(false); + { + propertyManager.add(Ioss::Property("ENABLE_FILE_GROUPS", 1)); + propertyManager.add(Ioss::Property("APPEND_OUTPUT", Ioss::DB_APPEND_GROUP)); - for(size_t i=0; iok(true)); - for(size_t j=i+1; j(i_region, elemFieldName, fileControlOption); + broker->register_observer(model, observer1, o_region1); - maxTime = params.output_times[j]; + Ioss::DatabaseIO *o_database2 = + Ioss::IOFactory::create("exodus", outFile2, Ioss::WRITE_RESULTS, + Ioss::ParallelUtils::comm_world(), propertyManager); + Ioss::Region o_region2(o_database2, "region2"); + EXPECT_TRUE(o_database2 != nullptr); + EXPECT_TRUE(o_database2->ok(true)); + + auto observer2 = std::make_shared(i_region, elemFieldName, fileControlOption); + broker->register_observer(model, observer2, o_region2); + + EXPECT_EQ(Ioss::TOPOLOGY_SAME, observer1->get_topology_modification()); + EXPECT_EQ(Ioss::TOPOLOGY_SAME, observer2->get_topology_modification()); + + observer1->set_topology_modification(Ioss::TOPOLOGY_UNKNOWN); + + EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer1->get_topology_modification()); + EXPECT_EQ(Ioss::TOPOLOGY_UNKNOWN, observer2->get_topology_modification()); } - if(params.modification_steps[i]) { - numMods++; - maxStep = 1; + unlink(file1.c_str()); + unlink(file2.c_str()); + } + + void test_single_file_simple_topology_change_data(Ioss::Region &i_region, + const std::string &elemFieldName, int gold_step, + double gold_time) + { + i_region.begin_state(gold_step); + for 
(Ioss::ElementBlock *i_eb : i_region.get_element_blocks()) { + size_t num_elem = i_eb->get_property("entity_count").get_int(); + + std::vector field_data(num_elem); + std::vector elem_ids; - EXPECT_TRUE(i_region.load_group_mesh(names[numMods+1])); + i_eb->get_field_data(elemFieldName, field_data); + i_eb->get_field_data("ids", elem_ids); - doneOutputAfterModification = false; + for (size_t i = 0; i < elem_ids.size(); i++) { + double gold_value = (double)elem_ids[i] + 100 * gold_time; + EXPECT_NEAR(gold_value, field_data[i], 1.0e-6); + } } + } + + void read_and_test_single_file_simple_topology_change(const OutputParams ¶ms) + { + Ioss::PropertyManager propertyManager; + + Ioss::DatabaseIO *i_database = + Ioss::IOFactory::create("exodus", params.outFile, Ioss::READ_RESTART, + Ioss::ParallelUtils::comm_world(), propertyManager); + + test_group_names(i_database); + + Ioss::Region i_region(i_database, "input_model"); + EXPECT_TRUE(i_database != nullptr); + EXPECT_TRUE(i_database->ok(true)); + + auto numSteps = params.output_steps.size(); + + int numMods = 0; + + int maxStep = 1; + + double minTime = numSteps > 0 ? params.output_times[0] : 0.0; + double maxTime = numSteps > 0 ? 
params.output_times[0] : 0.0; + + bool doneOutputAfterModification = true; + + Ioss::NameList names = i_database->groups_describe(false); - if(params.output_steps[i]) { - if(!doneOutputAfterModification) { - minTime = params.output_times[i]; + for (size_t i = 0; i < numSteps; i++) { + maxTime = params.output_times[i]; + + for (size_t j = i + 1; j < numSteps; j++) { + if (params.modification_steps[j]) { + maxTime = params.output_times[j - 1]; + break; + } + + maxTime = params.output_times[j]; } - auto min_result = i_region.get_min_time(); - EXPECT_EQ(1, min_result.first); - EXPECT_NEAR(minTime, min_result.second, 1.0e-6); - test_single_file_simple_topology_change_data(i_region, params.elemFieldName, 1, minTime); - if((((i+1) < numSteps) && params.modification_steps[i+1]) || (i == (numSteps-1))) { - auto max_result = i_region.get_max_time(); - EXPECT_EQ(maxStep, max_result.first); - EXPECT_NEAR(maxTime, max_result.second, 1.0e-6); + if (params.modification_steps[i]) { + numMods++; + maxStep = 1; + + EXPECT_TRUE(i_region.load_group_mesh(names[numMods + 1])); - test_single_file_simple_topology_change_data(i_region, params.elemFieldName, maxStep, maxTime); + doneOutputAfterModification = false; } - maxStep++; - doneOutputAfterModification = true; + if (params.output_steps[i]) { + if (!doneOutputAfterModification) { + minTime = params.output_times[i]; + } + auto min_result = i_region.get_min_time(); + EXPECT_EQ(1, min_result.first); + EXPECT_NEAR(minTime, min_result.second, 1.0e-6); + test_single_file_simple_topology_change_data(i_region, params.elemFieldName, 1, minTime); + + if ((((i + 1) < numSteps) && params.modification_steps[i + 1]) || (i == (numSteps - 1))) { + auto max_result = i_region.get_max_time(); + EXPECT_EQ(maxStep, max_result.first); + EXPECT_NEAR(maxTime, max_result.second, 1.0e-6); + + test_single_file_simple_topology_change_data(i_region, params.elemFieldName, maxStep, + maxTime); + } + + maxStep++; + doneOutputAfterModification = true; + } } } -} 
-TEST(TestDynamicRead, single_file_simple_topology_modification) -{ - std::string outFile("singleFileManyBlocks.g"); - std::string elemFieldName = "elem_field"; + TEST(TestDynamicRead, single_file_simple_topology_modification) + { + std::string outFile("singleFileManyBlocks.g"); + std::string elemFieldName = "elem_field"; - OutputParams params(outFile, elemFieldName); + OutputParams params(outFile, elemFieldName); - params.add(0.0, true, false) + params.add(0.0, true, false) .add(1.0, true, true) .add(2.0, true, false) .add(3.0, true, true) .add(4.0, true, true) .add(5.0, true, false); - cleanup_single_file(outFile); - run_single_file_simple_topology_change(params); - read_and_test_single_file_simple_topology_change(params); - cleanup_single_file(outFile); -} - -} - + cleanup_single_file(outFile); + run_single_file_simple_topology_change(params); + read_and_test_single_file_simple_topology_change(params); + cleanup_single_file(outFile); + } +} // namespace From b9401e37d3ee853e7b994c29abe84b9fccc00e68 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 29 Aug 2024 15:01:08 -0600 Subject: [PATCH 02/21] EJOIN: Check that if user specified subsetting variables, all of those variables were found --- .../applications/ejoin/EJ_SystemInterface.C | 2 +- .../seacas/applications/ejoin/EJ_Version.h | 4 +- packages/seacas/applications/ejoin/EJoin.C | 152 ++++++++++++++---- 3 files changed, 126 insertions(+), 32 deletions(-) diff --git a/packages/seacas/applications/ejoin/EJ_SystemInterface.C b/packages/seacas/applications/ejoin/EJ_SystemInterface.C index f1aeecd6d9..0c90c12276 100644 --- a/packages/seacas/applications/ejoin/EJ_SystemInterface.C +++ b/packages/seacas/applications/ejoin/EJ_SystemInterface.C @@ -540,7 +540,7 @@ void SystemInterface::show_version() { fmt::print("EJoin\n" "\t(A code for merging Exodus databases; with or without results data.)\n" - "\t(Version: {}) Modified: {}\n", + "\t(Version: {}) Modified: {}\n\n", qainfo[2], qainfo[1]); } diff --git 
a/packages/seacas/applications/ejoin/EJ_Version.h b/packages/seacas/applications/ejoin/EJ_Version.h index 9389859444..e90e978956 100644 --- a/packages/seacas/applications/ejoin/EJ_Version.h +++ b/packages/seacas/applications/ejoin/EJ_Version.h @@ -9,6 +9,6 @@ static const std::array qainfo{ "ejoin", - "2024/08/06", - "1.6.4", + "2024/08/29", + "1.6.5", }; diff --git a/packages/seacas/applications/ejoin/EJoin.C b/packages/seacas/applications/ejoin/EJoin.C index 8cbbe7315c..6f6c2f4565 100644 --- a/packages/seacas/applications/ejoin/EJoin.C +++ b/packages/seacas/applications/ejoin/EJoin.C @@ -31,6 +31,7 @@ #include #include #include +#include #include "EJ_CodeTypes.h" #include "EJ_SystemInterface.h" @@ -44,16 +45,22 @@ #endif namespace { + std::string tsFormat = "[%H:%M:%S] "; + unsigned int debug_level = 0; + bool valid_variable(const std::string &variable, size_t id, const StringIdVector &variable_list); - void define_global_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool check_variable_mismatch(const std::string &type, const StringIdVector &variable_list, + const Ioss::NameList &fields); + + bool define_global_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list); - void define_nodal_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_nodal_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list, SystemInterface &interFace); - void define_element_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_element_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list); - void define_nset_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_nset_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list); - void define_sset_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_sset_fields(Ioss::Region 
&output_region, RegionVector &part_mesh, const StringIdVector &variable_list); void define_nodal_nodeset_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list, SystemInterface &interFace); @@ -120,9 +127,6 @@ namespace { } } -} // namespace - -namespace { void transfer_elementblock(Ioss::Region ®ion, Ioss::Region &output_region, bool create_assemblies, bool debug); void transfer_assembly(const Ioss::Region ®ion, Ioss::Region &output_region, bool debug); @@ -140,15 +144,9 @@ namespace { const std::string &field_name); } // namespace -std::string tsFormat = "[%H:%M:%S] "; - -// prototypes - template double ejoin(SystemInterface &interFace, std::vector &part_mesh, INT dummy); -unsigned int debug_level = 0; - int main(int argc, char *argv[]) { #ifdef SEACAS_HAVE_MPI @@ -467,22 +465,29 @@ double ejoin(SystemInterface &interFace, std::vector &part_mesh, output_region.begin_mode(Ioss::STATE_DEFINE_TRANSIENT); - define_global_fields(output_region, part_mesh, interFace.global_var_names()); + bool error = false; + error |= define_global_fields(output_region, part_mesh, interFace.global_var_names()); - define_nodal_fields(output_region, part_mesh, interFace.node_var_names(), interFace); + error |= define_nodal_fields(output_region, part_mesh, interFace.node_var_names(), interFace); define_nodal_nodeset_fields(output_region, part_mesh, interFace.node_var_names(), interFace); - define_element_fields(output_region, part_mesh, interFace.elem_var_names()); + error |= define_element_fields(output_region, part_mesh, interFace.elem_var_names()); if (!interFace.omit_nodesets()) { - define_nset_fields(output_region, part_mesh, interFace.nset_var_names()); + error |= define_nset_fields(output_region, part_mesh, interFace.nset_var_names()); } if (!interFace.omit_sidesets()) { - define_sset_fields(output_region, part_mesh, interFace.sset_var_names()); + error |= define_sset_fields(output_region, part_mesh, interFace.sset_var_names()); } 
output_region.end_mode(Ioss::STATE_DEFINE_TRANSIENT); + if (error) { + fmt::print(stderr, + "ERROR: Specified field(s) (see above) were not found. Fix input and rerun.\n\n"); + exit(EXIT_FAILURE); + } + // Get database times... // Different parts can have either no times or the times must match //! \todo If ts_min, ts_max, ts_step is specified, then only check steps in that range... @@ -772,6 +777,8 @@ namespace { void define_nodal_nodeset_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list, SystemInterface &interFace) { + // This routine does not check that all variables in `variable_list` have been + // found since the checking has already been done in define_nodal_fields. if (!variable_list.empty() && variable_list[0].first == "none") { return; } @@ -1328,11 +1335,12 @@ namespace { oge->put_field_data(field_name, data); } - void define_global_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_global_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list) { + bool error = false; if (!variable_list.empty() && variable_list[0].first == "none") { - return; + return error; } for (const auto &pm : part_mesh) { Ioss::NameList fields = pm->field_describe(Ioss::Field::REDUCTION); @@ -1343,13 +1351,23 @@ namespace { } } } + // Now that we have defined all fields, check `variable_list` and make + // sure that all fields that have been explicitly specified now exist + // on `output_region`... + if (!variable_list.empty() && variable_list[0].first != "all") { + // The user has specified at least one variable... 
+ Ioss::NameList fields = output_region.field_describe(Ioss::Field::REDUCTION); + error = check_variable_mismatch("Global", variable_list, fields); + } + return error; } - void define_nodal_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_nodal_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list, SystemInterface &interFace) { + bool error = false; if (!variable_list.empty() && variable_list[0].first == "none") { - return; + return error; } Ioss::NodeBlock *onb = output_region.get_node_blocks()[0]; SMART_ASSERT(onb != nullptr); @@ -1369,15 +1387,28 @@ namespace { } } } + // Now that we have defined all fields, check `variable_list` and make + // sure that all fields that have been explicitly specified now exist + // on `output_region`... + if (!variable_list.empty() && variable_list[0].first != "all") { + // The user has specified at least one variable... + Ioss::NameList fields = onb->field_describe(Ioss::Field::REDUCTION); + error = check_variable_mismatch("Nodal", variable_list, fields); + } + return error; } - void define_element_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_element_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list) { + bool error = false; // Element Block Fields... 
if (!variable_list.empty() && variable_list[0].first == "none") { - return; + return error; } + bool subsetting_fields = !variable_list.empty() && variable_list[0].first != "all"; + Ioss::NameList defined_fields; + for (const auto &pm : part_mesh) { const Ioss::ElementBlockContainer &iebs = pm->get_element_blocks(); for (const auto &ieb : iebs) { @@ -1395,22 +1426,38 @@ namespace { if (valid_variable(field_name, id, variable_list)) { Ioss::Field field = ieb->get_field(field_name); oeb->field_add(std::move(field)); + if (subsetting_fields) { + defined_fields.push_back(field_name); + } } } } } } } + // Now that we have defined all fields, check `variable_list` and make + // sure that all fields that have been explicitly specified now exist + // on `output_region`... + if (subsetting_fields) { + // The user has specified at least one variable... + Ioss::Utils::uniquify(defined_fields); + error = check_variable_mismatch("Element", variable_list, defined_fields); + } + return error; } - void define_nset_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_nset_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list) { + bool error = false; // Nodeset fields... if (!variable_list.empty() && variable_list[0].first == "none") { - return; + return error; } + bool subsetting_fields = !variable_list.empty() && variable_list[0].first != "all"; + Ioss::NameList defined_fields; + for (const auto &pm : part_mesh) { const Ioss::NodeSetContainer &ins = pm->get_nodesets(); for (const auto &in : ins) { @@ -1429,19 +1476,35 @@ namespace { if (valid_variable(field_name, id, variable_list)) { Ioss::Field field = in->get_field(field_name); ons->field_add(std::move(field)); + if (subsetting_fields) { + defined_fields.push_back(field_name); + } } } } } } + // Now that we have defined all fields, check `variable_list` and make + // sure that all fields that have been explicitly specified now exist + // on `output_region`... 
+ if (subsetting_fields) { + // The user has specified at least one variable... + Ioss::Utils::uniquify(defined_fields); + error = check_variable_mismatch("Nodeset", variable_list, defined_fields); + } + return error; } - void define_sset_fields(Ioss::Region &output_region, RegionVector &part_mesh, + bool define_sset_fields(Ioss::Region &output_region, RegionVector &part_mesh, const StringIdVector &variable_list) { + bool error = false; if (!variable_list.empty() && variable_list[0].first == "none") { - return; + return error; } + bool subsetting_fields = !variable_list.empty() && variable_list[0].first != "all"; + Ioss::NameList defined_fields; + const auto &os = output_region.get_sidesets(); Ioss::SideBlockContainer out_eb; @@ -1469,6 +1532,9 @@ namespace { if (valid_variable(field_name, id, variable_list)) { Ioss::Field field = eb->get_field(field_name); (*II)->field_add(std::move(field)); + if (subsetting_fields) { + defined_fields.push_back(field_name); + } } } ++II; @@ -1476,6 +1542,15 @@ namespace { } } } + // Now that we have defined all fields, check `variable_list` and make + // sure that all fields that have been explicitly specified now exist + // on `output_region`... + if (subsetting_fields) { + // The user has specified at least one variable... 
+ Ioss::Utils::uniquify(defined_fields); + error = check_variable_mismatch("Sideset", variable_list, defined_fields); + } + return error; } void transfer_fields(Ioss::GroupingEntity *ige, Ioss::GroupingEntity *oge, @@ -1516,6 +1591,25 @@ namespace { return false; } + bool check_variable_mismatch(const std::string &type, const StringIdVector &variable_list, + const Ioss::NameList &fields) + { + // Check all variables in `variable_list` and see if they are found in `fields` + if (variable_list.empty() || variable_list[0].first == "all") { + return false; // No error + } + + bool error = false; + for (const auto &var : variable_list) { + if (std::find(fields.begin(), fields.end(), var.first) == std::end(fields)) { + fmt::print(stderr, "ERROR: {} Variable '{}' was not found in the list of valid fields.\n", + type, var.first); + error = true; + } + } + return error; + } + void process_nset_omissions(RegionVector &part_mesh, const Omissions &omit) { size_t part_count = part_mesh.size(); From ab15e70caa5a8c020d4e19670766e545f6bd62c2 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 29 Aug 2024 15:01:25 -0600 Subject: [PATCH 03/21] IOSS: Add missing pragma once --- packages/seacas/libraries/ioss/src/Ioss_use_fmt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/seacas/libraries/ioss/src/Ioss_use_fmt.h b/packages/seacas/libraries/ioss/src/Ioss_use_fmt.h index 49a9cdae41..a8bf8c6108 100644 --- a/packages/seacas/libraries/ioss/src/Ioss_use_fmt.h +++ b/packages/seacas/libraries/ioss/src/Ioss_use_fmt.h @@ -9,7 +9,7 @@ // * ZoneConnectivity // * Field // * BoundaryCondition - +#pragma once #include #if FMT_VERSION >= 90000 From 08fcaee8dda30277b6aca5a2181217d3bf8ce995 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 29 Aug 2024 15:01:54 -0600 Subject: [PATCH 04/21] IOSS: Test does not seem to work in parallel; making serial for now --- packages/seacas/libraries/ioss/src/main/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/packages/seacas/libraries/ioss/src/main/CMakeLists.txt b/packages/seacas/libraries/ioss/src/main/CMakeLists.txt index 016df9b2bd..49c9f6d273 100644 --- a/packages/seacas/libraries/ioss/src/main/CMakeLists.txt +++ b/packages/seacas/libraries/ioss/src/main/CMakeLists.txt @@ -357,9 +357,9 @@ TRIBITS_ADD_ADVANCED_TEST(exodus_to_unstructured_cgns_file_per_state_to_exodus TEST_0 EXEC io_shell ARGS ${DECOMP_ARG} --file_per_state ${CMAKE_CURRENT_SOURCE_DIR}/test/8-block.g 8-block-link.cgns NOEXEPREFIX NOEXESUFFIX NUM_MPI_PROCS ${NPROCS} - TEST_1 EXEC io_shell ARGS ${JOIN_ARG} 8-block-link.cgns 8-block-rt.g + TEST_1 EXEC io_shell ARGS 8-block-link.cgns 8-block-rt.g NOEXEPREFIX NOEXESUFFIX - NUM_MPI_PROCS ${NPROCS} + NUM_MPI_PROCS 1 TEST_2 EXEC exodiff ARGS -stat -pedantic ${CMAKE_CURRENT_SOURCE_DIR}/test/8-block.g 8-block-rt.g DIRECTORY ../../../../applications/exodiff NOEXEPREFIX NOEXESUFFIX From 72724e845798c6f6386bc533eed45982d0af0e1d Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Wed, 4 Sep 2024 11:34:27 -0600 Subject: [PATCH 05/21] SCRIPTS: Better support for parallel not in ACCESS/bin --- packages/seacas/scripts/epup.in | 21 ++++++++++++++++++--- packages/seacas/scripts/pconjoin.in | 16 +++++++++++++--- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/packages/seacas/scripts/epup.in b/packages/seacas/scripts/epup.in index fa9a25be5d..4457f6f126 100644 --- a/packages/seacas/scripts/epup.in +++ b/packages/seacas/scripts/epup.in @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright(C) 1999-2020 National Technology & Engineering Solutions +# Copyright(C) 1999-2024 National Technology & Engineering Solutions # of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with # NTESS, the U.S. Government retains certain rights in this software. # @@ -15,6 +15,10 @@ Usage: epup --subcycle ...normal epu options... ->->-> Send email to gdsjaar@sandia.gov for epup support.<-<-<- + Uses: GNU Parallel, + O. 
Tange (2018): GNU Parallel 2018, Mar 2018, ISBN 9781387509881, + DOI https://doi.org/10.5281/zenodo.1146014 + EPU_USAGE_EOF exit 1 } @@ -49,7 +53,8 @@ cycles=-1 pushd $(dirname "${0}") > /dev/null basedir=$(pwd -P) popd > /dev/null -if [ -x ${basedir}/epu -a -x ${basedir}/parallel -a -x ${basedir}/getopt.seacas ]; then + +if [ -x ${basedir}/epu -a -x ${basedir}/getopt.seacas ]; then ACCESS_BIN=$basedir elif [ "$ACCESS" == "" ]; then ACCESS_BIN=@ACCESSDIR@/bin @@ -57,8 +62,18 @@ else ACCESS_BIN=${ACCESS}/bin fi +if command -v ${basedir}/parallel >/dev/null 2>&1; then + PARALLEL=$basedir/parallel +elif command -v parallel >/dev/null 2>&1; then + PARALLEL=parallel +elif command -v ${ACCESS_BIN}/parallel >/dev/null 2>&1; then + PARALLEL=${ACCESS_BIN}/parallel +else + echo "ERROR: Could not find an executable named \"parallel\" for use with the script" + exit 1 +fi + EPU=${ACCESS_BIN}/epu -PARALLEL=${ACCESS_BIN}/parallel SEQ="seq -w" if [ $# -eq 0 ] ; then usage diff --git a/packages/seacas/scripts/pconjoin.in b/packages/seacas/scripts/pconjoin.in index d3364b96a6..e77d470799 100644 --- a/packages/seacas/scripts/pconjoin.in +++ b/packages/seacas/scripts/pconjoin.in @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright(C) 1999-2021, 2023 National Technology & Engineering Solutions +# Copyright(C) 1999-2024 National Technology & Engineering Solutions # of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with # NTESS, the U.S. Government retains certain rights in this software. 
# @@ -79,7 +79,7 @@ cycles=-1 pushd $(dirname "${0}") > /dev/null basedir=$(pwd -P) popd > /dev/null -if [ -x ${basedir}/conjoin -a -x ${basedir}/parallel -a -x ${basedir}/getopt.seacas ]; then +if [ -x ${basedir}/conjoin -a -x ${basedir}/getopt.seacas ]; then ACCESS_BIN=$basedir elif [ "$ACCESS" == "" ]; then ACCESS_BIN=@ACCESSDIR@/bin @@ -87,8 +87,18 @@ else ACCESS_BIN=${ACCESS}/bin fi +if command -v ${basedir}/parallel >/dev/null 2>&1; then + PARALLEL=$basedir/parallel +elif command -v parallel >/dev/null 2>&1; then + PARALLEL=parallel +elif command -v ${ACCESS_BIN}/parallel >/dev/null 2>&1; then + PARALLEL=${ACCESS_BIN}/parallel +else + echo "ERROR: Could not find an executable named \"parallel\" for use with the script" + exit 1 +fi + CONJOIN=${ACCESS_BIN}/conjoin -PARALLEL=${ACCESS_BIN}/parallel SEQ="seq -w" if [ $# -eq 0 ] ; then usage From 46488d7c680d767ca6b917e0376849b33526742e Mon Sep 17 00:00:00 2001 From: StepSecurity Bot Date: Wed, 4 Sep 2024 13:29:03 -0700 Subject: [PATCH 06/21] [StepSecurity] Apply security best practices (#482) Signed-off-by: StepSecurity Bot Co-authored-by: Greg Sjaardema --- .github/dependabot.yml | 21 ++++++ .github/workflows/build_external_lib.yml | 10 ++- .github/workflows/build_netcdf_no_hdf5.yml | 21 ++++-- .github/workflows/build_test.yml | 23 +++++-- .github/workflows/build_variant.yml | 21 ++++-- .github/workflows/cla.yml | 7 +- .github/workflows/codeql.yml | 78 ++++++++++++++++++++++ .github/workflows/coverity-scan.yml | 21 ++++-- .github/workflows/dependency-review.yml | 27 ++++++++ .github/workflows/docker-exodus.yml | 19 +++++- .github/workflows/docker-seacas.yml | 19 +++++- .github/workflows/intel-build.yml | 22 ++++-- .github/workflows/msys2.yml | 12 +++- .github/workflows/python-linting.yml | 12 +++- .github/workflows/scorecards.yml | 76 +++++++++++++++++++++ .github/workflows/spack.yml | 7 +- .github/workflows/stale.yml | 13 +++- .github/workflows/trailing.yml | 9 ++- .pre-commit-config.yaml | 22 ++++++ 
docker/exodus/Dockerfile | 2 +- docker/seacas/Dockerfile | 2 +- 21 files changed, 403 insertions(+), 41 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/codeql.yml create mode 100644 .github/workflows/dependency-review.yml create mode 100644 .github/workflows/scorecards.yml create mode 100644 .pre-commit-config.yaml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..b438f7a08e --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,21 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + + - package-ecosystem: pip + directory: /cmake/tribits/doc/sphinx + schedule: + interval: daily + + - package-ecosystem: docker + directory: /docker/exodus + schedule: + interval: daily + + - package-ecosystem: docker + directory: /docker/seacas + schedule: + interval: daily diff --git a/.github/workflows/build_external_lib.yml b/.github/workflows/build_external_lib.yml index 2aaaec9985..240af821c4 100644 --- a/.github/workflows/build_external_lib.yml +++ b/.github/workflows/build_external_lib.yml @@ -13,6 +13,9 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build-deps: @@ -24,7 +27,12 @@ jobs: compiler: [ gnu, clang ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} diff --git a/.github/workflows/build_netcdf_no_hdf5.yml b/.github/workflows/build_netcdf_no_hdf5.yml index 6274cca35f..bfe8605f3a 100644 --- a/.github/workflows/build_netcdf_no_hdf5.yml +++ b/.github/workflows/build_netcdf_no_hdf5.yml @@ -14,6 +14,9 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} 
cancel-in-progress: true +permissions: + contents: read + jobs: build-deps: @@ -26,7 +29,12 @@ jobs: netcdf: [ 4.9.2 ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -37,7 +45,7 @@ jobs: ### - name: Cache TPL-${{ matrix.compiler }}-${{ matrix.netcdf }} id: cache-TPL-mpi - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.netcdf }} key: TPL-v5-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.netcdf }} @@ -84,7 +92,12 @@ jobs: } steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -92,7 +105,7 @@ jobs: - name: Fetch TPL Cache id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.config.compiler }}-${{ matrix.netcdf }} key: TPL-v5-${{ runner.os }}-${{ matrix.config.compiler }}-${{ matrix.netcdf }} diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index ffbae76027..64b4b7f19d 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -12,6 +12,9 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build-deps: @@ -26,7 +29,12 @@ jobs: cgns: [ 4.4.0 ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + 
egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -37,7 +45,7 @@ jobs: ### - name: Cache TPL-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} @@ -67,7 +75,12 @@ jobs: cgns: [ 4.4.0 ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -75,7 +88,7 @@ jobs: - name: Fetch TPL Cache id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} @@ -88,7 +101,7 @@ jobs: # Configure and build ### - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.11' diff --git a/.github/workflows/build_variant.yml b/.github/workflows/build_variant.yml index 186183506a..9defd37df3 100644 --- a/.github/workflows/build_variant.yml +++ b/.github/workflows/build_variant.yml @@ -14,6 +14,9 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build-deps: @@ -27,7 +30,12 @@ jobs: cgns: [ 4.4.0 ] steps: - - uses: 
actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -38,7 +46,7 @@ jobs: ### - name: Cache TPL-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} id: cache-TPL-mpi - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} @@ -372,7 +380,12 @@ jobs: } steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -380,7 +393,7 @@ jobs: - name: Fetch TPL Cache id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.config.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.config.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 56aa25d5de..ded21892a0 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -14,9 +14,14 @@ jobs: CLAAssistant: runs-on: ubuntu-latest steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: "CLA Assistant" if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA 
Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@v2.3.2 + uses: contributor-assistant/github-action@dbc1c64d82d3aad5072007a41fff2828ae6d23ec # v2.3.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # the below token should have repo scope and must be manually added by you in the repository's secret diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..dd2b3778fb --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,78 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: ["master"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["master"] + schedule: + - cron: "0 0 * * 1" + +permissions: + contents: read + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["cpp", "python"] + # CodeQL supports [ $supported-codeql-languages ] + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
+ + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/coverity-scan.yml b/.github/workflows/coverity-scan.yml index d0bd83ade4..8868462441 100644 --- a/.github/workflows/coverity-scan.yml +++ b/.github/workflows/coverity-scan.yml @@ -3,6 +3,9 @@ on: push: branches: - coverity +permissions: + contents: read + jobs: build-deps: @@ -17,7 +20,12 @@ jobs: cgns: [ 4.4.0 ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -28,7 +36,7 @@ jobs: ### - name: Cache TPL-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} @@ -58,7 +66,12 @@ jobs: cgns: [ 4.4.0 ] steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install System dependencies shell: bash -l {0} @@ -66,7 +79,7 @@ jobs: - name: Fetch TPL Cache id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ 
matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v2-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 0000000000..bf2dcfbae9 --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,27 @@ +# Dependency Review Action +# +# This Action will scan dependency manifest files that change as part of a Pull Request, +# surfacing known-vulnerable versions of the packages declared or updated in the PR. +# Once installed, if the workflow run is marked as required, +# PRs introducing known-vulnerable packages will be blocked from merging. +# +# Source repository: https://github.com/actions/dependency-review-action +name: 'Dependency Review' +on: [pull_request] + +permissions: + contents: read + +jobs: + dependency-review: + runs-on: ubuntu-latest + steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - name: 'Checkout Repository' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: 'Dependency Review' + uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4 diff --git a/.github/workflows/docker-exodus.yml b/.github/workflows/docker-exodus.yml index 9e407d82dd..42a31b7387 100644 --- a/.github/workflows/docker-exodus.yml +++ b/.github/workflows/docker-exodus.yml @@ -7,18 +7,26 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Docker login - uses: 
docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0 with: context: docker/exodus push: true @@ -30,6 +38,11 @@ jobs: runs-on: ubuntu-latest container: mrbuche/exodus steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: Docker pull and test run: | python -c 'import exodus3 as exodus' diff --git a/.github/workflows/docker-seacas.yml b/.github/workflows/docker-seacas.yml index aaf85126a2..dff5a88d0c 100644 --- a/.github/workflows/docker-seacas.yml +++ b/.github/workflows/docker-seacas.yml @@ -7,18 +7,26 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Docker login - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0 with: context: docker/seacas push: true @@ -30,6 +38,11 @@ jobs: runs-on: ubuntu-latest container: mrbuche/seacas steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: Docker pull and test run: | cd 
/seacas/build/ && ctest --output-on-failure && cd diff --git a/.github/workflows/intel-build.yml b/.github/workflows/intel-build.yml index 012a105446..4b4692b12c 100644 --- a/.github/workflows/intel-build.yml +++ b/.github/workflows/intel-build.yml @@ -30,15 +30,20 @@ jobs: shell: bash --noprofile --norc {0} steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: Maximize build space - uses: easimon/maximize-build-space@master + uses: easimon/maximize-build-space@fc881a613ad2a34aca9c9624518214ebc21dfc0c # master with: root-reserve-mb: 30000 remove-dotnet: 'true' remove-android: 'true' remove-haskell: 'true' remove-codeql: 'true' - - uses: actions/checkout@v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: setup repo run: | wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB @@ -64,7 +69,7 @@ jobs: ### - name: Cache TPL-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v3intel-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} @@ -92,15 +97,20 @@ jobs: cgns: [ 4.4.0 ] steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: Maximize build space - uses: easimon/maximize-build-space@master + uses: easimon/maximize-build-space@fc881a613ad2a34aca9c9624518214ebc21dfc0c # master with: root-reserve-mb: 30000 remove-dotnet: 'true' remove-android: 'true' remove-haskell: 'true' remove-codeql: 'true' - - uses: actions/checkout@v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - 
name: setup repo run: | wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB @@ -123,7 +133,7 @@ jobs: - name: Fetch TPL Cache id: cache-TPL - uses: actions/cache@v4 + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ~/environments/${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} key: TPL-v3intel-${{ runner.os }}-${{ matrix.compiler }}-${{ matrix.hdf5 }}-${{ matrix.netcdf }}-${{ matrix.cgns }} diff --git a/.github/workflows/msys2.yml b/.github/workflows/msys2.yml index 135124f1f9..327b05048b 100644 --- a/.github/workflows/msys2.yml +++ b/.github/workflows/msys2.yml @@ -11,6 +11,9 @@ concurrency: group: ${{ github.workflow}}-${{ github.head_ref }} cancel-in-progress: true +permissions: + contents: read + jobs: build: runs-on: windows-latest @@ -18,8 +21,13 @@ jobs: run: shell: msys2 {0} steps: - - uses: actions/checkout@v4 - - uses: msys2/setup-msys2@v2 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true diff --git a/.github/workflows/python-linting.yml b/.github/workflows/python-linting.yml index 7e450e2408..dd8794bd5c 100644 --- a/.github/workflows/python-linting.yml +++ b/.github/workflows/python-linting.yml @@ -7,13 +7,21 @@ on: branches: - master workflow_dispatch: +permissions: + contents: read + jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Python - uses: actions/setup-python@v5 + uses: 
actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.10' - name: Install dependencies diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml new file mode 100644 index 0000000000..719be831c6 --- /dev/null +++ b/.github/workflows/scorecards.yml @@ -0,0 +1,76 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '20 7 * * 2' + push: + branches: ["master"] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + contents: read + actions: read + + steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - name: "Checkout code" + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecards on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. 
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + with: + sarif_file: results.sarif diff --git a/.github/workflows/spack.yml b/.github/workflows/spack.yml index 5044d7710e..b1e6c47085 100644 --- a/.github/workflows/spack.yml +++ b/.github/workflows/spack.yml @@ -15,8 +15,13 @@ jobs: build: runs-on: ubuntu-22.04 steps: + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + - name: Set up Spack - uses: spack/setup-spack@v2 + uses: spack/setup-spack@5ab3c91bdefffffad9a7e45d1d156146afebb3a7 # v2.1.1 with: ref: develop # Spack version (examples: develop, releases/v0.21) buildcache: true # Configure oci://ghcr.io/spack/github-actions-buildcache diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 34045177cb..d2aabd0841 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -4,13 +4,24 @@ on: schedule: - cron: "30 1 * * *" +permissions: + contents: read + jobs: stale: + permissions: + issues: write # for actions/stale to close stale issues + pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-latest steps: - - uses: actions/stale@v1 + - name: Harden Runner + uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/stale@0649bd81195b7ac109fbf9dde113af7e58a78b8e # v1.1.1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'Stale issue message' diff --git a/.github/workflows/trailing.yml b/.github/workflows/trailing.yml index 5bea2d1731..3f384fa063 100644 --- a/.github/workflows/trailing.yml +++ b/.github/workflows/trailing.yml @@ -7,5 +7,10 @@ jobs: name: Find Trailing Whitespace runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: harupy/find-trailing-whitespace@master + - name: Harden Runner + uses: 
step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + with: + egress-policy: audit + + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: harupy/find-trailing-whitespace@56310d70ae8fd21afec8d4307d2d9ab6c15e7c5d # master diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..a24af8c9ad --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,22 @@ +repos: +- repo: https://github.com/gitleaks/gitleaks + rev: v8.16.3 + hooks: + - id: gitleaks +- repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: shellcheck +- repo: https://github.com/pocc/pre-commit-hooks + rev: v1.3.5 + hooks: + - id: cpplint +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace +- repo: https://github.com/pylint-dev/pylint + rev: v2.17.2 + hooks: + - id: pylint diff --git a/docker/exodus/Dockerfile b/docker/exodus/Dockerfile index cf691d5dc7..c6f58c6a13 100644 --- a/docker/exodus/Dockerfile +++ b/docker/exodus/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:22.04@sha256:adbb90115a21969d2fe6fa7f9af4253e16d45f8d4c1e930182610c4731962658 ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install --no-install-recommends -y \ diff --git a/docker/seacas/Dockerfile b/docker/seacas/Dockerfile index cd14bedba6..dec6e104a4 100644 --- a/docker/seacas/Dockerfile +++ b/docker/seacas/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:22.04@sha256:adbb90115a21969d2fe6fa7f9af4253e16d45f8d4c1e930182610c4731962658 ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install --no-install-recommends -y \ From 88b005a3c839df596de3704db5f8d30b5bbfd320 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Wed, 4 Sep 2024 15:10:43 -0600 Subject: [PATCH 07/21] EXODUS: Clean up some error output/handling --- packages/seacas/libraries/exodus/include/exodusII.h | 4 
++-- packages/seacas/libraries/exodus/src/ex_err.c | 3 ++- packages/seacas/libraries/exodus/src/ex_utils.c | 7 ++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/seacas/libraries/exodus/include/exodusII.h b/packages/seacas/libraries/exodus/include/exodusII.h index 90784f0b34..d3511618e9 100644 --- a/packages/seacas/libraries/exodus/include/exodusII.h +++ b/packages/seacas/libraries/exodus/include/exodusII.h @@ -1955,9 +1955,9 @@ enum ex_error_return_code { EX_LASTERR = -1003, /**< in ex_err, use existing err_num value */ EX_NULLENTITY = -1006, /**< null entity found */ EX_NOENTITY = -1007, /**< no entities of that type on database */ + EX_NOTFOUND = -1008, /**< could not find requested variable on database */ EX_INTSIZEMISMATCH = - -1008, /**< integer sizes do not match on input/output databases in ex_copy */ - EX_NOTFOUND = -1008, /**< could not find requested variable on database */ + -1009, /**< integer sizes do not match on input/output databases in ex_copy */ EX_FATAL = -1, /**< fatal error flag def */ EX_NOERR = 0, /**< no error flag def */ diff --git a/packages/seacas/libraries/exodus/src/ex_err.c b/packages/seacas/libraries/exodus/src/ex_err.c index a2400ab978..99f2429b1f 100644 --- a/packages/seacas/libraries/exodus/src/ex_err.c +++ b/packages/seacas/libraries/exodus/src/ex_err.c @@ -1,5 +1,5 @@ /* - * Copyright(C) 1999-2020, 2023 National Technology & Engineering Solutions + * Copyright(C) 1999-2020, 2023, 2024 National Technology & Engineering Solutions * of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with * NTESS, the U.S. Government retains certain rights in this software. 
* @@ -345,6 +345,7 @@ const char *ex_strerror(int err_num) case EX_INTERNAL: return "Internal logic error in exodus library."; case EX_NOTROOTID: return "File id is not the root id; it is a subgroup id."; case EX_NULLENTITY: return "Null entity found."; + case EX_NOTFOUND: return "Could not find requested variable on database."; case EX_INTSIZEMISMATCH: return "Integer sizes must match for input and output file in ex_copy."; case EX_MSG: return "Message printed; no error implied."; default: return nc_strerror(err_num); diff --git a/packages/seacas/libraries/exodus/src/ex_utils.c b/packages/seacas/libraries/exodus/src/ex_utils.c index a95f844529..7513e665c5 100644 --- a/packages/seacas/libraries/exodus/src/ex_utils.c +++ b/packages/seacas/libraries/exodus/src/ex_utils.c @@ -193,7 +193,7 @@ int exi_check_file_type(const char *path, int *type) FILE *fp; if (!(fp = fopen(path, "r"))) { char errmsg[MAX_ERR_LENGTH]; - snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: Could not open file '%s', error = %s.", path, + snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: Could not open file '%s',\n\t\terror = %s.", path, strerror(errno)); ex_err(__func__, errmsg, EX_WRONGFILETYPE); EX_FUNC_LEAVE(EX_FATAL); @@ -203,8 +203,9 @@ int exi_check_file_type(const char *path, int *type) fclose(fp); if (i != MAGIC_NUMBER_LEN) { char errmsg[MAX_ERR_LENGTH]; - snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: Could not read magic data from file '%s', err = %s.", - path, strerror(errno)); + snprintf(errmsg, MAX_ERR_LENGTH, + "ERROR: Could not read magic data from file '%s',\n\t\terror = %s.", path, + strerror(errno)); ex_err(__func__, errmsg, EX_WRONGFILETYPE); EX_FUNC_LEAVE(EX_FATAL); } From fb897c32803b71b2db9ffd5cfff0082897fe0c8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:29:32 -0600 Subject: [PATCH 08/21] Bump ubuntu from 22.04 to 24.04 in /docker/exodus (#483) Bumps ubuntu from 22.04 to 24.04. 
--- updated-dependencies: - dependency-name: ubuntu dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docker/exodus/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/exodus/Dockerfile b/docker/exodus/Dockerfile index c6f58c6a13..8acdf3b474 100644 --- a/docker/exodus/Dockerfile +++ b/docker/exodus/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04@sha256:adbb90115a21969d2fe6fa7f9af4253e16d45f8d4c1e930182610c4731962658 +FROM ubuntu:24.04@sha256:8a37d68f4f73ebf3d4efafbcf66379bf3728902a8038616808f04e34a9ab63ee ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install --no-install-recommends -y \ From 35c4663fbb8a89955d762e58a808895228e84680 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:29:47 -0600 Subject: [PATCH 09/21] Bump sphinx from 4.0.2 to 8.0.2 in /cmake/tribits/doc/sphinx (#484) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.0.2 to 8.0.2. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/v8.0.2/CHANGES.rst) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.0.2...v8.0.2) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmake/tribits/doc/sphinx/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/tribits/doc/sphinx/requirements.txt b/cmake/tribits/doc/sphinx/requirements.txt index 8c7b1fdb5d..bf765bad1d 100644 --- a/cmake/tribits/doc/sphinx/requirements.txt +++ b/cmake/tribits/doc/sphinx/requirements.txt @@ -1,2 +1,2 @@ -Sphinx==4.0.2 +Sphinx==8.0.2 sphinx-rtd-theme==0.5.2 From 4163bb136b23d3eb7f1854e634fb7fea42869d31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:31:26 -0600 Subject: [PATCH 10/21] Bump contributor-assistant/github-action from 2.3.2 to 2.5.1 (#487) Bumps [contributor-assistant/github-action](https://github.com/contributor-assistant/github-action) from 2.3.2 to 2.5.1. - [Release notes](https://github.com/contributor-assistant/github-action/releases) - [Commits](https://github.com/contributor-assistant/github-action/compare/dbc1c64d82d3aad5072007a41fff2828ae6d23ec...f41946747f85d28e9a738f4f38dbcc74b69c7e0e) --- updated-dependencies: - dependency-name: contributor-assistant/github-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cla.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index ded21892a0..618a135cbf 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -21,7 +21,7 @@ jobs: - name: "CLA Assistant" if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@dbc1c64d82d3aad5072007a41fff2828ae6d23ec # v2.3.2 + uses: contributor-assistant/github-action@f41946747f85d28e9a738f4f38dbcc74b69c7e0e # v2.5.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # the below token should have repo scope and must be manually added by you in the repository's secret From 069e0d6dd52bbb66afea2288180fb0ebd1788e64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:31:45 -0600 Subject: [PATCH 11/21] Bump ossf/scorecard-action from 2.3.3 to 2.4.0 (#488) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.3 to 2.4.0. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/dc50aa9510b46c811795eb24b2f1ba02a914e534...62b2cac7ed8198b15735ed49ab1e5cf35480ba46) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 719be831c6..d88aba7b67 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -41,7 +41,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif From 02badd3d3853a1e95237e08e614da2c330cf7532 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:32:50 -0600 Subject: [PATCH 12/21] Bump docker/build-push-action from 5.4.0 to 6.7.0 (#491) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5.4.0 to 6.7.0. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/ca052bb54ab0790a636c9b5f226502c73d547a25...5cd11c3a4ced054e52742c5fd54dca954e0edd85) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker-exodus.yml | 2 +- .github/workflows/docker-seacas.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-exodus.yml b/.github/workflows/docker-exodus.yml index 42a31b7387..b7c51fa66e 100644 --- a/.github/workflows/docker-exodus.yml +++ b/.github/workflows/docker-exodus.yml @@ -26,7 +26,7 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/exodus push: true diff --git a/.github/workflows/docker-seacas.yml b/.github/workflows/docker-seacas.yml index dff5a88d0c..3f144095de 100644 --- a/.github/workflows/docker-seacas.yml +++ b/.github/workflows/docker-seacas.yml @@ -26,7 +26,7 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Docker build and push - uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/seacas push: true From 86b530d35d8b3ce1bd1fa03c754f6323e9f97152 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:33:08 -0600 Subject: [PATCH 13/21] Bump actions/upload-artifact from 4.3.6 to 4.4.0 (#490) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.6 to 4.4.0. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/834a144ee995460fba8ed112a2fc961b36a5ec5a...50769540e7f4bd5e21e526ee35c689e35e0d6874) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d88aba7b67..f06e457055 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -63,7 +63,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 with: name: SARIF file path: results.sarif From 9e1997620f8c58a7803e43c6f51852d7a9042e35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:33:26 -0600 Subject: [PATCH 14/21] Bump actions/stale from 1.1.1 to 9.0.0 (#489) Bumps [actions/stale](https://github.com/actions/stale) from 1.1.1 to 9.0.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/0649bd81195b7ac109fbf9dde113af7e58a78b8e...28ca1036281a5e5922ead5184a1bbf96e5fc984e) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d2aabd0841..c1ef3b866f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -21,7 +21,7 @@ jobs: with: egress-policy: audit - - uses: actions/stale@0649bd81195b7ac109fbf9dde113af7e58a78b8e # v1.1.1 + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'Stale issue message' From 908fcc961f9ad066e0bf969421141d2d32dd06d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:33:44 -0600 Subject: [PATCH 15/21] Bump ubuntu from 22.04 to 24.04 in /docker/seacas (#486) Bumps ubuntu from 22.04 to 24.04. --- updated-dependencies: - dependency-name: ubuntu dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docker/seacas/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/seacas/Dockerfile b/docker/seacas/Dockerfile index dec6e104a4..0c3b1b40a0 100644 --- a/docker/seacas/Dockerfile +++ b/docker/seacas/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:22.04@sha256:adbb90115a21969d2fe6fa7f9af4253e16d45f8d4c1e930182610c4731962658 +FROM ubuntu:24.04@sha256:8a37d68f4f73ebf3d4efafbcf66379bf3728902a8038616808f04e34a9ab63ee ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && \ apt-get install --no-install-recommends -y \ From 1edbdafb3388d42161c17321cdf03b4f51e23a4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:44:11 -0600 Subject: [PATCH 16/21] Bump sphinx-rtd-theme from 0.5.2 to 2.0.0 in /cmake/tribits/doc/sphinx (#485) * Bump sphinx-rtd-theme from 0.5.2 to 2.0.0 in /cmake/tribits/doc/sphinx Bumps [sphinx-rtd-theme](https://github.com/readthedocs/sphinx_rtd_theme) from 0.5.2 to 2.0.0. - [Changelog](https://github.com/readthedocs/sphinx_rtd_theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/readthedocs/sphinx_rtd_theme/compare/0.5.2...2.0.0) --- updated-dependencies: - dependency-name: sphinx-rtd-theme dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update requirements.txt Signed-off-by: Greg Sjaardema --------- Signed-off-by: dependabot[bot] Signed-off-by: Greg Sjaardema Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Greg Sjaardema --- cmake/tribits/doc/sphinx/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/tribits/doc/sphinx/requirements.txt b/cmake/tribits/doc/sphinx/requirements.txt index bf765bad1d..c0424a1414 100644 --- a/cmake/tribits/doc/sphinx/requirements.txt +++ b/cmake/tribits/doc/sphinx/requirements.txt @@ -1,2 +1,2 @@ Sphinx==8.0.2 -sphinx-rtd-theme==0.5.2 +sphinx-rtd-theme==2.0.0 From 3a08cff15b49ef30a93fccb22e71705837a03e67 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Wed, 4 Sep 2024 16:50:20 -0600 Subject: [PATCH 17/21] CI: Update docker python version --- docker/exodus/Dockerfile | 2 +- docker/seacas/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/exodus/Dockerfile b/docker/exodus/Dockerfile index 8acdf3b474..8cb4c9fbfa 100644 --- a/docker/exodus/Dockerfile +++ b/docker/exodus/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ zlib1g-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1 +RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 RUN git clone --depth 1 https://github.com/sandialabs/seacas.git WORKDIR /seacas RUN ./install-tpl.sh diff --git a/docker/seacas/Dockerfile b/docker/seacas/Dockerfile index 0c3b1b40a0..3f57b235c5 100644 --- a/docker/seacas/Dockerfile +++ b/docker/seacas/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ zlib1g-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.10 1 +RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 RUN git clone --depth 1 
https://github.com/sandialabs/seacas.git WORKDIR /seacas RUN ./install-tpl.sh From 8daebe3c84a430ea7a07e6c41c906896db16a742 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 5 Sep 2024 09:42:45 -0600 Subject: [PATCH 18/21] EXODUS: Analyzer fixes --- .../seacas/libraries/chaco/util/smalloc.c | 4 +- .../libraries/exodus_for/src/exo_jack.c | 67 +++++++++++++++++-- 2 files changed, 63 insertions(+), 8 deletions(-) diff --git a/packages/seacas/libraries/chaco/util/smalloc.c b/packages/seacas/libraries/chaco/util/smalloc.c index 9a4cca9274..e9fd2d1727 100644 --- a/packages/seacas/libraries/chaco/util/smalloc.c +++ b/packages/seacas/libraries/chaco/util/smalloc.c @@ -1,5 +1,5 @@ /* - * Copyright(C) 1999-2020 National Technology & Engineering Solutions + * Copyright(C) 1999-2020, 2024 National Technology & Engineering Solutions * of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with * NTESS, the U.S. Government retains certain rights in this software. * @@ -19,7 +19,7 @@ static struct smalloc_debug_data { int order; /* which smalloc call is it? 
*/ size_t size; /* size of malloc invocation */ - double *ptr; /* memory location returned */ + void *ptr; /* memory location returned */ struct smalloc_debug_data *next; /* pointer to next element */ } *top = NULL; diff --git a/packages/seacas/libraries/exodus_for/src/exo_jack.c b/packages/seacas/libraries/exodus_for/src/exo_jack.c index c0fea886a7..b08bdb143d 100644 --- a/packages/seacas/libraries/exodus_for/src/exo_jack.c +++ b/packages/seacas/libraries/exodus_for/src/exo_jack.c @@ -79,6 +79,9 @@ static int *i8i4(int64_t size, const int64_t *i8) { int *i4 = malloc(size * sizeof(int)); + if (i4 == NULL) { + return NULL; + } for (int64_t i = 0; i < size; i++) { i4[i] = i8[i]; } @@ -245,6 +248,10 @@ void F2C(expini, EXPINI)(int *idexo, char *title, void_int *num_dim, void_int *n slen = titlelen; } char *name = malloc((slen + 1) * sizeof(char)); + if (name == NULL) { + *ierr = EX_MEMFAIL; + return; + } (void)ex_fstrncpy(name, title, slen); if (ex_int64_status(*idexo) & EX_BULK_INT64_API) { @@ -866,6 +873,10 @@ void F2C(expecpp, EXPECPP)(int *idexo, int *obj_type, entity_id *elem_blk_id, in return; } int *counts4 = i8i4(block.num_entry, counts); + if (counts4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } *ierr = ex_put_entity_count_per_polyhedra(*idexo, (ex_entity_type)*obj_type, *elem_blk_id, counts4); free(counts4); @@ -2040,7 +2051,11 @@ void F2C(exgvan, EXGVAN)(int *idexo, char *var_type, int *num_vars, char *var_na void F2C(expvtt, EXPVTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = i8i4((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab); - *ierr = ex_put_truth_table(*idexo, EX_ELEM_BLOCK, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_put_truth_table(*idexo, EX_ELEM_BLOCK, *num_entity, *num_var, var_tab4); free(var_tab4); } #else @@ -2058,7 +2073,11 @@ void F2C(expvtt, EXPVTT)(int *idexo, int *num_entity, int *num_var, int *var_tab void F2C(expnstt, 
EXPNSTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = i8i4((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab); - *ierr = ex_put_truth_table(*idexo, EX_NODE_SET, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_put_truth_table(*idexo, EX_NODE_SET, *num_entity, *num_var, var_tab4); free(var_tab4); } #else @@ -2076,7 +2095,11 @@ void F2C(expnstt, EXPNSTT)(int *idexo, int *num_entity, int *num_var, int *var_t void F2C(expsstt, EXPSSTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = i8i4((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab); - *ierr = ex_put_truth_table(*idexo, EX_SIDE_SET, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_put_truth_table(*idexo, EX_SIDE_SET, *num_entity, *num_var, var_tab4); free(var_tab4); } #else @@ -2094,7 +2117,11 @@ void F2C(expsstt, EXPSSTT)(int *idexo, int *num_entity, int *num_var, int *var_t void F2C(exgvtt, EXGVTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = malloc(*num_entity * *num_var * sizeof(int)); - *ierr = ex_get_truth_table(*idexo, EX_ELEM_BLOCK, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_get_truth_table(*idexo, EX_ELEM_BLOCK, *num_entity, *num_var, var_tab4); i4i8((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab4, var_tab); free(var_tab4); #else @@ -2112,7 +2139,11 @@ void F2C(exgvtt, EXGVTT)(int *idexo, int *num_entity, int *num_var, int *var_tab void F2C(exgnstt, EXGNSTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = malloc(*num_entity * *num_var * sizeof(int)); - *ierr = ex_get_truth_table(*idexo, EX_NODE_SET, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_get_truth_table(*idexo, 
EX_NODE_SET, *num_entity, *num_var, var_tab4); i4i8((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab4, var_tab); free(var_tab4); } @@ -2131,7 +2162,11 @@ void F2C(exgnstt, EXGNSTT)(int *idexo, int *num_entity, int *num_var, int *var_t void F2C(exgsstt, EXGSSTT)(int *idexo, int *num_entity, int *num_var, int64_t *var_tab, int *ierr) { int *var_tab4 = malloc(*num_entity * *num_var * sizeof(int)); - *ierr = ex_get_truth_table(*idexo, EX_SIDE_SET, *num_entity, *num_var, var_tab4); + if (var_tab4 == NULL) { + *ierr = EX_MEMFAIL; + return; + } + *ierr = ex_get_truth_table(*idexo, EX_SIDE_SET, *num_entity, *num_var, var_tab4); i4i8((int64_t)(*num_entity) * (int64_t)(*num_var), var_tab4, var_tab); free(var_tab4); } @@ -2443,6 +2478,10 @@ void F2C(exgssc, EXGSSC)(int *idexo, entity_id *side_set_id, int64_t *side_set_n int64_t num_df_in_set = 0; ex_get_set_param(*idexo, EX_SIDE_SET, *side_set_id, &num_sides_in_set, &num_df_in_set); int *cnt_list = malloc(num_sides_in_set * sizeof(int)); + if (cnt_list == NULL) { + *ierr = EX_MEMFAIL; + return; + } *ierr = ex_get_side_set_node_count(*idexo, *side_set_id, cnt_list); @@ -2465,6 +2504,10 @@ void F2C(exgcssc, EXGCSSC)(int *idexo, int64_t *side_set_node_cnt_list, int *ier { int count = ex_inquire_int(*idexo, EX_INQ_SS_ELEM_LEN); int *cnt_list = malloc(count * sizeof(int)); + if (cnt_list == NULL) { + *ierr = EX_MEMFAIL; + return; + } *ierr = ex_get_concat_side_set_node_count(*idexo, cnt_list); @@ -2877,6 +2920,10 @@ void F2C(exgii, EXGII)(int *idne, int *nproc, int *nproc_in_f, char *ftype, int } char *file_type = (char *)malloc((slen + 1) * sizeof(char)); + if (file_type == NULL) { + *ierr = EX_MEMFAIL; + return; + } if ((*ierr = ex_get_init_info(*idne, nproc, nproc_in_f, file_type)) != 0) { char errmsg[MAX_ERR_LENGTH]; @@ -2914,6 +2961,10 @@ void F2C(expii, EXPII)(int *idne, int *nproc, int *nproc_in_f, char *ftype, int } char *file_type = (char *)malloc((slen + 1) * sizeof(char)); + if (file_type == NULL) { + *ierr = 
EX_MEMFAIL; + return; + } ex_fstrncpy(file_type, ftype, slen); @@ -3528,6 +3579,10 @@ void F2C(exgelt, EXGELT)(int *idne, entity_id *elem_blk_id, char *elem_type, int } char *etype = (char *)malloc((slen + 1) * sizeof(char)); + if (etype == NULL) { + *ierr = EX_MEMFAIL; + return; + } if ((*ierr = ex_get_elem_type(*idne, *elem_blk_id, etype)) != 0) { char errmsg[MAX_ERR_LENGTH]; From 1afd9c38a7b9c775289a0b701d3a0658516878d5 Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 5 Sep 2024 09:43:28 -0600 Subject: [PATCH 19/21] IOSS: Analyzer fixes --- packages/seacas/libraries/ioss/src/Ioss_VariableType.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/seacas/libraries/ioss/src/Ioss_VariableType.h b/packages/seacas/libraries/ioss/src/Ioss_VariableType.h index 4c19e0aff9..40d0ab5495 100644 --- a/packages/seacas/libraries/ioss/src/Ioss_VariableType.h +++ b/packages/seacas/libraries/ioss/src/Ioss_VariableType.h @@ -143,8 +143,8 @@ namespace Ioss { VariableType(const std::string &type, int comp_count, bool delete_me = false); private: - const std::string name_; - int componentCount; + const std::string name_{}; + int componentCount{}; static bool build_variable_type(const std::string &raw_type); }; From d464eb67b45777950b0364d67177b6c815519ffb Mon Sep 17 00:00:00 2001 From: Greg Sjaardema Date: Thu, 5 Sep 2024 15:49:05 -0600 Subject: [PATCH 20/21] EXODUS: Eliminate compiler warning --- packages/seacas/libraries/exodus/test/testwt-field-metadata.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/seacas/libraries/exodus/test/testwt-field-metadata.c b/packages/seacas/libraries/exodus/test/testwt-field-metadata.c index 642aa5ba5f..fe09569645 100644 --- a/packages/seacas/libraries/exodus/test/testwt-field-metadata.c +++ b/packages/seacas/libraries/exodus/test/testwt-field-metadata.c @@ -131,7 +131,7 @@ int main(int argc, char **argv) int cardinality = field.cardinality[0] != 0 ? 
field.cardinality[0] : ex_field_cardinality(field.type[0]); for (int i = 0; i < cardinality; i++) { - const char *name = ex_component_field_name(&field, (int[]){i + 1}); + const char *name = ex_component_field_name(&field, (int[]){i + 1, 0}); assert(strcmp(var_names[vname++], name) == 0); } } @@ -152,7 +152,7 @@ int main(int argc, char **argv) int cardinality = field.cardinality[0] != 0 ? field.cardinality[0] : ex_field_cardinality(field.type[0]); for (int i = 0; i < cardinality; i++) { - const char *name = ex_component_field_name(&field, (int[]){i + 1}); + const char *name = ex_component_field_name(&field, (int[]){i + 1, 0}); assert(strcmp(var_names[vname++], name) == 0); } } From 8aa86d2b55b54931d9137ab64a82cf364a438fbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:08:45 -0600 Subject: [PATCH 21/21] Bump step-security/harden-runner from 2.9.1 to 2.10.0 (#492) Bumps [step-security/harden-runner](https://github.com/step-security/harden-runner) from 2.9.1 to 2.10.0. - [Release notes](https://github.com/step-security/harden-runner/releases) - [Commits](https://github.com/step-security/harden-runner/compare/5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde...446798f8213ac2e75931c1b0769676d927801858) --- updated-dependencies: - dependency-name: step-security/harden-runner dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build_external_lib.yml | 2 +- .github/workflows/build_netcdf_no_hdf5.yml | 4 ++-- .github/workflows/build_test.yml | 4 ++-- .github/workflows/build_variant.yml | 4 ++-- .github/workflows/cla.yml | 2 +- .github/workflows/codeql.yml | 2 +- .github/workflows/coverity-scan.yml | 4 ++-- .github/workflows/dependency-review.yml | 2 +- .github/workflows/docker-exodus.yml | 4 ++-- .github/workflows/docker-seacas.yml | 4 ++-- .github/workflows/intel-build.yml | 4 ++-- .github/workflows/msys2.yml | 2 +- .github/workflows/python-linting.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/spack.yml | 2 +- .github/workflows/stale.yml | 2 +- .github/workflows/trailing.yml | 2 +- 17 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/build_external_lib.yml b/.github/workflows/build_external_lib.yml index 240af821c4..e05dacc24c 100644 --- a/.github/workflows/build_external_lib.yml +++ b/.github/workflows/build_external_lib.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/build_netcdf_no_hdf5.yml b/.github/workflows/build_netcdf_no_hdf5.yml index bfe8605f3a..5b3e552c1f 100644 --- a/.github/workflows/build_netcdf_no_hdf5.yml +++ b/.github/workflows/build_netcdf_no_hdf5.yml @@ -30,7 +30,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -93,7 +93,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: 
step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 64b4b7f19d..406209b743 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -30,7 +30,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -76,7 +76,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/build_variant.yml b/.github/workflows/build_variant.yml index 9defd37df3..6e40a546bc 100644 --- a/.github/workflows/build_variant.yml +++ b/.github/workflows/build_variant.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -381,7 +381,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 618a135cbf..d8211bfdcb 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git 
a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index dd2b3778fb..81aaa641bf 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/coverity-scan.yml b/.github/workflows/coverity-scan.yml index 8868462441..85eec0357a 100644 --- a/.github/workflows/coverity-scan.yml +++ b/.github/workflows/coverity-scan.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -67,7 +67,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index bf2dcfbae9..3f8d3f09d8 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/docker-exodus.yml b/.github/workflows/docker-exodus.yml index b7c51fa66e..4341a7e44f 100644 --- a/.github/workflows/docker-exodus.yml +++ b/.github/workflows/docker-exodus.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # 
v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -39,7 +39,7 @@ jobs: container: mrbuche/exodus steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/docker-seacas.yml b/.github/workflows/docker-seacas.yml index 3f144095de..68c21e2166 100644 --- a/.github/workflows/docker-seacas.yml +++ b/.github/workflows/docker-seacas.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -39,7 +39,7 @@ jobs: container: mrbuche/seacas steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/intel-build.yml b/.github/workflows/intel-build.yml index 4b4692b12c..a85e01990b 100644 --- a/.github/workflows/intel-build.yml +++ b/.github/workflows/intel-build.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit @@ -98,7 +98,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/msys2.yml b/.github/workflows/msys2.yml index 327b05048b..fc1103eb07 100644 --- 
a/.github/workflows/msys2.yml +++ b/.github/workflows/msys2.yml @@ -22,7 +22,7 @@ jobs: shell: msys2 {0} steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/python-linting.yml b/.github/workflows/python-linting.yml index dd8794bd5c..eea129a789 100644 --- a/.github/workflows/python-linting.yml +++ b/.github/workflows/python-linting.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f06e457055..f176119399 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/spack.yml b/.github/workflows/spack.yml index b1e6c47085..0a1a4d7b4e 100644 --- a/.github/workflows/spack.yml +++ b/.github/workflows/spack.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c1ef3b866f..1a38a36668 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Harden Runner - uses: 
step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit diff --git a/.github/workflows/trailing.yml b/.github/workflows/trailing.yml index 3f384fa063..94c161778d 100644 --- a/.github/workflows/trailing.yml +++ b/.github/workflows/trailing.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1 + uses: step-security/harden-runner@446798f8213ac2e75931c1b0769676d927801858 # v2.10.0 with: egress-policy: audit