From ecb13af93cb54d860e95cd799513c967ea61161f Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Mon, 2 Mar 2020 16:44:42 -0500 Subject: [PATCH 01/19] Create a build skeleton for the Charm++ backend --- CMakeLists.txt | 53 +++++++++++++++++++++++++++++++++----- cmake/unit.cmake | 15 +++++++++++ config/flecsi-config.h.in | 1 + flecsi/data/CMakeLists.txt | 12 +++++++++ flecsi/data/backend.hh | 4 +++ flecsi/exec/CMakeLists.txt | 13 ++++++++++ flecsi/exec/backend.hh | 4 +++ flecsi/io/CMakeLists.txt | 14 ++++++++++ flecsi/io/backend.hh | 4 +++ flecsi/run/CMakeLists.txt | 14 ++++++++++ flecsi/run/backend.hh | 4 +++ flecsi/run/types.hh | 14 ++++++++++ 12 files changed, 146 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 673429e6f..ffe512d54 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,8 +104,7 @@ if(NOT FORMAT_ONLY) #----------------------------------------------------------------------------# # Add options for runtime selection #----------------------------------------------------------------------------# - - set(FLECSI_RUNTIME_MODELS legion mpi hpx) + set(FLECSI_RUNTIME_MODELS legion mpi hpx charm) if(NOT FLECSI_RUNTIME_MODEL) list(GET FLECSI_RUNTIME_MODELS 0 FLECSI_RUNTIME_MODEL) @@ -129,9 +128,13 @@ if(NOT FORMAT_ONLY) elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") set(ENABLE_MPI ON CACHE BOOL "Enable MPI" FORCE) set(ENABLE_HPX ON CACHE BOOL "Enable HPX" FORCE) + elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + set(ENABLE_MPI ON CACHE BOOL "Enable MPI" FORCE) + set(ENABLE_LEGION ON CACHE BOOL "Enable Legion" FORCE) + set(ENABLE_CHARM ON CACHE BOOL "Enable Charm" FORCE) endif() - mark_as_advanced(ENABLE_MPI ENABLE_LEGION ENABLE_HPX) + mark_as_advanced(ENABLE_MPI ENABLE_LEGION ENABLE_HPX ENABLE_CHARM) #----------------------------------------------------------------------------# # Legion @@ -157,6 +160,14 @@ if(NOT FORMAT_ONLY) include(hpx) endif() + #----------------------------------------------------------------------------# + # 
Charm + #----------------------------------------------------------------------------# + + if(ENABLE_CHARM) + include(charm) + endif() + #----------------------------------------------------------------------------# # OpenMP. #----------------------------------------------------------------------------# @@ -255,10 +266,14 @@ if(NOT FORMAT_ONLY) message (FATAL_ERROR "MPI is required for the mpi runtime model") endif() - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/execution/mpi) + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/mpi) set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) + #----------------------------------------------------------------------------# + # HPX interface + #----------------------------------------------------------------------------# + elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") if(NOT HPX_FOUND) @@ -269,9 +284,35 @@ if(NOT FORMAT_ONLY) message (FATAL_ERROR "MPI is required for the hpx runtime model") endif() - set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) + set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) + + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/hpx) + + #----------------------------------------------------------------------------# + # Charm interface + #----------------------------------------------------------------------------# + elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + + if(NOT MPI_${MPI_LANGUAGE}_FOUND) + message (FATAL_ERROR "MPI is required for the charm runtime model") + endif() + + if(NOT Legion_FOUND) + message (FATAL_ERROR "Legion is required for the charm runtime model") + endif() + + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/charm) - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/execution/hpx) + set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${Legion_LIBRARIES} + ${MPI_LIBRARIES}) + + list(APPEND FLECSI_INCLUDE_DEPENDENCIES ${Legion_INCLUDE_DIRS}) + + # + # Compacted storage interface + # + option(ENABLE_MAPPER_COMPACTION "Enable Legion Mapper compaction" ON) + 
mark_as_advanced(ENABLE_MAPPER_COMPACTION) #----------------------------------------------------------------------------# # Default diff --git a/cmake/unit.cmake b/cmake/unit.cmake index 3cab2b6e2..9e79d61eb 100644 --- a/cmake/unit.cmake +++ b/cmake/unit.cmake @@ -123,6 +123,21 @@ function(add_unit name) set(unit_policy_exec_preflags ${MPIEXEC_PREFLAGS}) set(unit_policy_exec_postflags ${MPIEXEC_POSTFLAGS}) + elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm" + AND MPI_${MPI_LANGUAGE}_FOUND + AND Legion_FOUND) + + set(unit_policy_flags ${Legion_CXX_FLAGS} + ${MPI_${MPI_LANGUAGE}_COMPILE_FLAGS}) + set(unit_policy_includes ${Legion_INCLUDE_DIRS} + ${MPI_${MPI_LANGUAGE}_INCLUDE_PATH}) + set(unit_policy_libraries ${Legion_LIBRARIES} ${Legion_LIB_FLAGS} + ${MPI_${MPI_LANGUAGE}_LIBRARIES}) + set(unit_policy_exec ${MPIEXEC}) + set(unit_policy_exec_threads ${MPIEXEC_NUMPROC_FLAG}) + set(unit_policy_exec_preflags ${MPIEXEC_PREFLAGS}) + set(unit_policy_exec_postflags ${MPIEXEC_POSTFLAGS}) + else() message(WARNING "invalid runtime") diff --git a/config/flecsi-config.h.in b/config/flecsi-config.h.in index 63800c5eb..28d47c5b1 100644 --- a/config/flecsi-config.h.in +++ b/config/flecsi-config.h.in @@ -11,6 +11,7 @@ #define FLECSI_RUNTIME_MODEL_legion 1 #define FLECSI_RUNTIME_MODEL_mpi 2 #define FLECSI_RUNTIME_MODEL_hpx 3 +#define FLECSI_RUNTIME_MODEL_charm 4 #cmakedefine FLECSI_RUNTIME_MODEL FLECSI_RUNTIME_MODEL_@FLECSI_RUNTIME_MODEL@ //----------------------------------------------------------------------------// diff --git a/flecsi/data/CMakeLists.txt b/flecsi/data/CMakeLists.txt index e3c4e87c9..5c6711d44 100644 --- a/flecsi/data/CMakeLists.txt +++ b/flecsi/data/CMakeLists.txt @@ -65,6 +65,18 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") ${data_SOURCES} ) +elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + + set(data_HEADERS + charm/policy.hh + charm/types.hh + ${data_HEADERS} + ) + + set(data_SOURCES + ${data_SOURCES} + ) + endif() 
#------------------------------------------------------------------------------# diff --git a/flecsi/data/backend.hh b/flecsi/data/backend.hh index 053722fa2..800831226 100644 --- a/flecsi/data/backend.hh +++ b/flecsi/data/backend.hh @@ -46,4 +46,8 @@ constexpr inline std::size_t logical_size = 1ul << 32; #include +#elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm + +#include + #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/exec/CMakeLists.txt b/flecsi/exec/CMakeLists.txt index 4efdc4df5..815f0e708 100644 --- a/flecsi/exec/CMakeLists.txt +++ b/flecsi/exec/CMakeLists.txt @@ -48,6 +48,19 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "mpi") elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") +elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + + set(execution_HEADERS + ${execution_HEADERS} + charm/bind_accessors.hh + charm/task_wrapper.hh + charm/unbind_accessors.hh + charm/policy.hh + charm/future.hh + charm/task_prologue.hh + charm/reduction_wrapper.hh + ) + endif() #------------------------------------------------------------------------------# diff --git a/flecsi/exec/backend.hh b/flecsi/exec/backend.hh index 167166927..c08a87c18 100644 --- a/flecsi/exec/backend.hh +++ b/flecsi/exec/backend.hh @@ -65,4 +65,8 @@ auto execute(ARGS &&...); #include "flecsi/exec/hpx/policy.hh" +#elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm + +#include + #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/io/CMakeLists.txt b/flecsi/io/CMakeLists.txt index af718537e..0b183eac4 100644 --- a/flecsi/io/CMakeLists.txt +++ b/flecsi/io/CMakeLists.txt @@ -47,6 +47,13 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") ${io_SOURCES} ) +elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + + set(io_HEADERS + charm/policy.hh + ${io_HEADERS} + ) + endif() #------------------------------------------------------------------------------# @@ -78,6 +85,13 @@ add_unit(io test/leg/io.cc THREADS 4 ) +elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + +ftest_add_unit(io + SOURCES + test/charm/io.cc + THREADS 4 +) 
endif() add_unit(io_index diff --git a/flecsi/io/backend.hh b/flecsi/io/backend.hh index 23c552064..84b380357 100644 --- a/flecsi/io/backend.hh +++ b/flecsi/io/backend.hh @@ -41,4 +41,8 @@ using field_reference_t = data::field_reference_t; #include +#elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm + +#include + #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/run/CMakeLists.txt b/flecsi/run/CMakeLists.txt index 5c54b0ecd..5bf5b5f7d 100644 --- a/flecsi/run/CMakeLists.txt +++ b/flecsi/run/CMakeLists.txt @@ -46,6 +46,20 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "mpi") mpi/context.cc ) +elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") + + set(runtime_HEADERS + ${runtime_HEADERS} + charm/context.hh + charm/mapper.hh + charm/tasks.hh + ) + + set(runtime_SOURCES + ${runtime_SOURCES} + charm/context.cc + ) + endif() #------------------------------------------------------------------------------# diff --git a/flecsi/run/backend.hh b/flecsi/run/backend.hh index de4668ca7..e73410967 100644 --- a/flecsi/run/backend.hh +++ b/flecsi/run/backend.hh @@ -36,6 +36,10 @@ #include "hpx/context.hh" +#elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm + +#include "charm/context.hh" + #endif // FLECSI_RUNTIME_MODEL namespace flecsi::run { diff --git a/flecsi/run/types.hh b/flecsi/run/types.hh index 8e278c117..0a1a96be5 100644 --- a/flecsi/run/types.hh +++ b/flecsi/run/types.hh @@ -70,6 +70,20 @@ const task_id_t TASK_ID_MAX = std::numeric_limits::max(); } // namespace flecsi +#elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm + +#include + +namespace flecsi { + +using field_id_t = Legion::FieldID; +const field_id_t FIELD_ID_MAX = LEGION_MAX_APPLICATION_FIELD_ID; + +using task_id_t = Legion::TaskID; +const task_id_t TASK_ID_MAX = LEGION_MAX_APPLICATION_TASK_ID; + +} // namespace flecsi + #endif // FLECSI_RUNTIME_MODEL #include "flecsi/util/common.hh" From 0c42d77e6deb0660237ddcf51908aa1098348052 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 10 Mar 2020 16:49:26 
-0400 Subject: [PATCH 02/19] Change CMake to allow for .ci compilation --- cmake/charm.cmake | 50 +++++++++++++++++++++++++++++++++++++++ config/flecsi-config.h.in | 1 + flecsi/run/CMakeLists.txt | 1 + 3 files changed, 52 insertions(+) create mode 100644 cmake/charm.cmake diff --git a/cmake/charm.cmake b/cmake/charm.cmake new file mode 100644 index 000000000..ee1764cab --- /dev/null +++ b/cmake/charm.cmake @@ -0,0 +1,50 @@ +#------------------------------------------------------------------------------# +# @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ +# /@@///// /@@ @@////@@ @@////// /@@ +# /@@ /@@ @@@@@ @@ // /@@ /@@ +# /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ +# /@@//// /@@/@@@@@@@/@@ ////////@@/@@ +# /@@ /@@/@@//// //@@ @@ /@@/@@ +# /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ +# // /// ////// ////// //////// // +# +# Copyright (c) 2016 Los Alamos National Laboratory, LLC +# All rights reserved +#------------------------------------------------------------------------------# +
+option(ENABLE_CHARM "Enable Charm" OFF) +
+if(ENABLE_CHARM) +
+  find_package(Legion REQUIRED) +
+  if(NOT Legion_FOUND) +    message(FATAL_ERROR "Legion is required for this build configuration") +  endif(NOT Legion_FOUND) +
+  set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${LEGION_INSTALL_DIRS}) +
+  include_directories(SYSTEM ${Legion_INCLUDE_DIRS}) +
+  add_definitions(-DLEGION_USE_CMAKE) +  add_definitions(-DREALM_USE_CMAKE) +
+  list(APPEND FLECSI_LIBRARY_DEPENDENCIES ${Legion_LIBRARIES}) +
+  file(GLOB_RECURSE ci-files ${CMAKE_SOURCE_DIR}/flecsi/*.ci) +
+  foreach(in_file ${ci-files}) +    get_filename_component(ci-output ${in_file} NAME_WE) +    get_filename_component(ci-dir ${in_file} DIRECTORY) +    string(APPEND ci-output ".decl.h") +    set(all-ci-outputs ${all-ci-outputs} ${ci-dir}/${ci-output}) +    add_custom_command( +      OUTPUT ${ci-dir}/${ci-output} +      COMMAND ${CMAKE_CXX_COMPILER} ${in_file} +      WORKING_DIRECTORY ${ci-dir} +      DEPENDS ${in_file} +    ) +  endforeach() +  message (STATUS "Created command for " ${all-ci-outputs}) + 
+endif(ENABLE_CHARM) diff --git a/config/flecsi-config.h.in b/config/flecsi-config.h.in index 28d47c5b1..6814f71cf 100644 --- a/config/flecsi-config.h.in +++ b/config/flecsi-config.h.in @@ -20,6 +20,7 @@ #cmakedefine FLECSI_ENABLE_MPI #cmakedefine FLECSI_ENABLE_LEGION +#cmakedefine FLECSI_ENABLE_CHARM //----------------------------------------------------------------------------// // Enable Legion thread-local storage interface diff --git a/flecsi/run/CMakeLists.txt b/flecsi/run/CMakeLists.txt index 5bf5b5f7d..a6af408b9 100644 --- a/flecsi/run/CMakeLists.txt +++ b/flecsi/run/CMakeLists.txt @@ -58,6 +58,7 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") set(runtime_SOURCES ${runtime_SOURCES} charm/context.cc + charm/context.decl.h ) endif() From 029f141948074d3d2e6fedce25f07dd37c94c535 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Mon, 15 Jun 2020 17:17:56 -0400 Subject: [PATCH 03/19] Create charm backend from copy of legion backend --- CMakeLists.txt | 2 +- flecsi/data/charm/policy.hh | 100 +++++++ flecsi/exec/charm/bind_accessors.hh | 131 +++++++++ flecsi/exec/charm/future.hh | 123 ++++++++ flecsi/exec/charm/policy.hh | 316 ++++++++++++++++++++ flecsi/exec/charm/reduction_wrapper.hh | 66 +++++ flecsi/exec/charm/task_prologue.hh | 242 +++++++++++++++ flecsi/exec/charm/task_wrapper.hh | 288 ++++++++++++++++++ flecsi/exec/charm/unbind_accessors.hh | 70 +++++ flecsi/run/CMakeLists.txt | 10 +- flecsi/run/charm/context.cc | 289 ++++++++++++++++++ flecsi/run/charm/context.decl.h | 331 +++++++++++++++++++++ flecsi/run/charm/context.def.h | 208 +++++++++++++ flecsi/run/charm/context.hh | 389 +++++++++++++++++++++++++ flecsi/run/charm/mapper.hh | 318 ++++++++++++++++++++ 15 files changed, 2877 insertions(+), 6 deletions(-) create mode 100644 flecsi/data/charm/policy.hh create mode 100644 flecsi/exec/charm/bind_accessors.hh create mode 100644 flecsi/exec/charm/future.hh create mode 100644 flecsi/exec/charm/policy.hh create mode 100644 
flecsi/exec/charm/reduction_wrapper.hh create mode 100644 flecsi/exec/charm/task_prologue.hh create mode 100644 flecsi/exec/charm/task_wrapper.hh create mode 100644 flecsi/exec/charm/unbind_accessors.hh create mode 100644 flecsi/run/charm/context.cc create mode 100644 flecsi/run/charm/context.decl.h create mode 100644 flecsi/run/charm/context.def.h create mode 100644 flecsi/run/charm/context.hh create mode 100644 flecsi/run/charm/mapper.hh diff --git a/CMakeLists.txt b/CMakeLists.txt index ffe512d54..1b922b193 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,7 @@ set(CMAKE_CXX_STANDARD 17) #------------------------------------------------------------------------------# if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0") + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.4") message(FATAL_ERROR "Version 9.0 of gnu compilers required!") endif() endif() diff --git a/flecsi/data/charm/policy.hh b/flecsi/data/charm/policy.hh new file mode 100644 index 000000000..5d4499f1b --- /dev/null +++ b/flecsi/data/charm/policy.hh @@ -0,0 +1,100 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! 
+#endif + +#include + +flog_register_tag(topologies); + +namespace flecsi { +namespace data { + +template +struct topology_id { + // NB: C-style cast supports private inheritance + topology_id() : id(runtime::context_t::instance().record(*(C *)this)) {} + topology_id(const topology_id &) : topology_id() {} + ~topology_id() { + runtime::context_t::instance().forget(id); + } + topology_id & operator=(const topology_id &) noexcept { + return *this; + } + + std::size_t id; +}; + +/*----------------------------------------------------------------------------* + Index Topology. + *----------------------------------------------------------------------------*/ + +inline topology_data::topology_data( + const type::coloring & coloring) + : topology_base(Legion::Domain::from_rect<1>( + LegionRuntime::Arrays::Rect<1>(0, coloring.size() - 1))), + colors(coloring.size()) { + + auto legion_runtime = Legion::Runtime::get_runtime(); + auto legion_context = Legion::Runtime::get_context(); + auto & flecsi_context = runtime::context_t::instance(); + + auto & field_info_store = flecsi_context.get_field_info_store( + topology::id(), storage_label_t::dense); + + Legion::FieldAllocator allocator = + legion_runtime->create_field_allocator(legion_context, field_space); + + for(auto const & fi : field_info_store) { + allocator.allocate_field(fi->type_size, fi->fid); + } // for + + allocate(); + + Legion::IndexPartition index_partition = + legion_runtime->create_equal_partition( + legion_context, index_space, index_space); + + color_partition = legion_runtime->get_logical_partition( + legion_context, logical_region, index_partition); +} + +/*----------------------------------------------------------------------------* + Unstructured Mesh Topology. 
+ *----------------------------------------------------------------------------*/ + +inline topology_data::topology_data( + const type::coloring & coloring) { + (void)coloring; +} + +// NOTE THAT THE HANDLE TYPE FOR THIS TYPE WILL NEED TO CAPTURE THE +// UNDERLYING TOPOLOGY TYPE, i.e., topology::mesh_t + +} // namespace data +} // namespace flecsi diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh new file mode 100644 index 000000000..4dc6a135c --- /dev/null +++ b/flecsi/exec/charm/bind_accessors.hh @@ -0,0 +1,131 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/data/accessor.hh" +#include "flecsi/data/privilege.hh" +#include "flecsi/data/topology_accessor.hh" +#include "flecsi/exec/charm/future.hh" +#include "flecsi/run/backend.hh" +#include "flecsi/util/demangle.hh" +#include "flecsi/util/tuple_walker.hh" + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include + +namespace flecsi { + +inline log::devel_tag bind_accessors_tag("bind_accessors"); + +namespace exec::charm { + +/*! + The bind_accessors_t type is called to walk the user task arguments inside of + an executing legion task to properly complete the users accessors, i.e., by + pointing the accessor \em view instances to the appropriate legion-mapped + buffers. + */ + +struct bind_accessors_t : public util::tuple_walker { + + /*! + Construct an bind_accessors_t instance. + + @param legion_runtime The Legion task runtime. 
+ @param legion_context The Legion task runtime context. + */ + + bind_accessors_t(Legion::Runtime * legion_runtime, + Legion::Context & legion_context, + std::vector const & regions, + std::vector const & futures) + : legion_runtime_(legion_runtime), legion_context_(legion_context), + regions_(regions), futures_(futures) {} + + template + void visit(data::accessor & accessor) { + auto & reg = regions_[region++]; + + // Legion::FieldAccessor()), + const Legion::UnsafeFieldAccessor> + ac(reg, accessor.identifier(), sizeof(DATA_TYPE)); + const auto dom = legion_runtime_->get_index_space_domain( + legion_context_, reg.get_logical_region().get_index_space()); + const auto r = dom.get_rect<1>(); + + bind(accessor, + r.hi[0] - r.lo[0] + 1, + ac.ptr(Legion::Domain::DomainPointIterator(dom).p)); + } + + template + void visit(data::accessor & accessor) { + visit(accessor.get_base()); + } + + template + void visit(data::topology_accessor & a) { + a.bind([&](auto & x) { visit(x); }); // Clang 8.0.1 deems 'this' unused + } + + /*--------------------------------------------------------------------------* + Futures + *--------------------------------------------------------------------------*/ + template + void visit(exec::flecsi_future & future) { + future.legion_future_ = futures_[future_id]; + future_id++; + } + + /*--------------------------------------------------------------------------* + Non-FleCSI Data Types + *--------------------------------------------------------------------------*/ + + template + static typename std::enable_if_t< + !std::is_base_of_v> + visit(DATA_TYPE &) { + { + log::devel_guard guard(bind_accessors_tag); + flog_devel(info) << "Skipping argument with type " + << util::type() << std::endl; + } + } // visit + +private: + Legion::Runtime * legion_runtime_; + Legion::Context & legion_context_; + size_t region = 0; + const std::vector & regions_; + size_t future_id = 0; + const std::vector & futures_; + +}; // struct bind_accessors_t + +} // namespace 
exec::charm +} // namespace flecsi diff --git a/flecsi/exec/charm/future.hh b/flecsi/exec/charm/future.hh new file mode 100644 index 000000000..e27d0f75a --- /dev/null +++ b/flecsi/exec/charm/future.hh @@ -0,0 +1,123 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Los Alamos National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/exec/launch.hh" +#include "flecsi/run/backend.hh" + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include + +#include +#include +#include + +namespace flecsi { +namespace exec { + +/*! + Base legion future type. + + @tparam Return The return type of the task. + @tparam Launch FleCSI launch type: single/index. + + @ingroup legion-execution +*/ +template +struct legion_future; + +/*! Partial specialization for the Legion:Future + + @tparam Return The return type of the task. + + @ingroup legion-execution + */ +template +struct legion_future { + + /*! + Wait on a task result. + */ + void wait() { + legion_future_.wait(); + } // wait + + /*! + Get a task result. + */ + Return get(bool silence_warnings = false) { + if constexpr(std::is_same_v) + return legion_future_.get_void_result(silence_warnings); + else + return legion_future_.get_result(silence_warnings); + } // get + + Legion::Future legion_future_; +}; // legion_future + +template +struct legion_future { + + explicit operator legion_future() const { + return {}; + } + + /*! + Wait on a task result. 
+ */ + void wait(bool silence_warnings = false) { + legion_future_.wait_all_results(silence_warnings); + } // wait + + /*! + Get a task result. + */ + + Return get(size_t index = 0, bool silence_warnings = false) { + if constexpr(std::is_same_v) + return legion_future_.get_void_result(index, silence_warnings); + else + return legion_future_.get_result( + Legion::DomainPoint::from_point<1>( + LegionRuntime::Arrays::Point<1>(index)), + silence_warnings); + } // get + + Legion::FutureMap legion_future_; + +}; // struct legion_future + +//----------------------------------------------------------------------- + +template +using flecsi_future = legion_future; + +template +constexpr bool is_index_future = false; +template +constexpr bool is_index_future> = true; + +} // namespace exec +} // namespace flecsi diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh new file mode 100644 index 000000000..18060bdbe --- /dev/null +++ b/flecsi/exec/charm/policy.hh @@ -0,0 +1,316 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/exec/launch.hh" +#include "flecsi/exec/charm/future.hh" +#include "flecsi/exec/charm/reduction_wrapper.hh" +#include "flecsi/exec/charm/task_prologue.hh" +#include "flecsi/exec/charm/task_wrapper.hh" +#include "flecsi/run/backend.hh" +#include "flecsi/util/demangle.hh" +#include "flecsi/util/function_traits.hh" +#include + +#include +#include +#include + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! 
+#endif + +#include + +namespace flecsi { + +inline log::devel_tag execution_tag("execution"); + +namespace exec { +namespace detail { + +// Remove const from under a reference, if there is one. +template +struct nonconst_ref { + using type = T; +}; + +template +struct nonconst_ref { + using type = T &; +}; + +template +using nonconst_ref_t = typename nonconst_ref::type; + +// Serialize a tuple of converted arguments (or references to existing +// arguments where possible). Note that is_constructible_v is true, so we have to check +// is_constructible_v instead. +template +auto +serial_arguments(std::tuple * /* to deduce PP */, AA &&... aa) { + static_assert((std::is_const_v> && ...), + "Tasks cannot accept non-const references"); + return util::serial_put(std::tuple &, nonconst_ref_t>, + const PP &, + std::decay_t>...>(std::forward(aa)...)); +} + +} // namespace detail +} // namespace exec + +template +decltype(auto) +reduce(ARGS &&... args) { + using namespace Legion; + using namespace exec; + + using traits_t = util::function_traits; + using RETURN = typename traits_t::return_type; + using param_tuple = typename traits_t::arguments_type; + + // This will guard the entire method + log::devel_guard guard(execution_tag); + + // Get the FleCSI runtime context + auto & flecsi_context = run::context::instance(); + + // Get the processor type. + constexpr auto processor_type = mask_to_processor_type(ATTRIBUTES); + + // Get the Legion runtime and context from the current task. 
+ auto legion_runtime = Legion::Runtime::get_runtime(); + auto legion_context = Legion::Runtime::get_context(); + +#if defined(FLECSI_ENABLE_FLOG) + const size_t tasks_executed = flecsi_context.tasks_executed(); + if((tasks_executed > 0) && + (tasks_executed % FLOG_SERIALIZATION_INTERVAL == 0)) { + + size_t processes = flecsi_context.processes(); + LegionRuntime::Arrays::Rect<1> launch_bounds( + LegionRuntime::Arrays::Point<1>(0), + LegionRuntime::Arrays::Point<1>(processes - 1)); + Domain launch_domain = Domain::from_rect<1>(launch_bounds); + + constexpr auto red = [] { + return log::flog_t::instance().packets().size(); + }; + Legion::ArgumentMap arg_map; + Legion::IndexLauncher reduction_launcher(charm::task_id>, + launch_domain, + Legion::TaskArgument(NULL, 0), + arg_map); + + Legion::Future future = legion_runtime->execute_index_space( + legion_context, reduction_launcher, reduction_op>); + + if(future.get_result() > FLOG_SERIALIZATION_THRESHOLD) { + constexpr auto send = [] { + run::context::instance().set_mpi_task(log::send_to_one); + }; + Legion::IndexLauncher flog_mpi_launcher(charm::task_id>, + launch_domain, + Legion::TaskArgument(NULL, 0), + arg_map); + + flog_mpi_launcher.tag = run::FLECSI_MAPPER_FORCE_RANK_MATCH; + + // Launch the MPI task + auto future_mpi = + legion_runtime->execute_index_space(legion_context, flog_mpi_launcher); + + // Force synchronization + future_mpi.wait_all_results(true); + + // Handoff to the MPI runtime. + flecsi_context.handoff_to_mpi(legion_context, legion_runtime); + + // Wait for MPI to finish execution (synchronous). + flecsi_context.wait_on_mpi(legion_context, legion_runtime); + } // if + } // if +#endif // FLECSI_ENABLE_FLOG + + size_t domain_size = LAUNCH_DOMAIN.size(); + domain_size = domain_size == 0 ? 
flecsi_context.processes() : domain_size; + + ++flecsi_context.tasks_executed(); + + charm::task_prologue_t pro(domain_size); + pro.walk(args...); + + std::optional mpi_args; + std::vector buf; + if constexpr(processor_type == task_processor_type_t::mpi) { + // MPI tasks must be invoked collectively from one task on each rank. + // We therefore can transmit merely a pointer to a tuple of the arguments. + // util::serial_put deliberately doesn't support this, so just memcpy it. + mpi_args.emplace(std::forward(args)...); + const auto p = &*mpi_args; + buf.resize(sizeof p); + std::memcpy(buf.data(), &p, sizeof p); + } + else { + buf = detail::serial_arguments( + static_cast(nullptr), std::forward(args)...); + } + + //------------------------------------------------------------------------// + // Single launch + //------------------------------------------------------------------------// + + using wrap = charm::task_wrapper; + const auto task = charm::task_id(wrap::LegionProcessor)>; + + if constexpr(LAUNCH_DOMAIN == single) { + + static_assert(std::is_void_v, + "reductions are not supported for single tasks"); + + { + log::devel_guard guard(execution_tag); + flog_devel(info) << "Executing single task" << std::endl; + } + + TaskLauncher launcher(task, TaskArgument(buf.data(), buf.size())); + + // adding region requirements to the launcher + for(auto & req : pro.region_requirements()) { + launcher.add_region_requirement(req); + } // for + + // adding futures to the launcher + launcher.futures = std::move(pro).futures(); + + static_assert(!(is_index_future> || ...), + "can't use index future with single task"); + + if constexpr(processor_type == task_processor_type_t::toc || + processor_type == task_processor_type_t::loc) { + auto future = legion_runtime->execute_task(legion_context, launcher); + + return legion_future{future}; + } + else { + static_assert( + processor_type == task_processor_type_t::mpi, "Unknown launch type"); + flog_fatal("Invalid launch type!" 
+ << std::endl + << "Legion backend does not support 'single' launch" + << " for MPI tasks yet"); + } + } + + //------------------------------------------------------------------------// + // Index launch + //------------------------------------------------------------------------// + + else { + + { + log::devel_guard guard(execution_tag); + flog_devel(info) << "Executing index task" << std::endl; + } + + LegionRuntime::Arrays::Rect<1> launch_bounds( + LegionRuntime::Arrays::Point<1>(0), + LegionRuntime::Arrays::Point<1>(domain_size - 1)); + Domain launch_domain = Domain::from_rect<1>(launch_bounds); + + Legion::ArgumentMap arg_map; + Legion::IndexLauncher launcher( + task, launch_domain, TaskArgument(buf.data(), buf.size()), arg_map); + + // adding region requirement to the launcher + for(auto & req : pro.region_requirements()) { + launcher.add_region_requirement(req); + } // for + + // adding futures to the launcher + launcher.futures = std::move(pro).futures(); + launcher.point_futures.assign( + pro.future_maps().begin(), pro.future_maps().end()); + + if constexpr(processor_type == task_processor_type_t::toc || + processor_type == task_processor_type_t::loc) { + flog_devel(info) << "Executing index launch on loc" << std::endl; + + if constexpr(!std::is_void_v) { + flog_devel(info) << "executing reduction logic for " + << util::type() << std::endl; + + Legion::Future future; + + future = legion_runtime->execute_index_space( + legion_context, launcher, reduction_op); + + return legion_future{future}; + } + else { + // Enqueue the task. 
+ Legion::FutureMap future_map = + legion_runtime->execute_index_space(legion_context, launcher); + + return legion_future{future_map}; + } // else + } + else { + static_assert( + processor_type == task_processor_type_t::mpi, "Unknown launch type"); + launcher.tag = run::FLECSI_MAPPER_FORCE_RANK_MATCH; + + // Launch the MPI task + auto future = + legion_runtime->execute_index_space(legion_context, launcher); + // Force synchronization + future.wait_all_results(true); + + // Handoff to the MPI runtime. + flecsi_context.handoff_to_mpi(legion_context, legion_runtime); + + // Wait for MPI to finish execution (synchronous). + // We must keep mpi_args alive until then. + flecsi_context.wait_on_mpi(legion_context, legion_runtime); + + if constexpr(!std::is_void_v) { + // FIXME implement logic for reduction MPI task + flog_fatal("there is no implementation for the mpi" + " reduction task"); + } + else { + return legion_future{future}; + } + } + } // if constexpr + + // return 0; +} // execute_task + +} // namespace flecsi diff --git a/flecsi/exec/charm/reduction_wrapper.hh b/flecsi/exec/charm/reduction_wrapper.hh new file mode 100644 index 000000000..91151bddb --- /dev/null +++ b/flecsi/exec/charm/reduction_wrapper.hh @@ -0,0 +1,66 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/exec/fold.hh" +#include "flecsi/run/backend.hh" +#include "flecsi/util/demangle.hh" +#include + +#include + +namespace flecsi { + +inline log::devel_tag reduction_wrapper_tag("reduction_wrapper"); + +namespace exec { + +namespace detail { +/*! 
+ Register the user-defined reduction operator with the runtime. +*/ + +template +void register_reduction(); + +inline Legion::ReductionOpID reduction_id; +} // namespace detail + +// NB: 0 is reserved by Legion. +template +inline const Legion::ReductionOpID reduction_op = + (run::context::instance().register_init(detail::register_reduction), + ++detail::reduction_id); + +template +void +detail::register_reduction() { + { + log::devel_guard guard(reduction_wrapper_tag); + flog_devel(info) << "registering reduction operation " << util::type() + << std::endl; + } + + // Register the operation with the Legion runtime + Legion::Runtime::register_reduction_op(reduction_op); +} + +} // namespace exec +} // namespace flecsi diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh new file mode 100644 index 000000000..a3923d701 --- /dev/null +++ b/flecsi/exec/charm/task_prologue.hh @@ -0,0 +1,242 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! 
+#endif + +#include "flecsi/data/accessor.hh" +#include "flecsi/data/privilege.hh" +#include "flecsi/data/topology_accessor.hh" +#include "flecsi/exec/charm/future.hh" +#include "flecsi/run/backend.hh" +#include "flecsi/topo/global.hh" +#include "flecsi/topo/ntree/interface.hh" +#include "flecsi/topo/set/interface.hh" +#include "flecsi/topo/structured/interface.hh" +//#include "flecsi/topo/unstructured/interface.hh" +#include "flecsi/util/demangle.hh" +#include "flecsi/util/tuple_walker.hh" + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include + +namespace flecsi { + +inline log::devel_tag task_prologue_tag("task_prologue"); + +namespace exec::charm { + +/*! + The task_prologue_t type can be called to walk task args before the + task launcher is created. This allows us to gather region requirements + and to set state on the associated data handles \em before Legion gets + the task arguments tuple. + + @ingroup execution +*/ + +struct task_prologue_t { + + /*! + Construct an task_prologue_t instance. + + @param runtime The Legion task runtime. + @param context The Legion task runtime context. + */ + + task_prologue_t(const size_t & domain) : domain_(domain) {} + + std::vector const & region_requirements() const { + return region_reqs_; + } // region_requirements + + std::vector && futures() && { + return std::move(futures_); + } // futures + + std::vector const & future_maps() const { + return future_maps_; + } // future_maps + + /*! + Convert the template privileges to proper Legion privileges. 
+ + @param mode privilege + */ + + static Legion::PrivilegeMode privilege_mode(size_t mode) { + switch(mode) { + case size_t(nu): + return WRITE_DISCARD; + case size_t(ro): + return READ_ONLY; + case size_t(wo): + return WRITE_DISCARD; + case size_t(rw): + return READ_WRITE; + default: + flog_fatal("invalid privilege mode"); + } // switch + + return NO_ACCESS; + } // privilege_mode + + template + void walk(const AA &... aa) { + walk(static_cast

(nullptr), aa...); + } + + /*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^* + The following methods are specializations on layout and client + type, potentially for every permutation thereof. + *^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/ + + template Space> + void visit(data::accessor * null_p, + const data::field_reference & ref) { + visit(get_null_base(null_p), ref.template cast()); + } + + /*--------------------------------------------------------------------------* + Global Topology + *--------------------------------------------------------------------------*/ + + template + void visit( + data::accessor * /* parameter */, + const data:: + field_reference & + ref) { + Legion::LogicalRegion region = ref.topology().get().logical_region; + + static_assert(privilege_count(PRIVILEGES) == 1, + "global topology accessor type only takes one privilege"); + + constexpr auto priv = get_privilege(0, PRIVILEGES); + + if(priv > partition_privilege_t::ro) + flog_assert(domain_ == 1, + "global can only be modified from within single launch task"); + + Legion::RegionRequirement rr(region, + priv > partition_privilege_t::ro ? 
privilege_mode(priv) : READ_ONLY, + EXCLUSIVE, + region); + + rr.add_field(ref.fid()); + region_reqs_.push_back(rr); + } // visit + + template Space, + class = std::enable_if_t == 1>> + void visit( + data::accessor * /* parameter */, + const data::field_reference & ref) { + auto & instance_data = ref.topology().get().template get_partition(); + + flog_assert(instance_data.colors() == domain_, + "attempting to pass field with " + << instance_data.colors() + << " partitions into task with launch domain of size " << domain_); + + static_assert(privilege_count(PRIVILEGES) == 1, + "accessors for this topology type take only one privilege"); + + Legion::RegionRequirement rr(instance_data.logical_partition, + 0, + privilege_mode(get_privilege(0, PRIVILEGES)), + EXCLUSIVE, + Legion::Runtime::get_runtime()->get_parent_logical_region( + instance_data.logical_partition)); + + rr.add_field(ref.fid()); + region_reqs_.push_back(rr); + } // visit + + template + void visit(data::topology_accessor * /* parameter */, + const data::topology_slot & slot) { + Topo::core::fields([&](auto & f) { + visit(static_cast *>(nullptr), + f(slot)); + }); + } + + /*--------------------------------------------------------------------------* + Futures + *--------------------------------------------------------------------------*/ + template + void visit(exec::flecsi_future *, + const exec::legion_future & + future) { + futures_.push_back(future.legion_future_); + } + + template + void visit(exec::flecsi_future *, + const exec::legion_future & future) { + future_maps_.push_back(future.legion_future_); + } + + /*--------------------------------------------------------------------------* + Non-FleCSI Data Types + *--------------------------------------------------------------------------*/ + + template + static void visit(DATA_TYPE &) { + static_assert(!std::is_base_of_v, + "Unknown task argument type"); + { + log::devel_guard guard(task_prologue_tag); + flog_devel(info) << "Skipping argument with type 
" + << util::type() << std::endl; + } + } // visit + +private: + // Argument types for which we don't also need the type of the parameter: + template + void visit(P *, DATA_TYPE & x) { + visit(x); + } // visit + + template + void walk(std::tuple * /* to deduce PP */, const AA &... aa) { + (visit(static_cast *>(nullptr), aa), ...); + } + + size_t domain_; + + std::vector region_reqs_; + std::vector futures_; + std::vector future_maps_; +}; // task_prologue_t + +} // namespace exec::charm +} // namespace flecsi diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh new file mode 100644 index 000000000..cff999f14 --- /dev/null +++ b/flecsi/exec/charm/task_wrapper.hh @@ -0,0 +1,288 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/exec/charm/bind_accessors.hh" +#include "flecsi/exec/charm/future.hh" +#include "flecsi/exec/charm/unbind_accessors.hh" +#include "flecsi/exec/task_attributes.hh" +#include "flecsi/run/backend.hh" +#include "flecsi/util/common.hh" +#include "flecsi/util/function_traits.hh" +#include "flecsi/util/serialize.hh" +#include "unbind_accessors.hh" +#include + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! 
+#endif + +#include + +#include +#include + +namespace flecsi { + +inline log::devel_tag task_wrapper_tag("task_wrapper"); + +// Send and receive only the reference_base portion: +template +struct util::serial_convert> { + using type = data::accessor; + using Rep = std::size_t; + static Rep put(const type & r) { + return r.identifier(); + } + static type get(const Rep & r) { + return type(r); + } +}; +template +struct util::serial_convert> { + using type = data::accessor; + using Base = typename type::base_type; + static const Base & put(const type & a) { + return a.get_base(); + } + static type get(Base b) { + return b; + } +}; +// NB: topology_accessor is trivially copyable. + +template +struct util::serial_convert> { + using type = exec::flecsi_future; + struct Rep {}; + static Rep put(const type &) { + return {}; + } + static type get(const Rep &) { + return {}; + } +}; + +namespace exec::charm { +using run::charm::task; + +namespace detail { +inline task_id_t last_task; // 0 is the top-level task +/*! + Register a task with Legion. + + @tparam RETURN The return type of the task. + @tparam TASK The legion task. + \tparam A task attributes + + @ingroup legion-execution + */ + +template * TASK, std::size_t A> +void register_task(); + +template +struct decay : std::decay {}; +template +struct decay> { + using type = std::tuple...>; +}; + +template +auto +tuple_get(const Legion::Task & t) { + struct Check { + const std::byte *b, *e; + Check(const Legion::Task & t) + : b(static_cast(t.args)), e(b + t.arglen) {} + ~Check() { + flog_assert(b == e, "Bad Task::arglen"); + } + } ch(t); + return util::serial_get::type>(ch.b); +} +} // namespace detail + +/*! + Arbitrary index for each task. + + @tparam F Legion task function. + @tparam ATTRIBUTES A size_t holding the mask of the task attributes mask + \ref task_attributes_mask_t. 
+ */ + +template +// 'extern' works around GCC bug #90493 +extern const task_id_t + task_id = (run::context::instance().register_init(detail::register_task< + typename util::function_traits::return_type, + F, + A>), + ++detail::last_task); + +template * TASK, std::size_t A> +void +detail::register_task() { + constexpr auto processor_type = mask_to_processor_type(A); + static_assert(processor_type != task_processor_type_t::mpi, + "Legion tasks cannot use MPI"); + + const std::string name = util::symbol<*TASK>(); + { + log::devel_guard guard(task_wrapper_tag); + flog_devel(info) << "registering pure Legion task " << name << std::endl; + } + + Legion::TaskVariantRegistrar registrar(task_id<*TASK, A>, name.c_str()); + Legion::Processor::Kind kind = processor_type == task_processor_type_t::toc + ? Legion::Processor::TOC_PROC + : Legion::Processor::LOC_PROC; + registrar.add_constraint(Legion::ProcessorConstraint(kind)); + registrar.set_leaf(leaf_task(A)); + registrar.set_inner(inner_task(A)); + registrar.set_idempotent(idempotent_task(A)); + + /* + This section of conditionals is necessary because there is still + a distinction between void and non-void task registration with + Legion. + */ + + if constexpr(std::is_same_v) { + Legion::Runtime::preregister_task_variant(registrar, name.c_str()); + } + else { + Legion::Runtime::preregister_task_variant( + registrar, name.c_str()); + } // if +} // registration_callback + +// A trivial wrapper for nullary functions. +template +auto +verb(const Legion::Task *, + const std::vector &, + Legion::Context, + Legion::Runtime *) { + return F(); +} + +/*! + The task_wrapper type provides execution + functions for user and MPI tasks. 
+ + \tparam F the user task + \tparam P the target processor type + + @ingroup legion-execution + */ + +template // P is for specialization only +struct task_wrapper { + + using Traits = util::function_traits; + using RETURN = typename Traits::return_type; + using param_tuple = typename Traits::arguments_type; + + static constexpr task_processor_type_t LegionProcessor = P; + + /*! + Execution wrapper method for user tasks. + */ + + static RETURN execute(const Legion::Task * task, + const std::vector & regions, + Legion::Context context, + Legion::Runtime * runtime) { + { + log::devel_guard guard(task_wrapper_tag); + flog_devel(info) << "In execute_user_task" << std::endl; + } + + // Unpack task arguments + // TODO: Can we deserialize directly into the user's parameters (i.e., do + // without finalize_handles)? + auto task_args = detail::tuple_get(*task); + + bind_accessors_t bind_accessors(runtime, context, regions, task->futures); + bind_accessors.walk(task_args); + + if constexpr(std::is_same_v) { + apply(F, std::forward(task_args)); + + // FIXME: Refactor + // finalize_handles_t finalize_handles; + // finalize_handles.walk(task_args); + } + else { + RETURN result = apply(F, std::forward(task_args)); + + // FIXME: Refactor + // finalize_handles_t finalize_handles; + // finalize_handles.walk(task_args); + + return result; + } // if + } // execute_user_task + +}; // struct task_wrapper + +template +struct task_wrapper { + using Traits = util::function_traits; + using RETURN = typename Traits::return_type; + using param_tuple = typename Traits::arguments_type; + + static constexpr auto LegionProcessor = task_processor_type_t::loc; + + static void execute(const Legion::Task * task, + const std::vector &, + Legion::Context, + Legion::Runtime *) { + // FIXME: Refactor + // { + // log::devel_guard guard(task_wrapper_tag); + // flog_devel(info) << "In execute_mpi_task" << std::endl; + // } + + // Unpack task arguments. 
+ param_tuple * p; + flog_assert(task->arglen == sizeof p, "Bad Task::arglen"); + std::memcpy(&p, task->args, sizeof p); + auto & mpi_task_args = *p; + + // FIXME: Refactor + // init_handles_t init_handles(runtime, context, regions, task->futures); + // init_handles.walk(mpi_task_args); + + // Set the MPI function and make the runtime active. + auto & c = run::context::instance(); + c.set_mpi_task([&] { apply(F, std::move(mpi_task_args)); }); + + // FIXME: Refactor + // finalize_handles_t finalize_handles; + // finalize_handles.walk(mpi_task_args); + } +}; + +} // namespace exec::charm +} // namespace flecsi diff --git a/flecsi/exec/charm/unbind_accessors.hh b/flecsi/exec/charm/unbind_accessors.hh new file mode 100644 index 000000000..5633ec5b8 --- /dev/null +++ b/flecsi/exec/charm/unbind_accessors.hh @@ -0,0 +1,70 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "flecsi/data/accessor.hh" +#include "flecsi/data/privilege.hh" +#include "flecsi/run/context.hh" +#include "flecsi/util/demangle.hh" +#include "flecsi/util/tuple_walker.hh" + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include + +namespace flecsi { + +inline log::devel_tag unbind_accessors_tag("unbind_accessors"); + +namespace exec::charm { + +/*! + The unbind_accessors_t type is called to walk the user task arguments inside + of an executing legion task to properly unbind the user's accessors. 
+ */ + +struct unbind_accessors_t : public util::tuple_walker { + + template + void visit(data::accessor &) { + } // visit + + /*--------------------------------------------------------------------------* + Non-FleCSI Data Types + *--------------------------------------------------------------------------*/ + + template + static typename std::enable_if_t< + !std::is_base_of_v> + visit(DATA_TYPE &) { + { + log::devel_guard guard(unbind_accessors_tag); + flog_devel(info) << "Skipping argument with type " + << util::type() << std::endl; + } + } // visit +}; // struct unbind_accessors_t + +} // namespace exec::charm +} // namespace flecsi diff --git a/flecsi/run/CMakeLists.txt b/flecsi/run/CMakeLists.txt index a6af408b9..96810bc05 100644 --- a/flecsi/run/CMakeLists.txt +++ b/flecsi/run/CMakeLists.txt @@ -48,17 +48,17 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "mpi") elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") - set(runtime_HEADERS - ${runtime_HEADERS} + set(run_HEADERS + ${run_HEADERS} charm/context.hh charm/mapper.hh charm/tasks.hh ) - set(runtime_SOURCES - ${runtime_SOURCES} - charm/context.cc + set(run_SOURCES + ${run_SOURCES} charm/context.decl.h + charm/context.cc ) endif() diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc new file mode 100644 index 000000000..92925ab64 --- /dev/null +++ b/flecsi/run/charm/context.cc @@ -0,0 +1,289 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. 
+ */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#define __FLECSI_PRIVATE__ +#endif + +#include "flecsi/exec/launch.hh" +#include "flecsi/exec/charm/task_wrapper.hh" +#include "flecsi/run/charm/context.hh" +#include "flecsi/run/charm/mapper.hh" +#include "flecsi/run/types.hh" +#include + +namespace flecsi::run { + +using namespace boost::program_options; +using exec::charm::task_id; + +/*----------------------------------------------------------------------------* + Legion top-level task. + *----------------------------------------------------------------------------*/ + +void +top_level_task(const Legion::Task *, + const std::vector &, + Legion::Context ctx, + Legion::Runtime * runtime) { + + context_t & context_ = context_t::instance(); + + /* + Initialize MPI interoperability. + */ + + context_.connect_with_mpi(ctx, runtime); + context_.wait_on_mpi(ctx, runtime); + + /* + Invoke the FleCSI runtime top-level action. + */ + + detail::data_guard(), + context_.exit_status() = (*context_.top_level_action_)(); + + /* + Finish up Legion runtime and fall back out to MPI. + */ + + context_.handoff_to_mpi(ctx, runtime); +} // top_level_task + +//----------------------------------------------------------------------------// +// Implementation of context_t::initialize. +//----------------------------------------------------------------------------// + +int +context_t::initialize(int argc, char ** argv, bool dependent) { + + if(dependent) { + int version, subversion; + MPI_Get_version(&version, &subversion); + +#if defined(GASNET_CONDUIT_MPI) + if(version == 3 && subversion > 0) { + int provided; + MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); + + if(provided < MPI_THREAD_MULTIPLE) { + std::cerr << "Your implementation of MPI does not support " + "MPI_THREAD_MULTIPLE which is required for use of the " + "GASNet MPI conduit with the Legion-MPI Interop!" 
+ << std::endl; + std::abort(); + } // if + } + else { + // Initialize the MPI runtime + MPI_Init(&argc, &argv); + } // if +#else + MPI_Init(&argc, &argv); +#endif + } // if + + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + context::process_ = rank; + context::processes_ = size; + + auto status = context::initialize_generic(argc, argv, dependent); + + if(status != success && dependent) { + MPI_Finalize(); + } // if + + return status; +} // initialize + +//----------------------------------------------------------------------------// +// Implementation of context_t::finalize. +//----------------------------------------------------------------------------// + +void +context_t::finalize() { + context::finalize_generic(); + +#ifndef GASNET_CONDUIT_MPI + if(context::initialize_dependent_) { + MPI_Finalize(); + } // if +#endif +} // finalize + +//----------------------------------------------------------------------------// +// Implementation of context_t::start. +//----------------------------------------------------------------------------// + +int +context_t::start(const std::function & action) { + using namespace Legion; + + /* + Store the top-level action for invocation from the top-level task. + */ + + top_level_action_ = &action; + + /* + Setup Legion top-level task. + */ + + Runtime::set_top_level_task_id(FLECSI_TOP_LEVEL_TASK_ID); + + { + Legion::TaskVariantRegistrar registrar( + FLECSI_TOP_LEVEL_TASK_ID, "runtime_driver"); + registrar.add_constraint(ProcessorConstraint(Processor::LOC_PROC)); + registrar.set_replicable(); + Runtime::preregister_task_variant( + registrar, "runtime_driver"); + } // scope + + /* + Arg 0: MPI has initial control (true). + Arg 1: Number of MPI participants (1). + Arg 2: Number of Legion participants (1). + */ + + handshake_ = Legion::Runtime::create_handshake(true, 1, 1); + + /* + Register custom mapper. 
+ */ + + Runtime::add_registration_callback(mapper_registration); + + /* + Configure interoperability layer. + */ + + Legion::Runtime::configure_MPI_interoperability(context::process_); + + context::start(); + + /* + Legion command-line arguments. + */ + + std::vector largv; + largv.push_back(argv_[0]); + + auto iss = std::istringstream{backend_}; + std::vector lsargv(std::istream_iterator{iss}, + std::istream_iterator()); + + for(auto & arg : lsargv) { + largv.push_back(&arg[0]); + } // for + + // FIXME: This needs to be gotten from Legion + context::threads_per_process_ = 1; + context::threads_ = context::processes_ * context::threads_per_process_; + + /* + Start Legion runtime. + */ + + { + log::devel_guard("context"); + + std::stringstream stream; + + stream << "Starting Legion runtime" << std::endl; + stream << "\targc: " << largv.size() << std::endl; + stream << "\targv: "; + + for(auto opt : largv) { + stream << opt << " "; + } // for + + stream << std::endl; + + flog_devel(info) << stream.str(); + } // scope + + Runtime::start(largv.size(), largv.data(), true); + + do { + handoff_to_legion(); + wait_on_legion(); + } while(invoke_mpi_task()); + + Legion::Runtime::wait_for_shutdown(); + + return context::exit_status(); +} // context_t::start + +//----------------------------------------------------------------------------// +// Implementation of context_t::handoff_to_mpi. 
+//----------------------------------------------------------------------------// + +void +context_t::handoff_to_mpi(Legion::Context & ctx, Legion::Runtime * runtime) { + Legion::ArgumentMap arg_map; + Legion::IndexLauncher handoff_to_mpi_launcher( + task_id>, + Legion::Domain::from_rect<1>(context_t::instance().all_processes()), + Legion::TaskArgument(NULL, 0), + arg_map); + + handoff_to_mpi_launcher.tag = FLECSI_MAPPER_FORCE_RANK_MATCH; + auto fm = runtime->execute_index_space(ctx, handoff_to_mpi_launcher); + + fm.wait_all_results(true); +} // context_t::handoff_to_mpi + +//----------------------------------------------------------------------------// +// Implementation of context_t::wait_on_mpi. +//----------------------------------------------------------------------------// + +Legion::FutureMap +context_t::wait_on_mpi(Legion::Context & ctx, Legion::Runtime * runtime) { + Legion::ArgumentMap arg_map; + Legion::IndexLauncher wait_on_mpi_launcher(task_id>, + Legion::Domain::from_rect<1>(context_t::instance().all_processes()), + Legion::TaskArgument(NULL, 0), + arg_map); + + wait_on_mpi_launcher.tag = FLECSI_MAPPER_FORCE_RANK_MATCH; + auto fm = runtime->execute_index_space(ctx, wait_on_mpi_launcher); + + fm.wait_all_results(true); + + return fm; +} // context_t::wait_on_mpi + +//----------------------------------------------------------------------------// +// Implementation of context_t::connect_with_mpi. 
+//----------------------------------------------------------------------------// + +void +context_t::connect_with_mpi(Legion::Context &, Legion::Runtime *) { + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + + LegionRuntime::Arrays::Rect<1> launch_bounds( + LegionRuntime::Arrays::Point<1>(0), + LegionRuntime::Arrays::Point<1>(size - 1)); + + context_t::instance().set_all_processes(launch_bounds); +} // context_t::connect_with_mpi + +} // namespace flecsi::run diff --git a/flecsi/run/charm/context.decl.h b/flecsi/run/charm/context.decl.h new file mode 100644 index 000000000..06a1deca3 --- /dev/null +++ b/flecsi/run/charm/context.decl.h @@ -0,0 +1,331 @@ +#ifndef _DECL_context_H_ +#define _DECL_context_H_ +#include "charm++.h" +#include "envelope.h" +#include +#include "sdag.h" +namespace flecsi { +namespace run { +namespace charm { +/* DECLS: group ContextGroup: IrrGroup{ +ContextGroup(); +void top_level_task(); +}; + */ + class ContextGroup; + class CkIndex_ContextGroup; + class CProxy_ContextGroup; + class CProxyElement_ContextGroup; + class CProxySection_ContextGroup; +/* --------------- index object ------------------ */ +class CkIndex_ContextGroup:public CkIndex_IrrGroup{ + public: + typedef ContextGroup local_t; + typedef CkIndex_ContextGroup index_t; + typedef CProxy_ContextGroup proxy_t; + typedef CProxyElement_ContextGroup element_t; + typedef CProxySection_ContextGroup section_t; + + static int __idx; + static void __register(const char *s, size_t size); + /* DECLS: ContextGroup(); + */ + // Entry point registration at startup + + static int reg_ContextGroup_void(); + // Entry point index lookup + + inline static int idx_ContextGroup_void() { + static int epidx = reg_ContextGroup_void(); + return epidx; + } + + + static int ckNew() { return idx_ContextGroup_void(); } + + static void _call_ContextGroup_void(void* impl_msg, void* impl_obj); + + static void _call_sdag_ContextGroup_void(void* impl_msg, void* impl_obj); + /* DECLS: void top_level_task(); + 
*/ + // Entry point registration at startup + + static int reg_top_level_task_void(); + // Entry point index lookup + + inline static int idx_top_level_task_void() { + static int epidx = reg_top_level_task_void(); + return epidx; + } + + + inline static int idx_top_level_task(void (ContextGroup::*)() ) { + return idx_top_level_task_void(); + } + + + + static int top_level_task() { return idx_top_level_task_void(); } + + static void _call_top_level_task_void(void* impl_msg, void* impl_obj); + + static void _call_sdag_top_level_task_void(void* impl_msg, void* impl_obj); +}; +/* --------------- element proxy ------------------ */ +class CProxyElement_ContextGroup: public CProxyElement_IrrGroup{ + public: + typedef ContextGroup local_t; + typedef CkIndex_ContextGroup index_t; + typedef CProxy_ContextGroup proxy_t; + typedef CProxyElement_ContextGroup element_t; + typedef CProxySection_ContextGroup section_t; + + + /* TRAM aggregators */ + + CProxyElement_ContextGroup(void) { + } + CProxyElement_ContextGroup(const IrrGroup *g) : CProxyElement_IrrGroup(g){ + } + CProxyElement_ContextGroup(CkGroupID _gid,int _onPE,CK_DELCTOR_PARAM) : CProxyElement_IrrGroup(_gid,_onPE,CK_DELCTOR_ARGS){ + } + CProxyElement_ContextGroup(CkGroupID _gid,int _onPE) : CProxyElement_IrrGroup(_gid,_onPE){ + } + + int ckIsDelegated(void) const + { return CProxyElement_IrrGroup::ckIsDelegated(); } + inline CkDelegateMgr *ckDelegatedTo(void) const + { return CProxyElement_IrrGroup::ckDelegatedTo(); } + inline CkDelegateData *ckDelegatedPtr(void) const + { return CProxyElement_IrrGroup::ckDelegatedPtr(); } + CkGroupID ckDelegatedIdx(void) const + { return CProxyElement_IrrGroup::ckDelegatedIdx(); } +inline void ckCheck(void) const {CProxyElement_IrrGroup::ckCheck();} +CkChareID ckGetChareID(void) const + {return CProxyElement_IrrGroup::ckGetChareID();} +CkGroupID ckGetGroupID(void) const + {return CProxyElement_IrrGroup::ckGetGroupID();} +operator CkGroupID () const { return ckGetGroupID(); } + + 
inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxyElement_IrrGroup::setReductionClient(fn,param); } + inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxyElement_IrrGroup::ckSetReductionClient(fn,param); } + inline void ckSetReductionClient(CkCallback *cb) const + { CProxyElement_IrrGroup::ckSetReductionClient(cb); } +int ckGetGroupPe(void) const +{return CProxyElement_IrrGroup::ckGetGroupPe();} + + void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) + { CProxyElement_IrrGroup::ckDelegate(dTo,dPtr); } + void ckUndelegate(void) + { CProxyElement_IrrGroup::ckUndelegate(); } + void pup(PUP::er &p) + { CProxyElement_IrrGroup::pup(p); + } + void ckSetGroupID(CkGroupID g) { + CProxyElement_IrrGroup::ckSetGroupID(g); + } + ContextGroup* ckLocalBranch(void) const { + return ckLocalBranch(ckGetGroupID()); + } + static ContextGroup* ckLocalBranch(CkGroupID gID) { + return (ContextGroup*)CkLocalBranch(gID); + } +/* DECLS: ContextGroup(); + */ + + +/* DECLS: void top_level_task(); + */ + + void top_level_task(const CkEntryOptions *impl_e_opts=NULL); + +}; +/* ---------------- collective proxy -------------- */ +class CProxy_ContextGroup: public CProxy_IrrGroup{ + public: + typedef ContextGroup local_t; + typedef CkIndex_ContextGroup index_t; + typedef CProxy_ContextGroup proxy_t; + typedef CProxyElement_ContextGroup element_t; + typedef CProxySection_ContextGroup section_t; + + CProxy_ContextGroup(void) { + } + CProxy_ContextGroup(const IrrGroup *g) : CProxy_IrrGroup(g){ + } + CProxy_ContextGroup(CkGroupID _gid,CK_DELCTOR_PARAM) : CProxy_IrrGroup(_gid,CK_DELCTOR_ARGS){ } + CProxy_ContextGroup(CkGroupID _gid) : CProxy_IrrGroup(_gid){ } + CProxyElement_ContextGroup operator[](int onPE) const + {return CProxyElement_ContextGroup(ckGetGroupID(),onPE,CK_DELCTOR_CALL);} + + int ckIsDelegated(void) const + { return CProxy_IrrGroup::ckIsDelegated(); } + inline CkDelegateMgr *ckDelegatedTo(void) const 
+ { return CProxy_IrrGroup::ckDelegatedTo(); } + inline CkDelegateData *ckDelegatedPtr(void) const + { return CProxy_IrrGroup::ckDelegatedPtr(); } + CkGroupID ckDelegatedIdx(void) const + { return CProxy_IrrGroup::ckDelegatedIdx(); } +inline void ckCheck(void) const {CProxy_IrrGroup::ckCheck();} +CkChareID ckGetChareID(void) const + {return CProxy_IrrGroup::ckGetChareID();} +CkGroupID ckGetGroupID(void) const + {return CProxy_IrrGroup::ckGetGroupID();} +operator CkGroupID () const { return ckGetGroupID(); } + + inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxy_IrrGroup::setReductionClient(fn,param); } + inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxy_IrrGroup::ckSetReductionClient(fn,param); } + inline void ckSetReductionClient(CkCallback *cb) const + { CProxy_IrrGroup::ckSetReductionClient(cb); } + + void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) + { CProxy_IrrGroup::ckDelegate(dTo,dPtr); } + void ckUndelegate(void) + { CProxy_IrrGroup::ckUndelegate(); } + void pup(PUP::er &p) + { CProxy_IrrGroup::pup(p); + } + void ckSetGroupID(CkGroupID g) { + CProxy_IrrGroup::ckSetGroupID(g); + } + ContextGroup* ckLocalBranch(void) const { + return ckLocalBranch(ckGetGroupID()); + } + static ContextGroup* ckLocalBranch(CkGroupID gID) { + return (ContextGroup*)CkLocalBranch(gID); + } +/* DECLS: ContextGroup(); + */ + + static CkGroupID ckNew(const CkEntryOptions *impl_e_opts=NULL); + +/* DECLS: void top_level_task(); + */ + + void top_level_task(const CkEntryOptions *impl_e_opts=NULL); + + void top_level_task(int npes, int *pes, const CkEntryOptions *impl_e_opts=NULL); + + void top_level_task(CmiGroup &grp, const CkEntryOptions *impl_e_opts=NULL); + +}; +/* ---------------- section proxy -------------- */ +class CProxySection_ContextGroup: public CProxySection_IrrGroup{ + public: + typedef ContextGroup local_t; + typedef CkIndex_ContextGroup index_t; + typedef CProxy_ContextGroup 
proxy_t; + typedef CProxyElement_ContextGroup element_t; + typedef CProxySection_ContextGroup section_t; + + CProxySection_ContextGroup(void) { + } + CProxySection_ContextGroup(const IrrGroup *g) : CProxySection_IrrGroup(g){ + } + CProxySection_ContextGroup(const CkGroupID &_gid,const int *_pelist,int _npes, CK_DELCTOR_PARAM) : CProxySection_IrrGroup(_gid,_pelist,_npes,CK_DELCTOR_ARGS){ } + CProxySection_ContextGroup(const CkGroupID &_gid,const int *_pelist,int _npes, int factor = USE_DEFAULT_BRANCH_FACTOR) : CProxySection_IrrGroup(_gid,_pelist,_npes,factor){ } + CProxySection_ContextGroup(int n,const CkGroupID *_gid, int const * const *_pelist,const int *_npes, int factor = USE_DEFAULT_BRANCH_FACTOR) : CProxySection_IrrGroup(n,_gid,_pelist,_npes,factor){ } + CProxySection_ContextGroup(int n,const CkGroupID *_gid, int const * const *_pelist,const int *_npes, CK_DELCTOR_PARAM) : CProxySection_IrrGroup(n,_gid,_pelist,_npes,CK_DELCTOR_ARGS){ } + + int ckIsDelegated(void) const + { return CProxySection_IrrGroup::ckIsDelegated(); } + inline CkDelegateMgr *ckDelegatedTo(void) const + { return CProxySection_IrrGroup::ckDelegatedTo(); } + inline CkDelegateData *ckDelegatedPtr(void) const + { return CProxySection_IrrGroup::ckDelegatedPtr(); } + CkGroupID ckDelegatedIdx(void) const + { return CProxySection_IrrGroup::ckDelegatedIdx(); } +inline void ckCheck(void) const {CProxySection_IrrGroup::ckCheck();} +CkChareID ckGetChareID(void) const + {return CProxySection_IrrGroup::ckGetChareID();} +CkGroupID ckGetGroupID(void) const + {return CProxySection_IrrGroup::ckGetGroupID();} +operator CkGroupID () const { return ckGetGroupID(); } + + inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxySection_IrrGroup::setReductionClient(fn,param); } + inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const + { CProxySection_IrrGroup::ckSetReductionClient(fn,param); } + inline void ckSetReductionClient(CkCallback *cb) const + { 
CProxySection_IrrGroup::ckSetReductionClient(cb); } +inline int ckGetNumSections() const +{ return CProxySection_IrrGroup::ckGetNumSections(); } +inline CkSectionInfo &ckGetSectionInfo() +{ return CProxySection_IrrGroup::ckGetSectionInfo(); } +inline CkSectionID *ckGetSectionIDs() +{ return CProxySection_IrrGroup::ckGetSectionIDs(); } +inline CkSectionID &ckGetSectionID() +{ return CProxySection_IrrGroup::ckGetSectionID(); } +inline CkSectionID &ckGetSectionID(int i) +{ return CProxySection_IrrGroup::ckGetSectionID(i); } +inline CkGroupID ckGetGroupIDn(int i) const +{ return CProxySection_IrrGroup::ckGetGroupIDn(i); } +inline const int *ckGetElements() const +{ return CProxySection_IrrGroup::ckGetElements(); } +inline const int *ckGetElements(int i) const +{ return CProxySection_IrrGroup::ckGetElements(i); } +inline int ckGetNumElements() const +{ return CProxySection_IrrGroup::ckGetNumElements(); } +inline int ckGetNumElements(int i) const +{ return CProxySection_IrrGroup::ckGetNumElements(i); } + + void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) + { CProxySection_IrrGroup::ckDelegate(dTo,dPtr); } + void ckUndelegate(void) + { CProxySection_IrrGroup::ckUndelegate(); } + void pup(PUP::er &p) + { CProxySection_IrrGroup::pup(p); + } + void ckSetGroupID(CkGroupID g) { + CProxySection_IrrGroup::ckSetGroupID(g); + } + ContextGroup* ckLocalBranch(void) const { + return ckLocalBranch(ckGetGroupID()); + } + static ContextGroup* ckLocalBranch(CkGroupID gID) { + return (ContextGroup*)CkLocalBranch(gID); + } +/* DECLS: ContextGroup(); + */ + + +/* DECLS: void top_level_task(); + */ + + void top_level_task(const CkEntryOptions *impl_e_opts=NULL); + +}; +#define ContextGroup_SDAG_CODE +typedef CBaseT1CBase_ContextGroup; + +} // namespace charm + +} // namespace run + +} // namespace flecsi + +namespace flecsi { +namespace run { +namespace charm { +/* ---------------- method closures -------------- */ +class Closure_ContextGroup { + public: + + + struct 
top_level_task_2_closure; + +}; + +} // namespace charm + +} // namespace run + +} // namespace flecsi + +extern void _registercontext(void); +extern "C" void CkRegisterMainModule(void); +#endif diff --git a/flecsi/run/charm/context.def.h b/flecsi/run/charm/context.def.h new file mode 100644 index 000000000..cc9ba6562 --- /dev/null +++ b/flecsi/run/charm/context.def.h @@ -0,0 +1,208 @@ +namespace flecsi { +namespace run { +namespace charm { +/* ---------------- method closures -------------- */ +#ifndef CK_TEMPLATES_ONLY +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY + + struct Closure_ContextGroup::top_level_task_2_closure : public SDAG::Closure { + + + top_level_task_2_closure() { + init(); + } + top_level_task_2_closure(CkMigrateMessage*) { + init(); + } + void pup(PUP::er& __p) { + packClosure(__p); + } + virtual ~top_level_task_2_closure() { + } + PUPable_decl(SINGLE_ARG(top_level_task_2_closure)); + }; +#endif /* CK_TEMPLATES_ONLY */ + + +} // namespace charm + +} // namespace run + +} // namespace flecsi + +namespace flecsi { +namespace run { +namespace charm { +/* DEFS: group ContextGroup: IrrGroup{ +ContextGroup(); +void top_level_task(); +}; + */ +#ifndef CK_TEMPLATES_ONLY + int CkIndex_ContextGroup::__idx=0; +#endif /* CK_TEMPLATES_ONLY */ +#ifndef CK_TEMPLATES_ONLY +#endif /* CK_TEMPLATES_ONLY */ +#ifndef CK_TEMPLATES_ONLY +/* DEFS: ContextGroup(); + */ +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY +/* DEFS: void top_level_task(); + */ +void CProxyElement_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) +{ + ckCheck(); + void *impl_msg = CkAllocSysMsg(impl_e_opts); + if (ckIsDelegated()) { + CkGroupMsgPrep(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); + ckDelegatedTo()->GroupSend(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupPe(), ckGetGroupID()); + } else { + CkSendMsgBranch(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, 
ckGetGroupPe(), ckGetGroupID(),0); + } +} +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY +/* DEFS: ContextGroup(); + */ +CkGroupID CProxy_ContextGroup::ckNew(const CkEntryOptions *impl_e_opts) +{ + void *impl_msg = CkAllocSysMsg(impl_e_opts); + UsrToEnv(impl_msg)->setMsgtype(BocInitMsg); + CkGroupID gId = CkCreateGroup(CkIndex_ContextGroup::__idx, CkIndex_ContextGroup::idx_ContextGroup_void(), impl_msg); + return gId; +} + +// Entry point registration function +int CkIndex_ContextGroup::reg_ContextGroup_void() { + int epidx = CkRegisterEp("ContextGroup()", + reinterpret_cast(_call_ContextGroup_void), 0, __idx, 0); + return epidx; +} + +void CkIndex_ContextGroup::_call_ContextGroup_void(void* impl_msg, void* impl_obj_void) +{ + ContextGroup* impl_obj = static_cast(impl_obj_void); + new (impl_obj_void) ContextGroup(); + if(UsrToEnv(impl_msg)->isVarSysMsg() == 0) + CkFreeSysMsg(impl_msg); +} +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY +/* DEFS: void top_level_task(); + */ +void CProxy_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) +{ + ckCheck(); + void *impl_msg = CkAllocSysMsg(impl_e_opts); + if (ckIsDelegated()) { + CkGroupMsgPrep(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); + ckDelegatedTo()->GroupBroadcast(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); + } else CkBroadcastMsgBranch(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID(),0); +} +void CProxy_ContextGroup::top_level_task(int npes, int *pes, const CkEntryOptions *impl_e_opts) { + void *impl_msg = CkAllocSysMsg(impl_e_opts); + CkSendMsgBranchMulti(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID(), npes, pes,0); +} +void CProxy_ContextGroup::top_level_task(CmiGroup &grp, const CkEntryOptions *impl_e_opts) { + void *impl_msg = CkAllocSysMsg(impl_e_opts); + CkSendMsgBranchGroup(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, 
ckGetGroupID(), grp,0); +} + +// Entry point registration function +int CkIndex_ContextGroup::reg_top_level_task_void() { + int epidx = CkRegisterEp("top_level_task()", + reinterpret_cast(_call_top_level_task_void), 0, __idx, 0); + return epidx; +} + +void CkIndex_ContextGroup::_call_top_level_task_void(void* impl_msg, void* impl_obj_void) +{ + ContextGroup* impl_obj = static_cast(impl_obj_void); + impl_obj->top_level_task(); + if(UsrToEnv(impl_msg)->isVarSysMsg() == 0) + CkFreeSysMsg(impl_msg); +} +PUPable_def(SINGLE_ARG(Closure_ContextGroup::top_level_task_2_closure)) +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY +/* DEFS: ContextGroup(); + */ +#endif /* CK_TEMPLATES_ONLY */ + +#ifndef CK_TEMPLATES_ONLY +/* DEFS: void top_level_task(); + */ +void CProxySection_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) +{ + ckCheck(); + void *impl_msg = CkAllocSysMsg(impl_e_opts); + if (ckIsDelegated()) { + ckDelegatedTo()->GroupSectionSend(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetNumSections(), ckGetSectionIDs()); + } else { + void *impl_msg_tmp; + for (int i=0; i +void flecsi::run::charm::CBase_ContextGroup::virtual_pup(PUP::er &p) { + recursive_pup(dynamic_cast(this), p); +} +#endif /* CK_TEMPLATES_ONLY */ diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh new file mode 100644 index 000000000..3be1258d1 --- /dev/null +++ b/flecsi/run/charm/context.hh @@ -0,0 +1,389 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! 
+#endif + +#include "../context.hh" +//#include "flecsi/execution/launch.hh" +//#include "flecsi/execution/processor.hh" +#include +#include + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include + +#if !defined(FLECSI_ENABLE_MPI) +#error FLECSI_ENABLE_MPI not defined! This file depends on MPI! +#endif + +#include + +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! +#endif + +#include + +#include +#include +#include + +#include "context.decl.h" + +namespace flecsi::run { + +const size_t FLECSI_TOP_LEVEL_TASK_ID = 0; +const size_t FLECSI_MAPPER_FORCE_RANK_MATCH = 0x00001000; +const size_t FLECSI_MAPPER_COMPACTED_STORAGE = 0x00002000; +const size_t FLECSI_MAPPER_SUBRANK_LAUNCH = 0x00003000; +const size_t FLECSI_MAPPER_EXCLUSIVE_LR = 0x00004000; + +namespace charm { +template +using task = R(const Legion::Task *, + const std::vector &, + Legion::Context, + Legion::Runtime *); + +class ContextGroup : public CBase_ContextGroup { +public: + ContextGroup() { + CkPrintf("Group created on %i\n", CkMyPe()); + } + + void testEntry() { + CkPrintf("Hello from element %i\n", thisIndex); + contribute(CkCallback(CkCallback::ckExit)); + } +}; + +} + +struct context_t : context { + + /* + Friend declarations. Some parts of this interface are intentionally private + to avoid inadvertent corruption of initialization logic. + */ + + friend charm::task<> top_level_task, handoff_to_mpi_task, wait_on_mpi_task; + + /*! + The registration_function_t type defines a function type for + registration callbacks. + */ + + using registration_function_t = void (*)(); + + //--------------------------------------------------------------------------// + // Runtime. + //--------------------------------------------------------------------------// + + /* + Documentation for this interface is in the top-level context type. 
+ */ + + int initialize(int argc, char ** argv, bool dependent); + + /* + Documentation for this interface is in the top-level context type. + */ + + void finalize(); + + /* + Documentation for this interface is in the top-level context type. + */ + + int start(const std::function &); + + /* + Documentation for this interface is in the top-level context type. + */ + + void clear() {} // clear + + /* + Documentation for this interface is in the top-level context type. + */ + + size_t process() const { + return context::process_; + } // process + + /* + Documentation for this interface is in the top-level context type. + */ + + size_t processes() const { + return context::processes_; + } // processes + + /* + Documentation for this interface is in the top-level context type. + */ + + size_t threads_per_process() const { + return context::threads_per_process_; + } // threads_per_process + + /* + Documentation for this interface is in the top-level context type. + */ + + size_t threads() const { + return context::threads_; + } // threads + + /* + Documentation for this interface is in the top-level context type. + */ + + static size_t task_depth() { + return Legion::Runtime::get_runtime() + ->get_current_task(Legion::Runtime::get_context()) + ->get_depth(); + } // task_depth + + /* + Documentation for this interface is in the top-level context type. + */ + + static size_t color() { + flog_assert( + task_depth() > 0, "this method can only be called from within a task"); + return Legion::Runtime::get_runtime() + ->get_current_task(Legion::Runtime::get_context()) + ->index_point.point_data[0]; + } // color + + /* + Documentation for this interface is in the top-level context type. 
+ */ + + static size_t colors() { + flog_assert( + task_depth() > 0, "this method can only be called from within a task"); + return Legion::Runtime::get_runtime() + ->get_current_task(Legion::Runtime::get_context()) + ->index_domain.get_volume(); + } // colors + + /// Store a reference to the argument under a small unused positive integer. + /// Its type is forgotten. + template + std::size_t record(T & t) { + const auto tp = const_cast(static_cast(&t)); + if(auto & f = enumerated.front()) { // we have a free slot + auto & slot = *static_cast(f); + f = slot; + slot = tp; + return &slot - &f; + } + // NB: reallocation invalidates all zero of the free list pointers + enumerated.push_back(tp); + return enumerated.size() - 1; + } + /// Discard a recorded reference. Its index may be reused. + void forget(std::size_t i) { + void *&f = enumerated.front(), *&p = enumerated[i]; + p = f; + f = &p; + } + /// Obtain a reference from its index. + /// \tparam T the object's forgotten type + template + T & recall(std::size_t i) { + return *static_cast(enumerated[i]); + } + + //--------------------------------------------------------------------------// + // MPI interoperability. + //--------------------------------------------------------------------------// + + /*! + Set the MPI user task. When control is given to the MPI runtime + it will execute whichever function is currently set. + */ + + void set_mpi_task(std::function mpi_task) { + { + log::devel_guard guard(context_tag); + flog_devel(info) << "In set_mpi_task" << std::endl; + } + + mpi_task_ = std::move(mpi_task); + } + + /*! + Set the distributed-memory domain. + */ + + void set_all_processes(const LegionRuntime::Arrays::Rect<1> & all_processes) { + all_processes_ = all_processes; + } // all_processes + + /*! + Return the distributed-memory domain. + */ + + const LegionRuntime::Arrays::Rect<1> & all_processes() const { + return all_processes_; + } // all_processes + + /*! + Switch execution to the MPI runtime. 
+ + @param ctx The Legion runtime context. + @param runtime The Legion task runtime pointer. + */ + + void handoff_to_mpi(Legion::Context & ctx, Legion::Runtime * runtime); + + /*! + Wait on the MPI runtime to finish the current task execution. + + @param ctx The Legion runtime context. + @param runtime The Legion task runtime pointer. + + @return A future map with the result of the task execution. + */ + + Legion::FutureMap wait_on_mpi(Legion::Context & ctx, + Legion::Runtime * runtime); + + /*! + Connect with the MPI runtime. + + @param ctx The Legion runtime context. + @param runtime The Legion task runtime pointer. + */ + + void connect_with_mpi(Legion::Context & ctx, Legion::Runtime * runtime); + + //--------------------------------------------------------------------------// + // Task interface. + //--------------------------------------------------------------------------// + + /*! + Register a task with the runtime. + + @param name The task name string. + @param callback The registration call back function. + \return task ID + */ + std::size_t register_task(std::string_view name, + const registration_function_t & callback) { + flog_devel(info) << "Registering task callback: " << name << std::endl; + + flog_assert( + task_registry_.size() < FLECSI_GENERATED_ID_MAX, "too many tasks"); + task_registry_.push_back(callback); + return task_registry_.size(); // 0 is the top-level task + } // register_task + +private: + /*! + Handoff to legion runtime from MPI. + */ + + void handoff_to_legion() { + { + log::devel_guard guard(context_tag); + flog_devel(info) << "In handoff_to_legion" << std::endl; + } + MPI_Barrier(MPI_COMM_WORLD); + handshake_.mpi_handoff_to_legion(); + } // handoff_to_legion + + /*! + Wait for Legion runtime to complete. 
+ */ + + void wait_on_legion() { + { + log::devel_guard guard(context_tag); + flog_devel(info) << "In wait_on_legion" << std::endl; + } + + handshake_.mpi_wait_on_legion(); + MPI_Barrier(MPI_COMM_WORLD); + } // wait_on_legion + + // When GCC fixes bug #83258, these can be lambdas in the public functions: + /*! + Handoff to MPI from Legion. + */ + + static void mpi_handoff() { + instance().handshake_.legion_handoff_to_mpi(); + } + + /*! + Wait for MPI runtime to complete task execution. + */ + + static void mpi_wait() { + instance().handshake_.legion_wait_on_mpi(); + } + + /*! + Invoke the current MPI task, if any, and clear it. + + \return whether there was a task to invoke + */ + + bool invoke_mpi_task() { + const bool ret(mpi_task_); + if(ret) { + mpi_task_(); + mpi_task_ = nullptr; + } + return ret; + } // invoke_mpi_task + + /*--------------------------------------------------------------------------* + Runtime data. + *--------------------------------------------------------------------------*/ + + // The first element is the head of the free list. + std::vector enumerated = {nullptr}; + const std::function * top_level_action_ = nullptr; + + /*--------------------------------------------------------------------------* + Interoperability data members. + *--------------------------------------------------------------------------*/ + + std::function mpi_task_; + Legion::MPILegionHandshake handshake_; + LegionRuntime::Arrays::Rect<1> all_processes_; + + /*--------------------------------------------------------------------------* + Task data members. 
+ *--------------------------------------------------------------------------*/ + + std::vector task_registry_; +}; + +} // namespace flecsi::run diff --git a/flecsi/run/charm/mapper.hh b/flecsi/run/charm/mapper.hh new file mode 100644 index 000000000..8ed3311e7 --- /dev/null +++ b/flecsi/run/charm/mapper.hh @@ -0,0 +1,318 @@ +/* + @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ + /@@///// /@@ @@////@@ @@////// /@@ + /@@ /@@ @@@@@ @@ // /@@ /@@ + /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ + /@@//// /@@/@@@@@@@/@@ ////////@@/@@ + /@@ /@@/@@//// //@@ @@ /@@/@@ + /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ + // /// ////// ////// //////// // + + Copyright (c) 2016, Triad National Security, LLC + All rights reserved. + */ +#pragma once + +/*! @file */ + +#include + +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif + +#include "../backend.hh" + +#if !defined(FLECSI_ENABLE_LEGION) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#endif + +#include +#include +#include + +namespace flecsi { + +inline log::devel_tag legion_mapper_tag("legion_mapper"); + +namespace run { + +/* + The mpi_mapper_t - is a custom mapper that handles mpi-legion + interoperability in FLeCSI + + @ingroup legion-runtime +*/ + +class mpi_mapper_t : public Legion::Mapping::DefaultMapper +{ +public: + /*! + Contructor. 
Derives from the Legion's Default Mapper + + @param machine Machine type for Legion's Realm + @param _runtime Legion runtime + @param local processor type: currently supports only + LOC_PROC and TOC_PROC + */ + + mpi_mapper_t(Legion::Machine machine, + Legion::Runtime * _runtime, + Legion::Processor local) + : Legion::Mapping::DefaultMapper(_runtime->get_mapper_runtime(), + machine, + local, + "default"), + machine(machine) { + using legion_machine = Legion::Machine; + using legion_proc = Legion::Processor; + + legion_machine::ProcessorQuery pq = + legion_machine::ProcessorQuery(machine).same_address_space_as(local); + for(legion_machine::ProcessorQuery::iterator pqi = pq.begin(); + pqi != pq.end(); + ++pqi) { + legion_proc p = *pqi; + if(p.kind() == legion_proc::LOC_PROC) + local_cpus.push_back(p); + else if(p.kind() == legion_proc::TOC_PROC) + local_gpus.push_back(p); + else + continue; + + std::map & mem_map = proc_mem_map[p]; + + legion_machine::MemoryQuery mq = + legion_machine::MemoryQuery(machine).has_affinity_to(p); + for(legion_machine::MemoryQuery::iterator mqi = mq.begin(); + mqi != mq.end(); + ++mqi) { + Realm::Memory m = *mqi; + mem_map[m.kind()] = m; + + if(m.kind() == Realm::Memory::SYSTEM_MEM) + local_sysmem = m; + } // end for + } // end for + + { + log::devel_guard guard(legion_mapper_tag); + flog_devel(info) << "Mapper constructor" << std::endl + << "\tlocal: " << local << std::endl + << "\tcpus: " << local_cpus.size() << std::endl + << "\tgpus: " << local_gpus.size() << std::endl + << "\tsysmem: " << local_sysmem << std::endl; + } // scope + } // end mpi_mapper_t + + /*! 
+ Destructor + */ + virtual ~mpi_mapper_t(){}; + + Legion::LayoutConstraintID default_policy_select_layout_constraints( + Legion::Mapping::MapperContext ctx, + Realm::Memory, + const Legion::RegionRequirement &, + Legion::Mapping::DefaultMapper::MappingKind, + bool /* constraint */, + bool & force_new_instances) { + // We always set force_new_instances to false since we are + // deciding to optimize for minimizing memory usage instead + // of avoiding Write-After-Read (WAR) dependences + force_new_instances = false; + std::vector ordering; + ordering.push_back(Legion::DimensionKind::DIM_Y); + ordering.push_back(Legion::DimensionKind::DIM_X); + ordering.push_back(Legion::DimensionKind::DIM_F); // SOA + Legion::OrderingConstraint ordering_constraint( + ordering, true /*contiguous*/); + Legion::LayoutConstraintSet layout_constraint; + layout_constraint.add_constraint(ordering_constraint); + + // Do the registration + Legion::LayoutConstraintID result = + runtime->register_layout(ctx, layout_constraint); + return result; + } + + /*! + Specialization of the map_task funtion for FLeCSI + By default, map_task will execute Legions map_task from DefaultMapper. 
+ In the case the launcher has been tagged with the + "MAPPER_COMPACTED_STORAGE" tag, mapper will create single physical + instance for exclusive, shared and ghost partitions for each data handle + + @param ctx Mapper Context + @param task Legion's task + @param input Input information about task mapping + @param output Output information about task mapping + */ + + virtual void map_task(const Legion::Mapping::MapperContext ctx, + const Legion::Task & task, + const Legion::Mapping::Mapper::MapTaskInput & input, + Legion::Mapping::Mapper::MapTaskOutput & output) { + DefaultMapper::map_task(ctx, task, input, output); + + if((task.tag == FLECSI_MAPPER_COMPACTED_STORAGE) && + (task.regions.size() > 0)) { + + Legion::Memory target_mem = + DefaultMapper::default_policy_select_target_memory( + ctx, task.target_proc, task.regions[0]); + + // check if we get region requirements for "exclusive, shared and ghost" + // logical regions for each data handle + + // Filling out "layout_constraints" with the defaults + Legion::LayoutConstraintSet layout_constraints; + // No specialization + layout_constraints.add_constraint(Legion::SpecializedConstraint()); + layout_constraints.add_constraint(Legion::OrderingConstraint()); + // Constrained for the target memory kind + layout_constraints.add_constraint( + Legion::MemoryConstraint(target_mem.kind())); + // Have all the field for the instance available + std::vector all_fields; + layout_constraints.add_constraint(Legion::FieldConstraint()); + + // FIXME:: add colocation_constraints + Legion::ColocationConstraint colocation_constraints; + + for(size_t indx = 0; indx < task.regions.size(); indx++) { + + Legion::Mapping::PhysicalInstance result; + std::vector regions; + bool created; + + if(task.regions[indx].tag == FLECSI_MAPPER_EXCLUSIVE_LR) { + + flog_assert((task.regions.size() >= (indx + 2)), + "ERROR:: wrong number of regions passed to the task wirth \ + the tag = FLECSI_MAPPER_COMPACTED_STORAGE"); + + 
flog_assert((!task.regions[indx].region.exists()), + "ERROR:: pasing not existing REGION to the mapper"); + regions.push_back(task.regions[indx].region); + regions.push_back(task.regions[indx + 1].region); + regions.push_back(task.regions[indx + 2].region); + + flog_assert(runtime->find_or_create_physical_instance(ctx, + target_mem, + layout_constraints, + regions, + result, + created, + true /*acquire*/, + GC_NEVER_PRIORITY), + "FLeCSI mapper failed to allocate instance"); + + for(size_t j = 0; j < 3; j++) { + output.chosen_instances[indx + j].push_back(result); + } // for + + indx = indx + 2; + } + else { + + regions.push_back(task.regions[indx].region); + + flog_assert(runtime->find_or_create_physical_instance(ctx, + target_mem, + layout_constraints, + regions, + result, + created, + true /*acquire*/, + GC_NEVER_PRIORITY), + "FLeCSI mapper failed to allocate instance"); + + output.chosen_instances[indx].push_back(result); + + } // end if + } // end for + + } // end if + + } // map_task + + virtual void slice_task(const Legion::Mapping::MapperContext ctx, + const Legion::Task & task, + const Legion::Mapping::Mapper::SliceTaskInput & input, + Legion::Mapping::Mapper::SliceTaskOutput & output) { + + switch(task.tag) { + case FLECSI_MAPPER_SUBRANK_LAUNCH: + // expect a 1-D index domain + assert(input.domain.get_dim() == 1); + // send the whole domain to our local processor + output.slices.resize(1); + output.slices[0].domain = input.domain; + output.slices[0].proc = task.target_proc; + break; + + case FLECSI_MAPPER_FORCE_RANK_MATCH: { + // expect a 1-D index domain - each point goes to the corresponding node + assert(input.domain.get_dim() == 1); + LegionRuntime::Arrays::Rect<1> r = input.domain.get_rect<1>(); + + // go through all the CPU processors and find a representative for each + // node (i.e. 
address space) + std::map targets; + + Legion::Machine::ProcessorQuery pq = + Legion::Machine::ProcessorQuery(machine).only_kind( + Legion::Processor::LOC_PROC); + for(Legion::Machine::ProcessorQuery::iterator it = pq.begin(); + it != pq.end(); + ++it) { + Legion::Processor p = *it; + int a = p.address_space(); + if(targets.count(a) == 0) + targets[a] = p; + } + + output.slices.resize(1); + for(int a = r.lo[0]; a <= r.hi[0]; a++) { + assert(targets.count(a) > 0); + output.slices[0].domain = // Legion::Domain::from_rect<1>( + Legion::Rect<1>(a, a); + output.slices[0].proc = targets[a]; + } + break; + } + + default: + DefaultMapper::slice_task(ctx, task, input, output); + } + } + +private: + std::map> + proc_mem_map; + Realm::Memory local_sysmem; + Realm::Machine machine; +}; + +/*! + mapper_registration is used to replace DefaultMapper with mpi_mapper_t in + FLeCSI + + @ingroup legion-runtime + */ + +inline void +mapper_registration(Legion::Machine machine, + Legion::HighLevelRuntime * rt, + const std::set & local_procs) { + for(std::set::const_iterator it = local_procs.begin(); + it != local_procs.end(); + it++) { + mpi_mapper_t * mapper = new mpi_mapper_t(machine, rt, *it); + rt->replace_default_mapper(mapper, *it); + } +} // mapper registration + +} // namespace run +} // namespace flecsi From b97b0f8d1378e15536a6e18a30ebeafd8936cc1c Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Mon, 15 Jun 2020 17:23:53 -0400 Subject: [PATCH 04/19] Fix CMAKE for charm backend --- flecsi/data/CMakeLists.txt | 1 - flecsi/run/CMakeLists.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/flecsi/data/CMakeLists.txt b/flecsi/data/CMakeLists.txt index 5c6711d44..1b6370949 100644 --- a/flecsi/data/CMakeLists.txt +++ b/flecsi/data/CMakeLists.txt @@ -69,7 +69,6 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") set(data_HEADERS charm/policy.hh - charm/types.hh ${data_HEADERS} ) diff --git a/flecsi/run/CMakeLists.txt b/flecsi/run/CMakeLists.txt index 96810bc05..e508b2d78 100644 --- 
a/flecsi/run/CMakeLists.txt +++ b/flecsi/run/CMakeLists.txt @@ -52,7 +52,6 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") ${run_HEADERS} charm/context.hh charm/mapper.hh - charm/tasks.hh ) set(run_SOURCES From a92b1ca614a541ae2acea59a9f3fdf0f33c50bfd Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Mon, 15 Jun 2020 18:48:31 -0400 Subject: [PATCH 05/19] Update context to use charm runtime instead of legion --- flecsi/data.hh | 12 +-- flecsi/run/charm/context.cc | 164 ++++++------------------------------ flecsi/run/charm/context.ci | 14 +++ flecsi/run/charm/context.hh | 36 ++------ 4 files changed, 53 insertions(+), 173 deletions(-) create mode 100644 flecsi/run/charm/context.ci diff --git a/flecsi/data.hh b/flecsi/data.hh index 80bd92b69..ba76c32c9 100644 --- a/flecsi/data.hh +++ b/flecsi/data.hh @@ -55,29 +55,29 @@ namespace detail { struct data_guard { struct global_guard { global_guard() { - global_topology.allocate({}); + //global_topology.allocate({}); } global_guard(global_guard &&) = delete; ~global_guard() { - global_topology.deallocate(); + //global_topology.deallocate(); } } g; struct color_guard { color_guard() { - process_coloring.allocate(run::context::instance().processes()); + //process_coloring.allocate(run::context::instance().processes()); } color_guard(color_guard &&) = delete; ~color_guard() { - process_coloring.deallocate(); + //process_coloring.deallocate(); } } c; struct process_guard { process_guard() { - process_topology.allocate(process_coloring.get()); + //process_topology.allocate(process_coloring.get()); } process_guard(process_guard &&) = delete; ~process_guard() { - process_topology.deallocate(); + //process_topology.deallocate(); } } p; }; diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 92925ab64..688bd1801 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -25,43 +25,31 @@ #include "flecsi/run/types.hh" #include +#include + +#include "context.def.h" + namespace flecsi::run { 
using namespace boost::program_options; using exec::charm::task_id; -/*----------------------------------------------------------------------------* - Legion top-level task. - *----------------------------------------------------------------------------*/ +namespace charm { -void -top_level_task(const Legion::Task *, - const std::vector &, - Legion::Context ctx, - Legion::Runtime * runtime) { +ContextGroup::ContextGroup() { + CkPrintf("Group created on %i\n", CkMyPe()); +} +void ContextGroup::top_level_task() { + std::cout << "Executing the top level task" << std::endl; context_t & context_ = context_t::instance(); - - /* - Initialize MPI interoperability. - */ - - context_.connect_with_mpi(ctx, runtime); - context_.wait_on_mpi(ctx, runtime); - - /* - Invoke the FleCSI runtime top-level action. - */ - detail::data_guard(), context_.exit_status() = (*context_.top_level_action_)(); - - /* - Finish up Legion runtime and fall back out to MPI. - */ - - context_.handoff_to_mpi(ctx, runtime); -} // top_level_task + if (CkMyPe() == 0) { + CkStartQD(CkCallback(CkCallback::ckExit)); + } +} +} //----------------------------------------------------------------------------// // Implementation of context_t::initialize. @@ -71,42 +59,20 @@ int context_t::initialize(int argc, char ** argv, bool dependent) { if(dependent) { - int version, subversion; - MPI_Get_version(&version, &subversion); - -#if defined(GASNET_CONDUIT_MPI) - if(version == 3 && subversion > 0) { - int provided; - MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); - - if(provided < MPI_THREAD_MULTIPLE) { - std::cerr << "Your implementation of MPI does not support " - "MPI_THREAD_MULTIPLE which is required for use of the " - "GASNet MPI conduit with the Legion-MPI Interop!" 
- << std::endl; - std::abort(); - } // if + CharmBeginInit(argc, argv); + if (CkMyPe() == 0) { + cgProxy = charm::CProxy_ContextGroup::ckNew(); } - else { - // Initialize the MPI runtime - MPI_Init(&argc, &argv); - } // if -#else - MPI_Init(&argc, &argv); -#endif + CharmFinishInit(); } // if - int rank, size; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - MPI_Comm_size(MPI_COMM_WORLD, &size); - - context::process_ = rank; - context::processes_ = size; + context::process_ = CkMyPe(); + context::processes_ = CkNumPes(); auto status = context::initialize_generic(argc, argv, dependent); if(status != success && dependent) { - MPI_Finalize(); + CharmLibExit(); } // if return status; @@ -120,11 +86,9 @@ void context_t::finalize() { context::finalize_generic(); -#ifndef GASNET_CONDUIT_MPI if(context::initialize_dependent_) { - MPI_Finalize(); + CharmLibExit(); } // if -#endif } // finalize //----------------------------------------------------------------------------// @@ -141,92 +105,20 @@ context_t::start(const std::function & action) { top_level_action_ = &action; - /* - Setup Legion top-level task. - */ - - Runtime::set_top_level_task_id(FLECSI_TOP_LEVEL_TASK_ID); - - { - Legion::TaskVariantRegistrar registrar( - FLECSI_TOP_LEVEL_TASK_ID, "runtime_driver"); - registrar.add_constraint(ProcessorConstraint(Processor::LOC_PROC)); - registrar.set_replicable(); - Runtime::preregister_task_variant( - registrar, "runtime_driver"); - } // scope - - /* - Arg 0: MPI has initial control (true). - Arg 1: Number of MPI participants (1). - Arg 2: Number of Legion participants (1). - */ - - handshake_ = Legion::Runtime::create_handshake(true, 1, 1); - - /* - Register custom mapper. - */ - - Runtime::add_registration_callback(mapper_registration); - - /* - Configure interoperability layer. - */ - - Legion::Runtime::configure_MPI_interoperability(context::process_); - context::start(); /* Legion command-line arguments. 
*/ - std::vector largv; - largv.push_back(argv_[0]); - - auto iss = std::istringstream{backend_}; - std::vector lsargv(std::istream_iterator{iss}, - std::istream_iterator()); - - for(auto & arg : lsargv) { - largv.push_back(&arg[0]); - } // for - - // FIXME: This needs to be gotten from Legion + // FIXME: This needs to be gotten from Charm context::threads_per_process_ = 1; context::threads_ = context::processes_ * context::threads_per_process_; - /* - Start Legion runtime. - */ - - { - log::devel_guard("context"); - - std::stringstream stream; - - stream << "Starting Legion runtime" << std::endl; - stream << "\targc: " << largv.size() << std::endl; - stream << "\targv: "; - - for(auto opt : largv) { - stream << opt << " "; - } // for - - stream << std::endl; - - flog_devel(info) << stream.str(); - } // scope - - Runtime::start(largv.size(), largv.data(), true); - - do { - handoff_to_legion(); - wait_on_legion(); - } while(invoke_mpi_task()); - - Legion::Runtime::wait_for_shutdown(); + if (context::process_ == 0) { + cgProxy.top_level_task(); + } + StartCharmScheduler(); return context::exit_status(); } // context_t::start diff --git a/flecsi/run/charm/context.ci b/flecsi/run/charm/context.ci new file mode 100644 index 000000000..2f63034b2 --- /dev/null +++ b/flecsi/run/charm/context.ci @@ -0,0 +1,14 @@ +mainmodule context { +namespace flecsi { +namespace run { +namespace charm { + + group ContextGroup { + entry ContextGroup(); + entry void top_level_task(); + } + +} +} +} +} diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 3be1258d1..a2ba6dba9 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -68,14 +68,8 @@ using task = R(const Legion::Task *, class ContextGroup : public CBase_ContextGroup { public: - ContextGroup() { - CkPrintf("Group created on %i\n", CkMyPe()); - } - - void testEntry() { - CkPrintf("Hello from element %i\n", thisIndex); - contribute(CkCallback(CkCallback::ckExit)); - } + ContextGroup(); + 
void top_level_task(); }; } @@ -88,6 +82,7 @@ struct context_t : context { */ friend charm::task<> top_level_task, handoff_to_mpi_task, wait_on_mpi_task; + friend charm::ContextGroup; /*! The registration_function_t type defines a function type for @@ -286,23 +281,6 @@ struct context_t : context { // Task interface. //--------------------------------------------------------------------------// - /*! - Register a task with the runtime. - - @param name The task name string. - @param callback The registration call back function. - \return task ID - */ - std::size_t register_task(std::string_view name, - const registration_function_t & callback) { - flog_devel(info) << "Registering task callback: " << name << std::endl; - - flog_assert( - task_registry_.size() < FLECSI_GENERATED_ID_MAX, "too many tasks"); - task_registry_.push_back(callback); - return task_registry_.size(); // 0 is the top-level task - } // register_task - private: /*! Handoff to legion runtime from MPI. @@ -367,6 +345,8 @@ private: Runtime data. *--------------------------------------------------------------------------*/ + charm::CProxy_ContextGroup cgProxy; + // The first element is the head of the free list. std::vector enumerated = {nullptr}; const std::function * top_level_action_ = nullptr; @@ -378,12 +358,6 @@ private: std::function mpi_task_; Legion::MPILegionHandshake handshake_; LegionRuntime::Arrays::Rect<1> all_processes_; - - /*--------------------------------------------------------------------------* - Task data members. 
- *--------------------------------------------------------------------------*/ - - std::vector task_registry_; }; } // namespace flecsi::run From aa8ab2cb5180f7fda5d317aacf3d427d920f3bd4 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 16 Jun 2020 16:16:56 -0400 Subject: [PATCH 06/19] Remove rest of legion code from context --- flecsi/exec/charm/policy.hh | 15 ++- flecsi/exec/charm/task_wrapper.hh | 3 +- flecsi/run/charm/context.cc | 59 +----------- flecsi/run/charm/context.hh | 150 ++---------------------------- 4 files changed, 21 insertions(+), 206 deletions(-) diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 18060bdbe..124ef9080 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -132,7 +132,8 @@ reduce(ARGS &&... args) { if(future.get_result() > FLOG_SERIALIZATION_THRESHOLD) { constexpr auto send = [] { - run::context::instance().set_mpi_task(log::send_to_one); + // TODO: This functionality was removed from charm context + //run::context::instance().set_mpi_task(log::send_to_one); }; Legion::IndexLauncher flog_mpi_launcher(charm::task_id>, launch_domain, @@ -149,10 +150,12 @@ reduce(ARGS &&... args) { future_mpi.wait_all_results(true); // Handoff to the MPI runtime. - flecsi_context.handoff_to_mpi(legion_context, legion_runtime); + // TODO: This functionality was removed from charm context + //flecsi_context.handoff_to_mpi(legion_context, legion_runtime); // Wait for MPI to finish execution (synchronous). - flecsi_context.wait_on_mpi(legion_context, legion_runtime); + // TODO: This functionality was removed from charm context + //flecsi_context.wait_on_mpi(legion_context, legion_runtime); } // if } // if #endif // FLECSI_ENABLE_FLOG @@ -293,11 +296,13 @@ reduce(ARGS &&... args) { future.wait_all_results(true); // Handoff to the MPI runtime. 
- flecsi_context.handoff_to_mpi(legion_context, legion_runtime); + // TODO: This functionality was removed from charm context + //flecsi_context.handoff_to_mpi(legion_context, legion_runtime); // Wait for MPI to finish execution (synchronous). // We must keep mpi_args alive until then. - flecsi_context.wait_on_mpi(legion_context, legion_runtime); + // TODO: This functionality was removed from charm context + //flecsi_context.wait_on_mpi(legion_context, legion_runtime); if constexpr(!std::is_void_v) { // FIXME implement logic for reduction MPI task diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index cff999f14..9dd61da59 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -276,7 +276,8 @@ struct task_wrapper { // Set the MPI function and make the runtime active. auto & c = run::context::instance(); - c.set_mpi_task([&] { apply(F, std::move(mpi_task_args)); }); + // TODO: Removed from context in charm backend + //c.set_mpi_task([&] { apply(F, std::move(mpi_task_args)); }); // FIXME: Refactor // finalize_handles_t finalize_handles; diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 688bd1801..4d9c1f631 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -61,7 +61,7 @@ context_t::initialize(int argc, char ** argv, bool dependent) { if(dependent) { CharmBeginInit(argc, argv); if (CkMyPe() == 0) { - cgProxy = charm::CProxy_ContextGroup::ckNew(); + context_proxy_ = charm::CProxy_ContextGroup::ckNew(); } CharmFinishInit(); } // if @@ -116,66 +116,11 @@ context_t::start(const std::function & action) { context::threads_ = context::processes_ * context::threads_per_process_; if (context::process_ == 0) { - cgProxy.top_level_task(); + context_proxy_.top_level_task(); } StartCharmScheduler(); return context::exit_status(); } // context_t::start -//----------------------------------------------------------------------------// -// Implementation of 
context_t::handoff_to_mpi. -//----------------------------------------------------------------------------// - -void -context_t::handoff_to_mpi(Legion::Context & ctx, Legion::Runtime * runtime) { - Legion::ArgumentMap arg_map; - Legion::IndexLauncher handoff_to_mpi_launcher( - task_id>, - Legion::Domain::from_rect<1>(context_t::instance().all_processes()), - Legion::TaskArgument(NULL, 0), - arg_map); - - handoff_to_mpi_launcher.tag = FLECSI_MAPPER_FORCE_RANK_MATCH; - auto fm = runtime->execute_index_space(ctx, handoff_to_mpi_launcher); - - fm.wait_all_results(true); -} // context_t::handoff_to_mpi - -//----------------------------------------------------------------------------// -// Implementation of context_t::wait_on_mpi. -//----------------------------------------------------------------------------// - -Legion::FutureMap -context_t::wait_on_mpi(Legion::Context & ctx, Legion::Runtime * runtime) { - Legion::ArgumentMap arg_map; - Legion::IndexLauncher wait_on_mpi_launcher(task_id>, - Legion::Domain::from_rect<1>(context_t::instance().all_processes()), - Legion::TaskArgument(NULL, 0), - arg_map); - - wait_on_mpi_launcher.tag = FLECSI_MAPPER_FORCE_RANK_MATCH; - auto fm = runtime->execute_index_space(ctx, wait_on_mpi_launcher); - - fm.wait_all_results(true); - - return fm; -} // context_t::wait_on_mpi - -//----------------------------------------------------------------------------// -// Implementation of context_t::connect_with_mpi. 
-//----------------------------------------------------------------------------// - -void -context_t::connect_with_mpi(Legion::Context &, Legion::Runtime *) { - int size; - MPI_Comm_size(MPI_COMM_WORLD, &size); - - LegionRuntime::Arrays::Rect<1> launch_bounds( - LegionRuntime::Arrays::Point<1>(0), - LegionRuntime::Arrays::Point<1>(size - 1)); - - context_t::instance().set_all_processes(launch_bounds); -} // context_t::connect_with_mpi - } // namespace flecsi::run diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index a2ba6dba9..b990bb232 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -156,9 +156,8 @@ struct context_t : context { */ static size_t task_depth() { - return Legion::Runtime::get_runtime() - ->get_current_task(Legion::Runtime::get_context()) - ->get_depth(); + // TODO: Must be some way to get this from Charm runtime + return 0; } // task_depth /* @@ -168,9 +167,8 @@ struct context_t : context { static size_t color() { flog_assert( task_depth() > 0, "this method can only be called from within a task"); - return Legion::Runtime::get_runtime() - ->get_current_task(Legion::Runtime::get_context()) - ->index_point.point_data[0]; + CkAbort("Can't get color of a task yet\n"); + return 0; } // color /* @@ -180,9 +178,8 @@ struct context_t : context { static size_t colors() { flog_assert( task_depth() > 0, "this method can only be called from within a task"); - return Legion::Runtime::get_runtime() - ->get_current_task(Legion::Runtime::get_context()) - ->index_domain.get_volume(); + CkAbort("Can't get colors of a task yet\n"); + return 0; } // colors /// Store a reference to the argument under a small unused positive integer. @@ -213,151 +210,18 @@ struct context_t : context { return *static_cast(enumerated[i]); } - //--------------------------------------------------------------------------// - // MPI interoperability. - //--------------------------------------------------------------------------// - - /*! 
- Set the MPI user task. When control is given to the MPI runtime - it will execute whichever function is currently set. - */ - - void set_mpi_task(std::function mpi_task) { - { - log::devel_guard guard(context_tag); - flog_devel(info) << "In set_mpi_task" << std::endl; - } - - mpi_task_ = std::move(mpi_task); - } - - /*! - Set the distributed-memory domain. - */ - - void set_all_processes(const LegionRuntime::Arrays::Rect<1> & all_processes) { - all_processes_ = all_processes; - } // all_processes - - /*! - Return the distributed-memory domain. - */ - - const LegionRuntime::Arrays::Rect<1> & all_processes() const { - return all_processes_; - } // all_processes - - /*! - Switch execution to the MPI runtime. - - @param ctx The Legion runtime context. - @param runtime The Legion task runtime pointer. - */ - - void handoff_to_mpi(Legion::Context & ctx, Legion::Runtime * runtime); - - /*! - Wait on the MPI runtime to finish the current task execution. - - @param ctx The Legion runtime context. - @param runtime The Legion task runtime pointer. - - @return A future map with the result of the task execution. - */ - - Legion::FutureMap wait_on_mpi(Legion::Context & ctx, - Legion::Runtime * runtime); - - /*! - Connect with the MPI runtime. - - @param ctx The Legion runtime context. - @param runtime The Legion task runtime pointer. - */ - - void connect_with_mpi(Legion::Context & ctx, Legion::Runtime * runtime); - - //--------------------------------------------------------------------------// - // Task interface. - //--------------------------------------------------------------------------// - private: - /*! - Handoff to legion runtime from MPI. - */ - - void handoff_to_legion() { - { - log::devel_guard guard(context_tag); - flog_devel(info) << "In handoff_to_legion" << std::endl; - } - MPI_Barrier(MPI_COMM_WORLD); - handshake_.mpi_handoff_to_legion(); - } // handoff_to_legion - - /*! - Wait for Legion runtime to complete. 
- */ - - void wait_on_legion() { - { - log::devel_guard guard(context_tag); - flog_devel(info) << "In wait_on_legion" << std::endl; - } - - handshake_.mpi_wait_on_legion(); - MPI_Barrier(MPI_COMM_WORLD); - } // wait_on_legion - - // When GCC fixes bug #83258, these can be lambdas in the public functions: - /*! - Handoff to MPI from Legion. - */ - - static void mpi_handoff() { - instance().handshake_.legion_handoff_to_mpi(); - } - - /*! - Wait for MPI runtime to complete task execution. - */ - - static void mpi_wait() { - instance().handshake_.legion_wait_on_mpi(); - } - - /*! - Invoke the current MPI task, if any, and clear it. - - \return whether there was a task to invoke - */ - - bool invoke_mpi_task() { - const bool ret(mpi_task_); - if(ret) { - mpi_task_(); - mpi_task_ = nullptr; - } - return ret; - } // invoke_mpi_task /*--------------------------------------------------------------------------* Runtime data. *--------------------------------------------------------------------------*/ - charm::CProxy_ContextGroup cgProxy; + charm::CProxy_ContextGroup context_proxy_; // The first element is the head of the free list. std::vector enumerated = {nullptr}; const std::function * top_level_action_ = nullptr; - /*--------------------------------------------------------------------------* - Interoperability data members. 
- *--------------------------------------------------------------------------*/ - - std::function mpi_task_; - Legion::MPILegionHandshake handshake_; - LegionRuntime::Arrays::Rect<1> all_processes_; }; } // namespace flecsi::run From 793fb32ec5d9b5b12dee5f126ea7a23ddb2bf3d8 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Wed, 8 Jul 2020 16:39:44 -0400 Subject: [PATCH 07/19] Get tasks executing inline with non-flecsi data --- flecsi/exec/charm/bind_accessors.hh | 31 +++++++++++------------------ flecsi/exec/charm/policy.hh | 20 +++++++++++-------- flecsi/exec/charm/task_wrapper.hh | 29 +++++++++++---------------- 3 files changed, 36 insertions(+), 44 deletions(-) diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index 4dc6a135c..2e5349cae 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -57,30 +57,28 @@ struct bind_accessors_t : public util::tuple_walker { @param legion_context The Legion task runtime context. 
*/ - bind_accessors_t(Legion::Runtime * legion_runtime, - Legion::Context & legion_context, - std::vector const & regions, - std::vector const & futures) - : legion_runtime_(legion_runtime), legion_context_(legion_context), - regions_(regions), futures_(futures) {} + bind_accessors_t(std::vector buf) + : buf_(buf) {} template void visit(data::accessor & accessor) { - auto & reg = regions_[region++]; + //auto & reg = regions_[region++]; // Legion::FieldAccessor()), - const Legion::UnsafeFieldAccessor> ac(reg, accessor.identifier(), sizeof(DATA_TYPE)); const auto dom = legion_runtime_->get_index_space_domain( legion_context_, reg.get_logical_region().get_index_space()); - const auto r = dom.get_rect<1>(); + const auto r = dom.get_rect<1>();*/ + CkPrintf("Visiting dense data\n"); + bind(accessor, sizeof(DATA_TYPE), buf_.data()[0]); - bind(accessor, + /*bind(accessor, r.hi[0] - r.lo[0] + 1, - ac.ptr(Legion::Domain::DomainPointIterator(dom).p)); + ac.ptr(Legion::Domain::DomainPointIterator(dom).p));*/ } template @@ -98,8 +96,8 @@ struct bind_accessors_t : public util::tuple_walker { *--------------------------------------------------------------------------*/ template void visit(exec::flecsi_future & future) { - future.legion_future_ = futures_[future_id]; - future_id++; + /*future.legion_future_ = futures_[future_id]; + future_id++;*/ } /*--------------------------------------------------------------------------* @@ -118,12 +116,7 @@ struct bind_accessors_t : public util::tuple_walker { } // visit private: - Legion::Runtime * legion_runtime_; - Legion::Context & legion_context_; - size_t region = 0; - const std::vector & regions_; - size_t future_id = 0; - const std::vector & futures_; + std::vector buf_; }; // struct bind_accessors_t diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 124ef9080..fe5d8ba5e 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -104,11 +104,11 @@ reduce(ARGS &&... 
args) { constexpr auto processor_type = mask_to_processor_type(ATTRIBUTES); // Get the Legion runtime and context from the current task. - auto legion_runtime = Legion::Runtime::get_runtime(); - auto legion_context = Legion::Runtime::get_context(); + //auto legion_runtime = Legion::Runtime::get_runtime(); + //auto legion_context = Legion::Runtime::get_context(); #if defined(FLECSI_ENABLE_FLOG) - const size_t tasks_executed = flecsi_context.tasks_executed(); + /*const size_t tasks_executed = flecsi_context.tasks_executed(); if((tasks_executed > 0) && (tasks_executed % FLOG_SERIALIZATION_INTERVAL == 0)) { @@ -147,7 +147,7 @@ reduce(ARGS &&... args) { legion_runtime->execute_index_space(legion_context, flog_mpi_launcher); // Force synchronization - future_mpi.wait_all_results(true); + future_mpi.wait_all_results(true);*/ // Handoff to the MPI runtime. // TODO: This functionality was removed from charm context @@ -156,8 +156,8 @@ reduce(ARGS &&... args) { // Wait for MPI to finish execution (synchronous). // TODO: This functionality was removed from charm context //flecsi_context.wait_on_mpi(legion_context, legion_runtime); - } // if - } // if + //} // if + //} // if #endif // FLECSI_ENABLE_FLOG size_t domain_size = LAUNCH_DOMAIN.size(); @@ -189,9 +189,12 @@ reduce(ARGS &&... args) { //------------------------------------------------------------------------// using wrap = charm::task_wrapper; - const auto task = charm::task_id(wrap::LegionProcessor)>; + //const auto task = charm::task_id(wrap::LegionProcessor)>; + wrap::execute(buf); + return NULL; +#if 0 if constexpr(LAUNCH_DOMAIN == single) { static_assert(std::is_void_v, @@ -316,6 +319,7 @@ reduce(ARGS &&... 
args) { } // if constexpr // return 0; +#endif } // execute_task } // namespace flecsi diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index 9dd61da59..5ca6f7392 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -109,15 +109,15 @@ struct decay> { template auto -tuple_get(const Legion::Task & t) { +tuple_get(const std::vector& buf) { struct Check { const std::byte *b, *e; - Check(const Legion::Task & t) - : b(static_cast(t.args)), e(b + t.arglen) {} + Check(const std::vector& buf) + : b(buf.data()), e(b + buf.size()) {} ~Check() { - flog_assert(b == e, "Bad Task::arglen"); + flog_assert(b == e, "Bad vector::size()"); } - } ch(t); + } ch(buf); return util::serial_get::type>(ch.b); } } // namespace detail @@ -209,10 +209,7 @@ struct task_wrapper { Execution wrapper method for user tasks. */ - static RETURN execute(const Legion::Task * task, - const std::vector & regions, - Legion::Context context, - Legion::Runtime * runtime) { + static RETURN execute(std::vector buf) { { log::devel_guard guard(task_wrapper_tag); flog_devel(info) << "In execute_user_task" << std::endl; @@ -221,9 +218,9 @@ struct task_wrapper { // Unpack task arguments // TODO: Can we deserialize directly into the user's parameters (i.e., do // without finalize_handles)? - auto task_args = detail::tuple_get(*task); + auto task_args = detail::tuple_get(buf); - bind_accessors_t bind_accessors(runtime, context, regions, task->futures); + bind_accessors_t bind_accessors(buf); bind_accessors.walk(task_args); if constexpr(std::is_same_v) { @@ -254,10 +251,7 @@ struct task_wrapper { static constexpr auto LegionProcessor = task_processor_type_t::loc; - static void execute(const Legion::Task * task, - const std::vector &, - Legion::Context, - Legion::Runtime *) { + static void execute(std::vector buf) { // FIXME: Refactor // { // log::devel_guard guard(task_wrapper_tag); @@ -266,8 +260,8 @@ struct task_wrapper { // Unpack task arguments. 
param_tuple * p; - flog_assert(task->arglen == sizeof p, "Bad Task::arglen"); - std::memcpy(&p, task->args, sizeof p); + flog_assert(buf.size() == sizeof p, "Bad Task::arglen"); + std::memcpy(&p, buf.data(), sizeof p); auto & mpi_task_args = *p; // FIXME: Refactor @@ -277,6 +271,7 @@ struct task_wrapper { // Set the MPI function and make the runtime active. auto & c = run::context::instance(); // TODO: Removed from context in charm backend + apply(F, std::move(mpi_task_args)); //c.set_mpi_task([&] { apply(F, std::move(mpi_task_args)); }); // FIXME: Refactor From 0403fb8ae70b8a2bf8bb853e9eaa07bbe3a0a5e1 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Mon, 3 Aug 2020 15:35:05 -0400 Subject: [PATCH 08/19] Quick changes to get global tests passing with basic field reg --- flecsi/exec/charm/bind_accessors.hh | 10 +++--- flecsi/exec/charm/policy.hh | 6 ++-- flecsi/exec/charm/task_prologue.hh | 5 +-- flecsi/exec/charm/task_wrapper.hh | 35 +++++++++++---------- flecsi/run/charm/context.cc | 4 +++ flecsi/run/charm/context.hh | 48 +++++++++++++++++++++++------ 6 files changed, 72 insertions(+), 36 deletions(-) diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index 2e5349cae..310d454ac 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -57,7 +57,7 @@ struct bind_accessors_t : public util::tuple_walker { @param legion_context The Legion task runtime context. 
*/ - bind_accessors_t(std::vector buf) + bind_accessors_t(std::vector& buf) : buf_(buf) {} template @@ -73,8 +73,10 @@ struct bind_accessors_t : public util::tuple_walker { const auto dom = legion_runtime_->get_index_space_domain( legion_context_, reg.get_logical_region().get_index_space()); const auto r = dom.get_rect<1>();*/ - CkPrintf("Visiting dense data\n"); - bind(accessor, sizeof(DATA_TYPE), buf_.data()[0]); + flog_assert(buf_.size() % sizeof(DATA_TYPE) == 0, "Bad buffer size\n"); + auto & flecsi_context = run::context::instance(); + DATA_TYPE* d = (DATA_TYPE*)flecsi_context.getField(accessor.identifier()); + bind(accessor, 1, d); /*bind(accessor, r.hi[0] - r.lo[0] + 1, @@ -116,7 +118,7 @@ struct bind_accessors_t : public util::tuple_walker { } // visit private: - std::vector buf_; + std::vector& buf_; }; // struct bind_accessors_t diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index fe5d8ba5e..0c0961dff 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -189,9 +189,9 @@ reduce(ARGS &&... 
args) { //------------------------------------------------------------------------// using wrap = charm::task_wrapper; - //const auto task = charm::task_id(wrap::LegionProcessor)>; - wrap::execute(buf); + const auto task = charm::task_id(wrap::LegionProcessor)>; + flecsi_context.execute(buf); return NULL; #if 0 diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index a3923d701..d945308df 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -130,7 +130,8 @@ struct task_prologue_t { const data:: field_reference & ref) { - Legion::LogicalRegion region = ref.topology().get().logical_region; + auto & flecsi_context = run::context::instance(); + flecsi_context.regField(ref.fid(), sizeof(DATA_TYPE)); static_assert(privilege_count(PRIVILEGES) == 1, "global topology accessor type only takes one privilege"); @@ -147,7 +148,7 @@ struct task_prologue_t { region); rr.add_field(ref.fid()); - region_reqs_.push_back(rr); + region_reqs_.push_back(rr);*/ } // visit template(); + std::cout << "Registering " << name << std::endl; { log::devel_guard guard(task_wrapper_tag); flog_devel(info) << "registering pure Legion task " << name << std::endl; } - Legion::TaskVariantRegistrar registrar(task_id<*TASK, A>, name.c_str()); - Legion::Processor::Kind kind = processor_type == task_processor_type_t::toc - ? Legion::Processor::TOC_PROC - : Legion::Processor::LOC_PROC; - registrar.add_constraint(Legion::ProcessorConstraint(kind)); - registrar.set_leaf(leaf_task(A)); - registrar.set_inner(inner_task(A)); - registrar.set_idempotent(idempotent_task(A)); + //Legion::TaskVariantRegistrar registrar(task_id<*TASK, A>, name.c_str()); + //Legion::Processor::Kind kind = processor_type == task_processor_type_t::toc + // ? 
Legion::Processor::TOC_PROC + // : Legion::Processor::LOC_PROC; + //registrar.add_constraint(Legion::ProcessorConstraint(kind)); + //registrar.set_leaf(leaf_task(A)); + //registrar.set_inner(inner_task(A)); + //registrar.set_idempotent(idempotent_task(A)); /* This section of conditionals is necessary because there is still @@ -167,13 +168,13 @@ detail::register_task() { Legion. */ - if constexpr(std::is_same_v) { - Legion::Runtime::preregister_task_variant(registrar, name.c_str()); - } - else { - Legion::Runtime::preregister_task_variant( - registrar, name.c_str()); - } // if + //if constexpr(std::is_same_v) { + // Legion::Runtime::preregister_task_variant(registrar, name.c_str()); + //} + //else { + // Legion::Runtime::preregister_task_variant( + // registrar, name.c_str()); + //} // if } // registration_callback // A trivial wrapper for nullary functions. @@ -209,7 +210,7 @@ struct task_wrapper { Execution wrapper method for user tasks. */ - static RETURN execute(std::vector buf) { + static RETURN execute(std::vector& buf) { { log::devel_guard guard(task_wrapper_tag); flog_devel(info) << "In execute_user_task" << std::endl; @@ -251,7 +252,7 @@ struct task_wrapper { static constexpr auto LegionProcessor = task_processor_type_t::loc; - static void execute(std::vector buf) { + static void execute(std::vector& buf) { // FIXME: Refactor // { // log::devel_guard guard(task_wrapper_tag); diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 4d9c1f631..257321509 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -38,6 +38,10 @@ namespace charm { ContextGroup::ContextGroup() { CkPrintf("Group created on %i\n", CkMyPe()); + data = new std::byte[256]; + if (CkMyPe() != 0) { + run::context::instance().context_proxy_ = thisProxy; + } } void ContextGroup::top_level_task() { diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index b990bb232..73c6fca53 100644 --- a/flecsi/run/charm/context.hh +++ 
b/flecsi/run/charm/context.hh @@ -61,15 +61,32 @@ const size_t FLECSI_MAPPER_EXCLUSIVE_LR = 0x00004000; namespace charm { template -using task = R(const Legion::Task *, - const std::vector &, - Legion::Context, - Legion::Runtime *); +using task = R(std::vector&); class ContextGroup : public CBase_ContextGroup { public: ContextGroup(); void top_level_task(); + + template + void execute(std::vector& buf) { + depth++; + T::execute(buf); + depth--; + } + int task_depth() const { + return depth; + } + + void regField(std::size_t i, std::size_t s) {} + + std::byte* getField(std::size_t i) { + return data; + } + +private: + int depth; + std::byte* data; }; } @@ -155,19 +172,18 @@ struct context_t : context { Documentation for this interface is in the top-level context type. */ - static size_t task_depth() { + size_t task_depth() { // TODO: Must be some way to get this from Charm runtime - return 0; + return context_proxy_.ckLocalBranch()->task_depth(); } // task_depth /* Documentation for this interface is in the top-level context type. */ - static size_t color() { + size_t color() { flog_assert( task_depth() > 0, "this method can only be called from within a task"); - CkAbort("Can't get color of a task yet\n"); return 0; } // color @@ -175,10 +191,9 @@ struct context_t : context { Documentation for this interface is in the top-level context type. 
*/ - static size_t colors() { + size_t colors() { flog_assert( task_depth() > 0, "this method can only be called from within a task"); - CkAbort("Can't get colors of a task yet\n"); return 0; } // colors @@ -210,6 +225,19 @@ struct context_t : context { return *static_cast(enumerated[i]); } + template + void execute(std::vector& buf) { + context_proxy_.ckLocalBranch()->execute(buf); + } + + void regField(std::size_t i, std::size_t s) { + context_proxy_.ckLocalBranch()->regField(i, s); + } + + std::byte* getField(std::size_t i) { + return context_proxy_.ckLocalBranch()->getField(i); + } + private: /*--------------------------------------------------------------------------* From e93e4fcc1ad2f7aa46285d75a18a2b65321e57d0 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 4 Aug 2020 15:48:15 -0400 Subject: [PATCH 09/19] Clean out some old legion code from exec --- flecsi/exec/charm/bind_accessors.hh | 18 +-- flecsi/exec/charm/policy.hh | 193 +--------------------------- flecsi/exec/charm/task_prologue.hh | 82 +----------- flecsi/exec/charm/task_wrapper.hh | 29 +---- 4 files changed, 15 insertions(+), 307 deletions(-) diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index 310d454ac..c6d458a05 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -62,25 +62,10 @@ struct bind_accessors_t : public util::tuple_walker { template void visit(data::accessor & accessor) { - //auto & reg = regions_[region++]; - - // Legion::FieldAccessor()), - /*const Legion::UnsafeFieldAccessor> - ac(reg, accessor.identifier(), sizeof(DATA_TYPE)); - const auto dom = legion_runtime_->get_index_space_domain( - legion_context_, reg.get_logical_region().get_index_space()); - const auto r = dom.get_rect<1>();*/ flog_assert(buf_.size() % sizeof(DATA_TYPE) == 0, "Bad buffer size\n"); auto & flecsi_context = run::context::instance(); DATA_TYPE* d = (DATA_TYPE*)flecsi_context.getField(accessor.identifier()); bind(accessor, 
1, d); - - /*bind(accessor, - r.hi[0] - r.lo[0] + 1, - ac.ptr(Legion::Domain::DomainPointIterator(dom).p));*/ } template @@ -98,8 +83,7 @@ struct bind_accessors_t : public util::tuple_walker { *--------------------------------------------------------------------------*/ template void visit(exec::flecsi_future & future) { - /*future.legion_future_ = futures_[future_id]; - future_id++;*/ + CkAbort("Futures not yet supported\n"); } /*--------------------------------------------------------------------------* diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 0c0961dff..967dd01f9 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -103,63 +103,6 @@ reduce(ARGS &&... args) { // Get the processor type. constexpr auto processor_type = mask_to_processor_type(ATTRIBUTES); - // Get the Legion runtime and context from the current task. - //auto legion_runtime = Legion::Runtime::get_runtime(); - //auto legion_context = Legion::Runtime::get_context(); - -#if defined(FLECSI_ENABLE_FLOG) - /*const size_t tasks_executed = flecsi_context.tasks_executed(); - if((tasks_executed > 0) && - (tasks_executed % FLOG_SERIALIZATION_INTERVAL == 0)) { - - size_t processes = flecsi_context.processes(); - LegionRuntime::Arrays::Rect<1> launch_bounds( - LegionRuntime::Arrays::Point<1>(0), - LegionRuntime::Arrays::Point<1>(processes - 1)); - Domain launch_domain = Domain::from_rect<1>(launch_bounds); - - constexpr auto red = [] { - return log::flog_t::instance().packets().size(); - }; - Legion::ArgumentMap arg_map; - Legion::IndexLauncher reduction_launcher(charm::task_id>, - launch_domain, - Legion::TaskArgument(NULL, 0), - arg_map); - - Legion::Future future = legion_runtime->execute_index_space( - legion_context, reduction_launcher, reduction_op>); - - if(future.get_result() > FLOG_SERIALIZATION_THRESHOLD) { - constexpr auto send = [] { - // TODO: This functionality was removed from charm context - 
//run::context::instance().set_mpi_task(log::send_to_one); - }; - Legion::IndexLauncher flog_mpi_launcher(charm::task_id>, - launch_domain, - Legion::TaskArgument(NULL, 0), - arg_map); - - flog_mpi_launcher.tag = run::FLECSI_MAPPER_FORCE_RANK_MATCH; - - // Launch the MPI task - auto future_mpi = - legion_runtime->execute_index_space(legion_context, flog_mpi_launcher); - - // Force synchronization - future_mpi.wait_all_results(true);*/ - - // Handoff to the MPI runtime. - // TODO: This functionality was removed from charm context - //flecsi_context.handoff_to_mpi(legion_context, legion_runtime); - - // Wait for MPI to finish execution (synchronous). - // TODO: This functionality was removed from charm context - //flecsi_context.wait_on_mpi(legion_context, legion_runtime); - //} // if - //} // if -#endif // FLECSI_ENABLE_FLOG - size_t domain_size = LAUNCH_DOMAIN.size(); domain_size = domain_size == 0 ? flecsi_context.processes() : domain_size; @@ -178,8 +121,7 @@ reduce(ARGS &&... args) { const auto p = &*mpi_args; buf.resize(sizeof p); std::memcpy(buf.data(), &p, sizeof p); - } - else { + } else { buf = detail::serial_arguments( static_cast(nullptr), std::forward(args)...); } @@ -191,135 +133,14 @@ reduce(ARGS &&... 
args) { using wrap = charm::task_wrapper; const auto task = charm::task_id(wrap::LegionProcessor)>; - flecsi_context.execute(buf); - return NULL; - -#if 0 - if constexpr(LAUNCH_DOMAIN == single) { - - static_assert(std::is_void_v, - "reductions are not supported for single tasks"); - - { - log::devel_guard guard(execution_tag); - flog_devel(info) << "Executing single task" << std::endl; - } - - TaskLauncher launcher(task, TaskArgument(buf.data(), buf.size())); - // adding region requirements to the launcher - for(auto & req : pro.region_requirements()) { - launcher.add_region_requirement(req); - } // for - - // adding futures to the launcher - launcher.futures = std::move(pro).futures(); - - static_assert(!(is_index_future> || ...), - "can't use index future with single task"); - - if constexpr(processor_type == task_processor_type_t::toc || - processor_type == task_processor_type_t::loc) { - auto future = legion_runtime->execute_task(legion_context, launcher); - - return legion_future{future}; - } - else { - static_assert( - processor_type == task_processor_type_t::mpi, "Unknown launch type"); - flog_fatal("Invalid launch type!" 
- << std::endl - << "Legion backend does not support 'single' launch" - << " for MPI tasks yet"); - } - } - - //------------------------------------------------------------------------// - // Index launch - //------------------------------------------------------------------------// - - else { - - { - log::devel_guard guard(execution_tag); - flog_devel(info) << "Executing index task" << std::endl; - } - - LegionRuntime::Arrays::Rect<1> launch_bounds( - LegionRuntime::Arrays::Point<1>(0), - LegionRuntime::Arrays::Point<1>(domain_size - 1)); - Domain launch_domain = Domain::from_rect<1>(launch_bounds); - - Legion::ArgumentMap arg_map; - Legion::IndexLauncher launcher( - task, launch_domain, TaskArgument(buf.data(), buf.size()), arg_map); - - // adding region requirement to the launcher - for(auto & req : pro.region_requirements()) { - launcher.add_region_requirement(req); - } // for - - // adding futures to the launcher - launcher.futures = std::move(pro).futures(); - launcher.point_futures.assign( - pro.future_maps().begin(), pro.future_maps().end()); - - if constexpr(processor_type == task_processor_type_t::toc || - processor_type == task_processor_type_t::loc) { - flog_devel(info) << "Executing index launch on loc" << std::endl; - - if constexpr(!std::is_void_v) { - flog_devel(info) << "executing reduction logic for " - << util::type() << std::endl; - - Legion::Future future; - - future = legion_runtime->execute_index_space( - legion_context, launcher, reduction_op); - - return legion_future{future}; - } - else { - // Enqueue the task. 
- Legion::FutureMap future_map = - legion_runtime->execute_index_space(legion_context, launcher); - - return legion_future{future_map}; - } // else - } - else { - static_assert( - processor_type == task_processor_type_t::mpi, "Unknown launch type"); - launcher.tag = run::FLECSI_MAPPER_FORCE_RANK_MATCH; - - // Launch the MPI task - auto future = - legion_runtime->execute_index_space(legion_context, launcher); - // Force synchronization - future.wait_all_results(true); - - // Handoff to the MPI runtime. - // TODO: This functionality was removed from charm context - //flecsi_context.handoff_to_mpi(legion_context, legion_runtime); - - // Wait for MPI to finish execution (synchronous). - // We must keep mpi_args alive until then. - // TODO: This functionality was removed from charm context - //flecsi_context.wait_on_mpi(legion_context, legion_runtime); - - if constexpr(!std::is_void_v) { - // FIXME implement logic for reduction MPI task - flog_fatal("there is no implementation for the mpi" - " reduction task"); - } - else { - return legion_future{future}; - } - } - } // if constexpr + // TODO: Right now we just execute tasks inline which doesn't expose any + // paralellism. Tasks should be converted to entry methods in charm or + // something similar, ie charm tasks. + flecsi_context.execute(buf); - // return 0; -#endif + // TODO: Should be able to return a future of some sort + return NULL; } // execute_task } // namespace flecsi diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index d945308df..5f6263d92 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -64,42 +64,7 @@ struct task_prologue_t { @param context The Legion task runtime context. 
*/ - task_prologue_t(const size_t & domain) : domain_(domain) {} - - std::vector const & region_requirements() const { - return region_reqs_; - } // region_requirements - - std::vector && futures() && { - return std::move(futures_); - } // futures - - std::vector const & future_maps() const { - return future_maps_; - } // future_maps - - /*! - Convert the template privileges to proper Legion privileges. - - @param mode privilege - */ - - static Legion::PrivilegeMode privilege_mode(size_t mode) { - switch(mode) { - case size_t(nu): - return WRITE_DISCARD; - case size_t(ro): - return READ_ONLY; - case size_t(wo): - return WRITE_DISCARD; - case size_t(rw): - return READ_WRITE; - default: - flog_fatal("invalid privilege mode"); - } // switch - - return NO_ACCESS; - } // privilege_mode + task_prologue_t(const size_t & domain) {} template void walk(const AA &... aa) { @@ -132,23 +97,6 @@ struct task_prologue_t { ref) { auto & flecsi_context = run::context::instance(); flecsi_context.regField(ref.fid(), sizeof(DATA_TYPE)); - - static_assert(privilege_count(PRIVILEGES) == 1, - "global topology accessor type only takes one privilege"); - - constexpr auto priv = get_privilege(0, PRIVILEGES); - - if(priv > partition_privilege_t::ro) - flog_assert(domain_ == 1, - "global can only be modified from within single launch task"); - - Legion::RegionRequirement rr(region, - priv > partition_privilege_t::ro ? 
privilege_mode(priv) : READ_ONLY, - EXCLUSIVE, - region); - - rr.add_field(ref.fid()); - region_reqs_.push_back(rr);*/ } // visit template * /* parameter */, const data::field_reference & ref) { - auto & instance_data = ref.topology().get().template get_partition(); - - flog_assert(instance_data.colors() == domain_, - "attempting to pass field with " - << instance_data.colors() - << " partitions into task with launch domain of size " << domain_); - - static_assert(privilege_count(PRIVILEGES) == 1, - "accessors for this topology type take only one privilege"); - - Legion::RegionRequirement rr(instance_data.logical_partition, - 0, - privilege_mode(get_privilege(0, PRIVILEGES)), - EXCLUSIVE, - Legion::Runtime::get_runtime()->get_parent_logical_region( - instance_data.logical_partition)); - - rr.add_field(ref.fid()); - region_reqs_.push_back(rr); } // visit template @@ -196,13 +125,13 @@ struct task_prologue_t { void visit(exec::flecsi_future *, const exec::legion_future & future) { - futures_.push_back(future.legion_future_); + CkAbort("Futures not yet supported\n"); } template void visit(exec::flecsi_future *, const exec::legion_future & future) { - future_maps_.push_back(future.legion_future_); + CkAbort("Futures not yet supported\n"); } /*--------------------------------------------------------------------------* @@ -232,11 +161,6 @@ private: (visit(static_cast *>(nullptr), aa), ...); } - size_t domain_; - - std::vector region_reqs_; - std::vector futures_; - std::vector future_maps_; }; // task_prologue_t } // namespace exec::charm diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index 0783e84ff..91286d58c 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -153,31 +153,13 @@ detail::register_task() { flog_devel(info) << "registering pure Legion task " << name << std::endl; } - //Legion::TaskVariantRegistrar registrar(task_id<*TASK, A>, name.c_str()); - //Legion::Processor::Kind kind = 
processor_type == task_processor_type_t::toc - // ? Legion::Processor::TOC_PROC - // : Legion::Processor::LOC_PROC; - //registrar.add_constraint(Legion::ProcessorConstraint(kind)); - //registrar.set_leaf(leaf_task(A)); - //registrar.set_inner(inner_task(A)); - //registrar.set_idempotent(idempotent_task(A)); - - /* - This section of conditionals is necessary because there is still - a distinction between void and non-void task registration with - Legion. - */ + // TODO: At this point we would register some task information with the + // Charm++ runtime - //if constexpr(std::is_same_v) { - // Legion::Runtime::preregister_task_variant(registrar, name.c_str()); - //} - //else { - // Legion::Runtime::preregister_task_variant( - // registrar, name.c_str()); - //} // if } // registration_callback // A trivial wrapper for nullary functions. +// TODO: Need a charm++ replacement for this? template auto verb(const Legion::Task *, @@ -269,11 +251,8 @@ struct task_wrapper { // init_handles_t init_handles(runtime, context, regions, task->futures); // init_handles.walk(mpi_task_args); - // Set the MPI function and make the runtime active. - auto & c = run::context::instance(); - // TODO: Removed from context in charm backend + // TODO: Is more needed for synchronization with an "MPI" task? 
apply(F, std::move(mpi_task_args)); - //c.set_mpi_task([&] { apply(F, std::move(mpi_task_args)); }); // FIXME: Refactor // finalize_handles_t finalize_handles; From fd84b482d52fda27946f398417a07f926b383143 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Wed, 5 Aug 2020 16:01:49 -0400 Subject: [PATCH 10/19] More aggressively remove legion from charm build --- CMakeLists.txt | 17 +--- cmake/charm.cmake | 13 --- cmake/unit.cmake | 12 +-- flecsi/data/charm/policy.hh | 122 +++++++++++++++++++++++-- flecsi/exec/charm/bind_accessors.hh | 6 +- flecsi/exec/charm/future.hh | 53 ++++++----- flecsi/exec/charm/policy.hh | 7 +- flecsi/exec/charm/reduction_wrapper.hh | 14 ++- flecsi/exec/charm/task_prologue.hh | 10 +- flecsi/exec/charm/task_wrapper.hh | 11 +-- flecsi/exec/charm/unbind_accessors.hh | 6 +- flecsi/run/charm/context.cc | 2 - flecsi/run/charm/context.hh | 6 -- flecsi/run/charm/mapper.hh | 10 +- flecsi/run/types.hh | 11 ++- 15 files changed, 177 insertions(+), 123 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1b922b193..f11df1217 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -130,8 +130,8 @@ if(NOT FORMAT_ONLY) set(ENABLE_HPX ON CACHE BOOL "Enable HPX" FORCE) elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") set(ENABLE_MPI ON CACHE BOOL "Enable MPI" FORCE) - set(ENABLE_LEGION ON CACHE BOOL "Enable Legion" FORCE) set(ENABLE_CHARM ON CACHE BOOL "Enable Charm" FORCE) + set(ENABLE_LEGION OFF CACHE BOOL "Enable Legion" FORCE) endif() mark_as_advanced(ENABLE_MPI ENABLE_LEGION ENABLE_HPX ENABLE_CHARM) @@ -297,22 +297,9 @@ if(NOT FORMAT_ONLY) message (FATAL_ERROR "MPI is required for the charm runtime model") endif() - if(NOT Legion_FOUND) - message (FATAL_ERROR "Legion is required for the charm runtime model") - endif() - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/charm) - set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${Legion_LIBRARIES} - ${MPI_LIBRARIES}) - - list(APPEND FLECSI_INCLUDE_DEPENDENCIES ${Legion_INCLUDE_DIRS}) - - # - # Compacted storage 
interface - # - option(ENABLE_MAPPER_COMPACTION "Enable Legion Mapper compaction" ON) - mark_as_advanced(ENABLE_MAPPER_COMPACTION) + set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) #----------------------------------------------------------------------------# # Default diff --git a/cmake/charm.cmake b/cmake/charm.cmake index ee1764cab..d9c048115 100644 --- a/cmake/charm.cmake +++ b/cmake/charm.cmake @@ -16,21 +16,8 @@ option(ENABLE_CHARM "Enable Charm" OFF) if(ENABLE_CHARM) - find_package(Legion REQUIRED) - - if(NOT Legion_FOUND) - message(FATAL_ERROR "Legion is required for this build configuration") - endif(NOT Legion_FOUND) - - set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${LEGION_INSTALL_DIRS}) - - include_directories(SYSTEM ${Legion_INCLUDE_DIRS}) - - add_definitions(-DLEGION_USE_CMAKE) add_definitions(-DREALM_USE_CMAKE) - list(APPEND FLECSI_LIBRARY_DEPENDENCIES ${Legion_LIBRARIES}) - file(GLOB_RECURSE ci-files ${CMAKE_SOURCE_DIR}/flecsi/*.ci) foreach(in_file ${ci-files}) diff --git a/cmake/unit.cmake b/cmake/unit.cmake index 9e79d61eb..6902a60e4 100644 --- a/cmake/unit.cmake +++ b/cmake/unit.cmake @@ -124,15 +124,11 @@ function(add_unit name) set(unit_policy_exec_postflags ${MPIEXEC_POSTFLAGS}) elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm" - AND MPI_${MPI_LANGUAGE}_FOUND - AND Legion_FOUND) + AND MPI_${MPI_LANGUAGE}_FOUND) - set(unit_policy_flags ${Legion_CXX_FLAGS} - ${MPI_${MPI_LANGUAGE}_COMPILE_FLAGS}) - set(unit_policy_includes ${Legion_INCLUDE_DIRS} - ${MPI_${MPI_LANGUAGE}_INCLUDE_PATH}) - set(unit_policy_libraries ${Legion_LIBRARIES} ${Legion_LIB_FLAGS} - ${MPI_${MPI_LANGUAGE}_LIBRARIES}) + set(unit_policy_flags ${MPI_${MPI_LANGUAGE}_COMPILE_FLAGS}) + set(unit_policy_includes ${MPI_${MPI_LANGUAGE}_INCLUDE_PATH}) + set(unit_policy_libraries ${MPI_${MPI_LANGUAGE}_LIBRARIES}) set(unit_policy_exec ${MPIEXEC}) set(unit_policy_exec_threads ${MPIEXEC_NUMPROC_FLAG}) set(unit_policy_exec_preflags ${MPIEXEC_PREFLAGS}) diff --git 
a/flecsi/data/charm/policy.hh b/flecsi/data/charm/policy.hh index 5d4499f1b..21fa24439 100644 --- a/flecsi/data/charm/policy.hh +++ b/flecsi/data/charm/policy.hh @@ -18,18 +18,16 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - flog_register_tag(topologies); namespace flecsi { @@ -42,6 +40,27 @@ struct topology_id { topology_id(const topology_id &) : topology_id() {} ~topology_id() { runtime::context_t::instance().forget(id); + +namespace charm { + +#if 0 +inline auto & +run() { + return *Legion::Runtime::get_runtime(); +} +inline auto +ctx() { + return Legion::Runtime::get_context(); +} + +template +struct unique_handle { + unique_handle() = default; + unique_handle(T t) : h(t) {} + unique_handle(unique_handle && u) noexcept : h(std::exchange(u.h, {})) {} + ~unique_handle() { + if(*this) // it's not clear whether empty handles can be deleted + (run().*D)(ctx(), h, false); } topology_id & operator=(const topology_id &) noexcept { return *this; @@ -95,6 +114,93 @@ inline topology_data::topology_data( // NOTE THAT THE HANDLE TYPE FOR THIS TYPE WILL NEED TO CAPTURE THE // UNDERLYING TOPOLOGY TYPE, i.e., topology::mesh_t +using unique_index_space = + unique_handle; +// Legion seems to be buggy with destroying partitions: +using unique_index_partition = Legion::IndexPartition; +using unique_field_space = + unique_handle; +using unique_logical_region = unique_handle; +using unique_logical_partition = Legion::LogicalPartition; + +inline unique_index_space +index1(std::size_t n) { + return run().create_index_space(ctx(), Legion::Rect<1>(0, n - 1)); +} +#endif + +struct region { + region(std::size_t n, const fields & fs) {} +#if 0 + : index_space(index1(n)), + field_space([&fs] { 
// TIP: IIFE (q.v.) allows statements here + auto & r = run(); + const auto c = ctx(); + unique_field_space ret = r.create_field_space(c); + Legion::FieldAllocator allocator = r.create_field_allocator(c, ret); + for(auto const & fi : fs) + allocator.allocate_field(fi->type_size, fi->fid); + return ret; + }()), + logical_region( + run().create_logical_region(ctx(), index_space, field_space)) {} + + unique_index_space index_space; + unique_field_space field_space; + unique_logical_region logical_region; +#endif +}; + +struct partition { + // TODO: support create_partition_by_image_range case + template + partition(const region & reg, + std::size_t n, + F f, + disjointness dis = {}, + completeness cpt = {}) {} +#if 0 + : color_space(index1(n)), + index_partition(run().create_partition_by_domain( + ctx(), + reg.index_space, + [&] { + std::map ret; + for(std::size_t i = 0; i < n; ++i) { + // NB: reg.index_space is assumed to be one-dimensional. + const auto [b, e] = f(i); + ret.try_emplace(i, Legion::Rect<1>(b, e - 1)); + } + return ret; + }(), + color_space, + true, + Legion::PartitionKind((dis + 2) % 3 + (cpt + 3) % 3 * 3))), + logical_partition(run().get_logical_partition(ctx(), + reg.logical_region, + index_partition)) {} +#endif + + std::size_t colors() const { + //return run().get_index_space_domain(color_space).get_volume(); + return 1; + } + +#if 0 + unique_index_space color_space; + unique_index_partition index_partition; + unique_logical_partition logical_partition; +#endif + + template + const partition & get_partition() const { + return *this; + } +}; +} // namespace charm + +using charm::region, charm::partition; // for backend-agnostic interface } // namespace data } // namespace flecsi diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index c6d458a05..50883875a 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -29,12 +29,10 @@ #include "flecsi/util/demangle.hh" #include 
"flecsi/util/tuple_walker.hh" -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - namespace flecsi { inline log::devel_tag bind_accessors_tag("bind_accessors"); diff --git a/flecsi/exec/charm/future.hh b/flecsi/exec/charm/future.hh index e27d0f75a..57116483d 100644 --- a/flecsi/exec/charm/future.hh +++ b/flecsi/exec/charm/future.hh @@ -24,12 +24,10 @@ #include "flecsi/exec/launch.hh" #include "flecsi/run/backend.hh" -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - #include #include #include @@ -38,7 +36,7 @@ namespace flecsi { namespace exec { /*! - Base legion future type. + Base charm future type. @tparam Return The return type of the task. @tparam Launch FleCSI launch type: single/index. @@ -46,22 +44,22 @@ namespace exec { @ingroup legion-execution */ template -struct legion_future; +struct charm_future; -/*! Partial specialization for the Legion:Future +/*! Partial specialization for the charm future @tparam Return The return type of the task. @ingroup legion-execution */ template -struct legion_future { +struct charm_future { /*! Wait on a task result. */ void wait() { - legion_future_.wait(); + //charm_future_.wait(); } // wait /*! 
@@ -69,18 +67,20 @@ struct legion_future { */ Return get(bool silence_warnings = false) { if constexpr(std::is_same_v) - return legion_future_.get_void_result(silence_warnings); + //return charm_future_.get_void_result(silence_warnings); + return; else - return legion_future_.get_result(silence_warnings); + return Return(); + //return charm_future_.get_result(silence_warnings); } // get - Legion::Future legion_future_; -}; // legion_future + //Legion::Future charm_future_; +}; // charm_future template -struct legion_future { +struct charm_future { - explicit operator legion_future() const { + explicit operator charm_future() const { return {}; } @@ -88,7 +88,7 @@ struct legion_future { Wait on a task result. */ void wait(bool silence_warnings = false) { - legion_future_.wait_all_results(silence_warnings); + //charm_future_.wait_all_results(silence_warnings); } // wait /*! @@ -97,27 +97,30 @@ struct legion_future { Return get(size_t index = 0, bool silence_warnings = false) { if constexpr(std::is_same_v) - return legion_future_.get_void_result(index, silence_warnings); + //return charm_future_.get_void_result(index, silence_warnings); + return; else - return legion_future_.get_result( - Legion::DomainPoint::from_point<1>( - LegionRuntime::Arrays::Point<1>(index)), - silence_warnings); + //return charm_future_.get_result( + // Legion::DomainPoint::from_point<1>( + // LegionRuntime::Arrays::Point<1>(index)), + // silence_warnings); + return Return(); } // get - Legion::FutureMap legion_future_; + //Legion::FutureMap charm_future_; -}; // struct legion_future +}; // struct charm_future //----------------------------------------------------------------------- + template -using flecsi_future = legion_future; +using flecsi_future = charm_future; template constexpr bool is_index_future = false; template -constexpr bool is_index_future> = true; +constexpr bool is_index_future> = true; } // namespace exec } // namespace flecsi diff --git a/flecsi/exec/charm/policy.hh 
b/flecsi/exec/charm/policy.hh index 967dd01f9..e3904f5cf 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -35,12 +35,10 @@ #include #include -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - namespace flecsi { inline log::devel_tag execution_tag("execution"); @@ -87,7 +85,6 @@ template decltype(auto) reduce(ARGS &&... args) { - using namespace Legion; using namespace exec; using traits_t = util::function_traits; diff --git a/flecsi/exec/charm/reduction_wrapper.hh b/flecsi/exec/charm/reduction_wrapper.hh index 91151bddb..dd44e1397 100644 --- a/flecsi/exec/charm/reduction_wrapper.hh +++ b/flecsi/exec/charm/reduction_wrapper.hh @@ -24,8 +24,6 @@ #include "flecsi/util/demangle.hh" #include -#include - namespace flecsi { inline log::devel_tag reduction_wrapper_tag("reduction_wrapper"); @@ -40,14 +38,14 @@ namespace detail { template void register_reduction(); -inline Legion::ReductionOpID reduction_id; +//inline Legion::ReductionOpID reduction_id; } // namespace detail // NB: 0 is reserved by Legion. 
-template -inline const Legion::ReductionOpID reduction_op = - (run::context::instance().register_init(detail::register_reduction), - ++detail::reduction_id); +//template +//inline const Legion::ReductionOpID reduction_op = +// (run::context::instance().register_init(detail::register_reduction), +// ++detail::reduction_id); template void @@ -59,7 +57,7 @@ detail::register_reduction() { } // Register the operation with the Legion runtime - Legion::Runtime::register_reduction_op(reduction_op); + //Legion::Runtime::register_reduction_op(reduction_op); } } // namespace exec diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index 5f6263d92..95c6693fa 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -34,12 +34,10 @@ #include "flecsi/util/demangle.hh" #include "flecsi/util/tuple_walker.hh" -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_LEGION not defined! This file depends on Charm! #endif -#include - namespace flecsi { inline log::devel_tag task_prologue_tag("task_prologue"); @@ -123,14 +121,14 @@ struct task_prologue_t { *--------------------------------------------------------------------------*/ template void visit(exec::flecsi_future *, - const exec::legion_future & + const exec::charm_future & future) { CkAbort("Futures not yet supported\n"); } template void visit(exec::flecsi_future *, - const exec::legion_future & future) { + const exec::charm_future & future) { CkAbort("Futures not yet supported\n"); } diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index 91286d58c..82e4ff13d 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -32,12 +32,10 @@ #include "unbind_accessors.hh" #include -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! 
+#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - #include #include @@ -162,10 +160,7 @@ detail::register_task() { // TODO: Need a charm++ replacement for this? template auto -verb(const Legion::Task *, - const std::vector &, - Legion::Context, - Legion::Runtime *) { +verb(std::vector& buf) { return F(); } diff --git a/flecsi/exec/charm/unbind_accessors.hh b/flecsi/exec/charm/unbind_accessors.hh index 5633ec5b8..1770dde55 100644 --- a/flecsi/exec/charm/unbind_accessors.hh +++ b/flecsi/exec/charm/unbind_accessors.hh @@ -27,12 +27,10 @@ #include "flecsi/util/demangle.hh" #include "flecsi/util/tuple_walker.hh" -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include - namespace flecsi { inline log::devel_tag unbind_accessors_tag("unbind_accessors"); diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 257321509..4648f7c91 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -101,8 +101,6 @@ context_t::finalize() { int context_t::start(const std::function & action) { - using namespace Legion; - /* Store the top-level action for invocation from the top-level task. */ diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 73c6fca53..0cd3c3bda 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -27,12 +27,6 @@ #include #include -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! -#endif - -#include - #if !defined(FLECSI_ENABLE_MPI) #error FLECSI_ENABLE_MPI not defined! This file depends on MPI! 
#endif diff --git a/flecsi/run/charm/mapper.hh b/flecsi/run/charm/mapper.hh index 8ed3311e7..0290f3585 100644 --- a/flecsi/run/charm/mapper.hh +++ b/flecsi/run/charm/mapper.hh @@ -23,19 +23,16 @@ #include "../backend.hh" -#if !defined(FLECSI_ENABLE_LEGION) -#error FLECSI_ENABLE_LEGION not defined! This file depends on Legion! +#if !defined(FLECSI_ENABLE_CHARM) +#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -#include -#include -#include - namespace flecsi { inline log::devel_tag legion_mapper_tag("legion_mapper"); namespace run { +#if 0 /* The mpi_mapper_t - is a custom mapper that handles mpi-legion @@ -313,6 +310,7 @@ mapper_registration(Legion::Machine machine, rt->replace_default_mapper(mapper, *it); } } // mapper registration +#endif } // namespace run } // namespace flecsi diff --git a/flecsi/run/types.hh b/flecsi/run/types.hh index 0a1a96be5..eb6f31307 100644 --- a/flecsi/run/types.hh +++ b/flecsi/run/types.hh @@ -72,15 +72,16 @@ const task_id_t TASK_ID_MAX = std::numeric_limits::max(); #elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm -#include +#include +#include namespace flecsi { -using field_id_t = Legion::FieldID; -const field_id_t FIELD_ID_MAX = LEGION_MAX_APPLICATION_FIELD_ID; +using field_id_t = size_t; +const field_id_t FIELD_ID_MAX = std::numeric_limits::max(); -using task_id_t = Legion::TaskID; -const task_id_t TASK_ID_MAX = LEGION_MAX_APPLICATION_TASK_ID; +using task_id_t = size_t; +const task_id_t TASK_ID_MAX = std::numeric_limits::max(); } // namespace flecsi From 1f342c03738031de1a085d817f5313a301e560f4 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Thu, 6 Aug 2020 18:23:44 -0400 Subject: [PATCH 11/19] Add dummy future and make test tasks function properly --- flecsi/exec/charm/future.hh | 67 +++++++++++-------------------- flecsi/exec/charm/policy.hh | 19 +++++++-- flecsi/exec/charm/task_wrapper.hh | 8 +++- flecsi/run/charm/context.hh | 8 ++-- 4 files changed, 49 insertions(+), 53 deletions(-) diff --git 
a/flecsi/exec/charm/future.hh b/flecsi/exec/charm/future.hh index 57116483d..ea46d44cc 100644 --- a/flecsi/exec/charm/future.hh +++ b/flecsi/exec/charm/future.hh @@ -46,70 +46,51 @@ namespace exec { template struct charm_future; -/*! Partial specialization for the charm future - - @tparam Return The return type of the task. - - @ingroup legion-execution - */ -template -struct charm_future { +template +struct charm_future { + charm_future(Return r) : return_(r) {} /*! Wait on a task result. - */ - void wait() { - //charm_future_.wait(); + */ + void wait(bool silence_warnings = false) { + //charm_future_.wait_all_results(silence_warnings); } // wait /*! Get a task result. */ - Return get(bool silence_warnings = false) { - if constexpr(std::is_same_v) - //return charm_future_.get_void_result(silence_warnings); - return; - else - return Return(); - //return charm_future_.get_result(silence_warnings); + + Return get(size_t index = 0, bool silence_warnings = false) { + return return_; } // get - //Legion::Future charm_future_; -}; // charm_future + Return return_; +}; // struct charm_future -template -struct charm_future { +/*! Partial specialization for the charm future + + @tparam Return The return type of the task. - explicit operator charm_future() const { - return {}; - } + @ingroup legion-execution + */ +template +struct charm_future { /*! Wait on a task result. - */ - void wait(bool silence_warnings = false) { - //charm_future_.wait_all_results(silence_warnings); + */ + void wait() { + //charm_future_.wait(); } // wait /*! Get a task result. 
*/ - - Return get(size_t index = 0, bool silence_warnings = false) { - if constexpr(std::is_same_v) - //return charm_future_.get_void_result(index, silence_warnings); - return; - else - //return charm_future_.get_result( - // Legion::DomainPoint::from_point<1>( - // LegionRuntime::Arrays::Point<1>(index)), - // silence_warnings); - return Return(); + void get(bool silence_warnings = false) { + return; } // get - - //Legion::FutureMap charm_future_; - -}; // struct charm_future +}; // charm_future //----------------------------------------------------------------------- diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index e3904f5cf..81588ffff 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -134,10 +134,21 @@ reduce(ARGS &&... args) { // TODO: Right now we just execute tasks inline which doesn't expose any // paralellism. Tasks should be converted to entry methods in charm or // something similar, ie charm tasks. - flecsi_context.execute(buf); - - // TODO: Should be able to return a future of some sort - return NULL; + if constexpr(LAUNCH_DOMAIN == single) { + if constexpr(std::is_same_v) { + flecsi_context.execute(buf); + return charm_future(); + } else { + return charm_future(flecsi_context.execute(buf)); + } + } else { + if constexpr(std::is_same_v) { + flecsi_context.execute(buf); + return charm_future(); + } else { + return charm_future(flecsi_context.execute(buf)); + } + } } // execute_task } // namespace flecsi diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index 82e4ff13d..deb872e52 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -229,7 +229,7 @@ struct task_wrapper { static constexpr auto LegionProcessor = task_processor_type_t::loc; - static void execute(std::vector& buf) { + static RETURN execute(std::vector& buf) { // FIXME: Refactor // { // log::devel_guard guard(task_wrapper_tag); @@ -247,7 +247,11 @@ struct task_wrapper { 
// init_handles.walk(mpi_task_args); // TODO: Is more needed for synchronization with an "MPI" task? - apply(F, std::move(mpi_task_args)); + if constexpr(std::is_same_v) { + apply(F, std::move(mpi_task_args)); + } else { + return apply(F, std::move(mpi_task_args)); + } // FIXME: Refactor // finalize_handles_t finalize_handles; diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 0cd3c3bda..3f1941780 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -63,9 +63,9 @@ public: void top_level_task(); template - void execute(std::vector& buf) { + auto execute(std::vector& buf) { depth++; - T::execute(buf); + return T::execute(buf); depth--; } int task_depth() const { @@ -220,8 +220,8 @@ struct context_t : context { } template - void execute(std::vector& buf) { - context_proxy_.ckLocalBranch()->execute(buf); + auto execute(std::vector& buf) { + return context_proxy_.ckLocalBranch()->execute(buf); } void regField(std::size_t i, std::size_t s) { From 921383912072a534acf4a8b51d04ec8d2cd143eb Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Fri, 7 Aug 2020 15:46:08 -0400 Subject: [PATCH 12/19] Fix test build dependency --- cmake/unit.cmake | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmake/unit.cmake b/cmake/unit.cmake index 6902a60e4..05e5b1374 100644 --- a/cmake/unit.cmake +++ b/cmake/unit.cmake @@ -23,7 +23,14 @@ mark_as_advanced(ENABLE_EXPENSIVE_TESTS) if(ENABLE_UNIT_TESTS) enable_testing() - add_library(unit-main OBJECT ${CMAKE_SOURCE_DIR}/flecsi/util/unit/main.cc) + + if(FLECSI_RUNTIME_MODEL STREQUAL "charm") + # Ensure that decl and def headers are generated before tests are compiled + add_library(unit-main OBJECT + ${CMAKE_SOURCE_DIR}/flecsi/util/unit/main.cc ${all-ci-outputs}) + else() + add_library(unit-main OBJECT ${CMAKE_SOURCE_DIR}/flecsi/util/unit/main.cc) + endif() endif() function(add_unit name) From 50b1debdd6dd86a00622a73213c54cec67a82138 Mon Sep 17 00:00:00 2001 From: Eric Mikida 
Date: Mon, 10 Aug 2020 16:20:45 -0400 Subject: [PATCH 13/19] Add more to field registration code --- flecsi/exec/charm/task_prologue.hh | 2 ++ flecsi/run/charm/context.cc | 1 - flecsi/run/charm/context.hh | 12 +++++++++--- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index 95c6693fa..d54c0676a 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -105,6 +105,8 @@ struct task_prologue_t { void visit( data::accessor * /* parameter */, const data::field_reference & ref) { + auto & flecsi_context = run::context::instance(); + flecsi_context.regField(ref.fid(), sizeof(DATA_TYPE)); } // visit template diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 4648f7c91..bc9cfbbfd 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -38,7 +38,6 @@ namespace charm { ContextGroup::ContextGroup() { CkPrintf("Group created on %i\n", CkMyPe()); - data = new std::byte[256]; if (CkMyPe() != 0) { run::context::instance().context_proxy_ = thisProxy; } diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 3f1941780..9ee51740e 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -72,15 +72,21 @@ public: return depth; } - void regField(std::size_t i, std::size_t s) {} + void regField(std::size_t i, std::size_t s) { + if (data_map.count(i)) return; + data_map[i] = data.size(); + data.push_back(new std::byte[s]); + } std::byte* getField(std::size_t i) { - return data; + CkAssert(data_map.count(i)); + return data[data_map[i]]; } private: int depth; - std::byte* data; + std::unordered_map data_map; + std::vector data; }; } From 240a8b486a8f270ed8f96c450fc283bb31901779 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 11 Aug 2020 14:53:41 -0400 Subject: [PATCH 14/19] Remove decl and def files from repo --- flecsi/run/charm/context.decl.h | 331 
-------------------------------- flecsi/run/charm/context.def.h | 208 -------------------- 2 files changed, 539 deletions(-) delete mode 100644 flecsi/run/charm/context.decl.h delete mode 100644 flecsi/run/charm/context.def.h diff --git a/flecsi/run/charm/context.decl.h b/flecsi/run/charm/context.decl.h deleted file mode 100644 index 06a1deca3..000000000 --- a/flecsi/run/charm/context.decl.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef _DECL_context_H_ -#define _DECL_context_H_ -#include "charm++.h" -#include "envelope.h" -#include -#include "sdag.h" -namespace flecsi { -namespace run { -namespace charm { -/* DECLS: group ContextGroup: IrrGroup{ -ContextGroup(); -void top_level_task(); -}; - */ - class ContextGroup; - class CkIndex_ContextGroup; - class CProxy_ContextGroup; - class CProxyElement_ContextGroup; - class CProxySection_ContextGroup; -/* --------------- index object ------------------ */ -class CkIndex_ContextGroup:public CkIndex_IrrGroup{ - public: - typedef ContextGroup local_t; - typedef CkIndex_ContextGroup index_t; - typedef CProxy_ContextGroup proxy_t; - typedef CProxyElement_ContextGroup element_t; - typedef CProxySection_ContextGroup section_t; - - static int __idx; - static void __register(const char *s, size_t size); - /* DECLS: ContextGroup(); - */ - // Entry point registration at startup - - static int reg_ContextGroup_void(); - // Entry point index lookup - - inline static int idx_ContextGroup_void() { - static int epidx = reg_ContextGroup_void(); - return epidx; - } - - - static int ckNew() { return idx_ContextGroup_void(); } - - static void _call_ContextGroup_void(void* impl_msg, void* impl_obj); - - static void _call_sdag_ContextGroup_void(void* impl_msg, void* impl_obj); - /* DECLS: void top_level_task(); - */ - // Entry point registration at startup - - static int reg_top_level_task_void(); - // Entry point index lookup - - inline static int idx_top_level_task_void() { - static int epidx = reg_top_level_task_void(); - return epidx; - } - - 
- inline static int idx_top_level_task(void (ContextGroup::*)() ) { - return idx_top_level_task_void(); - } - - - - static int top_level_task() { return idx_top_level_task_void(); } - - static void _call_top_level_task_void(void* impl_msg, void* impl_obj); - - static void _call_sdag_top_level_task_void(void* impl_msg, void* impl_obj); -}; -/* --------------- element proxy ------------------ */ -class CProxyElement_ContextGroup: public CProxyElement_IrrGroup{ - public: - typedef ContextGroup local_t; - typedef CkIndex_ContextGroup index_t; - typedef CProxy_ContextGroup proxy_t; - typedef CProxyElement_ContextGroup element_t; - typedef CProxySection_ContextGroup section_t; - - - /* TRAM aggregators */ - - CProxyElement_ContextGroup(void) { - } - CProxyElement_ContextGroup(const IrrGroup *g) : CProxyElement_IrrGroup(g){ - } - CProxyElement_ContextGroup(CkGroupID _gid,int _onPE,CK_DELCTOR_PARAM) : CProxyElement_IrrGroup(_gid,_onPE,CK_DELCTOR_ARGS){ - } - CProxyElement_ContextGroup(CkGroupID _gid,int _onPE) : CProxyElement_IrrGroup(_gid,_onPE){ - } - - int ckIsDelegated(void) const - { return CProxyElement_IrrGroup::ckIsDelegated(); } - inline CkDelegateMgr *ckDelegatedTo(void) const - { return CProxyElement_IrrGroup::ckDelegatedTo(); } - inline CkDelegateData *ckDelegatedPtr(void) const - { return CProxyElement_IrrGroup::ckDelegatedPtr(); } - CkGroupID ckDelegatedIdx(void) const - { return CProxyElement_IrrGroup::ckDelegatedIdx(); } -inline void ckCheck(void) const {CProxyElement_IrrGroup::ckCheck();} -CkChareID ckGetChareID(void) const - {return CProxyElement_IrrGroup::ckGetChareID();} -CkGroupID ckGetGroupID(void) const - {return CProxyElement_IrrGroup::ckGetGroupID();} -operator CkGroupID () const { return ckGetGroupID(); } - - inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const - { CProxyElement_IrrGroup::setReductionClient(fn,param); } - inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const - { 
CProxyElement_IrrGroup::ckSetReductionClient(fn,param); } - inline void ckSetReductionClient(CkCallback *cb) const - { CProxyElement_IrrGroup::ckSetReductionClient(cb); } -int ckGetGroupPe(void) const -{return CProxyElement_IrrGroup::ckGetGroupPe();} - - void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) - { CProxyElement_IrrGroup::ckDelegate(dTo,dPtr); } - void ckUndelegate(void) - { CProxyElement_IrrGroup::ckUndelegate(); } - void pup(PUP::er &p) - { CProxyElement_IrrGroup::pup(p); - } - void ckSetGroupID(CkGroupID g) { - CProxyElement_IrrGroup::ckSetGroupID(g); - } - ContextGroup* ckLocalBranch(void) const { - return ckLocalBranch(ckGetGroupID()); - } - static ContextGroup* ckLocalBranch(CkGroupID gID) { - return (ContextGroup*)CkLocalBranch(gID); - } -/* DECLS: ContextGroup(); - */ - - -/* DECLS: void top_level_task(); - */ - - void top_level_task(const CkEntryOptions *impl_e_opts=NULL); - -}; -/* ---------------- collective proxy -------------- */ -class CProxy_ContextGroup: public CProxy_IrrGroup{ - public: - typedef ContextGroup local_t; - typedef CkIndex_ContextGroup index_t; - typedef CProxy_ContextGroup proxy_t; - typedef CProxyElement_ContextGroup element_t; - typedef CProxySection_ContextGroup section_t; - - CProxy_ContextGroup(void) { - } - CProxy_ContextGroup(const IrrGroup *g) : CProxy_IrrGroup(g){ - } - CProxy_ContextGroup(CkGroupID _gid,CK_DELCTOR_PARAM) : CProxy_IrrGroup(_gid,CK_DELCTOR_ARGS){ } - CProxy_ContextGroup(CkGroupID _gid) : CProxy_IrrGroup(_gid){ } - CProxyElement_ContextGroup operator[](int onPE) const - {return CProxyElement_ContextGroup(ckGetGroupID(),onPE,CK_DELCTOR_CALL);} - - int ckIsDelegated(void) const - { return CProxy_IrrGroup::ckIsDelegated(); } - inline CkDelegateMgr *ckDelegatedTo(void) const - { return CProxy_IrrGroup::ckDelegatedTo(); } - inline CkDelegateData *ckDelegatedPtr(void) const - { return CProxy_IrrGroup::ckDelegatedPtr(); } - CkGroupID ckDelegatedIdx(void) const - { return 
CProxy_IrrGroup::ckDelegatedIdx(); } -inline void ckCheck(void) const {CProxy_IrrGroup::ckCheck();} -CkChareID ckGetChareID(void) const - {return CProxy_IrrGroup::ckGetChareID();} -CkGroupID ckGetGroupID(void) const - {return CProxy_IrrGroup::ckGetGroupID();} -operator CkGroupID () const { return ckGetGroupID(); } - - inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const - { CProxy_IrrGroup::setReductionClient(fn,param); } - inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const - { CProxy_IrrGroup::ckSetReductionClient(fn,param); } - inline void ckSetReductionClient(CkCallback *cb) const - { CProxy_IrrGroup::ckSetReductionClient(cb); } - - void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) - { CProxy_IrrGroup::ckDelegate(dTo,dPtr); } - void ckUndelegate(void) - { CProxy_IrrGroup::ckUndelegate(); } - void pup(PUP::er &p) - { CProxy_IrrGroup::pup(p); - } - void ckSetGroupID(CkGroupID g) { - CProxy_IrrGroup::ckSetGroupID(g); - } - ContextGroup* ckLocalBranch(void) const { - return ckLocalBranch(ckGetGroupID()); - } - static ContextGroup* ckLocalBranch(CkGroupID gID) { - return (ContextGroup*)CkLocalBranch(gID); - } -/* DECLS: ContextGroup(); - */ - - static CkGroupID ckNew(const CkEntryOptions *impl_e_opts=NULL); - -/* DECLS: void top_level_task(); - */ - - void top_level_task(const CkEntryOptions *impl_e_opts=NULL); - - void top_level_task(int npes, int *pes, const CkEntryOptions *impl_e_opts=NULL); - - void top_level_task(CmiGroup &grp, const CkEntryOptions *impl_e_opts=NULL); - -}; -/* ---------------- section proxy -------------- */ -class CProxySection_ContextGroup: public CProxySection_IrrGroup{ - public: - typedef ContextGroup local_t; - typedef CkIndex_ContextGroup index_t; - typedef CProxy_ContextGroup proxy_t; - typedef CProxyElement_ContextGroup element_t; - typedef CProxySection_ContextGroup section_t; - - CProxySection_ContextGroup(void) { - } - CProxySection_ContextGroup(const IrrGroup *g) : 
CProxySection_IrrGroup(g){ - } - CProxySection_ContextGroup(const CkGroupID &_gid,const int *_pelist,int _npes, CK_DELCTOR_PARAM) : CProxySection_IrrGroup(_gid,_pelist,_npes,CK_DELCTOR_ARGS){ } - CProxySection_ContextGroup(const CkGroupID &_gid,const int *_pelist,int _npes, int factor = USE_DEFAULT_BRANCH_FACTOR) : CProxySection_IrrGroup(_gid,_pelist,_npes,factor){ } - CProxySection_ContextGroup(int n,const CkGroupID *_gid, int const * const *_pelist,const int *_npes, int factor = USE_DEFAULT_BRANCH_FACTOR) : CProxySection_IrrGroup(n,_gid,_pelist,_npes,factor){ } - CProxySection_ContextGroup(int n,const CkGroupID *_gid, int const * const *_pelist,const int *_npes, CK_DELCTOR_PARAM) : CProxySection_IrrGroup(n,_gid,_pelist,_npes,CK_DELCTOR_ARGS){ } - - int ckIsDelegated(void) const - { return CProxySection_IrrGroup::ckIsDelegated(); } - inline CkDelegateMgr *ckDelegatedTo(void) const - { return CProxySection_IrrGroup::ckDelegatedTo(); } - inline CkDelegateData *ckDelegatedPtr(void) const - { return CProxySection_IrrGroup::ckDelegatedPtr(); } - CkGroupID ckDelegatedIdx(void) const - { return CProxySection_IrrGroup::ckDelegatedIdx(); } -inline void ckCheck(void) const {CProxySection_IrrGroup::ckCheck();} -CkChareID ckGetChareID(void) const - {return CProxySection_IrrGroup::ckGetChareID();} -CkGroupID ckGetGroupID(void) const - {return CProxySection_IrrGroup::ckGetGroupID();} -operator CkGroupID () const { return ckGetGroupID(); } - - inline void setReductionClient(CkReductionClientFn fn,void *param=NULL) const - { CProxySection_IrrGroup::setReductionClient(fn,param); } - inline void ckSetReductionClient(CkReductionClientFn fn,void *param=NULL) const - { CProxySection_IrrGroup::ckSetReductionClient(fn,param); } - inline void ckSetReductionClient(CkCallback *cb) const - { CProxySection_IrrGroup::ckSetReductionClient(cb); } -inline int ckGetNumSections() const -{ return CProxySection_IrrGroup::ckGetNumSections(); } -inline CkSectionInfo &ckGetSectionInfo() -{ return 
CProxySection_IrrGroup::ckGetSectionInfo(); } -inline CkSectionID *ckGetSectionIDs() -{ return CProxySection_IrrGroup::ckGetSectionIDs(); } -inline CkSectionID &ckGetSectionID() -{ return CProxySection_IrrGroup::ckGetSectionID(); } -inline CkSectionID &ckGetSectionID(int i) -{ return CProxySection_IrrGroup::ckGetSectionID(i); } -inline CkGroupID ckGetGroupIDn(int i) const -{ return CProxySection_IrrGroup::ckGetGroupIDn(i); } -inline const int *ckGetElements() const -{ return CProxySection_IrrGroup::ckGetElements(); } -inline const int *ckGetElements(int i) const -{ return CProxySection_IrrGroup::ckGetElements(i); } -inline int ckGetNumElements() const -{ return CProxySection_IrrGroup::ckGetNumElements(); } -inline int ckGetNumElements(int i) const -{ return CProxySection_IrrGroup::ckGetNumElements(i); } - - void ckDelegate(CkDelegateMgr *dTo,CkDelegateData *dPtr=NULL) - { CProxySection_IrrGroup::ckDelegate(dTo,dPtr); } - void ckUndelegate(void) - { CProxySection_IrrGroup::ckUndelegate(); } - void pup(PUP::er &p) - { CProxySection_IrrGroup::pup(p); - } - void ckSetGroupID(CkGroupID g) { - CProxySection_IrrGroup::ckSetGroupID(g); - } - ContextGroup* ckLocalBranch(void) const { - return ckLocalBranch(ckGetGroupID()); - } - static ContextGroup* ckLocalBranch(CkGroupID gID) { - return (ContextGroup*)CkLocalBranch(gID); - } -/* DECLS: ContextGroup(); - */ - - -/* DECLS: void top_level_task(); - */ - - void top_level_task(const CkEntryOptions *impl_e_opts=NULL); - -}; -#define ContextGroup_SDAG_CODE -typedef CBaseT1CBase_ContextGroup; - -} // namespace charm - -} // namespace run - -} // namespace flecsi - -namespace flecsi { -namespace run { -namespace charm { -/* ---------------- method closures -------------- */ -class Closure_ContextGroup { - public: - - - struct top_level_task_2_closure; - -}; - -} // namespace charm - -} // namespace run - -} // namespace flecsi - -extern void _registercontext(void); -extern "C" void CkRegisterMainModule(void); -#endif diff --git 
a/flecsi/run/charm/context.def.h b/flecsi/run/charm/context.def.h deleted file mode 100644 index cc9ba6562..000000000 --- a/flecsi/run/charm/context.def.h +++ /dev/null @@ -1,208 +0,0 @@ -namespace flecsi { -namespace run { -namespace charm { -/* ---------------- method closures -------------- */ -#ifndef CK_TEMPLATES_ONLY -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY - - struct Closure_ContextGroup::top_level_task_2_closure : public SDAG::Closure { - - - top_level_task_2_closure() { - init(); - } - top_level_task_2_closure(CkMigrateMessage*) { - init(); - } - void pup(PUP::er& __p) { - packClosure(__p); - } - virtual ~top_level_task_2_closure() { - } - PUPable_decl(SINGLE_ARG(top_level_task_2_closure)); - }; -#endif /* CK_TEMPLATES_ONLY */ - - -} // namespace charm - -} // namespace run - -} // namespace flecsi - -namespace flecsi { -namespace run { -namespace charm { -/* DEFS: group ContextGroup: IrrGroup{ -ContextGroup(); -void top_level_task(); -}; - */ -#ifndef CK_TEMPLATES_ONLY - int CkIndex_ContextGroup::__idx=0; -#endif /* CK_TEMPLATES_ONLY */ -#ifndef CK_TEMPLATES_ONLY -#endif /* CK_TEMPLATES_ONLY */ -#ifndef CK_TEMPLATES_ONLY -/* DEFS: ContextGroup(); - */ -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY -/* DEFS: void top_level_task(); - */ -void CProxyElement_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) -{ - ckCheck(); - void *impl_msg = CkAllocSysMsg(impl_e_opts); - if (ckIsDelegated()) { - CkGroupMsgPrep(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); - ckDelegatedTo()->GroupSend(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupPe(), ckGetGroupID()); - } else { - CkSendMsgBranch(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupPe(), ckGetGroupID(),0); - } -} -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY -/* DEFS: ContextGroup(); - */ -CkGroupID CProxy_ContextGroup::ckNew(const CkEntryOptions *impl_e_opts) -{ 
- void *impl_msg = CkAllocSysMsg(impl_e_opts); - UsrToEnv(impl_msg)->setMsgtype(BocInitMsg); - CkGroupID gId = CkCreateGroup(CkIndex_ContextGroup::__idx, CkIndex_ContextGroup::idx_ContextGroup_void(), impl_msg); - return gId; -} - -// Entry point registration function -int CkIndex_ContextGroup::reg_ContextGroup_void() { - int epidx = CkRegisterEp("ContextGroup()", - reinterpret_cast(_call_ContextGroup_void), 0, __idx, 0); - return epidx; -} - -void CkIndex_ContextGroup::_call_ContextGroup_void(void* impl_msg, void* impl_obj_void) -{ - ContextGroup* impl_obj = static_cast(impl_obj_void); - new (impl_obj_void) ContextGroup(); - if(UsrToEnv(impl_msg)->isVarSysMsg() == 0) - CkFreeSysMsg(impl_msg); -} -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY -/* DEFS: void top_level_task(); - */ -void CProxy_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) -{ - ckCheck(); - void *impl_msg = CkAllocSysMsg(impl_e_opts); - if (ckIsDelegated()) { - CkGroupMsgPrep(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); - ckDelegatedTo()->GroupBroadcast(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID()); - } else CkBroadcastMsgBranch(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID(),0); -} -void CProxy_ContextGroup::top_level_task(int npes, int *pes, const CkEntryOptions *impl_e_opts) { - void *impl_msg = CkAllocSysMsg(impl_e_opts); - CkSendMsgBranchMulti(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID(), npes, pes,0); -} -void CProxy_ContextGroup::top_level_task(CmiGroup &grp, const CkEntryOptions *impl_e_opts) { - void *impl_msg = CkAllocSysMsg(impl_e_opts); - CkSendMsgBranchGroup(CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetGroupID(), grp,0); -} - -// Entry point registration function -int CkIndex_ContextGroup::reg_top_level_task_void() { - int epidx = CkRegisterEp("top_level_task()", - 
reinterpret_cast(_call_top_level_task_void), 0, __idx, 0); - return epidx; -} - -void CkIndex_ContextGroup::_call_top_level_task_void(void* impl_msg, void* impl_obj_void) -{ - ContextGroup* impl_obj = static_cast(impl_obj_void); - impl_obj->top_level_task(); - if(UsrToEnv(impl_msg)->isVarSysMsg() == 0) - CkFreeSysMsg(impl_msg); -} -PUPable_def(SINGLE_ARG(Closure_ContextGroup::top_level_task_2_closure)) -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY -/* DEFS: ContextGroup(); - */ -#endif /* CK_TEMPLATES_ONLY */ - -#ifndef CK_TEMPLATES_ONLY -/* DEFS: void top_level_task(); - */ -void CProxySection_ContextGroup::top_level_task(const CkEntryOptions *impl_e_opts) -{ - ckCheck(); - void *impl_msg = CkAllocSysMsg(impl_e_opts); - if (ckIsDelegated()) { - ckDelegatedTo()->GroupSectionSend(ckDelegatedPtr(),CkIndex_ContextGroup::idx_top_level_task_void(), impl_msg, ckGetNumSections(), ckGetSectionIDs()); - } else { - void *impl_msg_tmp; - for (int i=0; i -void flecsi::run::charm::CBase_ContextGroup::virtual_pup(PUP::er &p) { - recursive_pup(dynamic_cast(this), p); -} -#endif /* CK_TEMPLATES_ONLY */ From afafe8005e1daf619b38f8d372ec2424c16d6f8d Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Fri, 14 Aug 2020 13:44:36 -0400 Subject: [PATCH 15/19] Bring up to date with devel API changes --- CMakeLists.txt | 8 +- flecsi/data.hh | 12 +- flecsi/data/charm/policy.hh | 179 ++++------------------------ flecsi/exec/CMakeLists.txt | 4 +- flecsi/exec/backend.hh | 2 +- flecsi/exec/charm/bind_accessors.hh | 4 +- flecsi/exec/charm/future.hh | 84 +++++++------ flecsi/exec/charm/policy.hh | 106 +++++++++++----- flecsi/exec/charm/task_prologue.hh | 16 +-- flecsi/exec/charm/task_wrapper.hh | 27 ++++- 10 files changed, 191 insertions(+), 251 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f11df1217..5cbf84cd0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,7 @@ set(CMAKE_CXX_STANDARD 17) 
#------------------------------------------------------------------------------# if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.4") + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0") message(FATAL_ERROR "Version 9.0 of gnu compilers required!") endif() endif() @@ -266,7 +266,7 @@ if(NOT FORMAT_ONLY) message (FATAL_ERROR "MPI is required for the mpi runtime model") endif() - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/mpi) + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/execution/mpi) set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) @@ -286,7 +286,7 @@ if(NOT FORMAT_ONLY) set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/hpx) + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/execution/hpx) #----------------------------------------------------------------------------# # Charm interface @@ -297,7 +297,7 @@ if(NOT FORMAT_ONLY) message (FATAL_ERROR "MPI is required for the charm runtime model") endif() - set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/exec/charm) + set(_runtime_path ${PROJECT_SOURCE_DIR}/flecsi/execution/charm) set(FLECSI_RUNTIME_LIBRARIES ${DL_LIBS} ${MPI_LIBRARIES}) diff --git a/flecsi/data.hh b/flecsi/data.hh index ba76c32c9..80bd92b69 100644 --- a/flecsi/data.hh +++ b/flecsi/data.hh @@ -55,29 +55,29 @@ namespace detail { struct data_guard { struct global_guard { global_guard() { - //global_topology.allocate({}); + global_topology.allocate({}); } global_guard(global_guard &&) = delete; ~global_guard() { - //global_topology.deallocate(); + global_topology.deallocate(); } } g; struct color_guard { color_guard() { - //process_coloring.allocate(run::context::instance().processes()); + process_coloring.allocate(run::context::instance().processes()); } color_guard(color_guard &&) = delete; ~color_guard() { - //process_coloring.deallocate(); + process_coloring.deallocate(); } } c; struct process_guard { process_guard() { - 
//process_topology.allocate(process_coloring.get()); + process_topology.allocate(process_coloring.get()); } process_guard(process_guard &&) = delete; ~process_guard() { - //process_topology.deallocate(); + process_topology.deallocate(); } } p; }; diff --git a/flecsi/data/charm/policy.hh b/flecsi/data/charm/policy.hh index 21fa24439..d22ba7459 100644 --- a/flecsi/data/charm/policy.hh +++ b/flecsi/data/charm/policy.hh @@ -15,188 +15,55 @@ /*! @file */ -#include -#include -#include -#include -#include -#include -#include -#include +#if !defined(__FLECSI_PRIVATE__) +#error Do not include this file directly! +#endif #if !defined(FLECSI_ENABLE_CHARM) #error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! #endif -flog_register_tag(topologies); +#include "flecsi/run/backend.hh" +#include "flecsi/topo/core.hh" // single_space namespace flecsi { namespace data { - -template -struct topology_id { - // NB: C-style cast supports private inheritance - topology_id() : id(runtime::context_t::instance().record(*(C *)this)) {} - topology_id(const topology_id &) : topology_id() {} - ~topology_id() { - runtime::context_t::instance().forget(id); - namespace charm { -#if 0 -inline auto & -run() { - return *Legion::Runtime::get_runtime(); -} -inline auto -ctx() { - return Legion::Runtime::get_context(); -} - -template -struct unique_handle { - unique_handle() = default; - unique_handle(T t) : h(t) {} - unique_handle(unique_handle && u) noexcept : h(std::exchange(u.h, {})) {} - ~unique_handle() { - if(*this) // it's not clear whether empty handles can be deleted - (run().*D)(ctx(), h, false); - } - topology_id & operator=(const topology_id &) noexcept { - return *this; - } - - std::size_t id; -}; - -/*----------------------------------------------------------------------------* - Index Topology. 
- *----------------------------------------------------------------------------*/ - -inline topology_data::topology_data( - const type::coloring & coloring) - : topology_base(Legion::Domain::from_rect<1>( - LegionRuntime::Arrays::Rect<1>(0, coloring.size() - 1))), - colors(coloring.size()) { - - auto legion_runtime = Legion::Runtime::get_runtime(); - auto legion_context = Legion::Runtime::get_context(); - auto & flecsi_context = runtime::context_t::instance(); - - auto & field_info_store = flecsi_context.get_field_info_store( - topology::id(), storage_label_t::dense); - - Legion::FieldAllocator allocator = - legion_runtime->create_field_allocator(legion_context, field_space); - - for(auto const & fi : field_info_store) { - allocator.allocate_field(fi->type_size, fi->fid); - } // for - - allocate(); - - Legion::IndexPartition index_partition = - legion_runtime->create_equal_partition( - legion_context, index_space, index_space); - - color_partition = legion_runtime->get_logical_partition( - legion_context, logical_region, index_partition); -} - -/*----------------------------------------------------------------------------* - Unstructured Mesh Topology. 
- *----------------------------------------------------------------------------*/ - -inline topology_data::topology_data( - const type::coloring & coloring) { - (void)coloring; -} - -// NOTE THAT THE HANDLE TYPE FOR THIS TYPE WILL NEED TO CAPTURE THE -// UNDERLYING TOPOLOGY TYPE, i.e., topology::mesh_t -using unique_index_space = - unique_handle; -// Legion seems to be buggy with destroying partitions: -using unique_index_partition = Legion::IndexPartition; -using unique_field_space = - unique_handle; -using unique_logical_region = unique_handle; -using unique_logical_partition = Legion::LogicalPartition; - -inline unique_index_space -index1(std::size_t n) { - return run().create_index_space(ctx(), Legion::Rect<1>(0, n - 1)); -} -#endif - struct region { - region(std::size_t n, const fields & fs) {} -#if 0 - : index_space(index1(n)), - field_space([&fs] { // TIP: IIFE (q.v.) allows statements here - auto & r = run(); - const auto c = ctx(); - unique_field_space ret = r.create_field_space(c); - Legion::FieldAllocator allocator = r.create_field_allocator(c, ret); - for(auto const & fi : fs) - allocator.allocate_field(fi->type_size, fi->fid); - return ret; - }()), - logical_region( - run().create_logical_region(ctx(), index_space, field_space)) {} + region(size2 s, const fields & fs) : s_(s) {} + size2 size() const { return s_; } - unique_index_space index_space; - unique_field_space field_space; - unique_logical_region logical_region; -#endif + size2 s_; }; struct partition { - // TODO: support create_partition_by_image_range case - template + using row = std::size_t; + static row make_row(std::size_t i, std::size_t n) { + return i; + } + static std::size_t row_size(const row& r) { + return 0; + } + partition(const region & reg) {} partition(const region & reg, - std::size_t n, - F f, - disjointness dis = {}, - completeness cpt = {}) {} -#if 0 - : color_space(index1(n)), - index_partition(run().create_partition_by_domain( - ctx(), - reg.index_space, - [&] { - 
std::map ret; - for(std::size_t i = 0; i < n; ++i) { - // NB: reg.index_space is assumed to be one-dimensional. - const auto [b, e] = f(i); - ret.try_emplace(i, Legion::Rect<1>(b, e - 1)); - } - return ret; - }(), - color_space, - true, - Legion::PartitionKind((dis + 2) % 3 + (cpt + 3) % 3 * 3))), - logical_partition(run().get_logical_partition(ctx(), - reg.logical_region, - index_partition)) {} -#endif + const partition & src, + field_id_t fid, + completeness cpt = incomplete) {} std::size_t colors() const { //return run().get_index_space_domain(color_space).get_volume(); return 1; } -#if 0 - unique_index_space color_space; - unique_index_partition index_partition; - unique_logical_partition logical_partition; -#endif - template const partition & get_partition() const { return *this; } + + void + update(const partition& src, field_id_t fid, completeness cpt = incomplete) { + } }; } // namespace charm diff --git a/flecsi/exec/CMakeLists.txt b/flecsi/exec/CMakeLists.txt index 815f0e708..04be86b1a 100644 --- a/flecsi/exec/CMakeLists.txt +++ b/flecsi/exec/CMakeLists.txt @@ -50,8 +50,8 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "hpx") elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") - set(execution_HEADERS - ${execution_HEADERS} + set(exec_HEADERS + ${exec_HEADERS} charm/bind_accessors.hh charm/task_wrapper.hh charm/unbind_accessors.hh diff --git a/flecsi/exec/backend.hh b/flecsi/exec/backend.hh index c08a87c18..c176566da 100644 --- a/flecsi/exec/backend.hh +++ b/flecsi/exec/backend.hh @@ -67,6 +67,6 @@ auto execute(ARGS &&...); #elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm -#include +#include #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index 50883875a..dc666b23c 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -63,7 +63,7 @@ struct bind_accessors_t : public util::tuple_walker { flog_assert(buf_.size() % sizeof(DATA_TYPE) == 0, "Bad buffer 
size\n"); auto & flecsi_context = run::context::instance(); DATA_TYPE* d = (DATA_TYPE*)flecsi_context.getField(accessor.identifier()); - bind(accessor, 1, d); + bind(accessor, {d, 1}); } template @@ -80,7 +80,7 @@ struct bind_accessors_t : public util::tuple_walker { Futures *--------------------------------------------------------------------------*/ template - void visit(exec::flecsi_future & future) { + void visit(future & future) { CkAbort("Futures not yet supported\n"); } diff --git a/flecsi/exec/charm/future.hh b/flecsi/exec/charm/future.hh index ea46d44cc..23a9ae002 100644 --- a/flecsi/exec/charm/future.hh +++ b/flecsi/exec/charm/future.hh @@ -33,75 +33,85 @@ #include namespace flecsi { -namespace exec { -/*! - Base charm future type. +template +struct future { - @tparam Return The return type of the task. - @tparam Launch FleCSI launch type: single/index. + /*! + Wait on a task result. + */ + void wait(bool silence_warnings = false) const { + } // wait + + /*! + Get a task result. + */ + Return get(bool silence_warnings = false) const { + //return return_; + return return_; + } // get - @ingroup legion-execution -*/ -template -struct charm_future; + Return return_; +}; // struct future -template -struct charm_future { - charm_future(Return r) : return_(r) {} +template<> +struct future { /*! Wait on a task result. */ - void wait(bool silence_warnings = false) { - //charm_future_.wait_all_results(silence_warnings); + void wait(bool silence_warnings = false) const { } // wait /*! Get a task result. */ + void get(bool silence_warnings = false) const { + return; + } // get +}; // struct future + +template +struct future { + + /*! + Wait on a task result. + */ + void wait() const { + } // wait - Return get(size_t index = 0, bool silence_warnings = false) { + /*! + Get a task result. + */ + Return get(std::size_t index = 0, bool silence_warnings = false) const { return return_; } // get - Return return_; -}; // struct charm_future - -/*! 
Partial specialization for the charm future + std::size_t size() const { return 1; } - @tparam Return The return type of the task. + Return return_; +}; // future - @ingroup legion-execution - */ -template -struct charm_future { +template <> +struct future { /*! Wait on a task result. */ - void wait() { - //charm_future_.wait(); + void wait() const { } // wait /*! Get a task result. */ - void get(bool silence_warnings = false) { + void get(std::size_t index = 0, bool silence_warnings = false) const { return; } // get -}; // charm_future - -//----------------------------------------------------------------------- + std::size_t size() const { return 1; } -template -using flecsi_future = charm_future; +}; // future -template -constexpr bool is_index_future = false; -template -constexpr bool is_index_future> = true; +//----------------------------------------------------------------------- -} // namespace exec } // namespace flecsi diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 81588ffff..821ef2877 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -69,26 +69,34 @@ auto serial_arguments(std::tuple * /* to deduce PP */, AA &&... aa) { static_assert((std::is_const_v> && ...), "Tasks cannot accept non-const references"); - return util::serial_put(std::tuple &, nonconst_ref_t>, - const PP &, - std::decay_t>...>(std::forward(aa)...)); + return util::serial_put &, nonconst_ref_t>, + const PP &, + std::decay_t>...>>( + {exec::replace_argument(std::forward(aa))...}); } -} // namespace detail -} // namespace exec +// Helper to deduce PP: +template +void +mpi_arguments(std::optional> & opt, AA &&... aa) { + opt.emplace(exec::replace_argument(std::forward(aa))...); +} -template -decltype(auto) -reduce(ARGS &&... args) { - using namespace exec; +template +struct tuple_prepend; +template +struct tuple_prepend> { + using type = std::tuple; +}; +} // namespace detail + +template +auto +reduce_internal(Args &&... 
args) { using traits_t = util::function_traits; - using RETURN = typename traits_t::return_type; + using return_t = typename traits_t::return_type; using param_tuple = typename traits_t::arguments_type; // This will guard the entire method @@ -98,14 +106,21 @@ reduce(ARGS &&... args) { auto & flecsi_context = run::context::instance(); // Get the processor type. - constexpr auto processor_type = mask_to_processor_type(ATTRIBUTES); - - size_t domain_size = LAUNCH_DOMAIN.size(); - domain_size = domain_size == 0 ? flecsi_context.processes() : domain_size; + constexpr auto processor_type = mask_to_processor_type(Attributes); - ++flecsi_context.tasks_executed(); + const auto domain_size = [&args..., &flecsi_context] { + if constexpr(processor_type == task_processor_type_t::mpi) { + return launch_size< + typename detail::tuple_prepend::type>( + launch_domain{flecsi_context.processes()}, args...); + } + else { + (void)flecsi_context; + return launch_size(args...); + } + }(); - charm::task_prologue_t pro(domain_size); + charm::task_prologue_t pro; pro.walk(args...); std::optional mpi_args; @@ -114,13 +129,13 @@ reduce(ARGS &&... args) { // MPI tasks must be invoked collectively from one task on each rank. // We therefore can transmit merely a pointer to a tuple of the arguments. // util::serial_put deliberately doesn't support this, so just memcpy it. - mpi_args.emplace(std::forward(args)...); + detail::mpi_arguments(mpi_args, std::forward(args)...); const auto p = &*mpi_args; buf.resize(sizeof p); std::memcpy(buf.data(), &p, sizeof p); } else { buf = detail::serial_arguments( - static_cast(nullptr), std::forward(args)...); + static_cast(nullptr), std::forward(args)...); } //------------------------------------------------------------------------// @@ -129,26 +144,53 @@ reduce(ARGS &&... 
args) { using wrap = charm::task_wrapper; const auto task = charm::task_id(wrap::LegionProcessor)>; + (Attributes & ~mpi) | 1 << static_cast(wrap::LegionProcessor)>; // TODO: Right now we just execute tasks inline which doesn't expose any // paralellism. Tasks should be converted to entry methods in charm or // something similar, ie charm tasks. - if constexpr(LAUNCH_DOMAIN == single) { - if constexpr(std::is_same_v) { + if constexpr(std::is_same_v) { + future f; + if constexpr(std::is_same_v) { flecsi_context.execute(buf); - return charm_future(); } else { - return charm_future(flecsi_context.execute(buf)); + f.return_ = flecsi_context.execute(buf); } + return f; } else { - if constexpr(std::is_same_v) { + future f; + if constexpr(std::is_same_v) { flecsi_context.execute(buf); - return charm_future(); } else { - return charm_future(flecsi_context.execute(buf)); + f.return_ = flecsi_context.execute(buf); } + return f; } -} // execute_task +} // reduce_internal + +} // namespace exec + +template +auto +reduce(Args &&... args) { + using namespace exec; + + // This will guard the entire method + log::devel_guard guard(execution_tag); + + // Get the FleCSI runtime context + auto & flecsi_context = run::context::instance(); + std::size_t & tasks_executed = flecsi_context.tasks_executed(); + ++tasks_executed; +#if defined(FLECSI_ENABLE_FLOG) + /*if(tasks_executed % FLOG_SERIALIZATION_INTERVAL == 0 && + reduce_internal, flecsi::mpi>() + .get() > FLOG_SERIALIZATION_THRESHOLD) + reduce_internal();*/ +#endif + + return reduce_internal( + std::forward(args)...); +} } // namespace flecsi diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index d54c0676a..62e15529f 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -62,7 +62,7 @@ struct task_prologue_t { @param context The Legion task runtime context. */ - task_prologue_t(const size_t & domain) {} + task_prologue_t() {} template void walk(const AA &... 
aa) { @@ -77,7 +77,7 @@ struct task_prologue_t { template Space> + typename Topo::index_space Space> void visit(data::accessor * null_p, const data::field_reference & ref) { visit(get_null_base(null_p), ref.template cast()); @@ -100,8 +100,8 @@ struct task_prologue_t { template Space, - class = std::enable_if_t == 1>> + typename Topo::index_space Space, + class = std::enable_if_t == 1>> void visit( data::accessor * /* parameter */, const data::field_reference & ref) { @@ -122,15 +122,15 @@ struct task_prologue_t { Futures *--------------------------------------------------------------------------*/ template - void visit(exec::flecsi_future *, - const exec::charm_future & + void visit(future *, + const future & future) { CkAbort("Futures not yet supported\n"); } template - void visit(exec::flecsi_future *, - const exec::charm_future & future) { + void visit(future *, + const future & future) { CkAbort("Futures not yet supported\n"); } diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index deb872e52..fc7853cd0 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -66,11 +66,32 @@ struct util::serial_convert> { return b; } }; -// NB: topology_accessor is trivially copyable. 
+template +struct util::serial, + std::enable_if_t>>> { + using type = data::topology_accessor; + template + static void put(P &, const type &) {} + static type get(const std::byte *&) { + return type(); + } +}; + +template +struct util::serial_convert> { + using type = exec::partial; + using Rep = typename type::Base; + static const Rep & put(const type & p) { + return p; + } + static type get(const Rep & t) { + return t; + } +}; template -struct util::serial_convert> { - using type = exec::flecsi_future; +struct util::serial_convert> { + using type = future; struct Rep {}; static Rep put(const type &) { return {}; From ffe7f7001b8b306c6b1593c499b734095d958cd6 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Fri, 14 Aug 2020 15:57:33 -0400 Subject: [PATCH 16/19] Minor cleanup and commenting --- flecsi/data/charm/policy.hh | 3 +- flecsi/exec/CMakeLists.txt | 2 - flecsi/exec/charm/bind_accessors.hh | 14 +- flecsi/exec/charm/future.hh | 9 +- flecsi/exec/charm/policy.hh | 3 +- flecsi/exec/charm/reduction_wrapper.hh | 64 ----- flecsi/exec/charm/task_prologue.hh | 15 +- flecsi/exec/charm/task_wrapper.hh | 44 +--- flecsi/exec/charm/unbind_accessors.hh | 68 ------ flecsi/run/charm/context.cc | 7 - flecsi/run/charm/context.hh | 9 +- flecsi/run/charm/mapper.hh | 316 ------------------------- 12 files changed, 33 insertions(+), 521 deletions(-) delete mode 100644 flecsi/exec/charm/reduction_wrapper.hh delete mode 100644 flecsi/exec/charm/unbind_accessors.hh delete mode 100644 flecsi/run/charm/mapper.hh diff --git a/flecsi/data/charm/policy.hh b/flecsi/data/charm/policy.hh index d22ba7459..8efee5db3 100644 --- a/flecsi/data/charm/policy.hh +++ b/flecsi/data/charm/policy.hh @@ -30,6 +30,8 @@ namespace flecsi { namespace data { namespace charm { +// TODO: These are just placeholder definitions for region and partition +// while the topo interface for Flecsi is still in flux. 
struct region { region(size2 s, const fields & fs) : s_(s) {} size2 size() const { return s_; } @@ -52,7 +54,6 @@ struct partition { completeness cpt = incomplete) {} std::size_t colors() const { - //return run().get_index_space_domain(color_space).get_volume(); return 1; } diff --git a/flecsi/exec/CMakeLists.txt b/flecsi/exec/CMakeLists.txt index 04be86b1a..e4bfc9fe2 100644 --- a/flecsi/exec/CMakeLists.txt +++ b/flecsi/exec/CMakeLists.txt @@ -54,11 +54,9 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") ${exec_HEADERS} charm/bind_accessors.hh charm/task_wrapper.hh - charm/unbind_accessors.hh charm/policy.hh charm/future.hh charm/task_prologue.hh - charm/reduction_wrapper.hh ) endif() diff --git a/flecsi/exec/charm/bind_accessors.hh b/flecsi/exec/charm/bind_accessors.hh index dc666b23c..9ff3bdf36 100644 --- a/flecsi/exec/charm/bind_accessors.hh +++ b/flecsi/exec/charm/bind_accessors.hh @@ -41,9 +41,9 @@ namespace exec::charm { /*! The bind_accessors_t type is called to walk the user task arguments inside of - an executing legion task to properly complete the users accessors, i.e., by - pointing the accessor \em view instances to the appropriate legion-mapped - buffers. + an executing task to properly complete the users accessors, i.e., by + pointing the accessor \em view instances to the appropriate data as managed by + the Charm++ backend. */ struct bind_accessors_t : public util::tuple_walker { @@ -51,13 +51,17 @@ struct bind_accessors_t : public util::tuple_walker { /*! Construct an bind_accessors_t instance. - @param legion_runtime The Legion task runtime. - @param legion_context The Legion task runtime context. + @param buf the buffer containing serialized arguments. + TODO: buf may not even be needed anymore */ bind_accessors_t(std::vector& buf) : buf_(buf) {} + /*! 
+ Get field data from the Charm++ backend context and bind it to the accessor + TODO: For now this assumes a size of one + */ template void visit(data::accessor & accessor) { flog_assert(buf_.size() % sizeof(DATA_TYPE) == 0, "Bad buffer size\n"); diff --git a/flecsi/exec/charm/future.hh b/flecsi/exec/charm/future.hh index 23a9ae002..d3b86f0d3 100644 --- a/flecsi/exec/charm/future.hh +++ b/flecsi/exec/charm/future.hh @@ -34,6 +34,10 @@ namespace flecsi { +// TODO: These are all skeletal future implementations just to allow +// for compilation and test execution. Once asynchrony is introduced +// these should be setup to wrap Charm++ futures (similar to how the +// legion backend wraps legion futures). template struct future { @@ -47,7 +51,6 @@ struct future { Get a task result. */ Return get(bool silence_warnings = false) const { - //return return_; return return_; } // get @@ -90,7 +93,7 @@ struct future { std::size_t size() const { return 1; } Return return_; -}; // future +}; // struct future template <> struct future { @@ -110,7 +113,7 @@ struct future { std::size_t size() const { return 1; } -}; // future +}; // struct future //----------------------------------------------------------------------- diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 821ef2877..45be2fe0b 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -23,7 +23,6 @@ #include "flecsi/exec/launch.hh" #include "flecsi/exec/charm/future.hh" -#include "flecsi/exec/charm/reduction_wrapper.hh" #include "flecsi/exec/charm/task_prologue.hh" #include "flecsi/exec/charm/task_wrapper.hh" #include "flecsi/run/backend.hh" @@ -144,7 +143,7 @@ reduce_internal(Args &&... args) { using wrap = charm::task_wrapper; const auto task = charm::task_id(wrap::LegionProcessor)>; + (Attributes & ~mpi) | 1 << static_cast(wrap::CharmProcessor)>; // TODO: Right now we just execute tasks inline which doesn't expose any // paralellism. 
Tasks should be converted to entry methods in charm or diff --git a/flecsi/exec/charm/reduction_wrapper.hh b/flecsi/exec/charm/reduction_wrapper.hh deleted file mode 100644 index dd44e1397..000000000 --- a/flecsi/exec/charm/reduction_wrapper.hh +++ /dev/null @@ -1,64 +0,0 @@ -/* - @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ - /@@///// /@@ @@////@@ @@////// /@@ - /@@ /@@ @@@@@ @@ // /@@ /@@ - /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ - /@@//// /@@/@@@@@@@/@@ ////////@@/@@ - /@@ /@@/@@//// //@@ @@ /@@/@@ - /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ - // /// ////// ////// //////// // - - Copyright (c) 2016, Triad National Security, LLC - All rights reserved. - */ -#pragma once - -/*! @file */ - -#if !defined(__FLECSI_PRIVATE__) -#error Do not include this file directly! -#endif - -#include "flecsi/exec/fold.hh" -#include "flecsi/run/backend.hh" -#include "flecsi/util/demangle.hh" -#include - -namespace flecsi { - -inline log::devel_tag reduction_wrapper_tag("reduction_wrapper"); - -namespace exec { - -namespace detail { -/*! - Register the user-defined reduction operator with the runtime. -*/ - -template -void register_reduction(); - -//inline Legion::ReductionOpID reduction_id; -} // namespace detail - -// NB: 0 is reserved by Legion. 
-//template -//inline const Legion::ReductionOpID reduction_op = -// (run::context::instance().register_init(detail::register_reduction), -// ++detail::reduction_id); - -template -void -detail::register_reduction() { - { - log::devel_guard guard(reduction_wrapper_tag); - flog_devel(info) << "registering reduction operation " << util::type() - << std::endl; - } - - // Register the operation with the Legion runtime - //Legion::Runtime::register_reduction_op(reduction_op); -} - -} // namespace exec -} // namespace flecsi diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index 62e15529f..380f6262e 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -30,7 +30,6 @@ #include "flecsi/topo/ntree/interface.hh" #include "flecsi/topo/set/interface.hh" #include "flecsi/topo/structured/interface.hh" -//#include "flecsi/topo/unstructured/interface.hh" #include "flecsi/util/demangle.hh" #include "flecsi/util/tuple_walker.hh" @@ -46,24 +45,14 @@ namespace exec::charm { /*! The task_prologue_t type can be called to walk task args before the - task launcher is created. This allows us to gather region requirements - and to set state on the associated data handles \em before Legion gets - the task arguments tuple. + task launcher is created. For now this is used to register data with + the Charm++ backend. @ingroup execution */ struct task_prologue_t { - /*! - Construct an task_prologue_t instance. - - @param runtime The Legion task runtime. - @param context The Legion task runtime context. - */ - - task_prologue_t() {} - template void walk(const AA &... aa) { walk(static_cast

(nullptr), aa...); diff --git a/flecsi/exec/charm/task_wrapper.hh b/flecsi/exec/charm/task_wrapper.hh index fc7853cd0..e33b7af6c 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -23,13 +23,11 @@ #include "flecsi/exec/charm/bind_accessors.hh" #include "flecsi/exec/charm/future.hh" -#include "flecsi/exec/charm/unbind_accessors.hh" #include "flecsi/exec/task_attributes.hh" #include "flecsi/run/backend.hh" #include "flecsi/util/common.hh" #include "flecsi/util/function_traits.hh" #include "flecsi/util/serialize.hh" -#include "unbind_accessors.hh" #include #if !defined(FLECSI_ENABLE_CHARM) @@ -107,13 +105,12 @@ using run::charm::task; namespace detail { inline task_id_t last_task; // 0 is the top-level task /*! - Register a task with Legion. + Register a task with the Charm backend. @tparam RETURN The return type of the task. - @tparam TASK The legion task. + @tparam TASK The task. \tparam A task attributes - @ingroup legion-execution */ template * TASK, std::size_t A> @@ -144,7 +141,7 @@ tuple_get(const std::vector& buf) { /*! Arbitrary index for each task. - @tparam F Legion task function. + @tparam F Task function. @tparam ATTRIBUTES A size_t holding the mask of the task attributes mask \ref task_attributes_mask_t. */ @@ -163,10 +160,9 @@ void detail::register_task() { constexpr auto processor_type = mask_to_processor_type(A); static_assert(processor_type != task_processor_type_t::mpi, - "Legion tasks cannot use MPI"); + "Charm tasks cannot use MPI"); const std::string name = util::symbol<*TASK>(); - std::cout << "Registering " << name << std::endl; { log::devel_guard guard(task_wrapper_tag); flog_devel(info) << "registering pure Legion task " << name << std::endl; @@ -178,7 +174,6 @@ detail::register_task() { } // registration_callback // A trivial wrapper for nullary functions. -// TODO: Need a charm++ replacement for this? 
template auto verb(std::vector& buf) { @@ -192,7 +187,6 @@ verb(std::vector& buf) { \tparam F the user task \tparam P the target processor type - @ingroup legion-execution */ template // P is for specialization only @@ -202,7 +196,7 @@ struct task_wrapper { using RETURN = typename Traits::return_type; using param_tuple = typename Traits::arguments_type; - static constexpr task_processor_type_t LegionProcessor = P; + static constexpr task_processor_type_t CharmProcessor = P; /*! Execution wrapper method for user tasks. @@ -224,18 +218,8 @@ struct task_wrapper { if constexpr(std::is_same_v) { apply(F, std::forward(task_args)); - - // FIXME: Refactor - // finalize_handles_t finalize_handles; - // finalize_handles.walk(task_args); - } - else { + } else { RETURN result = apply(F, std::forward(task_args)); - - // FIXME: Refactor - // finalize_handles_t finalize_handles; - // finalize_handles.walk(task_args); - return result; } // if } // execute_user_task @@ -248,35 +232,21 @@ struct task_wrapper { using RETURN = typename Traits::return_type; using param_tuple = typename Traits::arguments_type; - static constexpr auto LegionProcessor = task_processor_type_t::loc; + static constexpr auto CharmProcessor = task_processor_type_t::loc; static RETURN execute(std::vector& buf) { - // FIXME: Refactor - // { - // log::devel_guard guard(task_wrapper_tag); - // flog_devel(info) << "In execute_mpi_task" << std::endl; - // } - // Unpack task arguments. param_tuple * p; flog_assert(buf.size() == sizeof p, "Bad Task::arglen"); std::memcpy(&p, buf.data(), sizeof p); auto & mpi_task_args = *p; - // FIXME: Refactor - // init_handles_t init_handles(runtime, context, regions, task->futures); - // init_handles.walk(mpi_task_args); - // TODO: Is more needed for synchronization with an "MPI" task? 
if constexpr(std::is_same_v) { apply(F, std::move(mpi_task_args)); } else { return apply(F, std::move(mpi_task_args)); } - - // FIXME: Refactor - // finalize_handles_t finalize_handles; - // finalize_handles.walk(mpi_task_args); } }; diff --git a/flecsi/exec/charm/unbind_accessors.hh b/flecsi/exec/charm/unbind_accessors.hh deleted file mode 100644 index 1770dde55..000000000 --- a/flecsi/exec/charm/unbind_accessors.hh +++ /dev/null @@ -1,68 +0,0 @@ -/* - @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ - /@@///// /@@ @@////@@ @@////// /@@ - /@@ /@@ @@@@@ @@ // /@@ /@@ - /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ - /@@//// /@@/@@@@@@@/@@ ////////@@/@@ - /@@ /@@/@@//// //@@ @@ /@@/@@ - /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ - // /// ////// ////// //////// // - - Copyright (c) 2016, Triad National Security, LLC - All rights reserved. - */ -#pragma once - -/*! @file */ - -#include - -#if !defined(__FLECSI_PRIVATE__) -#error Do not include this file directly! -#endif - -#include "flecsi/data/accessor.hh" -#include "flecsi/data/privilege.hh" -#include "flecsi/run/context.hh" -#include "flecsi/util/demangle.hh" -#include "flecsi/util/tuple_walker.hh" - -#if !defined(FLECSI_ENABLE_CHARM) -#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! -#endif - -namespace flecsi { - -inline log::devel_tag unbind_accessors_tag("unbind_accessors"); - -namespace exec::charm { - -/*! - The unbind_accessors_t type is called to walk the user task arguments inside - of an executing legion task to properly unbind the user's accessors. 
- */ - -struct unbind_accessors_t : public util::tuple_walker { - - template - void visit(data::accessor &) { - } // visit - - /*--------------------------------------------------------------------------* - Non-FleCSI Data Types - *--------------------------------------------------------------------------*/ - - template - static typename std::enable_if_t< - !std::is_base_of_v> - visit(DATA_TYPE &) { - { - log::devel_guard guard(unbind_accessors_tag); - flog_devel(info) << "Skipping argument with type " - << util::type() << std::endl; - } - } // visit -}; // struct unbind_accessors_t - -} // namespace exec::charm -} // namespace flecsi diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index bc9cfbbfd..8c608d27b 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -21,7 +21,6 @@ #include "flecsi/exec/launch.hh" #include "flecsi/exec/charm/task_wrapper.hh" #include "flecsi/run/charm/context.hh" -#include "flecsi/run/charm/mapper.hh" #include "flecsi/run/types.hh" #include @@ -37,14 +36,12 @@ using exec::charm::task_id; namespace charm { ContextGroup::ContextGroup() { - CkPrintf("Group created on %i\n", CkMyPe()); if (CkMyPe() != 0) { run::context::instance().context_proxy_ = thisProxy; } } void ContextGroup::top_level_task() { - std::cout << "Executing the top level task" << std::endl; context_t & context_ = context_t::instance(); detail::data_guard(), context_.exit_status() = (*context_.top_level_action_)(); @@ -108,10 +105,6 @@ context_t::start(const std::function & action) { context::start(); - /* - Legion command-line arguments. 
- */ - // FIXME: This needs to be gotten from Charm context::threads_per_process_ = 1; context::threads_ = context::processes_ * context::threads_per_process_; diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 9ee51740e..756263d2d 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -22,8 +22,6 @@ #endif #include "../context.hh" -//#include "flecsi/execution/launch.hh" -//#include "flecsi/execution/processor.hh" #include #include @@ -57,6 +55,10 @@ namespace charm { template using task = R(std::vector&); +// This is the charm group which manages the context and makes context info +// accessible on all PEs. At the moment, it's pretty skeletal, but moving +// forward, should include field management, communication, asynchronous task +// execution, reduction logic, etc. class ContextGroup : public CBase_ContextGroup { public: ContextGroup(); @@ -173,7 +175,6 @@ struct context_t : context { */ size_t task_depth() { - // TODO: Must be some way to get this from Charm runtime return context_proxy_.ckLocalBranch()->task_depth(); } // task_depth @@ -181,6 +182,8 @@ struct context_t : context { Documentation for this interface is in the top-level context type. */ + // TODO: Color functionality still needs implementation. 
It may also need + // to be made static (as well as some of the other functions here) size_t color() { flog_assert( task_depth() > 0, "this method can only be called from within a task"); diff --git a/flecsi/run/charm/mapper.hh b/flecsi/run/charm/mapper.hh deleted file mode 100644 index 0290f3585..000000000 --- a/flecsi/run/charm/mapper.hh +++ /dev/null @@ -1,316 +0,0 @@ -/* - @@@@@@@@ @@ @@@@@@ @@@@@@@@ @@ - /@@///// /@@ @@////@@ @@////// /@@ - /@@ /@@ @@@@@ @@ // /@@ /@@ - /@@@@@@@ /@@ @@///@@/@@ /@@@@@@@@@/@@ - /@@//// /@@/@@@@@@@/@@ ////////@@/@@ - /@@ /@@/@@//// //@@ @@ /@@/@@ - /@@ @@@//@@@@@@ //@@@@@@ @@@@@@@@ /@@ - // /// ////// ////// //////// // - - Copyright (c) 2016, Triad National Security, LLC - All rights reserved. - */ -#pragma once - -/*! @file */ - -#include - -#if !defined(__FLECSI_PRIVATE__) -#error Do not include this file directly! -#endif - -#include "../backend.hh" - -#if !defined(FLECSI_ENABLE_CHARM) -#error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! -#endif - -namespace flecsi { - -inline log::devel_tag legion_mapper_tag("legion_mapper"); - -namespace run { -#if 0 - -/* - The mpi_mapper_t - is a custom mapper that handles mpi-legion - interoperability in FLeCSI - - @ingroup legion-runtime -*/ - -class mpi_mapper_t : public Legion::Mapping::DefaultMapper -{ -public: - /*! - Contructor. 
Derives from the Legion's Default Mapper - - @param machine Machine type for Legion's Realm - @param _runtime Legion runtime - @param local processor type: currently supports only - LOC_PROC and TOC_PROC - */ - - mpi_mapper_t(Legion::Machine machine, - Legion::Runtime * _runtime, - Legion::Processor local) - : Legion::Mapping::DefaultMapper(_runtime->get_mapper_runtime(), - machine, - local, - "default"), - machine(machine) { - using legion_machine = Legion::Machine; - using legion_proc = Legion::Processor; - - legion_machine::ProcessorQuery pq = - legion_machine::ProcessorQuery(machine).same_address_space_as(local); - for(legion_machine::ProcessorQuery::iterator pqi = pq.begin(); - pqi != pq.end(); - ++pqi) { - legion_proc p = *pqi; - if(p.kind() == legion_proc::LOC_PROC) - local_cpus.push_back(p); - else if(p.kind() == legion_proc::TOC_PROC) - local_gpus.push_back(p); - else - continue; - - std::map & mem_map = proc_mem_map[p]; - - legion_machine::MemoryQuery mq = - legion_machine::MemoryQuery(machine).has_affinity_to(p); - for(legion_machine::MemoryQuery::iterator mqi = mq.begin(); - mqi != mq.end(); - ++mqi) { - Realm::Memory m = *mqi; - mem_map[m.kind()] = m; - - if(m.kind() == Realm::Memory::SYSTEM_MEM) - local_sysmem = m; - } // end for - } // end for - - { - log::devel_guard guard(legion_mapper_tag); - flog_devel(info) << "Mapper constructor" << std::endl - << "\tlocal: " << local << std::endl - << "\tcpus: " << local_cpus.size() << std::endl - << "\tgpus: " << local_gpus.size() << std::endl - << "\tsysmem: " << local_sysmem << std::endl; - } // scope - } // end mpi_mapper_t - - /*! 
- Destructor - */ - virtual ~mpi_mapper_t(){}; - - Legion::LayoutConstraintID default_policy_select_layout_constraints( - Legion::Mapping::MapperContext ctx, - Realm::Memory, - const Legion::RegionRequirement &, - Legion::Mapping::DefaultMapper::MappingKind, - bool /* constraint */, - bool & force_new_instances) { - // We always set force_new_instances to false since we are - // deciding to optimize for minimizing memory usage instead - // of avoiding Write-After-Read (WAR) dependences - force_new_instances = false; - std::vector ordering; - ordering.push_back(Legion::DimensionKind::DIM_Y); - ordering.push_back(Legion::DimensionKind::DIM_X); - ordering.push_back(Legion::DimensionKind::DIM_F); // SOA - Legion::OrderingConstraint ordering_constraint( - ordering, true /*contiguous*/); - Legion::LayoutConstraintSet layout_constraint; - layout_constraint.add_constraint(ordering_constraint); - - // Do the registration - Legion::LayoutConstraintID result = - runtime->register_layout(ctx, layout_constraint); - return result; - } - - /*! - Specialization of the map_task funtion for FLeCSI - By default, map_task will execute Legions map_task from DefaultMapper. 
- In the case the launcher has been tagged with the - "MAPPER_COMPACTED_STORAGE" tag, mapper will create single physical - instance for exclusive, shared and ghost partitions for each data handle - - @param ctx Mapper Context - @param task Legion's task - @param input Input information about task mapping - @param output Output information about task mapping - */ - - virtual void map_task(const Legion::Mapping::MapperContext ctx, - const Legion::Task & task, - const Legion::Mapping::Mapper::MapTaskInput & input, - Legion::Mapping::Mapper::MapTaskOutput & output) { - DefaultMapper::map_task(ctx, task, input, output); - - if((task.tag == FLECSI_MAPPER_COMPACTED_STORAGE) && - (task.regions.size() > 0)) { - - Legion::Memory target_mem = - DefaultMapper::default_policy_select_target_memory( - ctx, task.target_proc, task.regions[0]); - - // check if we get region requirements for "exclusive, shared and ghost" - // logical regions for each data handle - - // Filling out "layout_constraints" with the defaults - Legion::LayoutConstraintSet layout_constraints; - // No specialization - layout_constraints.add_constraint(Legion::SpecializedConstraint()); - layout_constraints.add_constraint(Legion::OrderingConstraint()); - // Constrained for the target memory kind - layout_constraints.add_constraint( - Legion::MemoryConstraint(target_mem.kind())); - // Have all the field for the instance available - std::vector all_fields; - layout_constraints.add_constraint(Legion::FieldConstraint()); - - // FIXME:: add colocation_constraints - Legion::ColocationConstraint colocation_constraints; - - for(size_t indx = 0; indx < task.regions.size(); indx++) { - - Legion::Mapping::PhysicalInstance result; - std::vector regions; - bool created; - - if(task.regions[indx].tag == FLECSI_MAPPER_EXCLUSIVE_LR) { - - flog_assert((task.regions.size() >= (indx + 2)), - "ERROR:: wrong number of regions passed to the task wirth \ - the tag = FLECSI_MAPPER_COMPACTED_STORAGE"); - - 
flog_assert((!task.regions[indx].region.exists()), - "ERROR:: pasing not existing REGION to the mapper"); - regions.push_back(task.regions[indx].region); - regions.push_back(task.regions[indx + 1].region); - regions.push_back(task.regions[indx + 2].region); - - flog_assert(runtime->find_or_create_physical_instance(ctx, - target_mem, - layout_constraints, - regions, - result, - created, - true /*acquire*/, - GC_NEVER_PRIORITY), - "FLeCSI mapper failed to allocate instance"); - - for(size_t j = 0; j < 3; j++) { - output.chosen_instances[indx + j].push_back(result); - } // for - - indx = indx + 2; - } - else { - - regions.push_back(task.regions[indx].region); - - flog_assert(runtime->find_or_create_physical_instance(ctx, - target_mem, - layout_constraints, - regions, - result, - created, - true /*acquire*/, - GC_NEVER_PRIORITY), - "FLeCSI mapper failed to allocate instance"); - - output.chosen_instances[indx].push_back(result); - - } // end if - } // end for - - } // end if - - } // map_task - - virtual void slice_task(const Legion::Mapping::MapperContext ctx, - const Legion::Task & task, - const Legion::Mapping::Mapper::SliceTaskInput & input, - Legion::Mapping::Mapper::SliceTaskOutput & output) { - - switch(task.tag) { - case FLECSI_MAPPER_SUBRANK_LAUNCH: - // expect a 1-D index domain - assert(input.domain.get_dim() == 1); - // send the whole domain to our local processor - output.slices.resize(1); - output.slices[0].domain = input.domain; - output.slices[0].proc = task.target_proc; - break; - - case FLECSI_MAPPER_FORCE_RANK_MATCH: { - // expect a 1-D index domain - each point goes to the corresponding node - assert(input.domain.get_dim() == 1); - LegionRuntime::Arrays::Rect<1> r = input.domain.get_rect<1>(); - - // go through all the CPU processors and find a representative for each - // node (i.e. 
address space) - std::map targets; - - Legion::Machine::ProcessorQuery pq = - Legion::Machine::ProcessorQuery(machine).only_kind( - Legion::Processor::LOC_PROC); - for(Legion::Machine::ProcessorQuery::iterator it = pq.begin(); - it != pq.end(); - ++it) { - Legion::Processor p = *it; - int a = p.address_space(); - if(targets.count(a) == 0) - targets[a] = p; - } - - output.slices.resize(1); - for(int a = r.lo[0]; a <= r.hi[0]; a++) { - assert(targets.count(a) > 0); - output.slices[0].domain = // Legion::Domain::from_rect<1>( - Legion::Rect<1>(a, a); - output.slices[0].proc = targets[a]; - } - break; - } - - default: - DefaultMapper::slice_task(ctx, task, input, output); - } - } - -private: - std::map> - proc_mem_map; - Realm::Memory local_sysmem; - Realm::Machine machine; -}; - -/*! - mapper_registration is used to replace DefaultMapper with mpi_mapper_t in - FLeCSI - - @ingroup legion-runtime - */ - -inline void -mapper_registration(Legion::Machine machine, - Legion::HighLevelRuntime * rt, - const std::set & local_procs) { - for(std::set::const_iterator it = local_procs.begin(); - it != local_procs.end(); - it++) { - mpi_mapper_t * mapper = new mpi_mapper_t(machine, rt, *it); - rt->replace_default_mapper(mapper, *it); - } -} // mapper registration -#endif - -} // namespace run -} // namespace flecsi From fe9ab4bbcc0b8d9d29f1f7b68a82a15c5ea5c4a7 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 18 Aug 2020 16:20:27 -0400 Subject: [PATCH 17/19] Small fixes to wrap up recent rebase --- flecsi/data/charm/policy.hh | 9 ++++++- flecsi/exec/charm/task_prologue.hh | 39 ++++++++++++++++++++---------- flecsi/run/CMakeLists.txt | 1 - flecsi/run/charm/context.cc | 5 +++- 4 files changed, 38 insertions(+), 16 deletions(-) diff --git a/flecsi/data/charm/policy.hh b/flecsi/data/charm/policy.hh index 8efee5db3..d867b5547 100644 --- a/flecsi/data/charm/policy.hh +++ b/flecsi/data/charm/policy.hh @@ -35,6 +35,12 @@ namespace charm { struct region { region(size2 s, const fields & 
fs) : s_(s) {} size2 size() const { return s_; } + template + region & get_region() { + return *this; + } + template + void cleanup(field_id_t f, D d) {} size2 s_; }; @@ -54,7 +60,8 @@ struct partition { completeness cpt = incomplete) {} std::size_t colors() const { - return 1; + // TODO: This may not be correct + return CkNumPes(); } template diff --git a/flecsi/exec/charm/task_prologue.hh b/flecsi/exec/charm/task_prologue.hh index 380f6262e..6c84b5752 100644 --- a/flecsi/exec/charm/task_prologue.hh +++ b/flecsi/exec/charm/task_prologue.hh @@ -54,7 +54,7 @@ namespace exec::charm { struct task_prologue_t { template - void walk(const AA &... aa) { + void walk(AA &... aa) { walk(static_cast

(nullptr), aa...); } @@ -63,6 +63,16 @@ struct task_prologue_t { type, potentially for every permutation thereof. *^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/ + template + void visit(data::accessor * null_p, + const data::field_reference & ref) { + visit(get_null_base(null_p), ref.template cast()); + // TODO: use just one task for all fields + if constexpr(privilege_write_only(Priv) && + !std::is_trivially_destructible_v) + ref.topology()->template get_region().cleanup( + ref.fid(), [ref] { execute>(ref); }); + } template void visit( - data::accessor * /* parameter */, + data::accessor * /* parameter */, const data:: - field_reference & + field_reference & ref) { auto & flecsi_context = run::context::instance(); flecsi_context.regField(ref.fid(), sizeof(DATA_TYPE)); @@ -92,15 +102,15 @@ struct task_prologue_t { typename Topo::index_space Space, class = std::enable_if_t == 1>> void visit( - data::accessor * /* parameter */, - const data::field_reference & ref) { + data::accessor * /* parameter */, + const data::field_reference & ref) { auto & flecsi_context = run::context::instance(); flecsi_context.regField(ref.fid(), sizeof(DATA_TYPE)); } // visit template void visit(data::topology_accessor * /* parameter */, - const data::topology_slot & slot) { + data::topology_slot & slot) { Topo::core::fields([&](auto & f) { visit(static_cast *>(nullptr), f(slot)); @@ -111,15 +121,12 @@ struct task_prologue_t { Futures *--------------------------------------------------------------------------*/ template - void visit(future *, - const future & - future) { + void visit(const future & f) { CkAbort("Futures not yet supported\n"); } template - void visit(future *, - const future & future) { + void visit(const future & f) { CkAbort("Futures not yet supported\n"); } @@ -139,14 +146,20 @@ struct task_prologue_t { } // visit private: + template + static void destroy(typename field::template accessor a) { + const auto s = a.span(); + 
std::destroy(s.begin(), s.end()); + } + // Argument types for which we don't also need the type of the parameter: template - void visit(P *, DATA_TYPE & x) { + void visit(P *, const DATA_TYPE & x) { visit(x); } // visit template - void walk(std::tuple * /* to deduce PP */, const AA &... aa) { + void walk(std::tuple * /* to deduce PP */, AA &... aa) { (visit(static_cast *>(nullptr), aa), ...); } diff --git a/flecsi/run/CMakeLists.txt b/flecsi/run/CMakeLists.txt index e508b2d78..ddc3c4ccb 100644 --- a/flecsi/run/CMakeLists.txt +++ b/flecsi/run/CMakeLists.txt @@ -51,7 +51,6 @@ elseif(FLECSI_RUNTIME_MODEL STREQUAL "charm") set(run_HEADERS ${run_HEADERS} charm/context.hh - charm/mapper.hh ) set(run_SOURCES diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 8c608d27b..6f07c1a4e 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -35,7 +35,9 @@ using exec::charm::task_id; namespace charm { -ContextGroup::ContextGroup() { +// TODO: Depth set to -1 because it's being incremented somewhere before +// the top level action executes. +ContextGroup::ContextGroup() : depth(-1) { if (CkMyPe() != 0) { run::context::instance().context_proxy_ = thisProxy; } @@ -88,6 +90,7 @@ context_t::finalize() { if(context::initialize_dependent_) { CharmLibExit(); + MPI_Finalize(); } // if } // finalize From 8a6e8ba83f87e3b4618a3402b94fb6ea14cff4c2 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Tue, 1 Sep 2020 15:38:40 -0400 Subject: [PATCH 18/19] Remove extra call to MPI_Finalize CharmLibExit calls MPI_Finalize if Charm++ is built on the MPI layer, so this was causing crashes. If Charm is not built on the MPI layer, then MPI is never used/initialized in the first place. 
--- flecsi/run/charm/context.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index 6f07c1a4e..f5e4bbfae 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -90,7 +90,6 @@ context_t::finalize() { if(context::initialize_dependent_) { CharmLibExit(); - MPI_Finalize(); } // if } // finalize From ec884b17efb08af0484f245d513dc034df11a795 Mon Sep 17 00:00:00 2001 From: Eric Mikida Date: Wed, 9 Sep 2020 17:05:37 -0400 Subject: [PATCH 19/19] Cleanup for merge --- flecsi/data/backend.hh | 2 +- flecsi/exec/backend.hh | 2 +- flecsi/exec/charm/policy.hh | 2 +- flecsi/exec/charm/task_wrapper.hh | 2 +- flecsi/io/backend.hh | 2 +- flecsi/run/charm/context.cc | 6 ++-- flecsi/run/charm/context.hh | 55 +++++++++---------------------- 7 files changed, 23 insertions(+), 48 deletions(-) diff --git a/flecsi/data/backend.hh b/flecsi/data/backend.hh index 800831226..f97cd5918 100644 --- a/flecsi/data/backend.hh +++ b/flecsi/data/backend.hh @@ -48,6 +48,6 @@ constexpr inline std::size_t logical_size = 1ul << 32; #elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm -#include +#include "flecsi/data/charm/policy.hh" #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/exec/backend.hh b/flecsi/exec/backend.hh index c176566da..d882a6188 100644 --- a/flecsi/exec/backend.hh +++ b/flecsi/exec/backend.hh @@ -67,6 +67,6 @@ auto execute(ARGS &&...); #elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm -#include +#include "flecsi/exec/charm/policy.hh" #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/exec/charm/policy.hh b/flecsi/exec/charm/policy.hh index 45be2fe0b..fa8039400 100644 --- a/flecsi/exec/charm/policy.hh +++ b/flecsi/exec/charm/policy.hh @@ -28,7 +28,7 @@ #include "flecsi/run/backend.hh" #include "flecsi/util/demangle.hh" #include "flecsi/util/function_traits.hh" -#include +#include "flecsi/flog.hh" #include #include diff --git a/flecsi/exec/charm/task_wrapper.hh 
b/flecsi/exec/charm/task_wrapper.hh index e33b7af6c..af2a1ef10 100644 --- a/flecsi/exec/charm/task_wrapper.hh +++ b/flecsi/exec/charm/task_wrapper.hh @@ -28,7 +28,7 @@ #include "flecsi/util/common.hh" #include "flecsi/util/function_traits.hh" #include "flecsi/util/serialize.hh" -#include +#include "flecsi/flog.hh" #if !defined(FLECSI_ENABLE_CHARM) #error FLECSI_ENABLE_CHARM not defined! This file depends on Charm! diff --git a/flecsi/io/backend.hh b/flecsi/io/backend.hh index 84b380357..f29587542 100644 --- a/flecsi/io/backend.hh +++ b/flecsi/io/backend.hh @@ -43,6 +43,6 @@ using field_reference_t = data::field_reference_t; #elif FLECSI_RUNTIME_MODEL == FLECSI_RUNTIME_MODEL_charm -#include +#include "flecsi/io/charm/policy.hh" #endif // FLECSI_RUNTIME_MODEL diff --git a/flecsi/run/charm/context.cc b/flecsi/run/charm/context.cc index f5e4bbfae..1b6417935 100644 --- a/flecsi/run/charm/context.cc +++ b/flecsi/run/charm/context.cc @@ -22,7 +22,7 @@ #include "flecsi/exec/charm/task_wrapper.hh" #include "flecsi/run/charm/context.hh" #include "flecsi/run/types.hh" -#include +#include "flecsi/data.hh" #include @@ -35,9 +35,7 @@ using exec::charm::task_id; namespace charm { -// TODO: Depth set to -1 because it's being incremented somewhere before -// the top level action executes. -ContextGroup::ContextGroup() : depth(-1) { +ContextGroup::ContextGroup() { if (CkMyPe() != 0) { run::context::instance().context_proxy_ = thisProxy; } diff --git a/flecsi/run/charm/context.hh b/flecsi/run/charm/context.hh index 756263d2d..c83a85e55 100644 --- a/flecsi/run/charm/context.hh +++ b/flecsi/run/charm/context.hh @@ -22,8 +22,9 @@ #endif #include "../context.hh" -#include -#include +#include "flecsi/run/types.hh" +#include "flecsi/util/common.hh" +#include "flecsi/util/function_traits.hh" #if !defined(FLECSI_ENABLE_MPI) #error FLECSI_ENABLE_MPI not defined! This file depends on MPI! 
@@ -45,12 +46,6 @@ namespace flecsi::run { -const size_t FLECSI_TOP_LEVEL_TASK_ID = 0; -const size_t FLECSI_MAPPER_FORCE_RANK_MATCH = 0x00001000; -const size_t FLECSI_MAPPER_COMPACTED_STORAGE = 0x00002000; -const size_t FLECSI_MAPPER_SUBRANK_LAUNCH = 0x00003000; -const size_t FLECSI_MAPPER_EXCLUSIVE_LR = 0x00004000; - namespace charm { template using task = R(std::vector&); @@ -66,10 +61,20 @@ public: template auto execute(std::vector& buf) { - depth++; - return T::execute(buf); - depth--; + using traits_t = util::function_traits; + using return_t = typename traits_t::return_type; + if constexpr(std::is_same_v) { + depth++; + T::execute(buf); + depth--; + } else { + depth++; + return_t result = T::execute(buf); + depth--; + return result; + } } + int task_depth() const { return depth; } @@ -200,34 +205,6 @@ struct context_t : context { return 0; } // colors - /// Store a reference to the argument under a small unused positive integer. - /// Its type is forgotten. - template - std::size_t record(T & t) { - const auto tp = const_cast(static_cast(&t)); - if(auto & f = enumerated.front()) { // we have a free slot - auto & slot = *static_cast(f); - f = slot; - slot = tp; - return &slot - &f; - } - // NB: reallocation invalidates all zero of the free list pointers - enumerated.push_back(tp); - return enumerated.size() - 1; - } - /// Discard a recorded reference. Its index may be reused. - void forget(std::size_t i) { - void *&f = enumerated.front(), *&p = enumerated[i]; - p = f; - f = &p; - } - /// Obtain a reference from its index. - /// \tparam T the object's forgotten type - template - T & recall(std::size_t i) { - return *static_cast(enumerated[i]); - } - template auto execute(std::vector& buf) { return context_proxy_.ckLocalBranch()->execute(buf);