diff --git a/examples/usage/allgather_example.cpp b/examples/usage/allgather_example.cpp index eadb7a63..2c561292 100644 --- a/examples/usage/allgather_example.cpp +++ b/examples/usage/allgather_example.cpp @@ -20,7 +20,6 @@ #include "helpers_for_examples.hpp" #include "kamping/checking_casts.hpp" #include "kamping/collectives/allgather.hpp" -#include "kamping/collectives/alltoall.hpp" #include "kamping/communicator.hpp" #include "kamping/data_buffer.hpp" #include "kamping/environment.hpp" @@ -30,21 +29,67 @@ int main() { using namespace kamping; kamping::Environment e; kamping::Communicator comm; - std::vector input(comm.size(), comm.rank_signed()); + // std::vector input(comm.size(), comm.rank_signed()); - { // Basic form: Provide a send buffer and let KaMPIng allocate the receive buffer. - auto output = comm.allgather(send_buf(input)); - print_result_on_root(output, comm); - } + //{ // Basic form: Provide a send buffer and let KaMPIng allocate the receive buffer. + // auto output = comm.allgather(send_buf(input)); + // print_result_on_root(output, comm); + //} + + // print_on_root("------", comm); - print_on_root("------", comm); + //{ // We can also send only parts of the input and specify an explicit receive buffer. + // std::vector output; - { // We can also send only parts of the input and specify an explicit receive buffer. 
- std::vector output; + // // this can also be achieved with `kamping::Span` + // comm.allgather(send_buf(Span(input.begin(), 2)), recv_buf(output)); + // print_result_on_root(output, comm); + // return 0; + //} - // this can also be achieved with `kamping::Span` - comm.allgather(send_buf(Span(input.begin(), 2)), recv_buf(output)); - print_result_on_root(output, comm); - return 0; + MPI_Comm mpi_graph_comm; + int in_degree = -1, out_degree = -1; + std::vector input{comm.rank_signed()}; + std::vector recv_buf(10, -1); + int send_count = -1, recv_count = -1; + std::vector in_ranks, out_ranks; + if (comm.rank() == 0) { + in_degree = 0; + out_degree = 0; + send_count = 1; + recv_count = 1; + } else if (comm.rank() == 1) { + in_degree = 0; + out_degree = 1; + out_ranks.push_back(2); + send_count = 1; + recv_count = 1; + } else if (comm.rank() == 2) { + in_degree = 1; + out_degree = 1; + in_ranks.push_back(1); + out_ranks.push_back(3); + send_count = 1; + recv_count = 1; + } else if (comm.rank() == 3) { + in_degree = 1; + out_degree = 0; + in_ranks.push_back(2); + send_count = 1; + recv_count = 1; } + MPI_Dist_graph_create_adjacent( + comm.mpi_communicator(), + in_degree, + in_ranks.data(), + MPI_UNWEIGHTED, + out_degree, + out_ranks.data(), + MPI_UNWEIGHTED, + MPI_INFO_NULL, + false, + &mpi_graph_comm + ); + + MPI_Neighbor_alltoall(input.data(), send_count, MPI_INT, recv_buf.data(), recv_count, MPI_INT, mpi_graph_comm); } diff --git a/include/kamping/collectives/neighborhood/alltoall.hpp b/include/kamping/collectives/neighborhood/alltoall.hpp index 15fb2202..b163c5db 100644 --- a/include/kamping/collectives/neighborhood/alltoall.hpp +++ b/include/kamping/collectives/neighborhood/alltoall.hpp @@ -43,14 +43,16 @@ /// @brief Wrapper for \c MPI_Neighbor_alltoall. /// -/// This wrapper for \c MPI_Neighbor_alltoall sends the same amount of data from a rank i to each of its neighbour j for -/// which an edge (i,j) in the communication graph exists. 
The following buffers are required: +/// @todo check again once the concrete semantics (potential differing number of send/recv counts) of +/// MPI_Neighbor_alltoall has been clarified. This wrapper for \c MPI_Neighbor_alltoall sends the same amount of data +/// from a rank i to each of its neighbors j for which an edge (i,j) in the communication graph exists. The following +/// buffers are required: /// - \ref kamping::send_buf() containing the data that is sent to each neighbor. This buffer has to be divisible by the -/// size of the communicator unless a send_count or a send_type is explicitly given as parameter. +/// out degree unless a send_count or a send_type is explicitly given as parameter. /// /// The following parameters are optional: /// - \ref kamping::send_count() specifying how many elements are sent. If -/// omitted, the size of send buffer divided by number of neighbors is used. +/// omitted, the size of send buffer divided by number of outgoing neighbors is used. /// This has to be the same on all ranks. /// This parameter is mandatory if \ref kamping::send_type() is given. /// @@ -59,7 +61,7 @@ /// This parameter is mandatory if \ref kamping::recv_type() is given. /// /// - \ref kamping::recv_buf() specifying a buffer for the output. A buffer of at least -/// `recv_count * communicator size` is required. +/// `recv_count * in degree` is required. /// /// - \ref kamping::send_type() specifying the \c MPI datatype to use as send type. If omitted, the \c MPI datatype is /// derived automatically based on send_buf's underlying \c value_type. @@ -67,9 +69,6 @@ /// - \ref kamping::recv_type() specifying the \c MPI datatype to use as recv type. If omitted, the \c MPI datatype is /// derived automatically based on recv_buf's underlying \c value_type. /// -/// Inplace alltoall is supported by passing send_recv_buf as parameter. This changes the requirements for the other -/// parameters, see \ref Communicator::alltoall_inplace. 
-/// /// @tparam Args Automatically deduced template parameters. /// @param args All required and any number of the optional buffers described above. /// @return Result type wrapping the output parameters to be returned by value. diff --git a/include/kamping/distributed_graph_communicator.hpp b/include/kamping/distributed_graph_communicator.hpp index a28ea16c..e0b26c95 100644 --- a/include/kamping/distributed_graph_communicator.hpp +++ b/include/kamping/distributed_graph_communicator.hpp @@ -19,24 +19,25 @@ namespace kamping { -/// @brief View on a a distributed communication graph. Each vertex is a rank and the edge define possible communication -/// links between the vertices. This view of a distributed communication graph contains views on the (potentially -/// weighted) in- and outgoing edges which are basically a sequence of neighboring ranks. Note that MPI allow this to be -/// a multi-graph. -class CommunicationGraphView { +/// @brief Local view on a distributed communication graph. Each vertex is a rank and the edges define possible +/// communication links between the vertices. This view of a distributed communication graph contains views on the +/// (potentially weighted) in- and outgoing edges which are basically a sequence of neighboring ranks. Note that MPI +/// allows this to be a multi-graph. This view is local as it only provides a view on the neighboring ranks of this +/// process' rank. +class CommunicationGraphLocalView { public: using SpanType = kamping::Span; ///< type to be used for views on ranks and weights - /// @brief Construct a view of an unweighted communication graph. + /// @brief Constructs a view of an unweighted communication graph. /// /// @tparam ContiguousRange Type of the input range for in and out ranks. /// @param in_ranks Neighboring in ranks, i.e., ranks i for which there is an edge (i,own_rank). /// @param out_ranks Neighboring out ranks, i.e., ranks i for which there is an edge (own_rank, i). 
template - CommunicationGraphView(ContiguousRange const& in_ranks, ContiguousRange const& out_ranks) + CommunicationGraphLocalView(ContiguousRange const& in_ranks, ContiguousRange const& out_ranks) : _in_ranks{in_ranks.data(), in_ranks.size()}, _out_ranks{out_ranks.data(), out_ranks.size()} {} - /// @brief Construct a view of an unweighted communication graph. + /// @brief Constructs a view of an unweighted communication graph. /// /// @tparam ContiguousRange Type of the input range for in and out ranks. /// @param in_ranks Neighboring in ranks, i.e., ranks i for which there is an edge (i,own_rank). @@ -44,7 +45,7 @@ class CommunicationGraphView { /// @param in_weights Weights associcated to neighboring in ranks. /// @param out_weights Weights associcated to neighboring out ranks. template - CommunicationGraphView( + CommunicationGraphLocalView( ContiguousRange const& in_ranks, ContiguousRange const& out_ranks, ContiguousRange const& in_weights, @@ -126,7 +127,7 @@ class CommunicationGraphView { out_ranks().data(), out_weights, MPI_INFO_NULL, - false, + false, // do not reorder ranks &mpi_graph_comm ); return mpi_graph_comm; @@ -140,73 +141,80 @@ class CommunicationGraphView { }; namespace internal { -/// @brief Returns whether a given range of edges is weighted or not at compile time -/// @tparam EdgeRange Range type to be checked. -template -constexpr bool are_edges_weighted() { - using EdgeType = typename EdgeRange::value_type; - return !std::is_integral_v; +/// @brief Returns whether a given range of neighbors is weighted or not at compile time +/// @tparam NeighborhoodRange Range type to be checked. 
+template +constexpr bool are_neighborhoods_weighted() { + using NeighborType = typename NeighborhoodRange::value_type; + static_assert( + kamping::internal::tuple_size == 1 || kamping::internal::tuple_size == 2, + "Neighbor type has to be a scalar (in the unweighted case) or pair-like (in the weighted case) type" + ); + return kamping::internal::tuple_size == 2; +} } // namespace internal -/// @brief A Distributed communication graph. Each vertex of the graph corresponds to a rank and each edge (i,j) connect -/// two ranks i and j which can communicate with each other. Note that MPI allow multiple edges between the same ranks i -/// and j, i.e. the distributed communication graph can be a multi-graph. Each rank holds its local view on the -/// communication graph, i.e., it knows its neighboring vertices/ranks. +/// @brief A (vertex-centric) distributed communication graph. Each vertex of the graph corresponds to a rank and each +/// edge (i,j) connects two ranks i and j which can communicate with each other. The distributed communication graph is +/// vertex-centric in the sense that on each rank the local graph only contains the corresponding vertex and its in and +/// out neighborhood. Note that MPI allows multiple edges between the same ranks i and j, i.e. the distributed +/// communication graph can be a multi-graph. template