diff --git a/src/quo-vadis.cc b/src/quo-vadis.cc
index 4baf81d..15f6034 100644
--- a/src/quo-vadis.cc
+++ b/src/quo-vadis.cc
@@ -82,7 +82,7 @@ qv_scope_free(
         return QV_ERR_INVLD_ARG;
     }
     try {
-        qvi_scope_free(&scope);
+        qvi_scope_delete(&scope);
         return QV_SUCCESS;
     }
     qvi_catch_and_return();
@@ -98,7 +98,7 @@ qv_scope_nobjs(
         return QV_ERR_INVLD_ARG;
     }
     try {
-        return qvi_scope_nobjs(scope, obj, nobjs);
+        return qvi_scope_nobjects(scope, obj, nobjs);
     }
     qvi_catch_and_return();
 }
diff --git a/src/quo-vadisd.cc b/src/quo-vadisd.cc
index 0d94d31..43bda19 100644
--- a/src/quo-vadisd.cc
+++ b/src/quo-vadisd.cc
@@ -45,8 +45,8 @@ struct context_s {
     /** Destructor */
     ~context_s(void)
     {
-        qvi_rmi_server_free(&rmi);
-        qvi_hwloc_free(&hwloc);
+        qvi_rmi_server_delete(&rmi);
+        qvi_hwloc_delete(&hwloc);
     }
 };
diff --git a/src/qvi-bbuff-rmi.h b/src/qvi-bbuff-rmi.h
index 70e5abe..86fee2f 100644
--- a/src/qvi-bbuff-rmi.h
+++ b/src/qvi-bbuff-rmi.h
@@ -793,7 +793,7 @@ qvi_bbuff_rmi_unpack_item(
     *bytes_written = strlen(cpusets) + 1;
 out:
     if (rc != QV_SUCCESS) {
-        qvi_hwloc_bitmap_free(cpuset);
+        qvi_hwloc_bitmap_delete(cpuset);
     }
     return rc;
 }
@@ -814,7 +814,7 @@ qvi_bbuff_rmi_unpack_item(
     if (rc != QV_SUCCESS) return rc;
 
     rc = bitmap.set(raw_cpuset);
-    qvi_hwloc_bitmap_free(&raw_cpuset);
+    qvi_hwloc_bitmap_delete(&raw_cpuset);
     return rc;
 }
diff --git a/src/qvi-bbuff.cc b/src/qvi-bbuff.cc
index 05c426c..8271407 100644
--- a/src/qvi-bbuff.cc
+++ b/src/qvi-bbuff.cc
@@ -64,7 +64,7 @@ qvi_bbuff_dup(
 }
 
 void
-qvi_bbuff_free(
+qvi_bbuff_delete(
     qvi_bbuff_t **buff
 ) {
     qvi_delete(buff);
diff --git a/src/qvi-bbuff.h b/src/qvi-bbuff.h
index f4866f6..67627ad 100644
--- a/src/qvi-bbuff.h
+++ b/src/qvi-bbuff.h
@@ -39,7 +39,7 @@ qvi_bbuff_dup(
  *
  */
 void
-qvi_bbuff_free(
+qvi_bbuff_delete(
     qvi_bbuff_t **buff
 );
 
diff --git a/src/qvi-common.h b/src/qvi-common.h
index f2b506b..b1d14fc 100644
--- a/src/qvi-common.h
+++ b/src/qvi-common.h
@@ -60,24 +60,23 @@
 #endif
 // IWYU pragma: end_keep
 
-// Internal types.
+// Internal type aliases.
 typedef uint8_t byte_t;
-typedef char const* cstr_t;
+typedef char const * cstr_t;
 typedef unsigned int uint_t;
 
-/** Opaque hwloc instance. */
+// Forward declarations.
 struct qvi_hwloc_s;
 typedef struct qvi_hwloc_s qvi_hwloc_t;
 
-/** Opaque RMI client. */
+struct qvi_hwpool_s;
+
 struct qvi_rmi_client_s;
 typedef struct qvi_rmi_client_s qvi_rmi_client_t;
 
-/** Opaque byte buffer. */
 struct qvi_bbuff_s;
 typedef struct qvi_bbuff_s qvi_bbuff_t;
 
-/** Opaque task. */
 struct qvi_task_s;
 typedef struct qvi_task_s qvi_task_t;
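Every qvi_*_free()-to-qvi_*_delete() rename in this patch funnels into the same pair of helpers, as the qvi_bbuff hunks above show. The helpers themselves are not touched by this patch, so the sketch below is an assumption about their shape, inferred only from how call sites use them here (pointer-to-pointer in, pointer presumably nulled on the way out); the *_sketch names are hypothetical:

    #include <new>      // std::nothrow
    #include <utility>  // std::forward
    // QV_SUCCESS and QV_ERR_OOR come from the public quo-vadis.h header.

    // Hypothetical stand-in for qvi_new(): allocate and construct *t.
    template <typename T, typename... Args>
    static int
    qvi_new_sketch(
        T **t,
        Args &&...args
    ) {
        *t = new (std::nothrow) T(std::forward<Args>(args)...);
        return (*t != nullptr) ? QV_SUCCESS : QV_ERR_OOR;
    }

    // Hypothetical stand-in for qvi_delete(): destroy *t and null it,
    // which is why every call site above passes &pointer.
    template <typename T>
    static void
    qvi_delete_sketch(
        T **t
    ) {
        if (!t || !*t) return;
        delete *t;
        *t = nullptr;
    }

Under that reading, renaming the wrappers from *_free to *_delete makes them read as what they do: C++ deletion through a helper, not a C-style free().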
diff --git a/src/qvi-group-mpi.cc b/src/qvi-group-mpi.cc
index 48b1352..1de1f10 100644
--- a/src/qvi-group-mpi.cc
+++ b/src/qvi-group-mpi.cc
@@ -34,7 +34,7 @@ qvi_group_mpi_s::qvi_group_mpi_s(
 
 qvi_group_mpi_s::~qvi_group_mpi_s(void)
 {
-    qvi_mpi_group_free(&m_mpi_group);
+    qvi_mpi_group_delete(&m_mpi_group);
     qvi_delete(&m_task);
 }
diff --git a/src/qvi-group-mpi.h b/src/qvi-group-mpi.h
index 2944b53..f0b79c0 100644
--- a/src/qvi-group-mpi.h
+++ b/src/qvi-group-mpi.h
@@ -48,7 +48,7 @@ struct qvi_group_mpi_s : public qvi_group_s {
     virtual int
     rank(void)
     {
-        return qvi_mpi_group_id(m_mpi_group);
+        return qvi_mpi_group_rank(m_mpi_group);
     }
 
     virtual int
@@ -128,7 +128,7 @@ struct qvi_zgroup_mpi_s : public qvi_group_mpi_s {
     /** Destructor.
      */
     virtual ~qvi_zgroup_mpi_s(void)
     {
-        qvi_mpi_free(&m_mpi);
+        qvi_mpi_delete(&m_mpi);
     }
 };
diff --git a/src/qvi-group-omp.cc b/src/qvi-group-omp.cc
index 51451f8..9a7cdcf 100644
--- a/src/qvi-group-omp.cc
+++ b/src/qvi-group-omp.cc
@@ -30,7 +30,7 @@ qvi_group_omp_s::qvi_group_omp_s(void)
 
 qvi_group_omp_s::~qvi_group_omp_s(void)
 {
-    qvi_omp_group_free(&m_ompgroup);
+    qvi_omp_group_delete(&m_ompgroup);
     qvi_delete(&m_task);
 }
diff --git a/src/qvi-group-process.cc b/src/qvi-group-process.cc
index 8aca1d7..1cb69ce 100644
--- a/src/qvi-group-process.cc
+++ b/src/qvi-group-process.cc
@@ -23,7 +23,7 @@ qvi_group_process_s::qvi_group_process_s(void)
 
 qvi_group_process_s::~qvi_group_process_s(void)
 {
-    qvi_process_group_free(&m_proc_group);
+    qvi_process_group_delete(&m_proc_group);
     qvi_delete(&m_task);
 }
diff --git a/src/qvi-hwloc.cc b/src/qvi-hwloc.cc
index cc178f0..7598beb 100644
--- a/src/qvi-hwloc.cc
+++ b/src/qvi-hwloc.cc
@@ -65,7 +65,7 @@ qvi_hwloc_new(
 }
 
 void
-qvi_hwloc_free(
+qvi_hwloc_delete(
     qvi_hwloc_t **hwl
 ) {
     qvi_delete(hwl);
@@ -620,7 +620,7 @@ qvi_hwloc_bitmap_calloc(
 }
 
 void
-qvi_hwloc_bitmap_free(
+qvi_hwloc_bitmap_delete(
     hwloc_cpuset_t *cpuset
 ) {
     if (!cpuset) return;
@@ -653,7 +653,7 @@ qvi_hwloc_bitmap_dup(
     rc = qvi_hwloc_bitmap_copy(src, idest);
 out:
     if (qvi_unlikely(rc != QV_SUCCESS)) {
-        qvi_hwloc_bitmap_free(&idest);
+        qvi_hwloc_bitmap_delete(&idest);
     }
     *dest = idest;
     return rc;
@@ -824,7 +824,7 @@ qvi_hwloc_emit_cpubind(
         getpid(), task_id, cpusets
     );
 out:
-    qvi_hwloc_bitmap_free(&cpuset);
+    qvi_hwloc_bitmap_delete(&cpuset);
     if (cpusets) free(cpusets);
     return rc;
 }
@@ -928,7 +928,7 @@ qvi_hwloc_task_get_cpubind(
     rc = get_proc_cpubind(hwl, task_id, cur_bind);
 out:
     if (rc != QV_SUCCESS) {
-        qvi_hwloc_bitmap_free(&cur_bind);
+        qvi_hwloc_bitmap_delete(&cur_bind);
     }
     *out_cpuset = cur_bind;
     return rc;
@@ -958,7 +958,7 @@ qvi_hwloc_task_get_cpubind_as_string(
     if (rc != QV_SUCCESS) return rc;
 
     rc = qvi_hwloc_bitmap_asprintf(cpuset, cpusets);
-    qvi_hwloc_bitmap_free(&cpuset);
+    qvi_hwloc_bitmap_delete(&cpuset);
     return rc;
 }
@@ -992,7 +992,7 @@ task_obj_xop_by_type_id(
             break;
         }
     }
-    qvi_hwloc_bitmap_free(&cur_bind);
+    qvi_hwloc_bitmap_delete(&cur_bind);
     return QV_SUCCESS;
 }
@@ -1155,7 +1155,7 @@ qvi_hwloc_device_new(
 }
 
 void
-qvi_hwloc_device_free(
+qvi_hwloc_device_delete(
     qvi_hwloc_device_t **dev
 ) {
     qvi_delete(dev);
@@ -1389,7 +1389,7 @@ qvi_hwloc_get_cpuset_for_nobjs(
     }
 out:
     if (rc != QV_SUCCESS) {
-        qvi_hwloc_bitmap_free(&iresult);
+        qvi_hwloc_bitmap_delete(&iresult);
     }
     *result = iresult;
     return rc;
@@ -1450,7 +1450,7 @@ qvi_hwloc_get_device_affinity(
     if (!icpuset) rc = QV_ERR_NOT_FOUND;
 out:
     if (rc != QV_SUCCESS) {
-        qvi_hwloc_bitmap_free(cpuset);
+        qvi_hwloc_bitmap_delete(cpuset);
     }
     *cpuset = icpuset;
     return rc;
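The qvi-hwloc.cc hunks above rename the bitmap lifetime functions without changing the allocate/use/release pattern around them. A minimal sketch of a call site under the new names; emit_binding_sketch is hypothetical, but the qvi_hwloc_* functions are exactly the ones renamed or shown in the hunks above:

    #include <stdio.h>
    #include <stdlib.h>
    #include "qvi-hwloc.h"

    // Hypothetical helper: print a task's current binding, then release it.
    static int
    emit_binding_sketch(
        qvi_hwloc_t *hwl,
        pid_t task_id
    ) {
        hwloc_cpuset_t cpuset = nullptr;
        // Allocates cpuset on success (see qvi_hwloc_task_get_cpubind above).
        int rc = qvi_hwloc_task_get_cpubind(hwl, task_id, &cpuset);
        if (rc != QV_SUCCESS) return rc;

        char *str = nullptr;
        rc = qvi_hwloc_bitmap_asprintf(cpuset, &str);
        if (rc == QV_SUCCESS) {
            printf("task %d bound to %s\n", (int)task_id, str);
            free(str);
        }
        // Unlike a bare hwloc_bitmap_free(), this also nulls the pointer.
        qvi_hwloc_bitmap_delete(&cpuset);
        return rc;
    }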
diff --git a/src/qvi-hwloc.h b/src/qvi-hwloc.h
index e746e08..fc5f1b2 100644
--- a/src/qvi-hwloc.h
+++ b/src/qvi-hwloc.h
@@ -67,7 +67,7 @@ qvi_hwloc_new(
  *
  */
 void
-qvi_hwloc_free(
+qvi_hwloc_delete(
     qvi_hwloc_t **hwl
 );
 
@@ -132,7 +132,7 @@ qvi_hwloc_bitmap_calloc(
  *
  */
 void
-qvi_hwloc_bitmap_free(
+qvi_hwloc_bitmap_delete(
     hwloc_cpuset_t *cpuset
 );
 
@@ -327,7 +327,7 @@ qvi_hwloc_device_new(
  *
  */
 void
-qvi_hwloc_device_free(
+qvi_hwloc_device_delete(
     qvi_hwloc_device_t **dev
 );
 
@@ -449,7 +449,7 @@ struct qvi_hwloc_bitmap_s {
     /** Destructor. */
     ~qvi_hwloc_bitmap_s(void)
     {
-        qvi_hwloc_bitmap_free(&m_data);
+        qvi_hwloc_bitmap_delete(&m_data);
     }
     /** Assignment operator.
      */
     void
diff --git a/src/qvi-hwpool.cc b/src/qvi-hwpool.cc
index f7e2f4e..70a959d 100644
--- a/src/qvi-hwpool.cc
+++ b/src/qvi-hwpool.cc
@@ -145,10 +145,10 @@ qvi_hwpool_s::add_devices_with_affinity(
         rc = qvi_hwloc_get_devices_in_bitmap(
             hwloc, devt, m_cpu.cpuset, devs
         );
-        if (rc != QV_SUCCESS) return rc;
+        if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
         for (const auto &dev : devs) {
             rc = add_device(qvi_hwpool_dev_s(dev));
-            if (rc != QV_SUCCESS) return rc;
+            if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
         }
     }
     return rc;
diff --git a/src/qvi-hwpool.h b/src/qvi-hwpool.h
index 463803d..3460719 100644
--- a/src/qvi-hwpool.h
+++ b/src/qvi-hwpool.h
@@ -10,7 +10,7 @@
 /**
  * @file qvi-hwpool.h
  *
- * Hardware Resource Pool
+ * Hardware resource pool.
  */
 
 #ifndef QVI_HWPOOL_H
diff --git a/src/qvi-mpi.cc b/src/qvi-mpi.cc
index f1dd5f0..89446cb 100644
--- a/src/qvi-mpi.cc
+++ b/src/qvi-mpi.cc
@@ -112,7 +112,7 @@ qvi_mpi_new(
 }
 
 void
-qvi_mpi_free(
+qvi_mpi_delete(
     qvi_mpi_t **mpi
 ) {
     qvi_delete(mpi);
@@ -228,7 +228,7 @@ qvi_mpi_group_new(
 }
 
 void
-qvi_mpi_group_free(
+qvi_mpi_group_delete(
     qvi_mpi_group_t **group
 ) {
     qvi_delete(group);
@@ -242,7 +242,7 @@ qvi_mpi_group_size(
 }
 
 int
-qvi_mpi_group_id(
+qvi_mpi_group_rank(
     const qvi_mpi_group_t *group
 ) {
     return group->qvcomm.rank;
@@ -321,7 +321,7 @@ qvi_mpi_group_create_from_mpi_comm(
     rc = mpi->add_group(**new_group);
 out:
     if (rc != QV_SUCCESS) {
-        qvi_mpi_group_free(new_group);
+        qvi_mpi_group_delete(new_group);
         if (node_comm != MPI_COMM_NULL) {
             MPI_Comm_free(&node_comm);
         }
@@ -427,7 +427,7 @@ qvi_mpi_group_gather_bbuffs(
     if (rc != QV_SUCCESS) {
         if (bbuffs) {
             for (int i = 0; i < group_size; ++i) {
-                qvi_bbuff_free(&bbuffs[i]);
+                qvi_bbuff_delete(&bbuffs[i]);
             }
             delete[] bbuffs;
         }
@@ -499,7 +499,7 @@ qvi_mpi_group_scatter_bbuffs(
     rc = qvi_bbuff_append(mybbuff, mybytes.data(), rxcount);
 out:
     if (rc != QV_SUCCESS) {
-        qvi_bbuff_free(&mybbuff);
+        qvi_bbuff_delete(&mybbuff);
     }
     *rxbuff = mybbuff;
     return rc;
diff --git a/src/qvi-mpi.h b/src/qvi-mpi.h
index 8e8a808..d6c59d4 100644
--- a/src/qvi-mpi.h
+++ b/src/qvi-mpi.h
@@ -48,7 +48,7 @@ qvi_mpi_new(
  *
  */
 void
-qvi_mpi_free(
+qvi_mpi_delete(
     qvi_mpi_t **mpi
 );
 
@@ -73,7 +73,7 @@ qvi_mpi_group_new(
  *
  */
 void
-qvi_mpi_group_free(
+qvi_mpi_group_delete(
     qvi_mpi_group_t **group
 );
 
@@ -89,7 +89,7 @@ qvi_mpi_group_size(
  *
  */
 int
-qvi_mpi_group_id(
+qvi_mpi_group_rank(
     const qvi_mpi_group_t *group
 );
 
diff --git a/src/qvi-omp.cc b/src/qvi-omp.cc
index 8663c9a..8a52508 100644
--- a/src/qvi-omp.cc
+++ b/src/qvi-omp.cc
@@ -48,7 +48,7 @@ qvi_omp_group_new(
 }
 
 void
-qvi_omp_group_free(
+qvi_omp_group_delete(
     qvi_omp_group_t **group
 ) {
     #pragma omp barrier
@@ -185,7 +185,7 @@ qvi_omp_group_gather_bbuffs(
     #pragma omp single
     if (bbuffs) {
         for (int i = 0; i < group_size; ++i) {
-            qvi_bbuff_free(&bbuffs[i]);
+            qvi_bbuff_delete(&bbuffs[i]);
         }
         delete[] bbuffs;
     }
@@ -216,7 +216,7 @@ qvi_omp_group_scatter_bbuffs(
     #pragma omp single
     delete tmp;
     if (rc != QV_SUCCESS) {
-        qvi_bbuff_free(&mybbuff);
+        qvi_bbuff_delete(&mybbuff);
     }
     *rxbuff = mybbuff;
     return rc;
diff --git a/src/qvi-omp.h b/src/qvi-omp.h
index 49d1df6..62bee9f 100644
--- a/src/qvi-omp.h
+++ b/src/qvi-omp.h
@@ -61,7 +61,7 @@ qvi_omp_group_new(
 );
 
 void
-qvi_omp_group_free(
+qvi_omp_group_delete(
     qvi_omp_group_t **group
 );
 
diff --git a/src/qvi-process.cc b/src/qvi-process.cc
index c5183c4..87c8868 100644
--- a/src/qvi-process.cc
+++ b/src/qvi-process.cc
@@ -30,7 +30,7 @@ qvi_process_group_new(
 }
 
 void
-qvi_process_group_free(
+qvi_process_group_delete(
     qvi_process_group_t **procgrp
 ) {
     qvi_delete(procgrp);
@@ -78,7 +78,7 @@ qvi_process_group_gather_bbuffs(
         const int rc = qvi_bbuff_dup(txbuff, &bbuffs[0]);
         if (rc != QV_SUCCESS) {
             if (bbuffs) {
-                qvi_bbuff_free(&bbuffs[0]);
+                qvi_bbuff_delete(&bbuffs[0]);
                 delete[] bbuffs;
             }
             bbuffs = nullptr;
@@ -105,7 +105,7 @@ qvi_process_group_scatter_bbuffs(
     qvi_bbuff_t *mybbuff = nullptr;
     const int rc = qvi_bbuff_dup(txbuffs[root], &mybbuff);
     if (rc != QV_SUCCESS) {
-        qvi_bbuff_free(&mybbuff);
+        qvi_bbuff_delete(&mybbuff);
     }
     *rxbuff = mybbuff;
     return rc;
diff --git a/src/qvi-process.h b/src/qvi-process.h
index a678845..04f17e5 100644
--- a/src/qvi-process.h
+++ b/src/qvi-process.h
@@ -32,7 +32,7 @@ qvi_process_group_new(
  *
  */
 void
-qvi_process_group_free(
+qvi_process_group_delete(
     qvi_process_group_t **group
 );
 
diff --git a/src/qvi-rmi.cc b/src/qvi-rmi.cc
index 0a58199..821f3f8 100644
--- a/src/qvi-rmi.cc
+++ b/src/qvi-rmi.cc
@@ -24,6 +24,8 @@
 // caller in case of an non-fatal error on the server side.
 //
 // TODO(skg) We need to implement timeouts.
+//
+// TODO(skg) We need to version client/server RPC dispatch.
 
 #include "qvi-rmi.h"
 #include "qvi-bbuff.h"
@@ -33,7 +35,20 @@
 
 #include "zmq.h"
 
-static const cstr_t ZINPROC_ADDR = "inproc://qvi-rmi-workers";
+static constexpr cstr_t ZINPROC_ADDR = "inproc://qvi-rmi-workers";
+
+typedef enum qvi_rpc_funid_e {
+    FID_INVALID = 0,
+    FID_SERVER_SHUTDOWN,
+    FID_HELLO,
+    FID_GBYE,
+    FID_CPUBIND,
+    FID_SET_CPUBIND,
+    FID_OBJ_TYPE_DEPTH,
+    FID_GET_NOBJS_IN_CPUSET,
+    FID_GET_DEVICE_IN_CPUSET,
+    FID_SCOPE_GET_INTRINSIC_HWPOOL
+} qvi_rpc_funid_t;
 
 static void
 send_server_shutdown_msg(
@@ -119,33 +134,20 @@ struct qvi_rmi_client_s {
     {
         // Remember clients own the hwloc data, unlike the server.
         int rc = qvi_hwloc_new(&config.hwloc);
-        if (rc != QV_SUCCESS) throw qvi_runtime_error();
+        if (qvi_unlikely(rc != QV_SUCCESS)) throw qvi_runtime_error();
         // Create a new ZMQ context.
         zctx = zmq_ctx_new();
-        if (!zctx) throw qvi_runtime_error();
+        if (qvi_unlikely(!zctx)) throw qvi_runtime_error();
     }
     /** Destructor.
      */
     ~qvi_rmi_client_s(void)
     {
         zsocket_close(&zsock);
         zctx_destroy(&zctx);
-        qvi_hwloc_free(&config.hwloc);
+        qvi_hwloc_delete(&config.hwloc);
     }
 };
 
-typedef enum qvi_rpc_funid_e {
-    FID_INVALID = 0,
-    FID_SERVER_SHUTDOWN,
-    FID_HELLO,
-    FID_GBYE,
-    FID_TASK_GET_CPUBIND,
-    FID_TASK_SET_CPUBIND_FROM_CPUSET,
-    FID_OBJ_TYPE_DEPTH,
-    FID_GET_NOBJS_IN_CPUSET,
-    FID_GET_DEVICE_IN_CPUSET,
-    FID_SCOPE_GET_INTRINSIC_HWPOOL
-} qvi_rpc_funid_t;
-
 typedef struct qvi_msg_header_s {
     qvi_rpc_funid_t fid = FID_INVALID;
 #if QVI_DEBUG_MODE == 1
@@ -221,7 +223,7 @@ msg_free_byte_buffer_cb(
     void *hint
 ) {
     qvi_bbuff_t *buff = (qvi_bbuff_t *)hint;
-    qvi_bbuff_free(&buff);
+    qvi_bbuff_delete(&buff);
 }
 
 static int
@@ -350,7 +352,7 @@ rpc_pack(
     rc = qvi_bbuff_rmi_pack(ibuff, std::forward<Args>(args)...);
 out:
     if (rc != QV_SUCCESS) {
-        qvi_bbuff_free(&ibuff);
+        qvi_bbuff_delete(&ibuff);
     }
     *buff = ibuff;
     return rc;
@@ -398,7 +400,7 @@ rpc_req(
     qvi_bbuff_t *buff = nullptr;
     int rc = rpc_pack(&buff, fid, std::forward<Args>(args)...);
     if (rc != QV_SUCCESS) {
-        qvi_bbuff_free(&buff);
+        qvi_bbuff_delete(&buff);
         return rc;
     }
     zmq_msg_t msg;
@@ -494,7 +496,7 @@ rpc_ssi_gbye(
 }
 
 static int
-rpc_ssi_task_get_cpubind(
+rpc_ssi_cpubind(
     qvi_rmi_server_t *server,
     qvi_msg_header_t *hdr,
     void *input,
@@ -510,13 +512,13 @@ rpc_ssi_task_get_cpubind(
     );
 
     qvrc = rpc_pack(output, hdr->fid, rpcrc, bitmap);
 
-    hwloc_bitmap_free(bitmap);
+    qvi_hwloc_bitmap_delete(&bitmap);
     return qvrc;
 }
 
 static int
-rpc_ssi_task_set_cpubind_from_cpuset(
+rpc_ssi_set_cpubind(
     qvi_rmi_server_t *server,
     qvi_msg_header_t *hdr,
     void *input,
@@ -530,8 +532,7 @@ rpc_ssi_task_set_cpubind_from_cpuset(
     const int rpcrc = qvi_hwloc_task_set_cpubind_from_cpuset(
         server->config.hwloc, who, cpuset
     );
-    hwloc_bitmap_free(cpuset);
-
+    qvi_hwloc_bitmap_delete(&cpuset);
     return rpc_pack(output, hdr->fid, rpcrc);
 }
@@ -577,7 +578,7 @@ rpc_ssi_get_nobjs_in_cpuset(
     qvrc = rpc_pack(output, hdr->fid, rpcrc, nobjs);
 
-    hwloc_bitmap_free(cpuset);
+    qvi_hwloc_bitmap_delete(&cpuset);
     return qvrc;
 }
@@ -604,7 +605,7 @@ rpc_ssi_get_device_in_cpuset(
     qvrc = rpc_pack(output, hdr->fid, rpcrc, dev_id);
 
-    hwloc_bitmap_free(cpuset);
+    qvi_hwloc_bitmap_delete(&cpuset);
     free(dev_id);
     return qvrc;
 }
@@ -639,7 +640,7 @@ get_intrinsic_scope_proc(
         server->config.hwloc, cpuset, hwpool
     );
 out:
-    if (cpuset) hwloc_bitmap_free(cpuset);
+    qvi_hwloc_bitmap_delete(&cpuset);
     if (rc != QV_SUCCESS) {
         qvi_delete(hwpool);
     }
@@ -695,8 +696,8 @@ static const std::map<qvi_rpc_funid_t, qvi_rpc_fun_ptr_t> rpc_dispatch_table = {
     {FID_SERVER_SHUTDOWN, rpc_ssi_shutdown},
     {FID_HELLO, rpc_ssi_hello},
     {FID_GBYE, rpc_ssi_gbye},
-    {FID_TASK_GET_CPUBIND, rpc_ssi_task_get_cpubind},
-    {FID_TASK_SET_CPUBIND_FROM_CPUSET, rpc_ssi_task_set_cpubind_from_cpuset},
+    {FID_CPUBIND, rpc_ssi_cpubind},
+    {FID_SET_CPUBIND, rpc_ssi_set_cpubind},
     {FID_OBJ_TYPE_DEPTH, rpc_ssi_obj_type_depth},
     {FID_GET_NOBJS_IN_CPUSET, rpc_ssi_get_nobjs_in_cpuset},
     {FID_GET_DEVICE_IN_CPUSET, rpc_ssi_get_device_in_cpuset},
@@ -754,7 +755,8 @@ server_go(
     );
     if (qvi_unlikely(!zworksock)) return nullptr;
 
-    int rc, bsent, bsentt = 0;
+    int rc, bsent;
+    volatile int bsentt = 0;
     volatile std::atomic<bool> active{true};
     do {
         zmq_msg_t mrx, mtx;
@@ -774,7 +776,7 @@ server_go(
     QVI_UNUSED(bsentt);
 #endif
     zsocket_close(&zworksock);
-    if (rc != QV_SUCCESS && rc != QV_SUCCESS_SHUTDOWN) {
+    if (qvi_unlikely(rc != QV_SUCCESS && rc != QV_SUCCESS_SHUTDOWN)) {
         qvi_log_error("RX/TX loop exited with rc={} ({})", rc, qv_strerr(rc));
     }
     return nullptr;
@@ -796,7 +798,7 @@ send_server_shutdown_msg(
 }
 
 void
-qvi_rmi_server_free(
+qvi_rmi_server_delete(
     qvi_rmi_server_t **server
 ) {
     qvi_delete(server);
@@ -903,7 +905,7 @@ qvi_rmi_client_new(
 }
 
 void
-qvi_rmi_client_free(
+qvi_rmi_client_delete(
     qvi_rmi_client_t **client
 ) {
     qvi_delete(client);
@@ -958,54 +960,40 @@ qvi_rmi_client_hwloc(
 ////////////////////////////////////////////////////////////////////////////////
 // Client-Side (Public) RPC Stub Definitions
 ////////////////////////////////////////////////////////////////////////////////
 int
-qvi_rmi_task_get_cpubind(
+qvi_rmi_cpubind(
     qvi_rmi_client_t *client,
     pid_t who,
     hwloc_cpuset_t *cpuset
 ) {
-    int qvrc = rpc_req(
-        client->zsock,
-        FID_TASK_GET_CPUBIND,
-        who
-    );
-    if (qvrc != QV_SUCCESS) return qvrc;
-
+    int qvrc = rpc_req(client->zsock, FID_CPUBIND, who);
+    if (qvi_unlikely(qvrc != QV_SUCCESS)) return qvrc;
     // Should be set by rpc_rep, so assume an error.
     int rpcrc = QV_ERR_MSG;
     qvrc = rpc_rep(client->zsock, &rpcrc, cpuset);
-
-    if (qvrc != QV_SUCCESS) {
-        hwloc_bitmap_free(*cpuset);
-        *cpuset = nullptr;
+    if (qvi_unlikely(qvrc != QV_SUCCESS)) {
+        qvi_hwloc_bitmap_delete(cpuset);
         return qvrc;
     }
     return rpcrc;
 }
 
 int
-qvi_rmi_task_set_cpubind_from_cpuset(
+qvi_rmi_set_cpubind(
     qvi_rmi_client_t *client,
     pid_t who,
     hwloc_const_cpuset_t cpuset
 ) {
-    int qvrc = rpc_req(
-        client->zsock,
-        FID_TASK_SET_CPUBIND_FROM_CPUSET,
-        who,
-        cpuset
-    );
-    if (qvrc != QV_SUCCESS) return qvrc;
-
+    int qvrc = rpc_req(client->zsock, FID_SET_CPUBIND, who, cpuset);
+    if (qvi_unlikely(qvrc != QV_SUCCESS)) return qvrc;
     // Should be set by rpc_rep, so assume an error.
     int rpcrc = QV_ERR_MSG;
     qvrc = rpc_rep(client->zsock, &rpcrc);
-    if (qvrc != QV_SUCCESS) return qvrc;
-
+    if (qvi_unlikely(qvrc != QV_SUCCESS)) return qvrc;
     return rpcrc;
 }
 
 int
-qvi_rmi_scope_get_intrinsic_hwpool(
+qvi_rmi_get_intrinsic_hwpool(
     qvi_rmi_client_t *client,
     pid_t who,
     qv_scope_intrinsic_t iscope,
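Both sides of these renames must agree on the wire protocol: the client stubs pack a qvi_rpc_funid_t into the message header, and the server looks that FID up in rpc_dispatch_table to select a handler. That is why the enum was hoisted above its first use, and why the new versioning TODO matters. A reduced sketch of the server-side lookup; dispatch_one_sketch and the function-pointer typedef are assumptions matched to the rpc_ssi_* signatures above, not code from this patch:

    // Assumed handler type, mirroring the rpc_ssi_* definitions above.
    typedef int (*rpc_fun_ptr_sketch_t)(
        qvi_rmi_server_t *,
        qvi_msg_header_t *,
        void *,
        qvi_bbuff_t **
    );

    static int
    dispatch_one_sketch(
        qvi_rmi_server_t *server,
        qvi_msg_header_t *hdr,
        void *body,
        qvi_bbuff_t **output
    ) {
        const auto handler = rpc_dispatch_table.find(hdr->fid);
        // A FID from a mismatched peer must fail cleanly, not crash:
        // exactly the case the versioning TODO is about.
        if (handler == rpc_dispatch_table.end()) return QV_ERR_MSG;
        return handler->second(server, hdr, body, output);
    }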
diff --git a/src/qvi-rmi.h b/src/qvi-rmi.h
index 59ce56a..6c9ae88 100644
--- a/src/qvi-rmi.h
+++ b/src/qvi-rmi.h
@@ -20,12 +20,14 @@
 #define QVI_RMI_H
 
 #include "qvi-common.h"
-#include "qvi-hwpool.h"
 
 #ifdef __cplusplus
 
+/**
+ * Maintains RMI configuration information.
+ */
 struct qvi_rmi_config_s {
-    /** Not sent, initialized elsewhere. */
+    /** Maintains hardware locality information. */
     qvi_hwloc_t *hwloc = nullptr;
     /** Connection URL. */
     std::string url;
@@ -33,6 +35,15 @@
     std::string hwtopo_path;
 };
 
+/**
+ * Connects a client to the server specified by the provided URL.
+ */
+int
+qvi_rmi_client_connect(
+    qvi_rmi_client_t *client,
+    const std::string &url
+);
+
 extern "C" {
 #endif
 
@@ -52,7 +63,7 @@ qvi_rmi_server_new(
  *
  */
 void
-qvi_rmi_server_free(
+qvi_rmi_server_delete(
     qvi_rmi_server_t **server
 );
 
@@ -86,7 +97,7 @@ qvi_rmi_client_new(
  *
  */
 void
-qvi_rmi_client_free(
+qvi_rmi_client_delete(
     qvi_rmi_client_t **client
 );
 
@@ -99,34 +110,34 @@ qvi_rmi_client_hwloc(
 );
 
 /**
- *
+ * Returns a new hardware pool based on the intrinsic scope specifier.
  */
 int
-qvi_rmi_task_get_cpubind(
+qvi_rmi_get_intrinsic_hwpool(
     qvi_rmi_client_t *client,
     pid_t task_id,
-    hwloc_cpuset_t *cpuset
+    qv_scope_intrinsic_t iscope,
+    qvi_hwpool_s **hwpool
 );
 
 /**
  *
  */
 int
-qvi_rmi_task_set_cpubind_from_cpuset(
+qvi_rmi_cpubind(
     qvi_rmi_client_t *client,
     pid_t task_id,
-    hwloc_const_cpuset_t cpuset
+    hwloc_cpuset_t *cpuset
 );
 
 /**
  *
  */
 int
-qvi_rmi_scope_get_intrinsic_hwpool(
+qvi_rmi_set_cpubind(
     qvi_rmi_client_t *client,
     pid_t task_id,
-    qv_scope_intrinsic_t iscope,
-    qvi_hwpool_s **hwpool
+    hwloc_const_cpuset_t cpuset
 );
 
 /**
@@ -174,15 +185,6 @@ qvi_rmi_get_cpuset_for_nobjs(
 
 #ifdef __cplusplus
 }
-
-/**
- *
- */
-int
-qvi_rmi_client_connect(
-    qvi_rmi_client_t *client,
-    const std::string &url
-);
 #endif
 
 #endif
diff --git a/src/qvi-scope.cc b/src/qvi-scope.cc
index 50be931..7aa2f39 100644
--- a/src/qvi-scope.cc
+++ b/src/qvi-scope.cc
@@ -50,8 +50,8 @@ struct qv_scope_s {
     }
 };
 
-int
-qvi_scope_new(
+static int
+scope_new(
     qvi_group_t *group,
     qvi_hwpool_s *hwpool,
     qv_scope_t **scope
@@ -65,29 +65,79 @@ qvi_scope_get(
     qv_scope_intrinsic_t iscope,
     qv_scope_t **scope
 ) {
-    qvi_hwpool_s *hwpool = nullptr;
+    *scope = nullptr;
     // Get the requested intrinsic group.
     int rc = group->make_intrinsic(iscope);
-    if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
+    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
     // Get the requested intrinsic hardware pool.
-    rc = qvi_rmi_scope_get_intrinsic_hwpool(
-        group->task()->rmi(),
-        qvi_task_s::mytid(),
-        iscope, &hwpool
+    qvi_hwpool_s *hwpool = nullptr;
+    rc = qvi_rmi_get_intrinsic_hwpool(
+        group->task()->rmi(), qvi_task_s::mytid(), iscope, &hwpool
     );
-    if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
+    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
     // Create and initialize the scope.
-    rc = qvi_scope_new(group, hwpool, scope);
-out:
+    rc = scope_new(group, hwpool, scope);
     if (qvi_unlikely(rc != QV_SUCCESS)) {
+        qvi_scope_delete(scope);
+    }
+    return rc;
+}
+
+// TODO(skg) Implement use of hints.
+int
+qvi_scope_create(
+    qv_scope_t *parent,
+    qv_hw_obj_type_t type,
+    int nobjs,
+    qv_scope_create_hints_t,
+    qv_scope_t **child
+) {
+    *child = nullptr;
+    // Create underlying group. Notice the use of self here.
+    qvi_group_t *group = nullptr;
+    int rc = parent->group->self(&group);
+    if (rc != QV_SUCCESS) return rc;
+    // Create the hardware pool.
+    qvi_hwpool_s *hwpool = nullptr;
+    rc = qvi_new(&hwpool);
+    if (rc != QV_SUCCESS) {
+        qvi_delete(&group);
+        return rc;
+    }
+    // Get the appropriate cpuset based on the caller's request.
+    hwloc_cpuset_t cpuset = nullptr;
+    rc = qvi_rmi_get_cpuset_for_nobjs(
+        parent->group->task()->rmi(),
+        parent->hwpool->cpuset().cdata(),
+        type, nobjs, &cpuset
+    );
+    if (rc != QV_SUCCESS) {
+        qvi_delete(&group);
+        qvi_delete(&hwpool);
+        return rc;
+    }
+    // Now that we have the desired cpuset,
+    // initialize the new hardware pool.
+    rc = hwpool->initialize(parent->group->hwloc(), cpuset);
+    if (rc != QV_SUCCESS) {
+        qvi_delete(&group);
         qvi_delete(&hwpool);
-        qvi_scope_free(scope);
+        return rc;
+    }
+    // No longer needed.
+    qvi_hwloc_bitmap_delete(&cpuset);
+    // Create and initialize the new scope.
+    qv_scope_t *ichild = nullptr;
+    rc = scope_new(group, hwpool, &ichild);
+    if (rc != QV_SUCCESS) {
+        qvi_scope_delete(&ichild);
     }
+    *child = ichild;
     return rc;
 }
 
 void
-qvi_scope_free(
+qvi_scope_delete(
     qv_scope_t **scope
 ) {
     qvi_delete(scope);
@@ -101,7 +151,7 @@ qvi_scope_thfree(
     if (!kscopes) return;
     qv_scope_t **ikscopes = *kscopes;
     for (uint_t i = 0; i < k; ++i) {
-        qvi_scope_free(&ikscopes[i]);
+        qvi_scope_delete(&ikscopes[i]);
     }
     delete[] ikscopes;
     *kscopes = nullptr;
@@ -115,32 +165,6 @@ qvi_scope_group(
     return scope->group;
 }
 
-const qvi_hwpool_s *
-qvi_scope_hwpool(
-    qv_scope_t *scope
-) {
-    assert(scope);
-    return scope->hwpool;
-}
-
-const qvi_hwloc_bitmap_s &
-qvi_scope_cpuset(
-    qv_scope_t *scope
-) {
-    assert(scope);
-    return scope->hwpool->cpuset();
-}
-
-int
-qvi_scope_group_rank(
-    qv_scope_t *scope,
-    int *taskid
-) {
-    assert(scope);
-    *taskid = scope->group->rank();
-    return QV_SUCCESS;
-}
-
 int
 qvi_scope_group_size(
     qv_scope_t *scope,
@@ -152,21 +176,23 @@ qvi_scope_group_size(
 }
 
 int
-qvi_scope_barrier(
-    qv_scope_t *scope
+qvi_scope_group_rank(
+    qv_scope_t *scope,
+    int *taskid
 ) {
     assert(scope);
-    return scope->group->barrier();
+    *taskid = scope->group->rank();
+    return QV_SUCCESS;
 }
 
 int
-qvi_scope_nobjs(
+qvi_scope_nobjects(
     qv_scope_t *scope,
     qv_hw_obj_type_t obj,
-    int *n
+    int *result
 ) {
     return scope->hwpool->nobjects(
-        scope->group->hwloc(), obj, n
+        scope->group->hwloc(), obj, result
     );
 }
 
@@ -205,7 +231,7 @@ qvi_scope_device_id(
             break;
         default:
             rc = QV_ERR_INVLD_ARG;
-            goto out;
+            break;
     }
     if (qvi_unlikely(nw == -1)) rc = QV_ERR_OOR;
 out:
@@ -215,6 +241,14 @@ qvi_scope_device_id(
     return rc;
 }
 
+int
+qvi_scope_barrier(
+    qv_scope_t *scope
+) {
+    assert(scope);
+    return scope->group->barrier();
+}
+
 int
 qvi_scope_bind_push(
     qv_scope_t *scope
@@ -242,7 +276,7 @@ qvi_scope_bind_string(
     if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
 
     rc = qvi_hwloc_bitmap_string(bitmap, format, result);
-    qvi_hwloc_bitmap_free(&bitmap);
+    qvi_hwloc_bitmap_delete(&bitmap);
     return rc;
 }
 
@@ -425,12 +459,12 @@ gather_values(
     if (!shared || (shared && (group->rank() == root))) {
         if (bbuffs) {
             for (uint_t i = 0; i < group_size; ++i) {
-                qvi_bbuff_free(&bbuffs[i]);
+                qvi_bbuff_delete(&bbuffs[i]);
             }
             delete[] bbuffs;
         }
     }
-    qvi_bbuff_free(&txbuff);
+    qvi_bbuff_delete(&txbuff);
     if (rc != QV_SUCCESS) {
         // If something went wrong, just zero-initialize the values.
         outvals = {};
@@ -472,12 +506,12 @@ gather_hwpools(
     if (!shared || (shared && (group->rank() == root))) {
         if (bbuffs) {
             for (uint_t i = 0; i < group_size; ++i) {
-                qvi_bbuff_free(&bbuffs[i]);
+                qvi_bbuff_delete(&bbuffs[i]);
             }
             delete[] bbuffs;
         }
     }
-    qvi_bbuff_free(&txbuff);
+    qvi_bbuff_delete(&txbuff);
     if (rc != QV_SUCCESS) {
         // If something went wrong, just zero-initialize the pools.
         rxpools = {};
@@ -521,9 +555,9 @@ scatter_values(
     *value = *(TYPE *)qvi_bbuff_data(rxbuff);
 out:
     for (auto &buff : txbuffs) {
-        qvi_bbuff_free(&buff);
+        qvi_bbuff_delete(&buff);
     }
-    qvi_bbuff_free(&rxbuff);
+    qvi_bbuff_delete(&rxbuff);
     if (rc != QV_SUCCESS) {
         // If something went wrong, just zero-initialize the value.
         *value = {};
     }
@@ -562,9 +596,9 @@ scatter_hwpools(
     rc = qvi_bbuff_rmi_unpack(qvi_bbuff_data(rxbuff), pool);
 out:
     for (auto &buff : txbuffs) {
-        qvi_bbuff_free(&buff);
+        qvi_bbuff_delete(&buff);
     }
-    qvi_bbuff_free(&rxbuff);
+    qvi_bbuff_delete(&rxbuff);
     if (rc != QV_SUCCESS) {
         qvi_delete(pool);
     }
@@ -625,7 +659,7 @@ scope_split_coll_gather(
     splitcoll.gsplit.affinities.resize(group_size);
     for (uint_t tid = 0; tid < group_size; ++tid) {
         hwloc_cpuset_t cpuset = nullptr;
-        rc = qvi_rmi_task_get_cpubind(
+        rc = qvi_rmi_cpubind(
             parent->group->task()->rmi(),
             splitcoll.gsplit.taskids[tid], &cpuset
         );
@@ -633,7 +667,7 @@ scope_split_coll_gather(
         rc = splitcoll.gsplit.affinities[tid].set(cpuset);
         // Clean up.
-        qvi_hwloc_bitmap_free(&cpuset);
+        qvi_hwloc_bitmap_delete(&cpuset);
         if (rc != QV_SUCCESS) break;
     }
 }
@@ -1160,12 +1194,12 @@ qvi_scope_split(
     );
     if (rc != QV_SUCCESS) goto out;
     // Create and initialize the new scope.
-    rc = qvi_scope_new(group, hwpool, &ichild);
+    rc = scope_new(group, hwpool, &ichild);
 out:
     if (rc != QV_SUCCESS) {
         qvi_delete(&hwpool);
         qvi_delete(&group);
-        qvi_scope_free(&ichild);
+        qvi_scope_delete(&ichild);
     }
     *child = ichild;
     return rc;
@@ -1180,7 +1214,6 @@ qvi_scope_thsplit(
     qv_hw_obj_type_t maybe_obj_type,
     qv_scope_t ***thchildren
 ) {
-    if (k == 0 || !thchildren) return QV_ERR_INVLD_ARG;
     *thchildren = nullptr;
 
     const uint_t group_size = k;
@@ -1192,10 +1225,11 @@ qvi_scope_thsplit(
     if (rc != QV_SUCCESS) return rc;
     // Since this is called by a single task, get its ID and associated hardware
     // affinity here, and replicate them in the following loop that populates
-    // splitagg. No point in doing this in a loop.
+    // splitagg.
+    // No point in doing this in a loop.
     const pid_t taskid = qvi_task_t::mytid();
     hwloc_cpuset_t task_affinity = nullptr;
-    rc = qvi_rmi_task_get_cpubind(
+    rc = qvi_rmi_cpubind(
         parent->group->task()->rmi(), taskid, &task_affinity
     );
@@ -1217,26 +1251,21 @@ qvi_scope_thsplit(
         // Same goes for the task's affinity.
         splitagg.affinities[i].set(task_affinity);
     }
+    if (rc != QV_SUCCESS) return rc;
     // Cleanup: we don't need task_affinity anymore.
-    qvi_hwloc_bitmap_free(&task_affinity);
+    qvi_hwloc_bitmap_delete(&task_affinity);
     if (rc != QV_SUCCESS) return rc;
     // Split the hardware resources based on the provided split parameters.
     rc = agg_split(splitagg);
     if (rc != QV_SUCCESS) return rc;
-
-    // Now populate the children.
-    qv_scope_t **ithchildren = new qv_scope_t *[group_size];
-
-    qvi_group_t *thgroup = nullptr;
     // Split off from our parent group. This call is called from a context in
     // which a process is splitting its resources across threads, so create a
     // new thread group for each child.
+    qvi_group_t *thgroup = nullptr;
     rc = parent->group->thsplit(group_size, &thgroup);
-    if (rc != QV_SUCCESS) {
-        qvi_scope_thfree(&ithchildren, group_size);
-        return rc;
-    }
-
+    if (rc != QV_SUCCESS) return rc;
+    // Now create and populate the children.
+    qv_scope_t **ithchildren = new qv_scope_t *[group_size];
     for (uint_t i = 0; i < group_size; ++i) {
         // Copy out, since the hardware pools in splitagg will get freed.
         qvi_hwpool_s *hwpool = nullptr;
@@ -1247,7 +1276,7 @@ qvi_scope_thsplit(
         }
         // Create and initialize the new scope.
         qv_scope_t *child = nullptr;
-        rc = qvi_scope_new(thgroup, hwpool, &child);
+        rc = scope_new(thgroup, hwpool, &child);
         if (rc != QV_SUCCESS) {
             qvi_delete(&thgroup);
             break;
@@ -1276,7 +1305,7 @@ qvi_scope_thsplit_at(
     qv_scope_t ***kchildren
 ) {
     int nobj = 0;
-    const int rc = qvi_scope_nobjs(parent, type, &nobj);
+    const int rc = qvi_scope_nobjects(parent, type, &nobj);
     if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
 
     return qvi_scope_thsplit(parent, nobj, kgroup_ids, k, type, kchildren);
@@ -1290,56 +1319,12 @@ qvi_scope_split_at(
     qv_scope_t **child
 ) {
     int nobj = 0;
-    const int rc = qvi_scope_nobjs(parent, type, &nobj);
+    const int rc = qvi_scope_nobjects(parent, type, &nobj);
     if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
 
     return qvi_scope_split(parent, nobj, color, type, child);
 }
 
-int
-qvi_scope_create(
-    qv_scope_t *parent,
-    qv_hw_obj_type_t type,
-    int nobjs,
-    qv_scope_create_hints_t hints,
-    qv_scope_t **child
-) {
-    // TODO(skg) Implement use of hints.
-    QVI_UNUSED(hints);
-
-    qvi_group_t *group = nullptr;
-    qvi_hwpool_s *hwpool = nullptr;
-    qv_scope_t *ichild = nullptr;
-    hwloc_cpuset_t cpuset = nullptr;
-    // TODO(skg) We need to acquire these resources.
-    int rc = qvi_rmi_get_cpuset_for_nobjs(
-        parent->group->task()->rmi(),
-        parent->hwpool->cpuset().cdata(),
-        type, nobjs, &cpuset
-    );
-    if (rc != QV_SUCCESS) goto out;
-    // Now that we have the desired cpuset,
-    // create a corresponding hardware pool.
-    rc = qvi_new(&hwpool);
-    if (rc != QV_SUCCESS) goto out;
-
-    rc = hwpool->initialize(parent->group->hwloc(), cpuset);
-    if (rc != QV_SUCCESS) goto out;
-    // Create underlying group. Notice the use of self here.
-    rc = parent->group->self(&group);
-    if (rc != QV_SUCCESS) goto out;
-    // Create and initialize the new scope.
-    rc = qvi_scope_new(group, hwpool, &ichild);
-out:
-    qvi_hwloc_bitmap_free(&cpuset);
-    if (rc != QV_SUCCESS) {
-        qvi_delete(&hwpool);
-        qvi_scope_free(&ichild);
-    }
-    *child = ichild;
-    return rc;
-}
-
 /*
  * vim: ft=cpp ts=4 sts=4 sw=4 expandtab
  */
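At the public boundary, the renames above surface as qvi_scope_nobjects() and qvi_scope_delete(), which the quo-vadis.cc hunks at the top of this patch forward to. A hedged end-to-end sketch of that internal sequence; report_core_count_sketch is hypothetical and the group's construction is elided, but the qvi_scope_* calls are from this patch and QV_SCOPE_PROCESS / QV_HW_OBJ_CORE are public quo-vadis constants (assumes <stdio.h> plus the qvi-scope.h and qvi-group.h declarations):

    // Hypothetical caller: count cores visible to this process's scope.
    static int
    report_core_count_sketch(
        qvi_group_t *group  // assumed already constructed
    ) {
        qv_scope_t *scope = nullptr;
        int rc = qvi_scope_get(group, QV_SCOPE_PROCESS, &scope);
        if (rc != QV_SUCCESS) return rc;

        int ncores = 0;
        rc = qvi_scope_nobjects(scope, QV_HW_OBJ_CORE, &ncores);
        if (rc == QV_SUCCESS) {
            printf("# cores in scope: %d\n", ncores);
        }
        // Like every *_delete in this patch: frees and nulls the pointer.
        qvi_scope_delete(&scope);
        return rc;
    }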
diff --git a/src/qvi-scope.h b/src/qvi-scope.h
index 3e5b02f..cedc4bf 100644
--- a/src/qvi-scope.h
+++ b/src/qvi-scope.h
@@ -17,9 +17,8 @@
 #ifndef QVI_SCOPE_H
 #define QVI_SCOPE_H
 
-#include "qvi-common.h" // IWYU pragma: keep
+#include "qvi-common.h"
 #include "qvi-group.h"
-#include "qvi-hwloc.h"
 
 /**
  * Returns a new intrinsic scope.
@@ -32,7 +31,8 @@ qvi_scope_get(
 );
 
 /**
- *
+ * Creates a new scope based on the specified hardware type, number of resources,
+ * and creation hints.
  */
 int
 qvi_scope_create(
@@ -44,10 +44,10 @@ qvi_scope_create(
 );
 
 /**
- * Frees a scope.
+ * Destroys a scope.
  */
 void
-qvi_scope_free(
+qvi_scope_delete(
     qv_scope_t **scope
 );
 
@@ -69,11 +69,12 @@ qvi_scope_group(
 );
 
 /**
- * Returns a const reference to the provided scope's cpuset.
+ * Returns the scope's group size.
 */
-const qvi_hwloc_bitmap_s &
-qvi_scope_cpuset(
-    qv_scope_t *scope
+int
+qvi_scope_group_size(
+    qv_scope_t *scope,
+    int *ntasks
 );
 
 /**
@@ -85,23 +86,14 @@ qvi_scope_group_rank(
     int *taskid
 );
 
-/**
- * Returns the scope's group size.
- */
-int
-qvi_scope_group_size(
-    qv_scope_t *scope,
-    int *ntasks
-);
-
 /**
  * Returns the number of hardware objects in the provided scope.
  */
 int
-qvi_scope_nobjs(
+qvi_scope_nobjects(
     qv_scope_t *scope,
     qv_hw_obj_type_t obj,
-    int *n
+    int *result
 );
 
 /**
diff --git a/src/qvi-task.cc b/src/qvi-task.cc
index aaabc0e..3fb5f63 100644
--- a/src/qvi-task.cc
+++ b/src/qvi-task.cc
@@ -41,13 +41,13 @@ qvi_task_s::init_bind_stack(void)
 {
     // Cache current binding.
     hwloc_cpuset_t current_bind = nullptr;
-    const int rc = qvi_rmi_task_get_cpubind(
+    const int rc = qvi_rmi_cpubind(
         m_rmi, mytid(), &current_bind
     );
     if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
 
     m_stack.push(qvi_hwloc_bitmap_s(current_bind));
-    hwloc_bitmap_free(current_bind);
+    qvi_hwloc_bitmap_delete(&current_bind);
     return rc;
 }
@@ -68,7 +68,7 @@ qvi_task_s::~qvi_task_s(void)
     while (!m_stack.empty()) {
         m_stack.pop();
     }
-    qvi_rmi_client_free(&m_rmi);
+    qvi_rmi_client_delete(&m_rmi);
 }
 
 qvi_rmi_client_t *
@@ -92,7 +92,7 @@ qvi_task_s::bind_push(
     // Copy input bitmap because we don't want to directly modify it.
     qvi_hwloc_bitmap_s bitmap_copy(cpuset);
     // Change policy
-    const int rc = qvi_rmi_task_set_cpubind_from_cpuset(
+    const int rc = qvi_rmi_set_cpubind(
         m_rmi, mytid(), bitmap_copy.cdata()
     );
     if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
@@ -106,7 +106,7 @@ qvi_task_s::bind_pop(void)
 {
     m_stack.pop();
 
-    return qvi_rmi_task_set_cpubind_from_cpuset(
+    return qvi_rmi_set_cpubind(
         m_rmi, mytid(), m_stack.top().cdata()
     );
 }
diff --git a/tests/internal/test-hwloc.c b/tests/internal/test-hwloc.c
index f3ffb8f..3ba17d8 100644
--- a/tests/internal/test-hwloc.c
+++ b/tests/internal/test-hwloc.c
@@ -223,7 +223,7 @@ main(void)
     if (binds) free(binds);
     if (bitmap) hwloc_bitmap_free(bitmap);
 
-    qvi_hwloc_free(&hwl);
+    qvi_hwloc_delete(&hwl);
 
     printf("# Done\n");
     return EXIT_SUCCESS;
diff --git a/tests/internal/test-rmi-server.cc b/tests/internal/test-rmi-server.cc
index c9a580f..61839ed 100644
--- a/tests/internal/test-rmi-server.cc
+++ b/tests/internal/test-rmi-server.cc
@@ -88,8 +88,8 @@ server(
     printf("# [%d] Server Start Time %lf seconds\n", getpid(), end - start);
 out:
     sleep(4);
-    qvi_rmi_server_free(&server);
-    qvi_hwloc_free(&hwloc);
+    qvi_rmi_server_delete(&server);
+    qvi_hwloc_delete(&hwloc);
     if (ers) {
         fprintf(stderr, "\n%s (rc=%d, %s)\n", ers, rc, qv_strerr(rc));
         return EXIT_FAILURE;
diff --git a/tests/internal/test-rmi.cc b/tests/internal/test-rmi.cc
index e2e471c..af08649 100644
--- a/tests/internal/test-rmi.cc
+++ b/tests/internal/test-rmi.cc
@@ -86,8 +86,8 @@ server(
     printf("# [%d] Server Start Time %lf seconds\n", getpid(), end - start);
 out:
     sleep(4);
-    qvi_rmi_server_free(&server);
-    qvi_hwloc_free(&hwloc);
+    qvi_rmi_server_delete(&server);
+    qvi_hwloc_delete(&hwloc);
     if (ers) {
         fprintf(stderr, "\n%s (rc=%d, %s)\n", ers, rc, qv_strerr(rc));
         return 1;
@@ -118,9 +118,9 @@ client(
         goto out;
     }
 
-    rc = qvi_rmi_task_get_cpubind(client, who, &bitmap);
+    rc = qvi_rmi_cpubind(client, who, &bitmap);
     if (rc != QV_SUCCESS) {
-        ers = "qvi_rmi_task_get_cpubind() failed";
+        ers = "qvi_rmi_cpubind() failed";
         goto out;
     }
 
     char *res;
@@ -129,7 +129,7 @@ client(
     hwloc_bitmap_free(bitmap);
     free(res);
 out:
-    qvi_rmi_client_free(&client);
+    qvi_rmi_client_delete(&client);
     if (ers) {
         fprintf(stderr, "\n%s (rc=%d, %s)\n", ers, rc, qv_strerr(rc));
         return 1;