Skip to content

Commit

Permalink
Checkpoint more scope work. (#242)
Browse files Browse the repository at this point in the history
Signed-off-by: Samuel K. Gutierrez <[email protected]>
  • Loading branch information
samuelkgutierrez authored Jul 24, 2024
1 parent 312f9a1 commit d02ef1f
Show file tree
Hide file tree
Showing 7 changed files with 158 additions and 114 deletions.
8 changes: 4 additions & 4 deletions src/qvi-bbuff-rmi.h
Original file line number Diff line number Diff line change
Expand Up @@ -554,14 +554,14 @@ qvi_bbuff_rmi_pack_item_impl(
const qvi_hwpool_s *data
) {
// Pack CPU.
int rc = qvi_bbuff_rmi_pack_item(buff, data->cpu);
int rc = qvi_bbuff_rmi_pack_item(buff, data->m_cpu);
if (rc != QV_SUCCESS) return rc;
// Pack ndevinfos
const size_t ndev = data->devs.size();
const size_t ndev = data->m_devs.size();
rc = qvi_bbuff_rmi_pack_item(buff, ndev);
if (rc != QV_SUCCESS) return rc;
// Pack devices.
for (const auto &dev : data->devs) {
for (const auto &dev : data->m_devs) {
rc = qvi_bbuff_rmi_pack_item(buff, dev.second.get());
if (rc != QV_SUCCESS) return rc;
}
Expand Down Expand Up @@ -961,7 +961,7 @@ qvi_bbuff_rmi_unpack_item(

// Unpack CPU.
rc = qvi_bbuff_rmi_unpack_item(
ihwp->cpu, buffpos, &bw
ihwp->m_cpu, buffpos, &bw
);
if (rc != QV_SUCCESS) return rc;
total_bw += bw;
Expand Down
88 changes: 79 additions & 9 deletions src/qvi-hwpool.cc
Original file line number Diff line number Diff line change
Expand Up @@ -135,30 +135,100 @@ pool_release_cpus_by_cpuset(
}
#endif

int
qvi_hwpool_s::add_devices_with_affinity(
    qvi_hwloc_t *hwloc
) {
    int rc = QV_SUCCESS;
    // Iterate over the supported device types and add every device whose
    // affinity falls within this pool's cpuset.
    for (const auto devt : qvi_hwloc_supported_devices()) {
        qvi_hwloc_dev_list_t devs;
        rc = qvi_hwloc_get_devices_in_bitmap(
            hwloc, devt, m_cpu.cpuset, devs
        );
        if (rc != QV_SUCCESS) return rc;
        for (const auto &dev : devs) {
            rc = add_device(qvi_hwpool_dev_s(dev));
            if (rc != QV_SUCCESS) return rc;
        }
    }
    return rc;
}

int
qvi_hwpool_s::new_hwpool(
    qvi_hwloc_t *hwloc,
    hwloc_const_cpuset_t cpuset,
    qvi_hwpool_s **opool
) {
    // We obtained the CPUs, so create the new pool.
    qvi_hwpool_s *ipool = nullptr;
    int rc = qvi_new(&ipool);
    if (qvi_unlikely(rc != QV_SUCCESS)) goto out;
    // Initialize the hardware pool: set its cpuset and
    // add all devices with affinity to it.
    // TODO(skg) Acquire devices.
    rc = ipool->initialize(hwloc, cpuset);
out:
    if (qvi_unlikely(rc != QV_SUCCESS)) {
        // Initialization failed: destroy the partially-built pool
        // so the caller receives nullptr, not a broken instance.
        qvi_delete(&ipool);
    }
    *opool = ipool;
    return rc;
}

// Initializes the hardware pool: sets its cpuset, then populates it
// with all devices that have affinity to that cpuset.
int
qvi_hwpool_s::initialize(
    qvi_hwloc_t *hwloc,
    hwloc_const_bitmap_t cpuset
) {
    const int rc = m_cpu.cpuset.set(cpuset);
    if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
    // Add devices with affinity to the hardware pool.
    return add_devices_with_affinity(hwloc);
}

// Returns a const reference to the hardware pool's cpuset.
const qvi_hwloc_bitmap_s &
qvi_hwpool_s::cpuset(void)
{
    return m_cpu.cpuset;
}

// Returns a const reference to the hardware pool's device multimap.
const qvi_hwpool_devs_t &
qvi_hwpool_s::devices(void)
{
    return m_devs;
}

// Stores in *result the number of objects of obj_type in the pool:
// host resources are counted within the pool's cpuset via hwloc;
// device types are counted from the pool's device multimap.
int
qvi_hwpool_s::nobjects(
    qvi_hwloc_t *hwloc,
    qv_hw_obj_type_t obj_type,
    int *result
) {
    if (qvi_hwloc_obj_type_is_host_resource(obj_type)) {
        return qvi_hwloc_get_nobjs_in_cpuset(
            hwloc, obj_type, m_cpu.cpuset.cdata(), result
        );
    }
    *result = m_devs.count(obj_type);
    return QV_SUCCESS;
}

// Adds a copy of the given device to the hardware pool.
int
qvi_hwpool_s::add_device(
    const qvi_hwpool_dev_s &dev
) {
    auto shdev = std::make_shared<qvi_hwpool_dev_s>(dev);
    // Devices are keyed by type; a multimap allows many per type.
    m_devs.insert({dev.type, shdev});
    return QV_SUCCESS;
}

// Releases all devices in the hardware pool.
int
qvi_hwpool_s::release_devices(void)
{
    m_devs.clear();
    return QV_SUCCESS;
}

int
qvi_hwpool_s::pack(
qvi_bbuff_t *buff
Expand Down
92 changes: 37 additions & 55 deletions src/qvi-hwpool.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,87 +85,69 @@ using qvi_hwpool_devs_t = std::multimap<

struct qvi_hwpool_s {
/** The hardware pool's CPU. */
qvi_hwpool_cpu_s cpu;
qvi_hwpool_cpu_s m_cpu;
/** The hardware pool's devices. */
qvi_hwpool_devs_t devs;
qvi_hwpool_devs_t m_devs;
private:
/**
* Initializes a hardware pool with the given cpuset.
* Adds all devices with affinity to the
* provided, initialized hardware resource pool.
*/
int
initialize(
hwloc_const_bitmap_t cpuset
) {
return cpu.cpuset.set(cpuset);
}
add_devices_with_affinity(
qvi_hwloc_t *hwloc
);
public:
/**
* Creates a new, initialized hardware pool based
* on the affinity encoded in the provided cpuset.
*/
static int
new_hwpool_by_cpuset(
new_hwpool(
qvi_hwloc_t *hwloc,
hwloc_const_cpuset_t cpuset,
qvi_hwpool_s **opool
);
/**
* Returns a pointer to the hwpool's cpuset.
* Initializes a hardware pool from the given
hardware locality information and cpuset.
*/
int
initialize(
qvi_hwloc_t *hwloc,
hwloc_const_bitmap_t cpuset
);
/**
* Returns a const reference to the hardware pool's cpuset.
*/
const qvi_hwloc_bitmap_s &
get_cpuset(void)
{
return cpu.cpuset;
}
cpuset(void);
/**
* Adds a qvi_hwpool_dev_s device.
* Returns a const reference to the hardware pool's devices.
*/
int
add_device(
const qvi_hwpool_dev_s &dev
) {
auto shdev = std::make_shared<qvi_hwpool_dev_s>(dev);
devs.insert({dev.type, shdev});
return QV_SUCCESS;
}
const qvi_hwpool_devs_t &
devices(void);
/**
* Adds all devices with affinity to the
* provided, initialized hardware resource pool.
* Returns the number of objects in the hardware pool.
*/
int
add_devices_with_affinity(
qvi_hwloc_t *hwloc
) {
int rc = QV_SUCCESS;
// Iterate over the supported device types.
for (const auto devt : qvi_hwloc_supported_devices()) {
qvi_hwloc_dev_list_t devs;
rc = qvi_hwloc_get_devices_in_bitmap(
hwloc, devt, cpu.cpuset, devs
);
if (rc != QV_SUCCESS) return rc;
for (const auto &dev : devs) {
rc = add_device(qvi_hwpool_dev_s(dev));
if (rc != QV_SUCCESS) return rc;
}
}
return rc;
}
nobjects(
qvi_hwloc_t *hwloc,
qv_hw_obj_type_t obj_type,
int *result
);
/**
* Releases all devices in the hwpool.
* Adds a qvi_hwpool_dev_s device.
*/
int
release_devices(void)
{
devs.clear();
return QV_SUCCESS;
}
add_device(
const qvi_hwpool_dev_s &dev
);
/**
* Returns a const reference to the hardware pool's devices.
* Releases all devices in the hwpool.
*/
const qvi_hwpool_devs_t &
get_devices(void)
{
return devs;
}
int
release_devices(void);
/**
* Packs the instance into a bbuff.
*/
Expand Down
13 changes: 5 additions & 8 deletions src/qvi-rmi.cc
Original file line number Diff line number Diff line change
Expand Up @@ -616,7 +616,7 @@ get_intrinsic_scope_user(
qvi_hwpool_s **hwpool
) {
// TODO(skg) Is the cpuset the best way to do this?
return qvi_hwpool_s::new_hwpool_by_cpuset(
return qvi_hwpool_s::new_hwpool(
server->config.hwloc,
qvi_hwloc_topo_get_cpuset(server->config.hwloc),
hwpool
Expand All @@ -635,7 +635,7 @@ get_intrinsic_scope_proc(
);
if (rc != QV_SUCCESS) goto out;

rc = qvi_hwpool_s::new_hwpool_by_cpuset(
rc = qvi_hwpool_s::new_hwpool(
server->config.hwloc, cpuset, hwpool
);
out:
Expand Down Expand Up @@ -812,19 +812,16 @@ qvi_rmi_server_config(
}

/**
 * Populates the base hardware pool with all available
 * processors and their affine devices.
 */
static int
server_populate_base_hwpool(
    qvi_rmi_server_t *server
) {
    qvi_hwloc_t *const hwloc = server->config.hwloc;
    hwloc_const_cpuset_t cpuset = qvi_hwloc_topo_get_cpuset(hwloc);
    // The base resource pool will contain all available processors and devices.
    return server->hwpool->initialize(hwloc, cpuset);
}

static void *
Expand Down
Loading

0 comments on commit d02ef1f

Please sign in to comment.