Skip to content

Commit

Permalink
Refactor task code.
Browse files Browse the repository at this point in the history
Working toward pthread support.

Signed-off-by: Samuel K. Gutierrez <[email protected]>
  • Loading branch information
samuelkgutierrez committed Jul 17, 2024
1 parent 346ccf2 commit 9954972
Show file tree
Hide file tree
Showing 4 changed files with 118 additions and 172 deletions.
5 changes: 3 additions & 2 deletions src/qvi-group.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

#include "qvi-common.h"
#include "qvi-task.h"
#include "qvi-utils.h"

/** Group ID type. */
using qvi_group_id_t = uint64_t;
Expand All @@ -34,13 +35,13 @@ struct qvi_group_s {
/** Constructor. */
qvi_group_s(void)
{
const int rc = qvi_task_new(&m_task);
const int rc = qvi_new(&m_task);
if (rc != QV_SUCCESS) throw qvi_runtime_error();
}
/** Virtual destructor. */
virtual ~qvi_group_s(void)
{
qvi_task_free(&m_task);
qvi_delete(&m_task);
}
/** Returns pointer to the caller's task information. */
qvi_task_t *task(void)
Expand Down
17 changes: 8 additions & 9 deletions src/qvi-scope.cc
Original file line number Diff line number Diff line change
Expand Up @@ -429,7 +429,7 @@ scope_split_coll_gather(
int rc = gather_values(
parent->group,
qvi_scope_split_coll_s::rootid,
qvi_task_id(),
qvi_task_t::mytid(),
splitcoll.gsplit.taskids
);
if (rc != QV_SUCCESS) return rc;
Expand Down Expand Up @@ -602,13 +602,13 @@ qvi_scope_get(
qv_scope_t **scope
) {
qvi_hwpool_s *hwpool = nullptr;
qvi_rmi_client_t *rmi = qvi_task_rmi(zgroup->task());
qvi_rmi_client_t *rmi = zgroup->task()->rmi();
// Get the requested intrinsic group.
int rc = zgroup->make_intrinsic(iscope);
if (rc != QV_SUCCESS) goto out;
// Get the requested intrinsic hardware pool.
rc = qvi_rmi_scope_get_intrinsic_hwpool(
rmi, qvi_task_id(), iscope, &hwpool
rmi, qvi_task_s::mytid(), iscope, &hwpool
);
if (rc != QV_SUCCESS) goto out;
// Create the scope.
Expand Down Expand Up @@ -1146,7 +1146,7 @@ qvi_scope_ksplit(
// Since this is called by a single task, get its ID and associated hardware
// affinity here, and replicate them in the following loop that populates
// splitagg. No point in doing this in a loop.
const pid_t taskid = qvi_task_id();
const pid_t taskid = qvi_task_s::mytid();
hwloc_cpuset_t task_affinity = nullptr;
rc = qvi_rmi_task_get_cpubind(
parent->rmi, taskid, &task_affinity
Expand Down Expand Up @@ -1202,7 +1202,7 @@ qvi_scope_ksplit(
break;
}
// TODO(skg) We need to rethink how we deal with RMI in scopes.
auto test_rmi = qvi_task_rmi(group->task());
auto test_rmi = group->task()->rmi();
rc = scope_init(child, test_rmi, group, hwpool);
if (rc != QV_SUCCESS) {
qvi_delete(&hwpool);
Expand Down Expand Up @@ -1366,8 +1366,7 @@ int
qvi_scope_bind_push(
qv_scope_t *scope
) {
return qvi_task_bind_push(
scope->group->task(),
return scope->group->task()->bind_push(
scope->hwpool->get_cpuset().cdata()
);
}
Expand All @@ -1376,7 +1375,7 @@ int
qvi_scope_bind_pop(
qv_scope_t *scope
) {
return qvi_task_bind_pop(scope->group->task());
return scope->group->task()->bind_pop();
}

int
Expand All @@ -1388,7 +1387,7 @@ qvi_scope_bind_string(
char *istr = nullptr;

hwloc_cpuset_t cpuset = nullptr;
int rc = qvi_task_bind_top(scope->group->task(), &cpuset);
int rc = scope->group->task()->bind_top(&cpuset);
if (rc != QV_SUCCESS) return rc;

switch (format) {
Expand Down
186 changes: 66 additions & 120 deletions src/qvi-task.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,151 +18,97 @@
#include "qvi-rmi.h"
#include "qvi-utils.h"

using qvi_task_bind_stack_t = std::stack<qvi_hwloc_bitmap_s>;

struct qvi_task_s {
/** Client-side connection to the RMI. */
qvi_rmi_client_t *rmi = nullptr;
/** The task's bind stack. */
qvi_task_bind_stack_t stack;
/** Returns the caller's thread ID. */
static pid_t
me(void)
{
return qvi_gettid();
}
/** Connects to the RMI server. */
int
connect_to_server(void)
{
std::string url;
const int rc = qvi_url(url);
if (qvi_unlikely(rc != QV_SUCCESS)) {
qvi_log_error("{}", qvi_conn_ers());
return rc;
}
return qvi_rmi_client_connect(rmi, url);
}
/** Initializes the bind stack. */
int
bind_stack_init(void)
{
// Cache current binding.
hwloc_cpuset_t current_bind = nullptr;
const int rc = qvi_rmi_task_get_cpubind(
rmi, me(), &current_bind
);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
/** Returns the calling thread's ID (delegates to qvi_gettid()). */
pid_t
qvi_task_s::mytid(void)
{
    const pid_t tid = qvi_gettid();
    return tid;
}

stack.push(qvi_hwloc_bitmap_s(current_bind));
hwloc_bitmap_free(current_bind);
return rc;
}
/** Default constructor. */
qvi_task_s(void)
{
int rc = qvi_rmi_client_new(&rmi);
if (qvi_unlikely(rc != QV_SUCCESS)) throw qvi_runtime_error();
// Connect to our server.
rc = connect_to_server();
if (qvi_unlikely(rc != QV_SUCCESS)) throw qvi_runtime_error();
// Initialize our bind stack.
rc = bind_stack_init();
if (qvi_unlikely(rc != QV_SUCCESS)) throw qvi_runtime_error();
}
/** Copy constructor. */
qvi_task_s(const qvi_task_s &src) = delete;
/** Destructor. */
~qvi_task_s(void)
{
while (!stack.empty()) {
stack.pop();
}
qvi_rmi_client_free(&rmi);
}
/** Changes the task's affinity. */
int
bind_push(
hwloc_const_cpuset_t cpuset
) {
// Copy input bitmap because we don't want to directly modify it.
qvi_hwloc_bitmap_s bitmap_copy(cpuset);
// Change policy
const int rc = qvi_rmi_task_set_cpubind_from_cpuset(
rmi, me(), bitmap_copy.cdata()
);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Push bitmap onto stack.
stack.push(bitmap_copy);
int
qvi_task_s::connect_to_server(void)
{
std::string url;
const int rc = qvi_url(url);
if (qvi_unlikely(rc != QV_SUCCESS)) {
qvi_log_error("{}", qvi_conn_ers());
return rc;
}
/** */
int
bind_pop(void)
{
stack.pop();

return qvi_rmi_task_set_cpubind_from_cpuset(
rmi, me(), stack.top().cdata()
);
}
/** Returns the task's current cpuset. */
int
bind_top(
hwloc_cpuset_t *dest
) {
return qvi_hwloc_bitmap_dup(stack.top().cdata(), dest);
}
};
return qvi_rmi_client_connect(myrmi, url);
}

pid_t
qvi_task_id(void)
int
qvi_task_s::bind_stack_init(void)
{
return qvi_task_s::me();
// Cache current binding.
hwloc_cpuset_t current_bind = nullptr;
const int rc = qvi_rmi_task_get_cpubind(
myrmi, mytid(), &current_bind
);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;

mystack.push(qvi_hwloc_bitmap_s(current_bind));
hwloc_bitmap_free(current_bind);
return rc;
}

int
qvi_task_new(
qvi_task_t **task
) {
return qvi_new(task);
/**
 * Constructor: creates the RMI client, connects to the server, and seeds the
 * bind stack. Throws qvi_runtime_error on any failure.
 */
qvi_task_s::qvi_task_s(void)
{
    int rc = qvi_rmi_client_new(&myrmi);
    if (qvi_unlikely(rc != QV_SUCCESS)) throw qvi_runtime_error();
    // Connect to our server.
    rc = connect_to_server();
    // Initialize our bind stack.
    if (rc == QV_SUCCESS) {
        rc = bind_stack_init();
    }
    if (qvi_unlikely(rc != QV_SUCCESS)) {
        // The destructor does not run for a partially constructed object, so
        // release the already-created RMI client before propagating failure.
        qvi_rmi_client_free(&myrmi);
        throw qvi_runtime_error();
    }
}

void
qvi_task_free(
qvi_task_t **task
) {
qvi_delete(task);
/** Destructor: drains the bind stack, then releases the RMI client. */
qvi_task_s::~qvi_task_s(void)
{
    // Pop every cached binding before tearing down the RMI client, keeping
    // the original teardown order (stack first, client last).
    while (!mystack.empty()) mystack.pop();
    qvi_rmi_client_free(&myrmi);
}

qvi_rmi_client_t *
qvi_task_rmi(
qvi_task_t *task
) {
return task->rmi;
/** Returns the task's RMI client pointer; asserts it is non-null. */
qvi_task_s::rmi(void)
{
    assert(myrmi);
    return myrmi;
}

int
qvi_task_bind_push(
qvi_task_t *task,
qvi_task_s::bind_push(
hwloc_const_cpuset_t cpuset
) {
return task->bind_push(cpuset);
// Copy input bitmap because we don't want to directly modify it.
qvi_hwloc_bitmap_s bitmap_copy(cpuset);
// Change policy
const int rc = qvi_rmi_task_set_cpubind_from_cpuset(
myrmi, mytid(), bitmap_copy.cdata()
);
if (qvi_unlikely(rc != QV_SUCCESS)) return rc;
// Push bitmap onto stack.
mystack.push(bitmap_copy);
return rc;
}

int
qvi_task_bind_pop(
qvi_task_t *task
) {
return task->bind_pop();
/**
 * Pops the top binding off the bind stack and re-applies the binding that is
 * now on top via the RMI server. Returns QV_SUCCESS or an RMI error code.
 *
 * NOTE(review): pop() precedes top(), so this assumes the base binding pushed
 * by bind_stack_init() is still present; an unbalanced pop would leave the
 * stack empty and make top() undefined behavior — TODO confirm callers always
 * pair push/pop.
 */
qvi_task_s::bind_pop(void)
{
    mystack.pop();

    return qvi_rmi_task_set_cpubind_from_cpuset(
        myrmi, mytid(), mystack.top().cdata()
    );
}

int
qvi_task_bind_top(
qvi_task_t *task,
qvi_task_s::bind_top(
hwloc_cpuset_t *dest
) {
return task->bind_top(dest);
return qvi_hwloc_bitmap_dup(mystack.top().cdata(), dest);
}

/*
Expand Down
Loading

0 comments on commit 9954972

Please sign in to comment.