Refactor scope code with cleanups. (#231)
Signed-off-by: Samuel K. Gutierrez <samuel@lanl.gov>
samuelkgutierrez authored Jul 17, 2024
1 parent 48db379 commit 6413a38
Showing 3 changed files with 26 additions and 28 deletions.
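In short: qv_scope_s previously cached its own qvi_rmi_client_t pointer; this commit removes that duplicated state, and scopes now reach the RMI client through their group's task via group->task()->rmi(), so scope_init() loses its rmi parameter. The following is a minimal sketch of the resulting access pattern with stand-in types; the rmi() accessor on qvi_task_s is implied by the call sites in the diff but not shown in these hunks, so its exact shape is an assumption.

// Illustrative stand-ins only, not the actual quo-vadis definitions.
struct qvi_rmi_client_t { /* opaque RMI client */ };

struct qvi_task_s {
    qvi_rmi_client_t *myrmi = nullptr;             // member shown in qvi-task.h below
    qvi_rmi_client_t *rmi(void) { return myrmi; }  // assumed accessor
};

struct qvi_group_s {
    qvi_task_s *m_task = nullptr;
    qvi_task_s *task(void) { return m_task; }      // accessor shown in qvi-group.h below
};

struct qv_scope_s {
    // Removed by this commit: qvi_rmi_client_t *rmi = nullptr;
    qvi_group_s *group = nullptr;
};

// After the refactor, call sites fetch the client on demand, e.g.:
static qvi_rmi_client_t *
scope_rmi(qv_scope_s *scope) {                     // hypothetical helper for illustration
    return scope->group->task()->rmi();
}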
14 changes: 9 additions & 5 deletions src/qvi-group.h
@@ -18,7 +18,7 @@
 #define QVI_GROUP_H
 
 #include "qvi-common.h"
-#include "qvi-task.h"
+#include "qvi-task.h" // IWYU pragma: keep
 #include "qvi-utils.h"
 
 /** Group ID type. */
@@ -44,16 +44,20 @@ struct qvi_group_s {
         qvi_delete(&m_task);
     }
     /** Returns pointer to the caller's task information. */
-    qvi_task_t *task(void)
+    qvi_task_t *
+    task(void)
     {
         return m_task;
     }
     /** Returns the caller's group rank. */
-    virtual int rank(void) = 0;
+    virtual int
+    rank(void) = 0;
     /** Returns the number of members in this group. */
-    virtual int size(void) = 0;
+    virtual int
+    size(void) = 0;
     /** Performs node-local group barrier. */
-    virtual int barrier(void) = 0;
+    virtual int
+    barrier(void) = 0;
     /** Makes the calling instance an intrinsic group. */
     virtual int
     make_intrinsic(
34 changes: 14 additions & 20 deletions src/qvi-scope.cc
@@ -28,8 +28,6 @@ using id_devinfo_multimap_t = std::multimap<int, const qvi_hwpool_dev_s *>;
 
 /** Scope type definition. */
 struct qv_scope_s {
-    /** Pointer to initialized RMI infrastructure. */
-    qvi_rmi_client_t *rmi = nullptr;
     /** Task group associated with this scope instance. */
     qvi_group_t *group = nullptr;
     /** Hardware resource pool. */
@@ -39,7 +39,6 @@ struct qv_scope_s {
     /** Destructor */
     ~qv_scope_s(void)
     {
-        rmi = nullptr;
        qvi_delete(&hwpool);
        qvi_delete(&group);
    }
@@ -181,7 +178,7 @@ struct qvi_scope_split_coll_s {
         if (myid == qvi_scope_split_coll_s::rootid) {
             const uint_t group_size = parent_scope->group->size();
             gsplit.init(
-                parent_scope->rmi, parent_scope->hwpool,
+                parent_scope->group->task()->rmi(), parent_scope->hwpool,
                 group_size, split_size_a, split_at_type_a
             );
         }
@@ -219,7 +216,6 @@ get_obj_type_in_hwpool(
     );
 }
 
-
 template <typename TYPE>
 static int
 gather_values(
@@ -457,7 +453,8 @@ scope_split_coll_gather(
     for (uint_t tid = 0; tid < group_size; ++tid) {
         hwloc_cpuset_t cpuset = nullptr;
         rc = qvi_rmi_task_get_cpubind(
-            parent->rmi, splitcoll.gsplit.taskids[tid], &cpuset
+            parent->group->task()->rmi(),
+            splitcoll.gsplit.taskids[tid], &cpuset
         );
         if (rc != QV_SUCCESS) break;
 
@@ -540,12 +537,10 @@ qvi_scope_kfree(
 static int
 scope_init(
     qv_scope_t *scope,
-    qvi_rmi_client_t *rmi,
     qvi_group_t *group,
     qvi_hwpool_s *hwpool
 ) {
-    if (!rmi || !hwpool || !scope) qvi_abort();
-    scope->rmi = rmi;
+    if (!hwpool || !scope) qvi_abort();
     scope->group = group;
     scope->hwpool = hwpool;
     return QV_SUCCESS;
@@ -615,7 +610,7 @@ qvi_scope_get(
     rc = qvi_scope_new(scope);
     if (rc != QV_SUCCESS) goto out;
     // Initialize the scope.
-    rc = scope_init(*scope, rmi, zgroup, hwpool);
+    rc = scope_init(*scope, zgroup, hwpool);
 out:
     if (rc != QV_SUCCESS) {
         qvi_delete(&hwpool);
@@ -1113,7 +1108,7 @@ qvi_scope_split(
     rc = qvi_scope_new(&ichild);
     if (rc != QV_SUCCESS) goto out;
 
-    rc = scope_init(ichild, parent->rmi, group, hwpool);
+    rc = scope_init(ichild, group, hwpool);
 out:
     if (rc != QV_SUCCESS) {
         qvi_delete(&hwpool);
@@ -1140,7 +1135,8 @@ qvi_scope_ksplit(
     const uint_t group_size = k;
     qvi_scope_split_agg_s splitagg{};
     int rc = splitagg.init(
-        parent->rmi, parent->hwpool, group_size, npieces, maybe_obj_type
+        parent->group->task()->rmi(), parent->hwpool,
+        group_size, npieces, maybe_obj_type
     );
     if (rc != QV_SUCCESS) return rc;
     // Since this is called by a single task, get its ID and associated hardware
@@ -1149,7 +1145,7 @@ qvi_scope_ksplit(
     const pid_t taskid = qvi_task_s::mytid();
     hwloc_cpuset_t task_affinity = nullptr;
     rc = qvi_rmi_task_get_cpubind(
-        parent->rmi, taskid, &task_affinity
+        parent->group->task()->rmi(), taskid, &task_affinity
     );
     if (rc != QV_SUCCESS) return rc;
     // Now populate the relevant data before attempting a split.
@@ -1201,9 +1197,7 @@ qvi_scope_ksplit(
             qvi_scope_free(&child);
             break;
         }
-        // TODO(skg) We need to rethink how we deal with RMI in scopes.
-        auto test_rmi = group->task()->rmi();
-        rc = scope_init(child, test_rmi, group, hwpool);
+        rc = scope_init(child, group, hwpool);
         if (rc != QV_SUCCESS) {
             qvi_delete(&hwpool);
             qvi_delete(&group);
@@ -1265,7 +1259,7 @@ qvi_scope_create(
     hwloc_cpuset_t cpuset = nullptr;
     // TODO(skg) We need to acquire these resources.
     int rc = qvi_rmi_get_cpuset_for_nobjs(
-        parent->rmi,
+        parent->group->task()->rmi(),
         parent->hwpool->get_cpuset().cdata(),
         type, nobjs, &cpuset
     );
@@ -1284,7 +1278,7 @@ qvi_scope_create(
     rc = qvi_scope_new(&ichild);
     if (rc != QV_SUCCESS) goto out;
 
-    rc = scope_init(ichild, parent->rmi, group, hwpool);
+    rc = scope_init(ichild, group, hwpool);
 out:
     qvi_hwloc_bitmap_free(&cpuset);
     if (rc != QV_SUCCESS) {
@@ -1302,7 +1296,7 @@ qvi_scope_nobjs(
     int *n
 ) {
     return get_nobjs_in_hwpool(
-        scope->rmi, scope->hwpool, obj, n
+        scope->group->task()->rmi(), scope->hwpool, obj, n
     );
 }
 
@@ -1313,7 +1307,7 @@ qvi_scope_obj_type(
     qv_hw_obj_type_t *obj
 ) {
     return get_obj_type_in_hwpool(
-        scope->rmi, scope->hwpool, npieces, obj
+        scope->group->task()->rmi(), scope->hwpool, npieces, obj
     );
 }
 
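The destructor hunk near the top underlines the rationale: the scope never owned the RMI client (the old destructor merely nulled the cached pointer), while the task does own one (myrmi in qvi-task.h below). A toy RAII sketch of that ownership chain, under the assumption that the task tears down the client; the names here are stand-ins, not quo-vadis API.

#include <memory>

struct RmiClient { };                                  // stands in for qvi_rmi_client_t

struct Task {                                          // stands in for qvi_task_s
    std::unique_ptr<RmiClient> rmi_ = std::make_unique<RmiClient>();
    RmiClient *rmi() { return rmi_.get(); }            // callers borrow, never own
};

struct Group {                                         // stands in for qvi_group_t
    std::unique_ptr<Task> task_ = std::make_unique<Task>();
    Task *task() { return task_.get(); }
};

struct Scope {                                         // stands in for qv_scope_s
    std::unique_ptr<Group> group_ = std::make_unique<Group>();
    RmiClient *rmi() { return group_->task()->rmi(); } // borrowed via the chain
};

// Destroying a Scope tears down Group -> Task -> RmiClient exactly once;
// no second, cached RmiClient* is left behind in the scope itself.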
6 changes: 3 additions & 3 deletions src/qvi-task.h
@@ -28,12 +28,12 @@ struct qvi_task_s {
     qvi_rmi_client_t *myrmi = nullptr;
     /** The task's bind stack. */
     qvi_task_bind_stack_t mystack;
-    /** Initializes the bind stack. */
-    int
-    bind_stack_init(void);
     /** Connects to the RMI server. */
     int
     connect_to_server(void);
+    /** Initializes the bind stack. */
+    int
+    bind_stack_init(void);
 public:
     /** Returns the caller's thread ID. */
     static pid_t
