Commit 07e5c54

Merge pull request #6584 from markalle/cpu_set

binding: -cpu-set as a constraint rather than as a binding

2 parents: cd5fa97 + bf3980d

5 files changed: +67 -6 lines changed

opal/mca/hwloc/base/hwloc_base_frame.c

Lines changed: 9 additions & 1 deletion
@@ -3,6 +3,7 @@
  * Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
  * Copyright (c) 2016-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
+ * Copyright (c) 2019      IBM Corporation. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -217,7 +218,14 @@ static int opal_hwloc_base_open(mca_base_open_flag_t flags)
      * we do bind to the given cpus if provided, otherwise this would be
      * ignored if someone didn't also specify a binding policy
      */
-    OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_CPUSET);
+    // Restoring pre-ef86707fbe3392c8ed15f79cc4892f0313b409af behavior.
+    // Formerly -cpu-set #,#,# along with -use_hwthread-cpus resulted
+    // in the binding policy staying OPAL_BIND_TO_HWTHREAD. That seems
+    // right: -cpu-set is a constraint you put on another binding
+    // policy, not a binding policy in itself.
+    if (!OPAL_BINDING_POLICY_IS_SET(opal_hwloc_binding_policy)) {
+        OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_CPUSET);
+    }
 }

 /* if we are binding to hwthreads, then we must use hwthreads as cpus */
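
A standalone sketch of the check-before-set pattern above. The macros here
(POLICY_IS_SET, SET_POLICY, GET_POLICY) are hypothetical stand-ins for the
OPAL_* macros, whose real definitions are not shown in this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define BIND_TO_HWTHREAD 1
    #define BIND_TO_CPUSET   2
    #define POLICY_SET_FLAG  0x100   /* "a policy was explicitly set" bit */

    #define POLICY_IS_SET(p) ((p) & POLICY_SET_FLAG)
    #define SET_POLICY(p, b) ((p) = POLICY_SET_FLAG | (b))
    #define GET_POLICY(p)    ((p) & 0xff)

    int main(void)
    {
        uint16_t policy = 0;

        /* user asked for, e.g., --bind-to hwthread */
        SET_POLICY(policy, BIND_TO_HWTHREAD);

        /* -cpu-set handling after the fix: only a default, never an override */
        if (!POLICY_IS_SET(policy)) {
            SET_POLICY(policy, BIND_TO_CPUSET);
        }

        /* prints "hwthread"; the unconditional pre-fix SET_POLICY would
         * have clobbered it to "cpuset" */
        printf("effective policy: %s\n",
               GET_POLICY(policy) == BIND_TO_HWTHREAD ? "hwthread" : "cpuset");
        return 0;
    }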

opal/mca/hwloc/base/hwloc_base_util.c

Lines changed: 22 additions & 3 deletions
@@ -765,15 +765,34 @@ static hwloc_obj_t df_search(hwloc_topology_t topo,
         return found;
     }
     if (OPAL_HWLOC_AVAILABLE == rtype) {
+        // The previous (3.x) code included a check for
+        //     available = opal_hwloc_base_get_available_cpus(topo, start)
+        // and skipped objs that had hwloc_bitmap_iszero(available).
+        hwloc_obj_t root;
+        opal_hwloc_topo_data_t *rdata;
+        root = hwloc_get_root_obj(topo);
+        rdata = (opal_hwloc_topo_data_t*)root->userdata;
+        hwloc_cpuset_t constrained_cpuset;
+
+        constrained_cpuset = hwloc_bitmap_alloc();
+        if (rdata && rdata->available) {
+            hwloc_bitmap_and(constrained_cpuset, start->cpuset, rdata->available);
+        } else {
+            hwloc_bitmap_copy(constrained_cpuset, start->cpuset);
+        }
+
         unsigned idx = 0;
         if (num_objs)
-            *num_objs = hwloc_get_nbobjs_inside_cpuset_by_depth(topo, start->cpuset, search_depth);
+            *num_objs = hwloc_get_nbobjs_inside_cpuset_by_depth(topo, constrained_cpuset, search_depth);
         obj = NULL;
-        while ((obj = hwloc_get_next_obj_inside_cpuset_by_depth(topo, start->cpuset, search_depth, obj)) != NULL) {
-            if (idx == nobj)
+        while ((obj = hwloc_get_next_obj_inside_cpuset_by_depth(topo, constrained_cpuset, search_depth, obj)) != NULL) {
+            if (idx == nobj) {
+                hwloc_bitmap_free(constrained_cpuset);
                 return obj;
+            }
             idx++;
         }
+        hwloc_bitmap_free(constrained_cpuset);
         return NULL;
     }
     return NULL;
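
As a standalone illustration (not code from this commit) of constraining a
topology walk to an intersected cpuset, the following compiles against stock
hwloc; the "0-3" availability mask is a made-up stand-in for rdata->available:

    #include <hwloc.h>
    #include <stdio.h>

    int main(void)
    {
        hwloc_topology_t topo;
        hwloc_topology_init(&topo);
        hwloc_topology_load(topo);

        hwloc_obj_t root = hwloc_get_root_obj(topo);

        /* hypothetical availability mask: pretend only PUs 0-3 are usable */
        hwloc_bitmap_t available = hwloc_bitmap_alloc();
        hwloc_bitmap_list_sscanf(available, "0-3");

        /* the df_search() pattern: intersect before counting/iterating */
        hwloc_bitmap_t constrained = hwloc_bitmap_alloc();
        hwloc_bitmap_and(constrained, root->cpuset, available);

        int depth = hwloc_get_type_depth(topo, HWLOC_OBJ_PU);
        printf("PUs inside constrained cpuset: %d\n",
               (int)hwloc_get_nbobjs_inside_cpuset_by_depth(topo, constrained, depth));

        hwloc_obj_t obj = NULL;
        while ((obj = hwloc_get_next_obj_inside_cpuset_by_depth(topo, constrained,
                                                                depth, obj)) != NULL)
            printf("  PU os_index %u\n", obj->os_index);

        hwloc_bitmap_free(constrained);
        hwloc_bitmap_free(available);
        hwloc_topology_destroy(topo);
        return 0;
    }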

orte/mca/rmaps/base/rmaps_base_binding.c

Lines changed: 12 additions & 0 deletions
@@ -16,6 +16,7 @@
  * Copyright (c) 2015-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2018      Inria. All rights reserved.
+ * Copyright (c) 2019      IBM Corporation. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -168,8 +169,19 @@ static int bind_generic(orte_job_t *jdata,
     trg_obj = NULL;
     min_bound = UINT_MAX;
     while (NULL != (tmp_obj = hwloc_get_next_obj_by_depth(node->topology->topo, target_depth, tmp_obj))) {
+        hwloc_obj_t root;
+        opal_hwloc_topo_data_t *rdata;
+        root = hwloc_get_root_obj(node->topology->topo);
+        rdata = (opal_hwloc_topo_data_t*)root->userdata;
+
         if (!hwloc_bitmap_intersects(locale->cpuset, tmp_obj->cpuset))
             continue;
+        // In the old 3.x code trg_obj was picked via a call to
+        // opal_hwloc_base_find_min_bound_target_under_obj(), which
+        // skipped over unavailable objects (via opal_hwloc_base_get_npus).
+        if (rdata && rdata->available && !hwloc_bitmap_intersects(rdata->available, tmp_obj->cpuset))
+            continue;
+
         data = (opal_hwloc_obj_data_t*)tmp_obj->userdata;
         if (NULL == data) {
             data = OBJ_NEW(opal_hwloc_obj_data_t);
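
A standalone sketch of the skip test added above, using only stock hwloc
calls; the "0-1" availability mask is hypothetical:

    #include <hwloc.h>
    #include <stdio.h>

    int main(void)
    {
        hwloc_topology_t topo;
        hwloc_topology_init(&topo);
        hwloc_topology_load(topo);

        hwloc_bitmap_t available = hwloc_bitmap_alloc();
        hwloc_bitmap_list_sscanf(available, "0-1");   /* pretend PUs 0-1 usable */

        int depth = hwloc_get_type_depth(topo, HWLOC_OBJ_CORE);
        if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
            depth = hwloc_get_type_depth(topo, HWLOC_OBJ_PU);

        hwloc_obj_t obj = NULL;
        while ((obj = hwloc_get_next_obj_by_depth(topo, depth, obj)) != NULL) {
            /* the bind_generic() test: no overlap with "available" => skip */
            if (!hwloc_bitmap_intersects(available, obj->cpuset))
                continue;
            printf("object %u is a candidate bind target\n", obj->logical_index);
        }

        hwloc_bitmap_free(available);
        hwloc_topology_destroy(topo);
        return 0;
    }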

orte/mca/rmaps/base/rmaps_base_ranking.c

Lines changed: 21 additions & 1 deletion
@@ -13,6 +13,7 @@
  * Copyright (c) 2014-2018 Intel, Inc. All rights reserved.
  * Copyright (c) 2017      Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
+ * Copyright (c) 2019      IBM Corporation. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -377,8 +378,25 @@ static int rank_by(orte_job_t *jdata,
      * Perhaps someday someone will come up with a more efficient
      * algorithm, but this works for now.
      */
+    // In 3.x this was two loops:
+    //     while (cnt < app->num_procs)
+    //         for (i=0; i<num_objs; ...)
+    // Then in 4.x it switched to
+    //     while (cnt < app->num_procs && i < (int)node->num_procs)
+    // where the extra i condition seems wrong. If anything it should be
+    // i < num_objs, since that's the array i is cycling through, but
+    // even then every use of i below is (i % num_objs), so i is meant
+    // to wrap and the loop should keep going until it has made all the
+    // assignments it can for this node.
+    //
+    // That's what the extra loop counter is for: if it cycles through
+    // the whole array of objs without making an assignment, it's time
+    // for this loop to end and the outer loop to take us to the next
+    // node.
     i = 0;
-    while (cnt < app->num_procs && i < (int)node->num_procs) {
+    int niters_of_i_without_assigning_a_proc = 0;
+    while (cnt < app->num_procs && niters_of_i_without_assigning_a_proc <= num_objs) {
         /* get the next object */
         obj = (hwloc_obj_t)opal_pointer_array_get_item(&objs, i % num_objs);
         if (NULL == obj) {
@@ -446,6 +464,7 @@
             return rc;
         }
         num_ranked++;
+        niters_of_i_without_assigning_a_proc = 0;
         /* track where the highest vpid landed - this is our
          * new bookmark
          */
@@ -454,6 +473,7 @@
             break;
         }
         i++;
+        ++niters_of_i_without_assigning_a_proc;
     }
 }
 /* cleanup */
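
The termination argument can be checked with a toy model (a standalone
sketch; the capacity array is invented, not from this commit). i wraps via
i % num_objs, the stall counter resets on every assignment, and the loop
ends once a full pass over the objs assigns nothing:

    #include <stdio.h>

    int main(void)
    {
        int capacity[4] = {2, 0, 1, 2};   /* free slots per obj (hypothetical) */
        int num_objs = 4, num_procs = 6, cnt = 0, i = 0;
        int iters_without_assignment = 0;

        while (cnt < num_procs && iters_without_assignment <= num_objs) {
            int idx = i % num_objs;       /* wrap around the obj array */
            if (capacity[idx] > 0) {
                capacity[idx]--;
                cnt++;
                printf("proc %d -> obj %d\n", cnt, idx);
                iters_without_assignment = 0;
            }
            i++;
            iters_without_assignment++;
        }
        /* prints "assigned 5 of 6": the node fills up, then one full
         * fruitless pass ends the loop instead of spinning forever */
        printf("assigned %d of %d procs\n", cnt, num_procs);
        return 0;
    }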

orte/mca/rmaps/rank_file/rmaps_rank_file_component.c

Lines changed: 3 additions & 1 deletion
@@ -15,6 +15,7 @@
  * Copyright (c) 2014-2018 Intel, Inc. All rights reserved.
  * Copyright (c) 2015      Los Alamos National Security, LLC. All rights
  *                         reserved.
+ * Copyright (c) 2019      IBM Corporation. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -106,7 +107,8 @@ static int orte_rmaps_rank_file_register(void)
 static int orte_rmaps_rank_file_open(void)
 {
     /* ensure we flag mapping by user */
-    if ((NULL != opal_hwloc_base_cpu_list && !OPAL_BIND_ORDERED_REQUESTED(opal_hwloc_binding_policy)) ||
+    if ((OPAL_BIND_TO_CPUSET == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy) &&
+         !OPAL_BIND_ORDERED_REQUESTED(opal_hwloc_binding_policy)) ||
         NULL != orte_rankfile) {
         if (ORTE_MAPPING_GIVEN & ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping)) {
             /* if a non-default mapping is already specified, then we
