@@ -98,6 +98,7 @@ struct ompi_comm_cid_context_t {
     int remote_leader;
     int iter;
     /** storage for activate barrier */
+    int local_peers;
     int max_local_peers;
     char *port_string;
     bool send_first;
@@ -266,7 +267,8 @@ static ompi_comm_cid_context_t *mca_comm_cid_context_alloc (ompi_communicator_t
     context->send_first = send_first;
     context->iter = 0;
-    context->max_local_peers = ompi_group_count_local_peers (newcomm->c_local_group);
+    context->local_peers = ompi_group_count_local_peers (newcomm->c_local_group);
+    context->max_local_peers = -1;

     return context;
 }
@@ -908,8 +910,12 @@ int ompi_comm_activate_nb (ompi_communicator_t **newcomm, ompi_communicator_t *c
     }

     if (OMPI_COMM_IS_INTRA (*newcomm)) {
-        /* The communicator's disjointness is inferred from max_local_peers. */
-        ret = context->iallreduce_fn (MPI_IN_PLACE, &context->max_local_peers, 1, MPI_MAX, context,
+        /**
+         * The communicator's disjointness is inferred from max_local_peers.
+         * Note: MPI_IN_PLACE cannot be used here because the parent could be an
+         * inter-communicator
+         */
+        ret = context->iallreduce_fn (&context->local_peers, &context->max_local_peers, 1, MPI_MAX, context,
                                       &subreq);
         if (OMPI_SUCCESS != ret) {
             ompi_comm_request_return (request);
@@ -919,7 +925,7 @@ int ompi_comm_activate_nb (ompi_communicator_t **newcomm, ompi_communicator_t *c
     } else {
        ompi_comm_request_schedule_append (request, ompi_comm_activate_nb_complete, NULL, 0);
     }
-
+
    ompi_comm_request_start (request);

    *req = &request->super;
0 commit comments