Replaced MPI_COMM_WORLD where needed with testComm.
parent e4120d600d
commit 40c6d97e72
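IOR issues its collectives over a per-test communicator rather than over every launched rank, which is why this substitution matters whenever fewer tasks participate in a test than were started. A minimal sketch of that pattern, assuming testComm is produced by splitting MPI_COMM_WORLD down to the requested task count (SetupTestComm and numTasks are illustrative names, not the IOR sources):

/* Minimal sketch (assumed names, not the IOR sources): collectives go
 * through a test-wide communicator that may be a strict subset of
 * MPI_COMM_WORLD. */
#include <mpi.h>

static MPI_Comm testComm = MPI_COMM_NULL;   /* analogous to IOR's global */

/* Build the test communicator from the first numTasks world ranks. */
static void SetupTestComm(int numTasks)
{
        int worldRank;
        MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
        int color = (worldRank < numTasks) ? 0 : MPI_UNDEFINED;
        MPI_Comm_split(MPI_COMM_WORLD, color, worldRank, &testComm);
}

int main(int argc, char **argv)
{
        MPI_Init(&argc, &argv);
        SetupTestComm(2);                    /* e.g. run the test on 2 ranks */
        if (testComm != MPI_COMM_NULL) {
                int value = 42;
                /* A broadcast over MPI_COMM_WORLD here would hang the
                 * excluded ranks; testComm keeps the collective matched. */
                MPI_Bcast(&value, 1, MPI_INT, 0, testComm);
                MPI_Comm_free(&testComm);
        }
        MPI_Finalize();
        return 0;
}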
@@ -255,7 +255,7 @@ HandleDistribute(enum handleType type)
                 DCHECK(rc, "Failed to get global handle size");
         }
 
-        MPI_CHECK(MPI_Bcast(&global.iov_buf_len, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD),
+        MPI_CHECK(MPI_Bcast(&global.iov_buf_len, 1, MPI_UINT64_T, 0, testComm),
                   "Failed to bcast global handle buffer size");
 
         global.iov_len = global.iov_buf_len;
@@ -273,7 +273,7 @@ HandleDistribute(enum handleType type)
                 DCHECK(rc, "Failed to create global handle");
         }
 
-        MPI_CHECK(MPI_Bcast(global.iov_buf, global.iov_buf_len, MPI_BYTE, 0, MPI_COMM_WORLD),
+        MPI_CHECK(MPI_Bcast(global.iov_buf, global.iov_buf_len, MPI_BYTE, 0, testComm),
                   "Failed to bcast global pool handle");
 
         if (rank != 0) {
@@ -555,16 +555,16 @@ DFS_Finalize(aiori_mod_opt_t *options)
         DFS_options_t *o = (DFS_options_t *)options;
         int rc;
 
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
         d_hash_table_destroy(dir_hash, true /* force */);
 
         rc = dfs_umount(dfs);
         DCHECK(rc, "Failed to umount DFS namespace");
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
 
         rc = daos_cont_close(coh, NULL);
         DCHECK(rc, "Failed to close container %s (%d)", o->cont, rc);
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
 
         if (o->destroy) {
                 if (rank == 0) {
@@ -580,7 +580,7 @@ DFS_Finalize(aiori_mod_opt_t *options)
                         INFO(VERBOSE_1, "Container Destroy time = %f secs", t2-t1);
                 }
 
-                MPI_Bcast(&rc, 1, MPI_INT, 0, MPI_COMM_WORLD);
+                MPI_Bcast(&rc, 1, MPI_INT, 0, testComm);
                 if (rc) {
                         if (rank == 0)
                                 DCHECK(rc, "Failed to destroy container %s (%d)", o->cont, rc);
@@ -594,7 +594,7 @@ DFS_Finalize(aiori_mod_opt_t *options)
         rc = daos_pool_disconnect(poh, NULL);
         DCHECK(rc, "Failed to disconnect from pool");
 
-        MPI_CHECK(MPI_Barrier(MPI_COMM_WORLD), "barrier error");
+        MPI_CHECK(MPI_Barrier(testComm), "barrier error");
 
         if (rank == 0)
                 INFO(VERBOSE_1, "Finalizing DAOS..\n");
@@ -1076,7 +1076,7 @@ static void S3_Close_internal(aiori_fd_t* fd, s3_options_t* param, int multi_pa
                         MPI_Abort(testComm, 1);
                 }
                 MPI_Gather(etag_data, etag_data_size, MPI_BYTE,
-                           etag_vec, etag_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
+                           etag_vec, etag_data_size, MPI_BYTE, 0, testComm);
 
                 // --- debugging: show the gathered etag data
                 //     (This shows the raw concatenated etag-data from each node.)
@@ -1196,7 +1196,7 @@ static void S3_Close_internal(aiori_fd_t* fd, s3_options_t* param, int multi_pa
                         aws_iobuf_append_str(xml, "</CompleteMultipartUpload>\n");
                 } else {
                         MPI_Gather(etag_data, etag_data_size, MPI_BYTE,
-                                   NULL, etag_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
+                                   NULL, etag_data_size, MPI_BYTE, 0, testComm);
                 }
         } else { /* N:N */
 
@@ -589,11 +589,9 @@ void DistributeHints(void)
                 }
         }
 
-        MPI_CHECK(MPI_Bcast(&hintCount, sizeof(hintCount), MPI_BYTE,
-                            0, MPI_COMM_WORLD), "cannot broadcast hints");
+        MPI_CHECK(MPI_Bcast(&hintCount, sizeof(hintCount), MPI_BYTE, 0, testComm), "cannot broadcast hints");
         for (i = 0; i < hintCount; i++) {
-                MPI_CHECK(MPI_Bcast(&hint[i], MAX_STR, MPI_BYTE,
-                          0, MPI_COMM_WORLD),
+                MPI_CHECK(MPI_Bcast(&hint[i], MAX_STR, MPI_BYTE, 0, testComm),
                           "cannot broadcast hints");
                 strcpy(fullHint, hint[i]);
                 strcpy(hintVariable, strtok(fullHint, "="));
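The hunk above distributes MPI-IO hints: rank 0 broadcasts the hint count, then each fixed-width hint string, so every participating rank applies the same settings. A minimal sketch of that pattern under assumed buffer names and sizes (ShareHints, MAX_HINTS, and the MAX_STR value here are illustrative, not the IOR sources):

#include <mpi.h>

#define MAX_HINTS 16
#define MAX_STR   1024

static void ShareHints(MPI_Comm testComm, char hint[MAX_HINTS][MAX_STR],
                       int *hintCount)
{
        /* The count travels as raw bytes, matching the style of the code
         * above; an MPI_INT broadcast would work equally well. */
        MPI_Bcast(hintCount, sizeof(*hintCount), MPI_BYTE, 0, testComm);
        for (int i = 0; i < *hintCount; i++)
                MPI_Bcast(hint[i], MAX_STR, MPI_BYTE, 0, testComm);
}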
@@ -1884,7 +1882,7 @@ static IOR_offset_t WriteOrRead(IOR_param_t *test, IOR_results_t *results,
       if ( test->collective && test->deadlineForStonewalling ) {
         // if collective-mode, you'll get a HANG, if some rank 'accidentally' leave this loop
         // it absolutely must be an 'all or none':
-        MPI_CHECK(MPI_Bcast(&hitStonewall, 1, MPI_INT, 0, MPI_COMM_WORLD), "hitStonewall broadcast failed");
+        MPI_CHECK(MPI_Bcast(&hitStonewall, 1, MPI_INT, 0, testComm), "hitStonewall broadcast failed");
       }
     }
   }
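The comment in the last hunk carries the key constraint: in collective mode every rank must make the same loop-exit decision, otherwise the ranks still inside the loop block forever in their next collective call. A minimal sketch of that all-or-none stonewall pattern, assuming names like TransferLoop and deadlineSecs (not the IOR loop itself):

#include <mpi.h>

static void TransferLoop(MPI_Comm testComm, double deadlineSecs)
{
        int rank;
        MPI_Comm_rank(testComm, &rank);

        double start = MPI_Wtime();
        int hitStonewall = 0;

        while (!hitStonewall) {
                /* ... perform one collective transfer here ... */

                if (rank == 0)
                        hitStonewall = (MPI_Wtime() - start) >= deadlineSecs;
                /* All-or-none: the decision is broadcast over testComm so the
                 * ranks stay in lockstep; a rank leaving early would hang the
                 * others in the next collective operation. */
                MPI_Bcast(&hitStonewall, 1, MPI_INT, 0, testComm);
        }
}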