From 40c6d97e72afb1f1ca062ac37fa1b2a8915deb3e Mon Sep 17 00:00:00 2001
From: "Julian M. Kunkel"
Date: Wed, 20 Jan 2021 14:35:06 +0000
Subject: [PATCH] Replaced MPI_COMM_WORLD where needed with testComm.

---
 src/aiori-DFS.c   | 14 +++++++-------
 src/aiori-S3-4c.c |  4 ++--
 src/ior.c         |  8 +++-----
 3 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/src/aiori-DFS.c b/src/aiori-DFS.c
index 686075f..8e6b2a7 100755
--- a/src/aiori-DFS.c
+++ b/src/aiori-DFS.c
@@ -255,7 +255,7 @@ HandleDistribute(enum handleType type)
                 DCHECK(rc, "Failed to get global handle size");
         }
 
-        MPI_CHECK(MPI_Bcast(&global.iov_buf_len, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD),
+        MPI_CHECK(MPI_Bcast(&global.iov_buf_len, 1, MPI_UINT64_T, 0, testComm),
                   "Failed to bcast global handle buffer size");
 
         global.iov_len = global.iov_buf_len;
@@ -273,7 +273,7 @@ HandleDistribute(enum handleType type)
                 DCHECK(rc, "Failed to create global handle");
         }
 
-        MPI_CHECK(MPI_Bcast(global.iov_buf, global.iov_buf_len, MPI_BYTE, 0, MPI_COMM_WORLD),
+        MPI_CHECK(MPI_Bcast(global.iov_buf, global.iov_buf_len, MPI_BYTE, 0, testComm),
                   "Failed to bcast global pool handle");
 
         if (rank != 0) {
@@ -555,16 +555,16 @@ DFS_Finalize(aiori_mod_opt_t *options)
         DFS_options_t *o = (DFS_options_t *)options;
         int rc;
 
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
         d_hash_table_destroy(dir_hash, true /* force */);
 
         rc = dfs_umount(dfs);
         DCHECK(rc, "Failed to umount DFS namespace");
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
 
         rc = daos_cont_close(coh, NULL);
         DCHECK(rc, "Failed to close container %s (%d)", o->cont, rc);
-        MPI_Barrier(MPI_COMM_WORLD);
+        MPI_Barrier(testComm);
 
         if (o->destroy) {
                 if (rank == 0) {
@@ -580,7 +580,7 @@ DFS_Finalize(aiori_mod_opt_t *options)
                         INFO(VERBOSE_1, "Container Destroy time = %f secs", t2-t1);
                 }
 
-        MPI_Bcast(&rc, 1, MPI_INT, 0, MPI_COMM_WORLD);
+        MPI_Bcast(&rc, 1, MPI_INT, 0, testComm);
         if (rc) {
                 if (rank == 0)
                         DCHECK(rc, "Failed to destroy container %s (%d)", o->cont, rc);
@@ -594,7 +594,7 @@ DFS_Finalize(aiori_mod_opt_t *options)
         rc = daos_pool_disconnect(poh, NULL);
         DCHECK(rc, "Failed to disconnect from pool");
 
-        MPI_CHECK(MPI_Barrier(MPI_COMM_WORLD), "barrier error");
+        MPI_CHECK(MPI_Barrier(testComm), "barrier error");
 
         if (rank == 0)
                 INFO(VERBOSE_1, "Finalizing DAOS..\n");
diff --git a/src/aiori-S3-4c.c b/src/aiori-S3-4c.c
index f34fadb..6155ceb 100755
--- a/src/aiori-S3-4c.c
+++ b/src/aiori-S3-4c.c
@@ -1076,7 +1076,7 @@ static void S3_Close_internal(aiori_fd_t* fd, s3_options_t* param, int multi_pa
                                 MPI_Abort(testComm, 1);
                         }
                         MPI_Gather(etag_data, etag_data_size, MPI_BYTE,
-                                   etag_vec, etag_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
+                                   etag_vec, etag_data_size, MPI_BYTE, 0, testComm);
 
                         // --- debugging: show the gathered etag data
                         //     (This shows the raw concatenated etag-data from each node.)
@@ -1196,7 +1196,7 @@ static void S3_Close_internal(aiori_fd_t* fd, s3_options_t* param, int multi_pa
                         aws_iobuf_append_str(xml, "\n");
                 } else {
                         MPI_Gather(etag_data, etag_data_size, MPI_BYTE,
-                                   NULL, etag_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
+                                   NULL, etag_data_size, MPI_BYTE, 0, testComm);
                 }
         } else { /* N:N */
diff --git a/src/ior.c b/src/ior.c
index f30594a..3509cc5 100755
--- a/src/ior.c
+++ b/src/ior.c
@@ -589,11 +589,9 @@ void DistributeHints(void)
                 }
         }
 
-        MPI_CHECK(MPI_Bcast(&hintCount, sizeof(hintCount), MPI_BYTE,
-                            0, MPI_COMM_WORLD), "cannot broadcast hints");
+        MPI_CHECK(MPI_Bcast(&hintCount, sizeof(hintCount), MPI_BYTE, 0, testComm), "cannot broadcast hints");
         for (i = 0; i < hintCount; i++) {
-                MPI_CHECK(MPI_Bcast(&hint[i], MAX_STR, MPI_BYTE,
-                                    0, MPI_COMM_WORLD),
+                MPI_CHECK(MPI_Bcast(&hint[i], MAX_STR, MPI_BYTE, 0, testComm),
                           "cannot broadcast hints");
                 strcpy(fullHint, hint[i]);
                 strcpy(hintVariable, strtok(fullHint, "="));
@@ -1884,7 +1882,7 @@ static IOR_offset_t WriteOrRead(IOR_param_t *test, IOR_results_t *results,
                 if ( test->collective && test->deadlineForStonewalling ) {
                   // if collective-mode, you'll get a HANG, if some rank 'accidentally' leave this loop
                   // it absolutely must be an 'all or none':
-                  MPI_CHECK(MPI_Bcast(&hitStonewall, 1, MPI_INT, 0, MPI_COMM_WORLD), "hitStonewall broadcast failed");
+                  MPI_CHECK(MPI_Bcast(&hitStonewall, 1, MPI_INT, 0, testComm), "hitStonewall broadcast failed");
                 }
         }
 }
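
For reference, a minimal standalone sketch (not part of the patch) of why these collectives must run on testComm rather than MPI_COMM_WORLD: when a benchmark restricts a run to a subset of the launched ranks, testComm is typically a sub-communicator derived from MPI_COMM_WORLD, and a collective posted on MPI_COMM_WORLD by only that subset never completes. The communicator and variable names below are illustrative, not IOR's actual code.

/* sketch.c - broadcast on the sub-communicator the participating ranks share.
 * Assumes an MPI implementation; test_comm here stands in for IOR's testComm. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        MPI_Init(&argc, &argv);

        int world_rank, world_size;
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &world_size);

        /* Suppose only the first half of the ranks take part in the test. */
        int in_test = (world_rank < (world_size + 1) / 2);
        MPI_Comm test_comm;
        MPI_Comm_split(MPI_COMM_WORLD, in_test ? 0 : MPI_UNDEFINED,
                       world_rank, &test_comm);

        if (in_test) {
                int value = (world_rank == 0) ? 42 : 0;
                /* Correct: broadcast on the communicator shared by the
                 * participating ranks. Posting this on MPI_COMM_WORLD would
                 * hang, because excluded ranks never enter the call. */
                MPI_Bcast(&value, 1, MPI_INT, 0, test_comm);
                printf("rank %d received %d\n", world_rank, value);
                MPI_Comm_free(&test_comm);
        }

        MPI_Finalize();
        return 0;
}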