NEURON
nrnmpi.cpp
Go to the documentation of this file.
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================.
7 */
8 
9 #include <iostream>
10 #include <string>
11 #include <tuple>
12 
13 #include "coreneuron/nrnconf.h"
14 #include "coreneuron/mpi/nrnmpi.h"
16 #include "nrnmpi.hpp"
17 #if _OPENMP
18 #include <omp.h>
19 #endif
20 #include <mpi.h>
21 namespace coreneuron {
22 
23 
28 
29 static bool nrnmpi_under_nrncontrol_{false};
30 
31 static void nrn_fatal_error(const char* msg) {
32  if (nrnmpi_myid_ == 0) {
33  printf("%s\n", msg);
34  }
36 }
37 
38 void corenrn_subworld();
39 
40 nrnmpi_init_ret_t nrnmpi_init_impl(int* pargc, char*** pargv, bool is_quiet) {
41  // Execute at most once per launch. Avoid memory leak.
42  static bool executed = false;
43  if (executed) {
45  }
46 
48 
49  if (!nrnmpi_initialized_impl()) {
50 #if defined(_OPENMP)
51  int required = MPI_THREAD_FUNNELED;
52  int provided;
53  nrn_assert(MPI_Init_thread(pargc, pargv, required, &provided) == MPI_SUCCESS);
54 
55  nrn_assert(required <= provided);
56 #else
57  nrn_assert(MPI_Init(pargc, pargv) == MPI_SUCCESS);
58 #endif
59  }
60 
61  nrn_assert(MPI_Comm_dup(MPI_COMM_WORLD, &nrnmpi_world_comm) == MPI_SUCCESS);
62  nrn_assert(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm) == MPI_SUCCESS);
63  corenrn_subworld(); // split nrnmpi_comm if ParallelContext.subworlds has been used
64  nrn_assert(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid_) == MPI_SUCCESS);
65  nrn_assert(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs_) == MPI_SUCCESS);
66 
68 
69  if (nrnmpi_myid_ == 0 && !is_quiet) {
70 #if defined(_OPENMP)
71  printf(" num_mpi=%d\n num_omp_thread=%d\n\n", nrnmpi_numprocs_, omp_get_max_threads());
72 #else
73  printf(" num_mpi=%d\n\n", nrnmpi_numprocs_);
74 #endif
75  }
76 
77  executed = true;
79 }
80 
84  MPI_Comm_free(&nrnmpi_world_comm);
85  MPI_Comm_free(&nrnmpi_comm);
86  MPI_Finalize();
87  }
88  }
89 }
90 
extern "C" {
// Callback installed by NEURON (defined in utils.cpp per the doc index).
// Reports the current subworld partitioning; per the call in
// corenrn_subworld the out-parameters are: change counter, subworld index,
// rank within the subworld, subworld size, and world size.
extern void (*nrn2core_subworld_info_)(int&, int&, int&, int&, int&);
}
94 
96  // If ParallelContext.subworlds has been invoked, split the world
97  // communicator according to the subworld partitioning.
98  static int change_cnt{0};
99  int nrn_subworld_change_cnt, nrn_subworld_index, nrn_subworld_rank, nrn_mpi_numprocs_subworld,
100  nrn_mpi_numprocs_world;
102  return;
103  }
104  (*nrn2core_subworld_info_)(nrn_subworld_change_cnt,
105  nrn_subworld_index,
106  nrn_subworld_rank,
107  nrn_mpi_numprocs_subworld,
108  nrn_mpi_numprocs_world);
109  if (nrn_subworld_change_cnt == change_cnt) {
110  return;
111  }
112  change_cnt = nrn_subworld_change_cnt;
113 
114  // clean up / free old nrn_mpi_comm
115  nrn_assert(MPI_Comm_free(&nrnmpi_comm) == MPI_SUCCESS);
116 
117  // ensure world size is the same as NEURON
118  int world_size{-1};
119  nrn_assert(MPI_Comm_size(nrnmpi_world_comm, &world_size) == MPI_SUCCESS);
120  nrn_assert(world_size == nrn_mpi_numprocs_world);
121 
122  // create a new nrnmpi_comm based on subworld partitioning
123  nrn_assert(
124  MPI_Comm_split(nrnmpi_world_comm, nrn_subworld_index, nrn_subworld_rank, &nrnmpi_comm) ==
125  MPI_SUCCESS);
126 
127  // assert that rank order and size is consistent between NEURON and CoreNEURON
128  int subworld_rank{-1};
129  nrn_assert(MPI_Comm_rank(nrnmpi_comm, &subworld_rank) == MPI_SUCCESS);
130  nrn_assert(nrn_subworld_rank == subworld_rank);
131 
132  int subworld_size{-1};
133  nrn_assert(MPI_Comm_size(nrnmpi_comm, &subworld_size) == MPI_SUCCESS);
134  nrn_assert(subworld_size == nrn_mpi_numprocs_subworld);
135 }
136 
137 
138 // check if appropriate threading level supported (i.e. MPI_THREAD_FUNNELED)
140  int th = 0;
141  MPI_Query_thread(&th);
142  if (th < MPI_THREAD_FUNNELED) {
144  "\n Current MPI library doesn't support MPI_THREAD_FUNNELED,\
145  \n Run without enabling multi-threading!");
146  }
147 }
148 
150  int flag = 0;
151  MPI_Initialized(&flag);
152  return flag != 0;
153 }
154 
// Terminate every rank of the job with the given error code.
// Uses MPI_COMM_WORLD (not nrnmpi_comm) so the whole job dies, and does
// not return.
void nrnmpi_abort_impl(int errcode) {
    MPI_Abort(MPI_COMM_WORLD, errcode);
}
158 
160  return MPI_Wtime();
161 }
162 
163 /**
164  * Return local mpi rank within a shared memory node
165  *
166  * When performing certain operations, we need to know the rank of mpi
167  * process on a given node. This function uses MPI 3 MPI_Comm_split_type
168  * function and MPI_COMM_TYPE_SHARED key to find out the local rank.
169  */
171  int local_rank = 0;
172  if (nrnmpi_initialized_impl()) {
173  MPI_Comm local_comm;
174  MPI_Comm_split_type(
175  MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, nrnmpi_myid_, MPI_INFO_NULL, &local_comm);
176  MPI_Comm_rank(local_comm, &local_rank);
177  MPI_Comm_free(&local_comm);
178  }
179  return local_rank;
180 }
181 
182 /**
183  * Return number of ranks running on single shared memory node
184  *
185  * We use MPI 3 MPI_Comm_split_type function and MPI_COMM_TYPE_SHARED key to
186  * determine number of mpi ranks within a shared memory node.
187  */
189  int local_size = 1;
190  if (nrnmpi_initialized_impl()) {
191  MPI_Comm local_comm;
192  MPI_Comm_split_type(
193  MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, nrnmpi_myid_, MPI_INFO_NULL, &local_comm);
194  MPI_Comm_size(local_comm, &local_size);
195  MPI_Comm_free(&local_comm);
196  }
197  return local_size;
198 }
199 
200 /**
201  * Write given buffer to a new file using MPI collective I/O
202  *
203  * For output like spikes, each rank has to write spike timing
204  * information to a single file. This routine writes buffers
205  * of length len1, len2, len3... at the offsets 0, 0+len1,
206  * 0+len1+len2... offsets. This write op is a collective across
207  * all ranks of the common MPI communicator used for spike exchange.
208  *
209  * @param filename Name of the file to write
210  * @param buffer Buffer to write
211  * @param length Length of the buffer to write
212  */
213 void nrnmpi_write_file_impl(const std::string& filename, const char* buffer, size_t length) {
214  MPI_File fh;
215  MPI_Status status;
216 
217  // global offset into file
218  unsigned long offset = 0;
219  MPI_Exscan(&length, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, nrnmpi_comm);
220 
221  int op_status = MPI_File_open(
222  nrnmpi_comm, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
223  if (op_status != MPI_SUCCESS && nrnmpi_myid_ == 0) {
224  std::cerr << "Error while opening output file " << filename << std::endl;
225  abort();
226  }
227 
228  op_status = MPI_File_write_at_all(fh, offset, buffer, length, MPI_BYTE, &status);
229  if (op_status != MPI_SUCCESS && nrnmpi_myid_ == 0) {
230  std::cerr << "Error while writing output " << std::endl;
231  abort();
232  }
233 
234  MPI_File_close(&fh);
235 }
236 } // namespace coreneuron
printf
Definition: extdef.h:5
THIS FILE IS AUTO GENERATED DONT MODIFY IT.
MPI_Comm nrnmpi_comm
Definition: nrnmpi.cpp:25
int nrnmpi_myid_
Definition: nrnmpi.cpp:27
nrnmpi_init_ret_t nrnmpi_init_impl(int *pargc, char ***pargv, bool is_quiet)
Definition: nrnmpi.cpp:40
MPI_Comm nrnmpi_world_comm
Definition: nrnmpi.cpp:24
static bool nrnmpi_under_nrncontrol_
Definition: nrnmpi.cpp:29
void nrnmpi_finalize_impl(void)
Definition: nrnmpi.cpp:81
void(* nrn2core_subworld_info_)(int &, int &, int &, int &, int &)
Definition: utils.cpp:36
void nrnmpi_abort_impl(int errcode)
Definition: nrnmpi.cpp:155
void nrnmpi_write_file_impl(const std::string &filename, const char *buffer, size_t length)
Write given buffer to a new file using MPI collective I/O.
Definition: nrnmpi.cpp:213
void nrnmpi_spike_initialize()
Definition: mpispike.cpp:37
static void nrn_fatal_error(const char *msg)
Definition: nrnmpi.cpp:31
double nrnmpi_wtime_impl()
Definition: nrnmpi.cpp:159
int nrnmpi_local_size_impl()
Return number of ranks running on single shared memory node.
Definition: nrnmpi.cpp:188
int nrnmpi_local_rank_impl()
Return local mpi rank within a shared memory node.
Definition: nrnmpi.cpp:170
bool nrnmpi_initialized_impl()
Definition: nrnmpi.cpp:149
void nrnmpi_check_threading_support_impl()
Definition: nrnmpi.cpp:139
int nrnmpi_numprocs_
Definition: nrnmpi.cpp:26
void corenrn_subworld()
Definition: nrnmpi.cpp:95
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
Definition: nrn_assert.h:33
return status
#define MPI_Comm