NEURON
nrncore_io.cpp
Go to the documentation of this file.
1 #include "nrncore_io.h"
4 
5 #include <cstdlib>
6 #include "nrnmpi.h"
7 #include "section.h"
8 #include "hocdec.h"
9 #include "ocfile.h" // for idDirExist and makePath
10 #include "nrnran123.h" // globalindex written to globals.dat
11 #include "cvodeobj.h"
12 #include "netcvode.h" // for nrnbbcore_vecplay_write
13 #include "vrecitem.h" // for nrnbbcore_vecplay_write
14 #include <fstream>
15 #include <sstream>
16 #include "nrnsection_mapping.h"
17 
extern short* nrn_is_artificial_;
extern int* bbcore_dparam_size;
// NOTE(review): the extraction dropped two declarations here; this file later
// uses nrn_prop_param_size_ and nrn_bbcore_write_ (declared in init.cpp) —
// confirm the missing externs against the repository source.
extern void (*nrnthread_v_transfer_)(NrnThread*);

// Sequence number written as "chkpnt N" before every binary array, so a
// reader can verify file structure while parsing.
int chkpnt;
// Version string written as the first line of every file produced here;
// CoreNEURON checks it on read.
const char* bbcore_write_version = "1.8";  // Include ArrayDims
26 
27 /// create directory with given path
28 void create_dir_path(const std::string& path) {
29  // only one rank needs to create directory
30  if (nrnmpi_myid == 0) {
31  if (!isDirExist(path)) {
32  if (!makePath(path)) {
33  hoc_execerror(path.c_str(), "directory did not exist and makePath for it failed");
34  }
35  }
36  }
37  // rest of the ranks should wait before continue simulation
38 #ifdef NRNMPI
40 #endif
41 }
42 
43 std::string get_write_path() {
44  std::string path("."); // default path
45  if (ifarg(1)) {
46  path = hoc_gargstr(1);
47  }
48  return path;
49 }
50 
51 std::string get_filename(const std::string& path, std::string file_name) {
52  std::string fname(path + '/' + file_name);
53  nrn_assert(fname.size() < 1024);
54  return fname;
55 }
56 
57 
58 void write_memb_mech_types(const char* fname) {
59  if (nrnmpi_myid > 0) {
60  return;
61  } // only rank 0 writes this file
62  std::ofstream fs(fname);
63  if (!fs.good()) {
64  hoc_execerror("nrncore_write write_mem_mech_types could not open for writing: %s\n", fname);
65  }
67 }
68 
69 
// format is name value
// with last line of 0 0
// In case of an array, the line is name[num] with num lines following with
// one value per line. Values are %.20g format.
void write_globals(const char* fname) {
    if (nrnmpi_myid > 0) {
        return;
    }  // only rank 0 writes this file

    FILE* f = fopen(fname, "w");
    if (!f) {
        // NOTE(review): hoc_execerror does not printf-format its arguments;
        // the "%s\n" appears verbatim in the message — confirm intended.
        hoc_execerror("nrncore_write write_globals could not open for writing: %s\n", fname);
    }

    fprintf(f, "%s\n", bbcore_write_version);
    const char* name;
    int size;  // 0 means scalar, is 0 will still allocated one element for val.
    double* val = NULL;  // Allocated by new in get_global_item, must be delete [] here.
    // Note that it is possible for get_global_dbl_item to return NULL but
    // name, size, and val must still be handled if val != NULL
    for (void* sp = NULL;;) {
        // sp is the opaque iteration cursor; NULL starts and ends the walk
        sp = get_global_dbl_item(sp, name, size, val);
        if (val) {
            if (size) {
                // array global: "name[size]" header, then one value per line
                fprintf(f, "%s[%d]\n", name, size);
                for (int i = 0; i < size; ++i) {
                    fprintf(f, "%.20g\n", val[i]);
                }
            } else {
                // scalar global: single "name value" line
                fprintf(f, "%s %.20g\n", name, val[0]);
            }
            delete[] val;
            val = NULL;  // reset so a trailing NULL cursor is not misread as data
        }
        if (!sp) {
            break;  // iteration exhausted (checked after handling val)
        }
    }
    // "0 0" terminates the name/value list for the reader
    fprintf(f, "0 0\n");
    fprintf(f, "secondorder %d\n", secondorder);
    fprintf(f, "Random123_globalindex %d\n", nrnran123_get_globalindex());

    fclose(f);
}
114 
115 
116 void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) {
117  char fname[1000];
118  if (cg.n_output <= 0) {
119  return;
120  }
121  assert(cg.group_id >= 0);
122  nrn_assert(snprintf(fname, 1000, "%s/%d_1.dat", path, cg.group_id) < 1000);
123  FILE* f = fopen(fname, "wb");
124  if (!f) {
125  hoc_execerror("nrncore_write write_nrnthread could not open for writing:", fname);
126  }
127  fprintf(f, "%s\n", bbcore_write_version);
128 
129  // nrnthread_dat1(int tid, int& n_presyn, int& n_netcon, int*& output_gid, int*& netcon_srcgid);
130  fprintf(f, "%d npresyn\n", cg.n_presyn);
131  fprintf(f, "%d nnetcon\n", cg.n_netcon);
132  writeint(cg.output_gid.data(), cg.n_presyn);
134 
135  cg.output_gid.clear();
136  if (cg.netcon_srcgid) {
137  delete[] cg.netcon_srcgid;
138  cg.netcon_srcgid = NULL;
139  }
140  fclose(f);
141 
142  nrn_assert(snprintf(fname, 1000, "%s/%d_2.dat", path, cg.group_id) < 1000);
143  f = fopen(fname, "w");
144  if (!f) {
145  hoc_execerror("nrncore_write write_nrnthread could not open for writing:", fname);
146  }
147 
148  fprintf(f, "%s\n", bbcore_write_version);
149 
150  // sizes and total data count
151  int ncell, ngid, n_real_gid, nnode, ndiam, nmech;
152  int *tml_index, *ml_nodecount, nidata, nvdata, nweight;
153  nrnthread_dat2_1(nt.id,
154  ncell,
155  ngid,
156  n_real_gid,
157  nnode,
158  ndiam,
159  nmech,
160  tml_index,
161  ml_nodecount,
162  nidata,
163  nvdata,
164  nweight);
165 
166  fprintf(f, "%d n_real_cell\n", ncell);
167  fprintf(f, "%d ngid\n", ngid);
168  fprintf(f, "%d n_real_gid\n", n_real_gid);
169  fprintf(f, "%d nnode\n", nnode);
170  fprintf(f, "%d ndiam\n", ndiam);
171  fprintf(f, "%d nmech\n", nmech);
172 
173  for (int i = 0; i < nmech; ++i) {
174  fprintf(f, "%d\n", tml_index[i]);
175  fprintf(f, "%d\n", ml_nodecount[i]);
176  }
177  delete[] tml_index;
178  delete[] ml_nodecount;
179 
180  fprintf(f, "%d nidata\n", 0);
181  fprintf(f, "%d nvdata\n", nvdata);
182  fprintf(f, "%d nweight\n", nweight);
183 
184  // data
185  int* v_parent_index = NULL;
186  double *a = NULL, *b = NULL, *area = NULL, *v = NULL, *diamvec = NULL;
187  nrnthread_dat2_2(nt.id, v_parent_index, a, b, area, v, diamvec);
188  writeint(nt._v_parent_index, nt.end);
189  // Warning: this is only correct if no modifications have been made to any
190  // Node since reorder_secorder() was last called.
191  auto const cache_token = nrn_ensure_model_data_are_sorted();
192  writedbl(nt.node_a_storage(), nt.end);
193  writedbl(nt.node_b_storage(), nt.end);
194  writedbl(nt.node_area_storage(), nt.end);
196  if (cg.ndiam) {
197  writedbl(diamvec, nt.end);
198  delete[] diamvec;
199  }
200 
201  // mechanism data
202  int dsz_inst = 0;
203  MlWithArt& mla = cg.mlwithart;
204  for (size_t i = 0; i < mla.size(); ++i) {
205  int type = mla[i].first;
206  int *nodeindices = NULL, *pdata = NULL;
207  double* data = NULL;
208  std::vector<int> pointer2type;
209  std::vector<uint32_t> nmodlrandom;
211  nt.id, i, dsz_inst, nodeindices, data, pdata, nmodlrandom, pointer2type);
212  Memb_list* ml = mla[i].second;
213  int n = ml->nodecount;
214  int sz = nrn_prop_param_size_[type];
215  if (nodeindices) {
217  }
218  writedbl(data, n * sz);
219  if (data) {
220  delete[] data;
221  }
222  sz = bbcore_dparam_size[type];
223  if (pdata) {
224  ++dsz_inst;
225  writeint(pdata, n * sz);
226  delete[] pdata;
227  sz = pointer2type.size();
228  fprintf(f, "%d npointer\n", int(sz));
229  if (sz > 0) {
230  writeint(pointer2type.data(), sz);
231  }
232 
233  fprintf(f, "%d nmodlrandom\n", int(nmodlrandom.size()));
234  if (nmodlrandom.size()) {
235  write_uint32vec(nmodlrandom, f);
236  }
237  }
238  }
239 
240  int *output_vindex, *netcon_pnttype, *netcon_pntindex;
241  double *output_threshold, *weights, *delays;
242  nrnthread_dat2_3(nt.id,
243  nweight,
244  output_vindex,
245  output_threshold,
246  netcon_pnttype,
247  netcon_pntindex,
248  weights,
249  delays);
250  writeint(output_vindex, cg.n_presyn);
251  delete[] output_vindex;
252  writedbl(output_threshold, cg.n_real_output);
253  delete[] output_threshold;
254 
255  // connections
256  int n = cg.n_netcon;
257  // printf("n_netcon=%d nweight=%d\n", n, nweight);
258  writeint(netcon_pnttype, n);
259  delete[] netcon_pnttype;
260  writeint(netcon_pntindex, n);
261  delete[] netcon_pntindex;
262  writedbl(weights, nweight);
263  delete[] weights;
264  writedbl(delays, n);
265  delete[] delays;
266 
267  // special handling for BBCOREPOINTER
268  // how many mechanisms require it
270  fprintf(f, "%d bbcorepointer\n", n);
271  // for each of those, what is the mech type and data size
272  // and what is the data
273  for (size_t i = 0; i < mla.size(); ++i) {
274  int type = mla[i].first;
275  if (nrn_bbcore_write_[type]) {
276  int icnt, dcnt, *iArray;
277  double* dArray;
278  nrnthread_dat2_corepointer_mech(nt.id, type, icnt, dcnt, iArray, dArray);
279  fprintf(f, "%d\n", type);
280  fprintf(f, "%d\n%d\n", icnt, dcnt);
281  if (icnt) {
282  writeint(iArray, icnt);
283  delete[] iArray;
284  }
285  if (dcnt) {
286  writedbl(dArray, dcnt);
287  delete[] dArray;
288  }
289  }
290  }
291 
293 
294  fclose(f);
295 }
296 
297 
298 void writeint_(int* p, size_t size, FILE* f) {
299  fprintf(f, "chkpnt %d\n", chkpnt++);
300  size_t n = fwrite(p, sizeof(int), size, f);
301  assert(n == size);
302 }
303 
304 void writedbl_(double* p, size_t size, FILE* f) {
305  fprintf(f, "chkpnt %d\n", chkpnt++);
306  size_t n = fwrite(p, sizeof(double), size, f);
307  assert(n == size);
308 }
309 
310 void write_uint32vec(std::vector<uint32_t>& vec, FILE* f) {
311  fprintf(f, "chkpnt %d\n", chkpnt++);
312  size_t n = fwrite(vec.data(), sizeof(uint32_t), vec.size(), f);
313  assert(n == vec.size());
314 }
315 
// Convenience wrappers: forward to writeint_/writedbl_ using the FILE* named
// `f` that must be in scope at each call site (implicit capture by macro).
#define writeint(p, size) writeint_(p, size, f)
#define writedbl(p, size) writedbl_(p, size, f)
318 
319 void nrnbbcore_vecplay_write(FILE* f, NrnThread& nt) {
320  // Get the indices in NetCvode.fixed_play_ for this thread
321  // error if not a VecPlayContinuous with no discon vector
322  std::vector<int> indices;
323  nrnthread_dat2_vecplay(nt.id, indices);
324  fprintf(f, "%d VecPlay instances\n", int(indices.size()));
325  for (auto i: indices) {
326  int vptype, mtype, ix, sz;
327  double *yvec, *tvec;
328  // the 'if' is not necessary as item i is certainly in this thread
329  int unused = 0;
331  nt.id, i, vptype, mtype, ix, sz, yvec, tvec, unused, unused, unused)) {
332  fprintf(f, "%d\n", vptype);
333  fprintf(f, "%d\n", mtype);
334  fprintf(f, "%d\n", ix);
335  fprintf(f, "%d\n", sz);
336  writedbl(yvec, sz);
337  writedbl(tvec, sz);
338  }
339  }
340 }
341 
342 
343 static void fgets_no_newline(char* s, int size, FILE* f) {
344  if (fgets(s, size, f) == NULL) {
345  fclose(f);
346  hoc_execerror("Error reading line in files.dat", strerror(errno));
347  }
348  int n = strlen(s);
349  if (n && s[n - 1] == '\n') {
350  s[n - 1] = '\0';
351  }
352 }
353 
354 /** Write all dataset ids to files.dat.
355  *
356  * Format of the files.dat file is:
357  *
358  * version string
359  * -1 (if model uses gap junction)
360  * n (number of datasets) in format %10d
361  * id1
362  * id2
363  * ...
364  * idN
365  */
366 void write_nrnthread_task(const char* path, CellGroup* cgs, bool append) {
367  // ids of datasets that will be created
368  std::vector<int> iSend;
369 
370  // ignore empty nrnthread (has -1 id)
371  for (int iInt = 0; iInt < nrn_nthread; ++iInt) {
372  if (cgs[iInt].group_id >= 0) {
373  iSend.push_back(cgs[iInt].group_id);
374  }
375  }
376 
377  // receive and displacement buffers for mpi
378  std::vector<int> iRecv, iDispl;
379 
380  if (nrnmpi_myid == 0) {
381  iRecv.resize(nrnmpi_numprocs);
382  iDispl.resize(nrnmpi_numprocs);
383  }
384 
385  // number of datasets on the current rank
386  int num_datasets = iSend.size();
387 
388 #ifdef NRNMPI
389  // gather number of datasets from each task
390  if (nrnmpi_numprocs > 1) {
391  nrnmpi_int_gather(&num_datasets, begin_ptr(iRecv), 1, 0);
392  } else {
393  iRecv[0] = num_datasets;
394  }
395 #else
396  iRecv[0] = num_datasets;
397 #endif
398 
399  // total number of datasets across all ranks
400  int iSumThread = 0;
401 
402  // calculate mpi displacements
403  if (nrnmpi_myid == 0) {
404  for (int iInt = 0; iInt < nrnmpi_numprocs; ++iInt) {
405  iDispl[iInt] = iSumThread;
406  iSumThread += iRecv[iInt];
407  }
408  }
409 
410  // buffer for receiving all dataset ids
411  std::vector<int> iRecvVec(iSumThread);
412 
413 #ifdef NRNMPI
414  // gather ids into the array with correspondent offsets
415  if (nrnmpi_numprocs > 1) {
416  nrnmpi_int_gatherv(begin_ptr(iSend),
417  num_datasets,
418  begin_ptr(iRecvVec),
419  begin_ptr(iRecv),
420  begin_ptr(iDispl),
421  0);
422  } else {
423  for (int iInt = 0; iInt < num_datasets; ++iInt) {
424  iRecvVec[iInt] = iSend[iInt];
425  }
426  }
427 #else
428  for (int iInt = 0; iInt < num_datasets; ++iInt) {
429  iRecvVec[iInt] = iSend[iInt];
430  }
431 #endif
432 
433  /// Writing the file with task, correspondent number of threads and list of correspondent first
434  /// gids
435  if (nrnmpi_myid == 0) {
436  // If append is false, begin a new files.dat (overwrite old if exists).
437  // If append is true, append groupids to existing files.dat.
438  // Note: The number of groupids (2nd or 3rd line) has to be
439  // overwritten wih the total number so far. To avoid copying
440  // old to new, we allocate 10 chars for that number.
441 
442  std::stringstream ss;
443  ss << path << "/files.dat";
444 
445  std::string filename = ss.str();
446 
447  FILE* fp = NULL;
448  if (append == false) { // start a new file
449  fp = fopen(filename.c_str(), "w");
450  if (!fp) {
451  hoc_execerror("nrncore_write: could not open for writing:", filename.c_str());
452  }
453  } else { // modify groupid number and append to existing file
454  fp = fopen(filename.c_str(), "r+");
455  if (!fp) {
456  hoc_execerror("nrncore_write append: could not open for modifying:",
457  filename.c_str());
458  }
459  }
460 
461  constexpr int max_line_len = 20;
462  char line[max_line_len]; // All lines are actually no larger than %10d.
463 
464  if (append) {
465  // verify same version
466  fgets_no_newline(line, max_line_len, fp);
467  // unfortunately line has the newline
468  size_t n = strlen(bbcore_write_version);
469  if ((strlen(line) != n) || strncmp(line, bbcore_write_version, n) != 0) {
470  fclose(fp);
471  hoc_execerror("nrncore_write append: existing files.dat has inconsisten version:",
472  line);
473  }
474  } else {
475  fprintf(fp, "%s\n", bbcore_write_version);
476  }
477 
478  // notify coreneuron that this model involves gap junctions
479  if (nrnthread_v_transfer_) {
480  if (append) {
481  fgets_no_newline(line, max_line_len, fp);
482  if (strcmp(line, "-1") != 0) {
483  fclose(fp);
485  "nrncore_write append: existing files.dat does not have a gap junction "
486  "indicator\n",
487  NULL);
488  }
489  } else {
490  fprintf(fp, "-1\n");
491  }
492  }
493 
494  // total number of datasets
495  if (append) {
496  // this is the one that needs the space to get a new value
497  long pos = ftell(fp);
498  fgets_no_newline(line, max_line_len, fp);
499  int oldval = 0;
500  if (sscanf(line, "%d", &oldval) != 1) {
501  fclose(fp);
502  hoc_execerror("nrncore_write append: error reading number of groupids", NULL);
503  }
504  if (oldval == -1) {
505  fclose(fp);
507  "nrncore_write append: existing files.dat has gap junction indicator where we "
508  "expected a groupgid count.",
509  NULL);
510  }
511  iSumThread += oldval;
512  fseek(fp, pos, SEEK_SET);
513  }
514  fprintf(fp, "%10d\n", iSumThread);
515 
516  if (append) {
517  // Start writing the groupids starting at the end of the file.
518  fseek(fp, 0, SEEK_END);
519  }
520 
521  // write all dataset ids
522  for (int i = 0; i < iRecvVec.size(); ++i) {
523  fprintf(fp, "%d\n", iRecvVec[i]);
524  }
525 
526  fclose(fp);
527  }
528 }
529 
530 /** @brief dump mapping information to gid_3.dat file */
531 void nrn_write_mapping_info(const char* path, int gid, NrnMappingInfo& minfo) {
532  if (minfo.size() <= 0) {
533  return;
534  }
535 
536  /** full path of mapping file */
537  std::stringstream ss;
538  ss << path << "/" << gid << "_3.dat";
539 
540  std::string fname(ss.str());
541  FILE* f = fopen(fname.c_str(), "w");
542 
543  if (!f) {
544  hoc_execerror("nrnbbcore_write could not open for writing:", fname.c_str());
545  }
546 
547  fprintf(f, "%s\n", bbcore_write_version);
548 
549  /** number of gids in NrnThread */
550  int count;
552  fprintf(f, "%d\n", count);
553 
554  /** all cells mapping information in NrnThread */
555  for (size_t i = 0; i < count; i++) {
556  int cgid;
557  int t_sec;
558  int t_seg;
559  int n_seclist;
560  nrnthread_dat3_cellmapping(i, cgid, t_sec, t_seg, n_seclist);
561  /** gid, #section, #compartments, #sectionlists */
562  fprintf(f, "%d %d %d %d\n", cgid, t_sec, t_seg, n_seclist);
563 
564  for (size_t j = 0; j < n_seclist; j++) {
565  std::string sclname;
566  int nsec;
567  int nseg;
568  int n_electrodes;
569  size_t total_lfp_factors;
570  std::vector<int> data_sec;
571  std::vector<int> data_seg;
572  std::vector<double> data_lfp;
574  j,
575  sclname,
576  nsec,
577  nseg,
578  total_lfp_factors,
579  n_electrodes,
580  data_sec,
581  data_seg,
582  data_lfp);
583  /** section list name, number of sections, number of segments */
584  fprintf(f,
585  "%s %d %d %zd %d\n",
586  sclname.c_str(),
587  nsec,
588  nseg,
589  total_lfp_factors,
590  n_electrodes);
591 
592  /** section - segment mapping */
593  if (nseg) {
594  writeint(&(data_sec.front()), nseg);
595  writeint(&(data_seg.front()), nseg);
596  if (total_lfp_factors) {
597  writedbl(&(data_lfp.front()), total_lfp_factors);
598  }
599  }
600  }
601  }
602  fclose(f);
603 }
static void nrnmpi_barrier()
std::vector< MlWithArtItem > MlWithArt
Definition: cell_group.h:15
int group_id
Definition: cell_group.h:23
int n_real_output
Definition: cell_group.h:28
std::vector< int > output_gid
Definition: cell_group.h:34
int * netcon_srcgid
Definition: cell_group.h:39
int n_presyn
Definition: cell_group.h:26
MlWithArt mlwithart
Definition: cell_group.h:49
int n_netcon
Definition: cell_group.h:37
int n_output
Definition: cell_group.h:27
int ndiam
Definition: cell_group.h:29
static Frame * fp
Definition: code.cpp:96
#define nodeindices
Definition: md1redef.h:35
#define area
Definition: md1redef.h:12
#define v
Definition: md1redef.h:11
#define data
Definition: md1redef.h:36
#define weights
Definition: md1redef.h:42
#define i
Definition: md1redef.h:19
#define pdata
Definition: md1redef.h:37
char * hoc_gargstr(int)
#define assert(ex)
Definition: hocassrt.h:24
const char * name
Definition: init.cpp:16
void append(Item *ql, Item *q)
Definition: list.cpp:289
int nrn_nthread
Definition: multicore.cpp:55
void hoc_execerror(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:39
uint32_t nrnran123_get_globalindex()
Definition: nrnran123.cpp:112
icycle< ncycle;++icycle) { int istride=stride[icycle];nrn_pragma_acc(loop vector) nrn_pragma_omp(loop bind(parallel)) for(int icore=0;icore< warpsize;++icore) { int i=ii+icore;if(icore< istride) { int ip=GPU_PARENT(i);GPU_RHS(i) -=GPU_B(i) *GPU_RHS(ip);GPU_RHS(i)/=GPU_D(i);} i+=istride;} ii+=istride;} }}void solve_interleaved2(int ith) { NrnThread *nt=nrn_threads+ith;InterleaveInfo &ii=interleave_info[ith];int nwarp=ii.nwarp;if(nwarp==0) return;int ncore=nwarp *warpsize;int *ncycles=ii.cellsize;int *stridedispl=ii.stridedispl;int *strides=ii.stride;int *rootbegin=ii.firstnode;int *nodebegin=ii.lastnode;if(0) { nrn_pragma_acc(parallel loop gang present(nt[0:1], strides[0:nstride], ncycles[0:nwarp], stridedispl[0:nwarp+1], rootbegin[0:nwarp+1], nodebegin[0:nwarp+1]) async(nt->stream_id)) nrn_pragma_omp(target teams loop map(present, alloc:nt[:1], strides[:nstride], ncycles[:nwarp], stridedispl[:nwarp+1], rootbegin[:nwarp+1], nodebegin[:nwarp+1])) for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} nrn_pragma_acc(wait(nt->stream_id)) } else { for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} }}void solve_interleaved1(int ith) { NrnThread *nt=nrn_threads+ith;int ncell=nt-> ncell
Definition: cellorder.cpp:784
neuron::model_sorted_token nrn_ensure_model_data_are_sorted()
Ensure neuron::container::* data are sorted.
Definition: treeset.cpp:2182
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
Definition: nrn_assert.h:33
void nrnthread_dat3_cell_count(int &cell_count)
int nrnthread_dat2_corepointer_mech(int tid, int type, int &icnt, int &dcnt, int *&iArray, double *&dArray)
int nrnthread_dat2_3(int tid, int nweight, int *&output_vindex, double *&output_threshold, int *&netcon_pnttype, int *&netcon_pntindex, double *&weights, double *&delays)
void nrnthread_dat3_cellmapping(int i, int &gid, int &nsec, int &nseg, int &n_seclist)
void * get_global_dbl_item(void *p, const char *&name, int &size, double *&val)
int nrnthread_dat2_1(int tid, int &ncell, int &ngid, int &n_real_gid, int &nnode, int &ndiam, int &nmech, int *&tml_index, int *&ml_nodecount, int &nidata, int &nvdata, int &nweight)
int nrnthread_dat2_vecplay(int tid, std::vector< int > &indices)
void nrnthread_dat3_secmapping(int i_c, int i_sec, std::string &sclname, int &nsec, int &nseg, size_t &total_lfp_factors, int &n_electrodes, std::vector< int > &data_sec, std::vector< int > &data_seg, std::vector< double > &data_lfp)
int nrnthread_dat2_vecplay_inst(int tid, int i, int &vptype, int &mtype, int &ix, int &sz, double *&yvec, double *&tvec, int &last_index, int &discon_index, int &ubound_index)
void write_memb_mech_types_direct(std::ostream &s)
int nrnthread_dat2_corepointer(int tid, int &n)
int nrnthread_dat2_2(int tid, int *&v_parent_index, double *&a, double *&b, double *&area, double *&v, double *&diamvec)
int nrnthread_dat2_mech(int tid, size_t i, int dsz_inst, int *&nodeindices, double *&data, int *&pdata, std::vector< uint32_t > &nmodlrandom, std::vector< int > &pointer2type)
void write_uint32vec(std::vector< uint32_t > &vec, FILE *f)
Definition: nrncore_io.cpp:310
short * nrn_is_artificial_
Definition: init.cpp:214
void(* nrnthread_v_transfer_)(NrnThread *)
Definition: fadvance.cpp:139
void write_memb_mech_types(const char *fname)
Definition: nrncore_io.cpp:58
#define writeint(p, size)
Definition: nrncore_io.cpp:316
void create_dir_path(const std::string &path)
create directory with given path
Definition: nrncore_io.cpp:28
std::string get_filename(const std::string &path, std::string file_name)
Definition: nrncore_io.cpp:51
void write_globals(const char *fname)
Definition: nrncore_io.cpp:74
void writeint_(int *p, size_t size, FILE *f)
Definition: nrncore_io.cpp:298
int * bbcore_dparam_size
void nrn_write_mapping_info(const char *path, int gid, NrnMappingInfo &minfo)
dump mapping information to gid_3.dat file
Definition: nrncore_io.cpp:531
static void fgets_no_newline(char *s, int size, FILE *f)
Definition: nrncore_io.cpp:343
void write_nrnthread_task(const char *path, CellGroup *cgs, bool append)
Write all dataset ids to files.dat.
Definition: nrncore_io.cpp:366
bbcore_write_t * nrn_bbcore_write_
Definition: init.cpp:173
void write_nrnthread(const char *path, NrnThread &nt, CellGroup &cg)
Definition: nrncore_io.cpp:116
NetCvode * net_cvode_instance
Definition: cvodestb.cpp:26
void writedbl_(double *p, size_t size, FILE *f)
Definition: nrncore_io.cpp:304
int chkpnt
Definition: nrncore_io.cpp:24
#define writedbl(p, size)
Definition: nrncore_io.cpp:317
void nrnbbcore_vecplay_write(FILE *f, NrnThread &nt)
Definition: nrncore_io.cpp:319
std::string get_write_path()
Definition: nrncore_io.cpp:43
const char * bbcore_write_version
Definition: nrncore_io.cpp:25
void(*)(double *, int *, int *, int *, Memb_list *, std::size_t, Datum *, Datum *, double *, NrnThread *) bbcore_write_t
Definition: nrncore_io.h:48
T * begin_ptr(std::vector< T > &v)
Definition: nrncore_io.h:17
int const size_t const size_t n
Definition: nrngsl.h:10
size_t p
size_t j
s
Definition: multisend.cpp:521
int * nrn_prop_param_size_
Definition: init.cpp:162
int ifarg(int)
Definition: code.cpp:1607
short type
Definition: cabvars.h:10
int nrnmpi_myid
bool makePath(const std::string &path)
Definition: ocfile.cpp:539
bool isDirExist(const std::string &path)
Definition: ocfile.cpp:523
#define NULL
Definition: spdefs.h:105
A view into a set of mechanism instances.
Definition: nrnoc_ml.h:34
int nodecount
Definition: nrnoc_ml.h:78
Compartment mapping information for NrnThread.
size_t size()
number of cells
Represent main neuron object computed by single thread.
Definition: multicore.h:58
int * _v_parent_index
Definition: multicore.h:89
double * node_a_storage()
Definition: multicore.cpp:1054
int id
Definition: multicore.h:66
double * node_area_storage()
Definition: multicore.cpp:1059
int end
Definition: multicore.h:65
double * node_voltage_storage()
Definition: multicore.cpp:1098
double * node_b_storage()
Definition: multicore.cpp:1064