NEURON
nrn_checkpoint.cpp
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================
7 */
8 #include <filesystem>
9 #include <iostream>
10 #include <sstream>
11 #include <cassert>
12 #include <memory>
13 
14 #include "nrnoc/ion_semantics.h"
29 
30 namespace fs = std::filesystem;
31 
32 namespace coreneuron {
33 // These functions come directly from the mod file
34 extern int checkpoint_save_patternstim(_threadargsproto_);
35 extern void checkpoint_restore_patternstim(int, double, _threadargsproto_);
36 
37 CheckPoints::CheckPoints(const std::string& save, const std::string& restore)
38  : save_(save)
39  , restore_(restore)
40  , restored(false) {
41  if (!save.empty()) {
42  if (nrnmpi_myid == 0) {
43  fs::create_directories(save);
44  }
45  }
46 }
47 
48 /// todo : need to broadcast this rather than all reading a double
49 double CheckPoints::restore_time() const {
50  if (!should_restore()) {
51  return 0.;
52  }
53 
54  double rtime = 0.;
55  FileHandler f;
56  std::string filename = restore_ + "/time.dat";
57  f.open(filename, std::ios::in);
58  f.read_array(&rtime, 1);
59  f.close();
60  return rtime;
61 }
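// A sketch of the broadcast approach suggested by the todo above: rank 0 reads
// time.dat and broadcasts the value to all ranks. Illustration only; it assumes a
// plain MPI call here, whereas the real code would presumably use the nrnmpi wrappers.
#if 0
double CheckPoints::restore_time() const {
    double rtime = 0.;
    if (!should_restore()) {
        return rtime;
    }
    if (nrnmpi_myid == 0) {
        FileHandler f;
        f.open(restore_ + "/time.dat", std::ios::in);
        f.read_array(&rtime, 1);
        f.close();
    }
#if NRNMPI
    MPI_Bcast(&rtime, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);  // requires <mpi.h>
#endif
    return rtime;
}
#endif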
62 
63 void CheckPoints::write_checkpoint(NrnThread* nt, int nb_threads) const {
64  if (!should_save()) {
65  return;
66  }
67 
68 #if NRNMPI
69  if (corenrn_param.mpi_enable) {
70  nrnmpi_barrier();
71  }
72 #endif
73 
74  /**
75  * if openmp threading needed:
76  * #pragma omp parallel for private(i) shared(nt, nb_threads) schedule(runtime)
77  * but note that nrn_mech_random_indices(type) is not threadsafe on first
78  * call for each type.
79  */
80  for (int i = 0; i < nb_threads; i++) {
81  if (nt[i].ncell || nt[i].tml) {
82  write_phase2(nt[i]);
83  }
84  }
85 
86  if (nrnmpi_myid == 0) {
87  write_time();
88  }
89 #if NRNMPI
90  if (corenrn_param.mpi_enable) {
91  nrnmpi_barrier();
92  }
93 #endif
94 }
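// Usage sketch (assumed, not taken from this file): a checkpoint is typically
// driven by constructing CheckPoints with a save directory and handing it the
// thread array, here assumed to be the usual CoreNEURON globals nrn_threads / nrn_nthread.
#if 0
CheckPoints checkpoints{"checkpoint_dir", ""};  // save path, empty restore path
checkpoints.write_checkpoint(nrn_threads, nrn_nthread);
#endif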
95 
96 // The body of the ion handling below is factored out here because
97 // the same code also handles POINTER
98 static int nrn_original_aos_index(int etype, int ix, NrnThread& nt, int** ml_pinv) {
99  // Determine ei_instance and ei from etype and ix.
100  // Deal with existing permutation and SoA.
101  Memb_list* eml = nt._ml_list[etype];
102  int ecnt = eml->nodecount;
103  int esz = corenrn.get_prop_param_size()[etype];
104  int elayout = corenrn.get_mech_data_layout()[etype];
105  // current index into eml->data is a function
106  // of elayout, eml._permute, ei_instance, ei, and
107  // eml padding.
108  int p = ix - (eml->data - nt._data);
109  assert(p >= 0 && p < eml->_nodecount_padded * esz);
110  int ei_instance, ei;
111  nrn_inverse_i_layout(p, ei_instance, ecnt, ei, esz, elayout);
112  if (elayout == Layout::SoA) {
113  if (eml->_permute) {
114  if (!ml_pinv[etype]) {
115  ml_pinv[etype] = inverse_permute(eml->_permute, eml->nodecount);
116  }
117  ei_instance = ml_pinv[etype][ei_instance];
118  }
119  }
120  return ei_instance * esz + ei;
121 }
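// Worked example of the index math inverted above (illustrative numbers only):
// with ecnt = 3 instances padded to _nodecount_padded = 4 and esz = 2 variables,
// the value for (instance 2, variable 1) sits at
//   SoA offset: variable * padded_cnt + instance = 1 * 4 + 2 = 6
//   AoS offset: instance * sz + variable         = 2 * 2 + 1 = 5
// nrn_inverse_i_layout recovers (ei_instance, ei) from the SoA offset, ml_pinv
// maps the permuted instance back to original (file) order when needed, and the
// function returns the AoS-style index ei_instance * esz + ei.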
122 
123 void CheckPoints::write_phase2(NrnThread& nt) const {
124  FileHandler fh;
125 
126  NrnThreadChkpnt& ntc = nrnthread_chkpnt[nt.id];
127  auto filename = get_save_path() + "/" + std::to_string(ntc.file_id) + "_2.dat";
128 
129  fh.open(filename, std::ios::out);
130  fh.checkpoint(2);
131 
132  int n_outputgid = 0; // count PreSyn objects with gid_ >= 0
133  for (int i = 0; i < nt.n_presyn; ++i) {
134  if (nt.presyns[i].gid_ >= 0) {
135  ++n_outputgid;
136  }
137  }
138 
139  fh << nt.ncell << " ncell\n";
140  fh << n_outputgid << " ngid\n";
141 #if CHKPNTDEBUG
142  assert(ntc.n_outputgids == n_outputgid);
143 #endif
144 
145  fh << nt.n_real_output << " n_real_output\n";
146  fh << nt.end << " nnode\n";
147  fh << ((nt._actual_diam == nullptr) ? 0 : nt.end) << " ndiam\n";
148  int nmech = 0;
149  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
150  if (tml->index != patstimtype) { // skip PatternStim
151  ++nmech;
152  }
153  }
154 
155  fh << nmech << " nmech\n";
156 #if CHKPNTDEBUG
157  assert(nmech == ntc.nmech);
158 #endif
159 
160  for (NrnThreadMembList* current_tml = nt.tml; current_tml; current_tml = current_tml->next) {
161  if (current_tml->index == patstimtype) {
162  continue;
163  }
164  fh << current_tml->index << "\n";
165  fh << current_tml->ml->nodecount << "\n";
166  }
167 
168  fh << nt._nidata << " nidata\n";
169  fh << nt._nvdata << " nvdata\n";
170  fh << nt.n_weight << " nweight\n";
171 
172  // see comment about parent in node_permute.cpp
173  int* pinv_nt = nullptr;
174  if (nt._permute) {
175  int* d = new int[nt.end];
176  pinv_nt = inverse_permute(nt._permute, nt.end);
177  for (int i = 0; i < nt.end; ++i) {
178  int x = nt._v_parent_index[nt._permute[i]];
179  if (x >= 0) {
180  d[i] = pinv_nt[x];
181  } else {
182  d[i] = 0; // really should be -1;
183  }
184  }
185 #if CHKPNTDEBUG
186  for (int i = 0; i < nt.end; ++i) {
187  assert(d[i] == ntc.parent[i]);
188  }
189 #endif
190  fh.write_array<int>(d, nt.end);
191  delete[] d;
192  } else {
193 #if CHKPNTDEBUG
194  for (int i = 0; i < nt.end; ++i) {
195  assert(nt._v_parent_index[i] == ntc.parent[i]);
196  }
197 #endif
198  fh.write_array<int>(nt._v_parent_index, nt.end);
199  pinv_nt = new int[nt.end];
200  for (int i = 0; i < nt.end; ++i) {
201  pinv_nt[i] = i;
202  }
203  }
204 
205  data_write(fh, nt._actual_a, nt.end, 1, 0, nt._permute);
206  data_write(fh, nt._actual_b, nt.end, 1, 0, nt._permute);
207 
208 #if CHKPNTDEBUG
209  for (int i = 0; i < nt.end; ++i) {
210  assert(nt._actual_area[i] == ntc.area[pinv_nt[i]]);
211  }
212 #endif
213 
214  data_write(fh, nt._actual_area, nt.end, 1, 0, nt._permute);
215  data_write(fh, nt._actual_v, nt.end, 1, 0, nt._permute);
216 
217  if (nt._actual_diam) {
218  data_write(fh, nt._actual_diam, nt.end, 1, 0, nt._permute);
219  }
220 
221  auto& memb_func = corenrn.get_memb_funcs();
222  // will need the ml_pinv inverse permutation of ml._permute for ions and POINTER
223  int** ml_pinv = (int**) ecalloc(memb_func.size(), sizeof(int*));
224 
225  for (NrnThreadMembList* current_tml = nt.tml; current_tml; current_tml = current_tml->next) {
226  Memb_list* ml = current_tml->ml;
227  int type = current_tml->index;
228  if (type == patstimtype) {
229  continue;
230  }
231  int cnt = ml->nodecount;
235 
236  int sz = nrn_prop_param_size_[type];
237  int layout = corenrn.get_mech_data_layout()[type];
238  int* semantics = memb_func[type].dparam_semantics;
239 
240  if (!nrn_is_artificial_[type]) {
241  // ml->nodeindices values are permuted according to nt._permute
242  // and locations according to ml._permute
243  // i.e. according to comment in node_permute.cpp
244  // nodelist[p_m[i]] = p[nodelist_original[i]]
245  // so pinv[nodelist[p_m[i]]] = nodelist_original[i]
246  int* nd_ix = new int[cnt];
247  for (int i = 0; i < cnt; ++i) {
248  int ip = ml->_permute ? ml->_permute[i] : i;
249  int ipval = ml->nodeindices[ip];
250  nd_ix[i] = pinv_nt[ipval];
251  }
252  fh.write_array<int>(nd_ix, cnt);
253  delete[] nd_ix;
254  }
255 
256  data_write(fh, ml->data, cnt, sz, layout, ml->_permute);
257 
258  sz = nrn_prop_dparam_size_[type];
259  if (sz) {
260  // need to update some values according to Datum semantics.
261  int* d = soa2aos(ml->pdata, cnt, sz, layout, ml->_permute);
262  std::vector<int> pointer2type; // voltage or mechanism type (starts empty)
263  if (!nrn_is_artificial_[type]) {
264  for (int i_instance = 0; i_instance < cnt; ++i_instance) {
265  for (int i = 0; i < sz; ++i) {
266  int ix = i_instance * sz + i;
267  int s = semantics[i];
268  if (s == -1) { // area
269  int p = pinv_nt[d[ix] - (nt._actual_area - nt._data)];
270  d[ix] = p; // relative _actual_area
271  } else if (s == -9) { // diam
272  int p = pinv_nt[d[ix] - (nt._actual_diam - nt._data)];
273 
274  d[ix] = p; // relative to _actual_diam
275  } else if (s == -5) { // POINTER
276  // looping over instances, then over sz, means that we visit
277  // entries in an order consistent with the natural order of
278  // pointer2type
279 
280  // Relevant code that this has to invert
281  // is permute/node_permute.cpp :: update_pdata_values with
282  // respect to permutation, and
283  // io/phase2.cpp :: Phase2::pdata_relocation
284  // with respect to that AoS -> SoA
285 
286  // Step 1: what mechanism is d[ix] pointing to
287  int ptype = type_of_ntdata(nt, d[ix], i_instance == 0);
288  pointer2type.push_back(ptype);
289 
290  // Step 2: replace d[ix] with AoS index relative to type
291  if (ptype == voltage) {
292  int p = pinv_nt[d[ix] - (nt._actual_v - nt._data)];
293  d[ix] = p; // relative to _actual_v
294  } else {
295  // Since we know ptype, the situation is
296  // identical to the ion case below (which was
297  // factored out into nrn_original_aos_index).
298  d[ix] = nrn_original_aos_index(ptype, d[ix], nt, ml_pinv);
299  }
300  } else if (nrn_semantics_is_ion(s)) { // ion
301  auto type = nrn_semantics_ion_type(s);
302  d[ix] = nrn_original_aos_index(type, d[ix], nt, ml_pinv);
303  }
304 #if CHKPNTDEBUG
305  if (s != -8) { // WATCH values change
306  assert(d[ix] ==
307  ntc.mlmap[type]->pdata_not_permuted[i_instance * sz + i]);
308  }
309 #endif
310  }
311  }
312  }
313  fh.write_array<int>(d, cnt * sz);
314  size_t s = pointer2type.size();
315  fh << s << " npointer\n";
316  if (s) {
317  fh.write_array<int>(pointer2type.data(), s);
318  }
319 
320  // nmodlrandom
321  auto& indices = nrn_mech_random_indices(type);
322  s = indices.size() ? (1 + indices.size() + 5 * cnt * indices.size()) : 0;
323  fh << s << " nmodlrandom\n";
324  if (s) {
325  std::vector<uint32_t> nmodlrandom{};
326  nmodlrandom.reserve(s);
327  nmodlrandom.push_back((uint32_t) indices.size());
328  for (auto ix: indices) {
329  nmodlrandom.push_back((uint32_t) ix);
330  }
331  for (auto ix: indices) {
332  uint32_t data[5];
333  char which;
334  for (int i = 0; i < cnt; ++i) {
335  void* v = nt._vdata[d[i * sz + ix]];
336  nrnran123_State* r = (nrnran123_State*) v;
337  nrnran123_getids3(r, &data[0], &data[1], &data[2]);
338  nrnran123_getseq(r, &data[3], &which);
339  data[4] = uint32_t(which);
340  for (auto j: data) {
341  nmodlrandom.push_back(j);
342  }
343  }
344  }
345  fh.write_array<uint32_t>(nmodlrandom.data(), nmodlrandom.size());
346  }
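// The nmodlrandom block written above therefore contains, in order: the number
// of RANDOM variables, their pdata indices, and then, for each variable and each
// of the cnt instances, five uint32 values (the three Random123 ids, the sequence
// counter and the selector), matching s = 1 + #indices + 5 * cnt * #indices.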
347 
348  delete[] d;
349  }
350  }
351 
352  int nnetcon = nt.n_netcon;
353 
354  int* output_vindex = new int[nt.n_presyn];
355  double* output_threshold = new double[nt.n_real_output];
356  for (int i = 0; i < nt.n_presyn; ++i) {
357  PreSyn* ps = nt.presyns + i;
358  if (ps->thvar_index_ >= 0) {
359  // real cell and index into (permuted) actual_v
360  // if any assert fails in this loop then we have faulty understanding
361  // of the for (int i = 0; i < nt.n_presyn; ++i) loop in nrn_setup.cpp
362  assert(ps->thvar_index_ < nt.end);
363  assert(ps->pntsrc_ == nullptr);
364  output_threshold[i] = ps->threshold_;
365  output_vindex[i] = pinv_nt[ps->thvar_index_];
366  } else if (i < nt.n_real_output) { // real cell without a presyn
367  output_threshold[i] = 0.0; // the way it was set in nrnbbcore_write.cpp
368  output_vindex[i] = -1;
369  } else {
370  Point_process* pnt = ps->pntsrc_;
371  assert(pnt);
372  int type = pnt->_type;
373  int ix = pnt->_i_instance;
374  if (nt._ml_list[type]->_permute) {
375  // pnt->_i_instance is the permuted index into pnt->_type
376  if (!ml_pinv[type]) {
377  Memb_list* ml = nt._ml_list[type];
378  ml_pinv[type] = inverse_permute(ml->_permute, ml->nodecount);
379  }
380  ix = ml_pinv[type][ix];
381  }
382  output_vindex[i] = -(ix * 1000 + type);
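// i.e. a negative value packs the (instance, type) pair as -(instance * 1000 + type),
// e.g. instance 3 of type 7 becomes -3007; this encoding assumes mechanism type
// indices stay below 1000.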
383  }
384  }
385  fh.write_array<int>(output_vindex, nt.n_presyn);
386  fh.write_array<double>(output_threshold, nt.n_real_output);
387 #if CHKPNTDEBUG
388  for (int i = 0; i < nt.n_presyn; ++i) {
389  assert(ntc.output_vindex[i] == output_vindex[i]);
390  }
391  for (int i = 0; i < nt.n_real_output; ++i) {
392  assert(ntc.output_threshold[i] == output_threshold[i]);
393  }
394 #endif
395  delete[] output_vindex;
396  delete[] output_threshold;
397  delete[] pinv_nt;
398 
399  int synoffset = 0;
400  std::vector<int> pnt_offset(memb_func.size(), -1);
401  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
402  int type = tml->index;
403  if (corenrn.get_pnt_map()[type] > 0) {
404  pnt_offset[type] = synoffset;
405  synoffset += tml->ml->nodecount;
406  }
407  }
408 
409  int* pnttype = new int[nnetcon];
410  int* pntindex = new int[nnetcon];
411  double* delay = new double[nnetcon];
412  for (int i = 0; i < nnetcon; ++i) {
413  NetCon& nc = nt.netcons[i];
414  Point_process* pnt = nc.target_;
415  if (pnt == nullptr) {
416  // nrn_setup.cpp allows type <=0 which generates nullptr target.
417  pnttype[i] = 0;
418  pntindex[i] = -1;
419  } else {
420  pnttype[i] = pnt->_type;
421 
422  // todo: this seems most natural, but does not work. Perhaps should look
423  // into how pntindex is determined in nrnbbcore_write.cpp and change it there.
424  // int ix = pnt->_i_instance;
425  // if (ml_pinv[pnt->_type]) {
426  // ix = ml_pinv[pnt->_type][ix];
427  // }
428 
429  // follow the inverse of nrn_setup.cpp using pnt_offset computed above.
430  int ix = (pnt - nt.pntprocs) - pnt_offset[pnt->_type];
431  pntindex[i] = ix;
432  }
433  delay[i] = nc.delay_;
434  }
435  fh.write_array<int>(pnttype, nnetcon);
436  fh.write_array<int>(pntindex, nnetcon);
437  fh.write_array<double>(nt.weights, nt.n_weight);
438  fh.write_array<double>(delay, nnetcon);
439 #if CHKPNTDEBUG
440  for (int i = 0; i < nnetcon; ++i) {
441  assert(ntc.pnttype[i] == pnttype[i]);
442  assert(ntc.pntindex[i] == pntindex[i]);
443  assert(ntc.delay[i] == delay[i]);
444  }
445 #endif
446  delete[] pnttype;
447  delete[] pntindex;
448  delete[] delay;
449 
450  // BBCOREPOINTER
451  int nbcp = 0;
452  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
453  if (corenrn.get_bbcore_read()[tml->index] && tml->index != patstimtype) {
454  ++nbcp;
455  }
456  }
457 
458  fh << nbcp << " bbcorepointer\n";
459 #if CHKPNTDEBUG
460  assert(nbcp == ntc.nbcp);
461 #endif
462  nbcp = 0;
463  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
464  if (corenrn.get_bbcore_read()[tml->index] && tml->index != patstimtype) {
465  int i = nbcp++;
466  int type = tml->index;
468  Memb_list* ml = tml->ml;
469  double* d = nullptr;
470  Datum* pd = nullptr;
471  int layout = corenrn.get_mech_data_layout()[type];
472  int dsz = corenrn.get_prop_param_size()[type];
473  int pdsz = corenrn.get_prop_dparam_size()[type];
474  int aln_cntml = nrn_soa_padded_size(ml->nodecount, layout);
475  fh << type << "\n";
476  int icnt = 0;
477  int dcnt = 0;
478  // first pass: count the ints/doubles needed so the arrays can be allocated
479  for (int j = 0; j < ml->nodecount; ++j) {
480  int jp = j;
481  if (ml->_permute) {
482  jp = ml->_permute[j];
483  }
484  d = ml->data + nrn_i_layout(jp, ml->nodecount, 0, dsz, layout);
485  pd = ml->pdata + nrn_i_layout(jp, ml->nodecount, 0, pdsz, layout);
486  (*corenrn.get_bbcore_write()[type])(
487  nullptr, nullptr, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
488  }
489  fh << icnt << "\n";
490  fh << dcnt << "\n";
491 #if CHKPNTDEBUG
492  assert(ntc.bcptype[i] == type);
493  assert(ntc.bcpicnt[i] == icnt);
494  assert(ntc.bcpdcnt[i] == dcnt);
495 #endif
496  int* iArray = nullptr;
497  double* dArray = nullptr;
498  if (icnt) {
499  iArray = new int[icnt];
500  }
501  if (dcnt) {
502  dArray = new double[dcnt];
503  }
504  icnt = dcnt = 0;
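// second pass: same traversal as above, but now the per-mechanism bbcore_write
// callback fills iArray/dArray instead of only counting.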
505  for (int j = 0; j < ml->nodecount; j++) {
506  int jp = j;
507 
508  if (ml->_permute) {
509  jp = ml->_permute[j];
510  }
511 
512  d = ml->data + nrn_i_layout(jp, ml->nodecount, 0, dsz, layout);
513  pd = ml->pdata + nrn_i_layout(jp, ml->nodecount, 0, pdsz, layout);
514 
515  (*corenrn.get_bbcore_write()[type])(
516  dArray, iArray, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
517  }
518 
519  if (icnt) {
520  fh.write_array<int>(iArray, icnt);
521  delete[] iArray;
522  }
523 
524  if (dcnt) {
525  fh.write_array<double>(dArray, dcnt);
526  delete[] dArray;
527  }
528  ++i;
529  }
530  }
531 
532  fh << nt.n_vecplay << " VecPlay instances\n";
533  for (int i = 0; i < nt.n_vecplay; i++) {
534  PlayRecord* pr = (PlayRecord*) nt._vecplay[i];
535  int vtype = pr->type();
536  int mtype = -1;
537  int ix = -1;
538 
539  // not as efficient as possible but there should not be too many
540  Memb_list* ml = nullptr;
541  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
542  ml = tml->ml;
543  int nn = corenrn.get_prop_param_size()[tml->index] * ml->_nodecount_padded;
544  if (nn && pr->pd_ >= ml->data && pr->pd_ < (ml->data + nn)) {
545  mtype = tml->index;
546  ix = (pr->pd_ - ml->data);
547  break;
548  }
549  }
550  assert(mtype >= 0);
551  int icnt, isz;
552  nrn_inverse_i_layout(ix,
553  icnt,
554  ml->nodecount,
555  isz,
556  corenrn.get_prop_param_size()[mtype],
557  corenrn.get_mech_data_layout()[mtype]);
558  if (ml_pinv[mtype]) {
559  icnt = ml_pinv[mtype][icnt];
560  }
561  ix = nrn_i_layout(
562  icnt, ml->nodecount, isz, corenrn.get_prop_param_size()[mtype], AOS_LAYOUT);
563 
564  fh << vtype << "\n";
565  fh << mtype << "\n";
566  fh << ix << "\n";
567 #if CHKPNTDEBUG
568  assert(ntc.vtype[i] == vtype);
569  assert(ntc.mtype[i] == mtype);
570  assert(ntc.vecplay_ix[i] == ix);
571 #endif
572  if (vtype == VecPlayContinuousType) {
573  VecPlayContinuous* vpc = (VecPlayContinuous*) pr;
574  int sz = vpc->y_.size();
575  fh << sz << "\n";
576  fh.write_array<double>(vpc->y_.data(), sz);
577  fh.write_array<double>(vpc->t_.data(), sz);
578  } else {
579  std::cerr << "Error checkpointing vecplay type" << std::endl;
580  assert(0);
581  }
582  }
583 
584  for (size_t i = 0; i < memb_func.size(); ++i) {
585  if (ml_pinv[i]) {
586  delete[] ml_pinv[i];
587  }
588  }
589  free(ml_pinv);
590 
591  write_tqueue(nt, fh);
592  fh.close();
593 }
594 
595 void CheckPoints::write_time() const {
596  FileHandler f;
597  auto filename = get_save_path() + "/time.dat";
598  f.open(filename, std::ios::out);
599  f.write_array(&t, 1);
600  f.close();
601 }
602 
603 // A call to finitialize must be avoided after restoring the checkpoint
604 // as that would change all states to a voltage clamp initialization.
605 // Nevertheless t and some spike exchange and other computed state need to
606 // be initialized.
607 // Also it is occasionally the case that nrn_init allocates data so we
608 // need to call it but avoid the internal call to initmodel.
609 // Consult finitialize.c to help decide what should be here
610 bool CheckPoints::initialize() {
611  dt2thread(-1.);
612  nrn_thread_table_check();
613  allocate_data_in_mechanism_nrn_init();
614 
615  nrn_spike_exchange_init();
616 
617  // if PatternStim exists, needs initialization
618  for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
619  if (tml->index == patstimtype && patstim_index >= 0 && patstim_te > 0.0) {
620  Memb_list* ml = tml->ml;
621  checkpoint_restore_patternstim(patstim_index,
622  patstim_te,
623  /* below correct only for AoS */
624  0,
625  ml->nodecount,
626  ml->data,
627  ml->pdata,
628  ml->_thread,
629  nrn_threads,
630  ml,
631  0.0);
632  break;
633  }
634  }
635 
636  // Check that bbcore_write is defined if we want to use checkpoint
637  for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
638  auto type = tml->index;
639  if (corenrn.get_bbcore_read()[type] && !corenrn.get_bbcore_write()[type]) {
640  auto& memb_func = corenrn.get_memb_func(type);
641  fprintf(stderr,
642  "Checkpoint is requested involving BBCOREPOINTER but there is no bbcore_write"
643  " function for %s\n",
644  memb_func.sym);
645  nrn_abort(1);
646  }
647  }
648 
649 
650  return restored;
651 }
652 
653 template <typename T>
654 T* CheckPoints::soa2aos(T* data, int cnt, int sz, int layout, int* permute) const {
655  // Inverse of the file -> data copy: just a copy if layout is AoS.
656  // For SoA, the original file order depends on padding and permutation.
657  // Good for a, b, area, v, diam, Memb_list.data, or anywhere values do not change.
658  T* d = new T[cnt * sz];
659  if (layout == Layout::AoS) {
660  for (int i = 0; i < cnt * sz; ++i) {
661  d[i] = data[i];
662  }
663  } else if (layout == Layout::SoA) {
664  int align_cnt = nrn_soa_padded_size(cnt, layout);
665  for (int i = 0; i < cnt; ++i) {
666  int ip = i;
667  if (permute) {
668  ip = permute[i];
669  }
670  for (int j = 0; j < sz; ++j) {
671  d[i * sz + j] = data[ip + j * align_cnt];
672  }
673  }
674  }
675  return d;
676 }
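// Worked example (illustrative numbers): cnt = 3, sz = 2, SoA padding to
// align_cnt = 4, permute = {2, 0, 1}. AoS instance i gathers column permute[i]
// from each padded variable row:
//   d[0*2+0] = data[2 + 0*4]   d[0*2+1] = data[2 + 1*4]
//   d[1*2+0] = data[0 + 0*4]   d[1*2+1] = data[0 + 1*4]   ...
// so a single pass undoes both the padding and the node permutation.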
677 
678 template <typename T>
679 void CheckPoints::data_write(FileHandler& F, T* data, int cnt, int sz, int layout, int* permute)
680  const {
681  T* d = soa2aos(data, cnt, sz, layout, permute);
682  F.write_array<T>(d, cnt * sz);
683  delete[] d;
684 }
685 
687 
689 
690 void CheckPoints::write_tqueue(TQItem* q, NrnThread& nt, FileHandler& fh) const {
691  DiscreteEvent* d = (DiscreteEvent*) q->data_;
692 
693  // printf(" p %.20g %d\n", q->t_, d->type());
694  // d->pr("", q->t_, net_cvode_instance);
695 
696  if (!d->require_checkpoint()) {
697  return;
698  }
699 
700  fh << d->type() << "\n";
701  fh.write_array(&q->t_, 1);
702 
703  switch (d->type()) {
704  case NetConType: {
705  NetCon* nc = (NetCon*) d;
706  assert(nc >= nt.netcons && (nc < (nt.netcons + nt.n_netcon)));
707  fh << (nc - nt.netcons) << "\n";
708  break;
709  }
710  case SelfEventType: {
711  SelfEvent* se = (SelfEvent*) d;
712  fh << int(se->target_->_type) << "\n";
713  fh << se->target_ - nt.pntprocs << "\n"; // index of nrnthread.pntprocs
714  fh << se->target_->_i_instance << "\n"; // not needed except for assert check
715  fh.write_array(&se->flag_, 1);
716  fh << (se->movable_ - nt._vdata) << "\n"; // DANGEROUS?
717  fh << se->weight_index_ << "\n";
718  // printf(" %d %ld %d %g %ld %d\n", se->target_->_type, se->target_ - nt.pntprocs,
719  // se->target_->_i_instance, se->flag_, se->movable_ - nt._vdata, se->weight_index_);
720  break;
721  }
722  case PreSynType: {
723  PreSyn* ps = (PreSyn*) d;
724  assert(ps >= nt.presyns && (ps < (nt.presyns + nt.n_presyn)));
725  fh << (ps - nt.presyns) << "\n";
726  break;
727  }
728  case NetParEventType: {
729  // nothing extra to write
730  break;
731  }
732  case PlayRecordEventType: {
733  PlayRecord* pr = ((PlayRecordEvent*) d)->plr_;
734  fh << pr->type() << "\n";
735  if (pr->type() == VecPlayContinuousType) {
736  VecPlayContinuous* vpc = (VecPlayContinuous*) pr;
737  int ix = -1;
738  for (int i = 0; i < nt.n_vecplay; ++i) {
739  // if too many for fast search, put ix in the instance
740  if (nt._vecplay[i] == (void*) vpc) {
741  ix = i;
742  break;
743  }
744  }
745  assert(ix >= 0);
746  fh << ix << "\n";
747  } else {
748  assert(0);
749  }
750  break;
751  }
752  default: {
753  // In particular, InputPreSyn does not appear in tqueue as it
754  // immediately fans out to NetCon.
755  assert(0);
756  break;
757  }
758  }
759 }
760 
761 void CheckPoints::restore_tqitem(int type,
762  std::shared_ptr<Phase2::EventTypeBase> event,
763  NrnThread& nt) {
764  // printf("restore tqitem type=%d time=%.20g\n", type, time);
765 
766  switch (type) {
767  case NetConType: {
768  auto e = static_cast<Phase2::NetConType_*>(event.get());
769  // printf(" NetCon %d\n", netcon_index);
770  NetCon* nc = nt.netcons + e->netcon_index;
771  nc->send(e->time, net_cvode_instance, &nt);
772  break;
773  }
774  case SelfEventType: {
775  auto e = static_cast<Phase2::SelfEventType_*>(event.get());
776  if (e->target_type == patstimtype) {
777  if (nt.id == 0) {
778  patstim_te = e->time;
779  }
780  break;
781  }
782  Point_process* pnt = nt.pntprocs + e->point_proc_instance;
783  // printf(" SelfEvent %d %d %d %g %d %d\n", target_type, point_proc_instance,
784  // target_instance, flag, movable, weight_index);
785  nrn_assert(e->target_instance == pnt->_i_instance);
786  nrn_assert(e->target_type == pnt->_type);
787  net_send(nt._vdata + e->movable, e->weight_index, pnt, e->time, e->flag);
788  break;
789  }
790  case PreSynType: {
791  auto e = static_cast<Phase2::PreSynType_*>(event.get());
792  // printf(" PreSyn %d\n", presyn_index);
793  PreSyn* ps = nt.presyns + e->presyn_index;
794  int gid = ps->output_index_;
795  ps->output_index_ = -1;
796  ps->send(e->time, net_cvode_instance, &nt);
797  ps->output_index_ = gid;
798  break;
799  }
800  case NetParEventType: {
801  // nothing extra to read
802  // printf(" NetParEvent\n");
803  break;
804  }
805  case PlayRecordEventType: {
806  auto e = static_cast<Phase2::PlayRecordEventType_*>(event.get());
807  VecPlayContinuous* vpc = (VecPlayContinuous*) (nt._vecplay[e->vecplay_index]);
808  vpc->e_->send(e->time, net_cvode_instance, &nt);
809  break;
810  }
811  default: {
812  assert(0);
813  break;
814  }
815  }
816 }
817 
818 void CheckPoints::write_tqueue(NrnThread& nt, FileHandler& fh) const {
819  // VecPlayContinuous
820  fh << nt.n_vecplay << " VecPlayContinuous state\n";
821  for (int i = 0; i < nt.n_vecplay; ++i) {
822  VecPlayContinuous* vpc = (VecPlayContinuous*) nt._vecplay[i];
823  fh << vpc->last_index_ << "\n";
824  fh << vpc->discon_index_ << "\n";
825  fh << vpc->ubound_index_ << "\n";
826  }
827 
828  // PatternStim
829  int patstim_index = -1;
830  for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
831  if (tml->index == patstimtype) {
832  Memb_list* ml = tml->ml;
833  patstim_index = checkpoint_save_patternstim(
834  /* below correct only for AoS */
835  0,
836  ml->nodecount,
837  ml->data,
838  ml->pdata,
839  ml->_thread,
840  nrn_threads,
841  ml,
842  0.0);
843  break;
844  }
845  }
846  fh << patstim_index << " PatternStim\n";
847 
848  // Avoid extra spikes due to some presyn voltages above threshold
849  fh << -1 << " Presyn ConditionEvent flags\n";
850  for (int i = 0; i < nt.n_presyn; ++i) {
851  // PreSyn.flag_ not used. HPC memory utilizes PreSynHelper.flag_ array
852  fh << nt.presyns_helper[i].flag_ << "\n";
853  }
854 
855  NetCvodeThreadData& ntd = net_cvode_instance->p[nt.id];
856  // printf("write_tqueue %d %p\n", nt.id, ntd.tqe_);
857  TQueue<QTYPE>* tqe = ntd.tqe_;
858  TQItem* q;
859 
860  fh << -1 << " TQItems from atomic_dq\n";
861  while ((q = tqe->atomic_dq(1e20)) != nullptr) {
862  write_tqueue(q, nt, fh);
863  }
864  fh << 0 << "\n";
865  fh << -1 << " TQItems from binq_\n";
866  for (q = tqe->binq_->first(); q; q = tqe->binq_->next(q)) {
867  write_tqueue(q, nt, fh);
868  }
869  fh << 0 << "\n";
870 }
871 
872 // Read a tqueue/checkpoint
873 // int :: should be equal to the previous n_vecplay
874 // n_vecplay:
875 // int: last_index
876 // int: discon_index
877 // int: ubound_index
878 // int: patstim_index
879 // int: should be -1
880 // n_presyn:
881 // int: flags of presyn_helper
882 // int: should be -1
883 // null terminated:
884 // int: type
885 // array of size 1:
886 // double: time
887 // ... depends on the type
888 // int: should be -1
889 // null terminated:
890 // int: TO BE DEFINED
891 // ... depends on the type
892 void CheckPoints::restore_tqueue(NrnThread& nt, const Phase2& p2) {
893  restored = true;
894 
895  for (int i = 0; i < nt.n_vecplay; ++i) {
896  VecPlayContinuous* vpc = (VecPlayContinuous*) nt._vecplay[i];
897  auto& vec = p2.vec_play_continuous[i];
898  vpc->last_index_ = vec.last_index;
899  vpc->discon_index_ = vec.discon_index;
900  vpc->ubound_index_ = vec.ubound_index;
901  }
902 
903  // PatternStim
904  patstim_index = p2.patstim_index; // PatternStim
905  if (nt.id == 0) {
906  patstim_te = -1.0; // changed if relevant SelfEvent;
907  }
908 
909  for (int i = 0; i < nt.n_presyn; ++i) {
910  nt.presyns_helper[i].flag_ = p2.preSynConditionEventFlags[i];
911  }
912 
913  for (const auto& event: p2.events) {
914  restore_tqitem(event.first, event.second, nt);
915  }
916 }
917 
918 } // namespace coreneuron