NEURON
fadvance.cpp
1 #include <../../nrnconf.h>
2 
3 #include <nrnmpi.h>
4 #include <stdlib.h>
5 #include <errno.h>
6 #include "neuron.h"
7 #include "section.h"
8 #include "nrn_ansi.h"
9 #include "nrniv_mf.h"
10 #include "multisplit.h"
12 #define nrnoc_fadvance_c
13 #include "utils/profile/profiler_interface.h"
14 #include "nonvintblock.h"
15 #include "nrncvode.h"
16 #include "spmatrix.h"
17 
18 #include <vector>
19 
20 /*
21  After an fadvance from t-dt to t, v is defined at t,
22  states that depend on v are defined at t+dt/2,
23  states, such as concentrations, that depend on current are defined at t,
24  and currents are defined at t-dt/2.
25  */
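26 /*
27  Illustrative example of the staggered scheme above (editorial addition, not
28  part of the original source): with dt = 0.025, an fadvance from t-dt = 1.000
29  to t = 1.025 leaves v defined at 1.025, v-dependent states (e.g. gating
30  variables) at t+dt/2 = 1.0375, current-dependent states (e.g. concentrations)
31  at 1.025, and membrane currents at t-dt/2 = 1.0125.
 */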
26 /*
27  fcurrent is used to set up all assigned variables without changing any
28  states. It assumes all states have their values at time t, which is
29  only first-order correct. It determines the nonvint assignments first
30  so that ena, etc. are correct for the current determinations. We
31  demand that nonvint assignments can be done without changing states when
32  dt is temporarily set to 0.
33 
34  It turned out to be a bad idea to do this. Many methods (KINETIC, PROCEDURE)
35  for solving are not even called when dt = 0. Also it plays havoc with tables
36  that are recomputed several times when dt is changed and then changed back.
37  Therefore we are no longer trying to initialize assigned variables within
38  SOLVE'd blocks. Instead we are making initialization more convenient,
39  at least in the normal situations, by introducing a finitialize()
40  with an optional argument vinit. finitialize will call the INITIAL block
41  for all mechanisms in all segments. (Default initialization sets all states
42  to state0.) The user can set things up specially in a model's INITIAL
43  block. INITIAL blocks can make use of v. With care they can make use of
44  ionic concentrations just like BREAKPOINT and SOLVE'd blocks. When the argument
45  is present, v in all segments is set to that value.
46  */
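/*
 Illustrative HOC usage of the initialization described above (editorial sketch,
 assuming a model has already been built):
     finitialize(-65)   // set v = -65 mV in every segment, then run all INITIAL blocks
     finitialize()      // run the INITIAL blocks without touching v
 */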
47 
48 #if !defined(NRNMPI) || NRNMPI == 0
49 extern double nrnmpi_wtime();
50 #endif
51 extern double* nrn_mech_wtime_;
52 extern double t, dt;
53 extern double chkarg(int, double low, double high);
56 extern void nrn_solve(NrnThread*);
57 static void nonvint(neuron::model_sorted_token const&, NrnThread&);
58 extern void nrncvode_set_t(double t);
59 
61 static void* nrn_ms_reduce_solve(NrnThread*);
62 static void* nrn_ms_bksub(NrnThread*);
64 extern void* nrn_multisplit_triang(NrnThread*);
66 extern void* nrn_multisplit_bksub(NrnThread*);
67 extern void (*nrn_multisplit_setup_)();
68 void (*nrn_allthread_handle)();
69 
70 extern int state_discon_allowed_;
71 extern double hoc_epsilon;
72 
73 #define NRNCTIME 1
74 #define NONVINT_ODE_COUNT 5
75 
76 #if NRNCTIME
77 #define CTBEGIN double wt = nrnmpi_wtime()
78 #define CTADD nth->_ctime += nrnmpi_wtime() - wt
79 #else
80 #define CTBEGIN /**/
81 #define CTADD /**/
82 #endif
83 
84 #define ELIMINATE_T_ROUNDOFF 0
85 #if ELIMINATE_T_ROUNDOFF
86 /* In order to simplify, and as much as possible avoid, the handling
87 of round-off error due to repeated t += dt, we use
88 t = nrn_tbase_ + nrn_ndt_ * nrn_dt_;
89 to advance time. The problems that must be overcome are when the
90 user changes dt in the middle of a run, switches from cvode, or
91 abruptly and arbitrarily sets a new value of t and continues from
92 there (hence nrn_tbase_).
93 */
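/*
 Illustrative numeric example of the round-off problem (editorial addition):
 dt = 0.1 is not exactly representable in binary floating point, so summing
 t += dt ten times yields 0.9999999999999999 rather than 1.0, and the error
 grows with the number of steps; computing t = nrn_tbase_ + nrn_ndt_ * nrn_dt_
 with nrn_ndt_ = 10 gives exactly 1.0, independent of how many steps preceded it.
 */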
94 double nrn_ndt_, nrn_tbase_, nrn_dt_;
95 void nrn_chk_ndt() {
96  if (dt != nrn_dt_ || t != nrn_tbase_ + nrn_ndt_ * nrn_dt_) {
97  if (nrnmpi_myid == 0)
98  Printf("nrn_chk_ndt t=%g dt=%g old nrn_tbase_=%g nrn_ndt_=%g nrn_dt_=%g\n",
99  t,
100  dt,
101  nrn_tbase_,
102  nrn_ndt_,
103  nrn_dt_);
104  nrn_dt_ = dt;
105  nrn_tbase_ = t;
106  nrn_ndt_ = 0.;
107  }
108 }
109 #endif /* ELIMINATE_T_ROUNDOFF */
110 
111 /*
112 There are (too) many variants of nrn_fixed_step depending on
113 nrnmpi_numprocs 1 or > 1, nrn_nthread 1 or > 1,
114 nrnmpi_v_transfer nullptr or callable, nrn_multisplit_setup nullptr or callable,
115 and whether one takes a single step with fadvance
116 or possibly many with ParallelContext.psolve before synchronizing with
117 NetParEvent. The combination of simultaneous nrnmpi_numprocs > 1 and
118 nrn_nthread > 1 with parallel transfer
119 requires some refactoring of the use of the old
120 (*nrnmpi_v_transfer_)() from within nonvint(nt), which handled either mpi
121 or threads but cannot handle both simultaneously (unless the first
122 thread that arrives in nrnmpi_v_transfer is the one that accomplishes
123 the mpi transfer). Instead, we replace it with
124 nrnthread_v_transfer(nt) to handle the per-thread copying of source
125 data into the target space owned by the thread. Between update (called by
126 nrn_fixed_step_thread and nrn_ms_bksub) and nonvint (called by
127 nrn_fixed_step_lastpart), we need to have thread 0 do the interprocessor
128 transfer of source voltages, either to a staging area for later
129 copying to the target by a thread, or directly to the target if only one
130 thread. This will happen with (*nrnmpi_v_transfer_)(). (Look Ma, no threads.)
131 This deals properly with the necessary mpi synchronization and leaves
132 thread handling where it was before. Also, writing to the staging area
133 is only done by thread 0. Fixed step and global variable step
134 logic is limited to the case where an nrnmpi_v_transfer requires the
135 existence of nrnthread_v_transfer (even with only one thread).
136 */
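/*
 Sketch of the resulting per-time-step sequence when both MPI transfer and
 threads are in use (editorial summary of the code below, not additional behavior):
     update                       (nrn_fixed_step_thread / nrn_ms_bksub)
     thread 0: (*nrnmpi_v_transfer_)()        interprocessor copy of source voltages
     each thread: (*nrnthread_v_transfer_)(nt) per-thread copy into its targets, in nonvint
     nonvint                      (nrn_fixed_step_lastpart)
 */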
137 #if 1 || NRNMPI
138 void (*nrnmpi_v_transfer_)(); /* called by thread 0 */
139 void (*nrnthread_v_transfer_)(NrnThread* nt);
140 /* if at least one gap junction has a source voltage with extracellular inserted */
141 void (*nrnthread_vi_compute_)(NrnThread* nt);
142 #endif
143 
144 int cvode_active_;
145 
146 int stoprun;
147 bool nrn_use_fast_imem;
148 
149 #define PROFILE 0
150 #include "profile.h"
151 
152 void fadvance() {
153  nrn::Instrumentor::phase p_fadvance("fadvance");
154  tstopunset;
155  if (cvode_active_) {
156  cvode_fadvance(-1.);
157  tstopunset;
158  hoc_retpushx(1.);
159  return;
160  }
161  if (tree_changed) {
162  setup_topology();
163  }
164  if (v_structure_change) {
165  v_setup_vectors();
166  }
167  if (diam_changed) {
168  recalc_diam();
169  }
170  auto const cache_token = nrn_ensure_model_data_are_sorted();
171  nrn_fixed_step(cache_token);
172  tstopunset;
173  hoc_retpushx(1.);
174 }
175 
176 /*
177  batch_save() initializes the list of variables
178  batch_save(&varname, ...) adds variables (by pointer) to the list for saving
179  batch_save("varname", ...) adds variables (by name) to the list; the name
180  will appear in the header.
181  batch_run(tstop, tstep, "file") saves the variables to the file every tstep
182 */
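/*
 Illustrative HOC usage (editorial sketch; assumes a section named soma exists):
     batch_save()                    // start a new (empty) list
     batch_save("soma.v(0.5)")       // record this variable; the name goes in the header
     batch_run(100, 1, "batch.dat")  // run to 100 ms, writing a line every 1 ms
 */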
183 
184 static void batch_out(), batch_open(), batch_close();
185 
186 static FILE* batch_file;
187 static int batch_size;
188 static int batch_n;
189 static double** batch_var;
190 
191 static void batch_open(char* name, double tstop, double tstep, const char* comment) {
192  if (batch_file) {
193  batch_close();
194  }
195  if (!name) {
196  return;
197  }
198  batch_file = fopen(name, "w");
199  if (!batch_file) {
200  hoc_execerror("Couldn't open batch file", name);
201  }
202  fprintf(batch_file,
203  "%s\nbatch_run from t = %g to %g in steps of %g with dt = %g\n",
204  comment,
205  t,
206  tstop,
207  tstep,
208  dt);
209 #if 0
210  fprintf(batch_file, "variable names --\n");
211  if (!batch_var) {
212  batch_var = hoc_newlist();
213  }
214  count = 0;
215  ITERATE(q, batch_varname) {
216  fprintf(batch_file, "%s\n", STR(q));
217  ++count;
218  }
219  if (count != batch_n) {
220  if (batch_var) {
221  free(batch_var);
222  }
223  batch_n = count;
224  batch_var = (double**)ecalloc(batch_n, sizeof(double*));
225  }
226  count = 0;
227  ITERATE(q, batch_varname) {
228  batch_var[count++] = hoc_val_pointer(STR(q));
229  }
230 #endif
231 }
232 
233 void batch_run(void) /* avoid interpreter overhead */
234 {
235  double tstop, tstep, tnext;
236  char* filename;
237 
238  tstopunset;
239  tstop = chkarg(1, 0., 1e20);
240  tstep = chkarg(2, 0., 1e20);
241  if (ifarg(3)) {
242  filename = gargstr(3);
243  } else {
244  filename = 0;
245  }
246  auto* comment = ifarg(4) ? hoc_gargstr(4) : "";
247 
248  if (tree_changed) {
249  setup_topology();
250  }
251  if (v_structure_change) {
252  v_setup_vectors();
253  }
254  batch_open(filename, tstop, tstep, comment);
255  batch_out();
256  auto const cache_token = nrn_ensure_model_data_are_sorted();
257  if (cvode_active_) {
258  while (t < tstop) {
259  cvode_fadvance(t + tstep);
260  batch_out();
261  }
262  } else {
263  tstep -= dt / 4.;
264  tstop -= dt / 4.;
265  tnext = t + tstep;
266  while (t < tstop) {
267  nrn_fixed_step(cache_token);
268  if (t > tnext) {
269  batch_out();
270  tnext = t + tstep;
271  }
272  if (stoprun) {
273  tstopunset;
274  break;
275  }
276  }
277  }
278  batch_close();
279  hoc_retpushx(1.);
280 }
281 
282 static void dt2thread(double adt) {
283  if (adt != nrn_threads[0]._dt) {
284  int i;
285  for (i = 0; i < nrn_nthread; ++i) {
286  NrnThread* nt = nrn_threads + i;
287  nt->_t = t;
288  nt->_dt = dt;
289  if (secondorder) {
290  nt->cj = 2.0 / dt;
291  } else {
292  nt->cj = 1.0 / dt;
293  }
294  }
295  }
296 }
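/* Worked arithmetic (editorial addition): with dt = 0.025 ms the coefficient
   set above is cj = 1/dt = 40 ms^-1 for the default implicit step and
   cj = 2/dt = 80 ms^-1 when secondorder is nonzero (the Crank-Nicolson variants). */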
297 
298 static int _upd;
299 static void daspk_init_step_thread(neuron::model_sorted_token const& cache_token, NrnThread& nt) {
300  setup_tree_matrix(cache_token, nt);
301  nrn_solve(&nt);
302  if (_upd) {
303  nrn_update_voltage(cache_token, nt);
304  }
305 }
306 
307 void nrn_daspk_init_step(double tt, double dteps, int upd) {
308  double dtsav = nrn_threads->_dt;
309  int so = secondorder;
310  dt = dteps;
311  t = tt;
312  secondorder = 0;
313  dt2thread(dteps);
314  auto const sorted_token = nrn_ensure_model_data_are_sorted();
315  nrn_thread_table_check(sorted_token);
316  _upd = upd;
317  nrn_multithread_job(sorted_token, daspk_init_step_thread);
318  dt = dtsav;
319  secondorder = so;
320  dt2thread(dtsav);
321  nrn_thread_table_check(sorted_token);
322 }
323 
324 void nrn_fixed_step(neuron::model_sorted_token const& cache_token) {
325  nrn::Instrumentor::phase p_timestep("timestep");
326 #if ELIMINATE_T_ROUNDOFF
327  nrn_chk_ndt();
328 #endif
329  if (t != nrn_threads->_t) {
330  dt2thread(-1.);
331  } else {
332  dt2thread(dt);
333  }
334  nrn_thread_table_check(cache_token);
335  if (nrn_multisplit_setup_) {
337  // remove to avoid possible deadlock where some ranks do a
338  // v transfer and others do a spike exchange.
339  // i.e. must complete the full multisplit time step.
340  // if (!nrn_allthread_handle) {
343  /* see comment below */
344  if (nrnthread_v_transfer_) {
345  if (nrnmpi_v_transfer_) {
346  nrn::Instrumentor::phase p_gap("gap-v-transfer");
347  (*nrnmpi_v_transfer_)();
348  }
350  }
351  //}
352  } else {
354  /* if there is no nrnthread_v_transfer then there cannot be
355  a nrnmpi_v_transfer and lastpart
356  will be done in above call.
357  */
358  if (nrnthread_v_transfer_) {
359  if (nrnmpi_v_transfer_) {
360  nrn::Instrumentor::phase p_gap("gap-v-transfer");
361  (*nrnmpi_v_transfer_)();
362  }
364  }
365  }
366  t = nrn_threads[0]._t;
367  if (nrn_allthread_handle) {
368  (*nrn_allthread_handle)();
369  }
370 }
371 
372 /* better cache efficiency since a thread can do an entire minimum delay
373 integration interval before joining
374 */
375 static int step_group_n;
376 static int step_group_begin;
377 static int step_group_end;
378 
379 void nrn_fixed_step_group(neuron::model_sorted_token const& cache_token, int n) {
380  int i;
381 #if ELIMINATE_T_ROUNDOFF
382  nrn_chk_ndt();
383 #endif
384  dt2thread(dt);
385  nrn_thread_table_check(cache_token);
386  if (nrn_multisplit_setup_) {
387  int b = 0;
389  step_group_n = 0; /* abort at bksub flag */
390  for (i = 1; i < n; ++i) {
393  if (step_group_n) {
394  step_group_n = 0;
395  if (nrn_allthread_handle) {
396  (*nrn_allthread_handle)();
397  }
398  /* aborted step at bksub, so if not stopped
399  must do the triang*/
400  b = 1;
401  if (!stoprun) {
403  }
404  }
405  if (stoprun) {
406  break;
407  }
408  b = 0;
409  }
410  if (!b) {
413  }
414  if (nrn_allthread_handle) {
415  (*nrn_allthread_handle)();
416  }
417  } else {
418  step_group_n = n;
419  step_group_begin = 0;
420  step_group_end = 0;
421  while (step_group_end < step_group_n) {
422  /*printf("step_group_end=%d step_group_n=%d\n", step_group_end, step_group_n);*/
424  if (nrn_allthread_handle) {
425  (*nrn_allthread_handle)();
426  }
427  if (stoprun) {
428  break;
429  }
431  }
432  }
433  t = nrn_threads[0]._t;
434 }
435 
436 static void nrn_fixed_step_group_thread(neuron::model_sorted_token const& cache_token,
437  NrnThread& nt) {
438  auto* const nth = &nt;
439  int i;
440  nth->_stop_stepping = 0;
441  for (i = step_group_begin; i < step_group_n; ++i) {
442  nrn::Instrumentor::phase p_timestep("timestep");
443  nrn_fixed_step_thread(cache_token, nt);
444  if (nth->_stop_stepping) {
445  if (nth->id == 0) {
446  step_group_end = i + 1;
447  }
448  nth->_stop_stepping = 0;
449  return;
450  }
451  }
452  if (nth->id == 0) {
453  step_group_end = step_group_n;
454  }
455 }
456 
457 static void nrn_fixed_step_thread(neuron::model_sorted_token const& cache_token, NrnThread& nt) {
458  auto* const nth = &nt;
459  {
460  nrn::Instrumentor::phase p("deliver-events");
461  deliver_net_events(nth);
462  }
463 
464  CTBEGIN;
465  nrn_random_play();
466 #if ELIMINATE_T_ROUNDOFF
467  nt.nrn_ndt_ += .5;
468  nt._t = nrn_tbase_ + nt.nrn_ndt_ * nrn_dt_;
469 #else
470  nt._t += .5 * nt._dt;
471 #endif
472  fixed_play_continuous(nth);
473  setup_tree_matrix(cache_token, nt);
474  {
475  nrn::Instrumentor::phase p("matrix-solver");
476  if (interleave_permute_type) {
477  solve_interleaved(nth->id);
478  } else {
479  nrn_solve(nth);
480  }
481  }
482  {
483  nrn::Instrumentor::phase p("second-order-cur");
484  second_order_cur(nth);
485  }
486  {
487  nrn::Instrumentor::phase p("update");
488  nrn_update_voltage(cache_token, *nth);
489  }
490  CTADD;
491  /*
492  To simplify the logic,
493  if there is no nrnthread_v_transfer then there cannot be an nrnmpi_v_transfer.
494  */
495  if (!nrnthread_v_transfer_) {
496  nrn_fixed_step_lastpart(cache_token, nt);
497  }
498 }
499 
500 extern void nrn_extra_scatter_gather(int direction, int tid);
501 
502 void nrn_fixed_step_lastpart(neuron::model_sorted_token const& cache_token, NrnThread& nt) {
503  auto* const nth = &nt;
504  CTBEGIN;
505 #if ELIMINATE_T_ROUNDOFF
506  nth->nrn_ndt_ += .5;
507  nth->_t = nrn_tbase_ + nth->nrn_ndt_ * nrn_dt_;
508 #else
509  nth->_t += .5 * nth->_dt;
510 #endif
511  fixed_play_continuous(nth);
512  nrn_extra_scatter_gather(0, nth->id);
513  nonvint(cache_token, nt);
514  nrn_ba(cache_token, nt, AFTER_SOLVE);
515  fixed_record_continuous(cache_token, nt);
516  CTADD;
517  {
518  nrn::Instrumentor::phase p("deliver-events");
519  nrn_deliver_events(nth); /* up to but not past texit */
520  }
521 }
522 
523 /* nrn_fixed_step_thread is split into three pieces */
524 
525 void* nrn_ms_treeset_through_triang(NrnThread* nth) {
526  deliver_net_events(nth);
527  CTBEGIN;
528  nrn_random_play();
529 #if ELIMINATE_T_ROUNDOFF
530  nth->nrn_ndt_ += .5;
531  nth->_t = nrn_tbase_ + nth->nrn_ndt_ * nrn_dt_;
532 #else
533  nth->_t += .5 * nth->_dt;
534 #endif
538  CTADD;
539  return nullptr;
540 }
541 void* nrn_ms_reduce_solve(NrnThread* nth) {
542  nrn_multisplit_reduce_solve(nth);
543  return nullptr;
544 }
545 void* nrn_ms_bksub(NrnThread* nth) {
546  CTBEGIN;
547  nrn_multisplit_bksub(nth);
548  second_order_cur(nth);
549  auto const cache_token = nrn_ensure_model_data_are_sorted();
550  nrn_update_voltage(cache_token, *nth);
551  CTADD;
552  /* see above comment in nrn_fixed_step_thread */
553  if (!nrnthread_v_transfer_) {
554  nrn_fixed_step_lastpart(cache_token, *nth);
555  }
556  return nullptr;
557 }
558 void* nrn_ms_bksub_through_triang(NrnThread* nth) {
559  nrn_ms_bksub(nth);
560  if (nth->_stop_stepping) {
561  nth->_stop_stepping = 0;
562  if (nth == nrn_threads) {
563  step_group_n = 1;
564  }
565  return nullptr;
566  }
567  nrn_ms_treeset_through_triang(nth);
568  return nullptr;
569 }
570 
571 
572 void nrn_update_voltage(neuron::model_sorted_token const& sorted_token, NrnThread& nt) {
573  auto* const vec_rhs = nt.node_rhs_storage();
574  auto* const vec_v = nt.node_voltage_storage();
575  auto* const _nt = &nt;
576  int i, i1, i2;
577  i1 = 0;
578  i2 = _nt->end;
579  /* do not need to worry about linmod or extracellular*/
580  if (secondorder) {
581  for (i = i1; i < i2; ++i) {
582  vec_v[i] += 2. * vec_rhs[i];
583  }
584  } else {
585  for (i = i1; i < i2; ++i) {
586  vec_v[i] += vec_rhs[i];
587  }
588  }
589  if (use_sparse13) {
590  nrndae_update(_nt);
591  }
592 
593 #if EXTRACELLULAR
594  nrn_update_2d(_nt);
595 #endif
596  if (nrnthread_vi_compute_) {
597  (*nrnthread_vi_compute_)(_nt);
598  }
599 #if I_MEMBRANE
600  if (_nt->tml) {
601  assert(_nt->tml->index == CAP);
602  nrn_capacity_current(sorted_token, _nt, _nt->tml->ml);
603  }
604 #endif
605  if (nrn_use_fast_imem) {
606  nrn_calc_fast_imem(_nt);
607  }
608 }
609 
610 void nrn_calc_fast_imem(NrnThread* _nt) {
611  constexpr int i1 = 0;
612  auto const i3 = _nt->end;
613  auto const vec_area = _nt->node_area_storage();
614  auto const vec_rhs = _nt->node_rhs_storage();
615  auto const vec_sav_d = _nt->node_sav_d_storage();
616  auto const vec_sav_rhs = _nt->node_sav_rhs_storage();
617  assert(vec_sav_d);
618  assert(vec_sav_rhs);
619  for (int i = i1; i < i3; ++i) {
620  vec_sav_rhs[i] = (vec_sav_d[i] * vec_rhs[i] + vec_sav_rhs[i]) * vec_area[i] * 0.01;
621  }
622 }
623 
624 void nrn_calc_fast_imem_fixedstep_init(NrnThread* _nt) {
625  // nrn_rhs() is called near end of nrn_finitialize() via setup_tree_matrix()
626  // and nrn_rhs() sets up _nrn_sav_rhs as -total ionic_current (without
627  // ELECTRODE_CURRENT contributions) and RHS as axial + ionic + stim currents
628  // So sum, scaled by area, is i_membrane_ in nA.
629  // (Note: capacitance does not appear on rhs because delta_v is the
630  // variable in the current balance equations set up by setup_tree_matrix.)
631  // Warning: Have not thought deeply about extracellular or LinearMechanism.
632  // But there is a good chance things are ok; it needs testing.
633  // I don't believe this is used by Cvode or IDA.
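 // Units check for the 0.01 factor (editorial addition): vec_rhs and vec_sav_rhs
 // are current densities in mA/cm2 and vec_area is in um2; 1 (mA/cm2)*um2 =
 // 1e-11 A = 0.01 nA, so multiplying the summed density by area and by 0.01
 // yields i_membrane_ in nA, as stated above.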
634  constexpr auto i1 = 0;
635  int i3 = _nt->end;
636  auto const vec_area = _nt->node_area_storage();
637  auto const vec_rhs = _nt->node_rhs_storage();
638  auto const vec_sav_rhs = _nt->node_sav_rhs_storage();
639  for (int i = i1; i < i3; ++i) {
640  vec_sav_rhs[i] = (vec_rhs[i] + vec_sav_rhs[i]) * vec_area[i] * 0.01;
641  }
642 }
643 
644 void fcurrent(void) {
645  if (tree_changed) {
646  setup_topology();
647  }
648  if (v_structure_change) {
649  v_setup_vectors();
650  }
651  if (diam_changed) {
652  recalc_diam();
653  }
654 
655  dt2thread(-1.);
656  auto const sorted_token = nrn_ensure_model_data_are_sorted();
657  nrn_thread_table_check(sorted_token);
659  nrn_multithread_job(sorted_token, setup_tree_matrix);
661  hoc_retpushx(1.);
662 }
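/*
 Illustrative HOC usage (editorial sketch): after changing a parameter, e.g. a
 channel density, at the current time,
     fcurrent()
 recomputes the assigned variables and membrane currents from the present
 states without advancing t or changing any STATE variables.
 */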
663 
664 void nrn_print_matrix(NrnThread* _nt) {
665  extern int section_count;
666  extern Section** secorder;
667  int isec, inode;
668  Section* sec;
669  Node* nd;
670  if (use_sparse13) {
671  if (ifarg(1) && chkarg(1, 0., 1.) == 0.) {
672  spPrint(_nt->_sp13mat, 1, 0, 1);
673  } else {
674  int i, n = spGetSize(_nt->_sp13mat, 0);
675  spPrint(_nt->_sp13mat, 1, 1, 1);
676  for (i = 1; i <= n; ++i) {
677  Printf("%d %g\n", i, _nt->actual_rhs(i));
678  }
679  }
680  } else if (_nt) {
681  for (inode = 0; inode < _nt->end; ++inode) {
682  nd = _nt->_v_node[inode];
683  Printf("%d %g %g %g %g\n",
684  inode,
685  *nrn_classicalNodeB(nd),
686  *nrn_classicalNodeA(nd),
687  NODED(nd),
688  NODERHS(nd));
689  }
690  } else {
691  for (isec = 0; isec < section_count; ++isec) {
692  sec = secorder[isec];
693  for (inode = 0; inode < sec->nnode; ++inode) {
694  nd = sec->pnode[inode];
695  Printf("%d %d %g %g %g %g\n",
696  isec,
697  inode,
698  *nrn_classicalNodeB(nd),
699  *nrn_classicalNodeA(nd),
700  NODED(nd),
701  NODERHS(nd));
702  }
703  }
704  }
705 }
706 
707 void fmatrix(void) {
708  if (ifarg(1)) {
709  double x;
710  Section* sec;
711  int id;
712  Node* nd;
713  nrn_seg_or_x_arg(1, &sec, &x);
714  id = (int) chkarg(2, 1., 4.);
715  nd = node_exact(sec, x);
716  switch (id) {
717  case 1:
718  hoc_retpushx(NODEA(nd));
719  break;
720  case 2:
721  hoc_retpushx(NODED(nd));
722  break;
723  case 3:
724  hoc_retpushx(NODEB(nd));
725  break;
726  case 4:
727  hoc_retpushx(NODERHS(nd));
728  break;
729  }
730  return;
731  }
732  nrn_print_matrix(nrn_threads);
733  hoc_retpushx(1.);
734  return;
735 }
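/*
 Illustrative HOC usage (editorial sketch; assumes the currently accessed
 section is soma):
     fmatrix()             // print the full tree (or sparse) matrix
     a = fmatrix(0.5, 1)   // NODEA for the node containing soma(0.5)
     d = fmatrix(0.5, 2)   // NODED; index 3 returns NODEB, 4 returns NODERHS
 */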
736 
737 static void nonvint(neuron::model_sorted_token const& sorted_token, NrnThread& nt) {
738  /* nrnmpi_v_transfer if needed was done earlier */
739  if (nrnthread_v_transfer_) {
740  nrn::Instrumentor::phase p_gap("gap-v-transfer");
741  (*nrnthread_v_transfer_)(&nt);
742  }
743  nrn::Instrumentor::phase_begin("state-update");
744  bool const measure{nt.id == 0 && nrn_mech_wtime_};
745  errno = 0;
746  for (auto* tml = nt.tml; tml; tml = tml->next) {
747  if (memb_func[tml->index].state) {
748  std::string mechname("state-");
749  mechname += memb_func[tml->index].sym->name;
750  auto const w = measure ? nrnmpi_wtime() : -1.0;
751  nrn::Instrumentor::phase_begin(mechname.c_str());
752  memb_func[tml->index].state(sorted_token, &nt, tml->ml, tml->index);
753  nrn::Instrumentor::phase_end(mechname.c_str());
754  if (measure) {
755  nrn_mech_wtime_[tml->index] += nrnmpi_wtime() - w;
756  }
757  if (errno && nrn_errno_check(0)) {
758  hoc_warning("errno set during calculation of states", nullptr);
759  }
760  }
761  }
762  long_difus_solve(sorted_token, 0, nt); /* if any longitudinal diffusion */
763  nrn_nonvint_block_fixed_step_solve(nt.id);
764  nrn::Instrumentor::phase_end("state-update");
765 }
766 
767 int nrn_errno_check(int i) {
768  int ierr = hoc_errno_check();
769  if (ierr) {
770  fprintf(stderr,
771  "%d errno=%d at t=%g during call to mechanism %s\n",
772  nrnmpi_myid,
773  ierr,
774  t,
775  memb_func[i].sym->name);
776  }
777  return ierr;
778 }
779 
780 void frecord_init(void) { /* useful when changing states after an finitialize() */
781  int i;
782  dt2thread(-1);
783  nrn_record_init();
784  if (!cvode_active_) {
785  for (i = 0; i < nrn_nthread; ++i) {
787  }
788  }
789  hoc_retpushx(1.);
790 }
791 
792 void verify_structure(void) {
793  if (tree_changed) {
794  setup_topology();
795  }
796  if (v_structure_change) {
797  v_setup_vectors();
798  }
799  if (diam_changed) {
800  recalc_diam();
801  }
802  nrn_solver_prepare(); /* cvode ready to be used */
803 }
804 
805 void nrn_finitialize(int setv, double v) {
806  int iord, i;
807  extern int _ninits;
808  extern short* nrn_is_artificial_;
809  ++_ninits;
810 
811  nrn::Instrumentor::phase_begin("finitialize");
812  nrn_fihexec(3); /* model structure changes can be made */
814  // Is this the right place to call this?
815  auto const sorted_token = nrn_ensure_model_data_are_sorted();
816 #if ELIMINATE_T_ROUNDOFF
817  nrn_ndt_ = 0.;
818  nrn_dt_ = dt;
819  nrn_tbase_ = 0.;
820 #else
821  t = 0.;
822  dt2thread(-1.);
823 #endif
824  if (cvode_active_) {
825  nrncvode_set_t(t);
826  }
827  nrn_thread_table_check(sorted_token);
828  clear_event_queue();
829  nrn_spike_exchange_init();
830  nrn_random_play();
831  nrn_play_init(); /* Vector.play */
832  for (i = 0; i < nrn_nthread; ++i) {
833  nrn_deliver_events(nrn_threads + i); /* The play events at t=0 */
834  }
835  if (setv) {
836  for (auto* _nt: for_threads(nrn_threads, nrn_nthread)) {
837  auto const vec_v = _nt->node_voltage_storage();
838  std::fill_n(vec_v, _nt->end, v);
839  }
840  }
841 #if 1 || NRNMPI
842  if (nrnthread_vi_compute_)
843  for (auto* _nt: for_threads(nrn_threads, nrn_nthread)) {
844  (*nrnthread_vi_compute_)(_nt);
845  }
846  {
847  nrn::Instrumentor::phase p_gap("gap-v-transfer");
848  if (nrnmpi_v_transfer_) {
849  (nrnmpi_v_transfer_)();
850  }
851  if (nrnthread_v_transfer_)
852  for (auto* _nt: for_threads(nrn_threads, nrn_nthread)) {
853  (*nrnthread_v_transfer_)(_nt);
854  }
855  }
856 #endif
857  nrn_fihexec(0); /* after v is set but before INITIAL blocks are called*/
858  for (i = 0; i < nrn_nthread; ++i) {
859  nrn_ba(sorted_token, nrn_threads[i], BEFORE_INITIAL);
860  }
861  /* the INITIAL blocks are ordered so that mechanisms that write
862  concentrations are after ions and before mechanisms that read
863  concentrations.
864  */
865  /* the memblist list in NrnThread is already so ordered */
866 #if MULTICORE
867  for (i = 0; i < nrn_nthread; ++i) {
868  NrnThread* nt = nrn_threads + i;
870  NrnThreadMembList* tml;
871  for (tml = nt->tml; tml; tml = tml->next) {
872  if (memb_func[tml->index].has_initialize()) {
873  memb_func[tml->index].invoke_initialize(sorted_token, nt, tml->ml, tml->index);
874  }
875  }
876  }
877 #endif
878  for (iord = 0; iord < n_memb_func; ++iord) {
879  i = memb_order_[iord];
880  /* first clause due to MULTICORE */
881  if (nrn_is_artificial_[i])
882  if (memb_func[i].has_initialize()) {
883  if (memb_list[i].nodecount) {
884  // initialize all artificial cells in all threads at once
885  auto& ml = memb_list[i];
886  ml.set_storage_offset(0);
887  memb_func[i].invoke_initialize(sorted_token, nrn_threads, &ml, i);
888  }
889  if (errno) {
890  if (nrn_errno_check(i)) {
891  hoc_warning("errno set during call to INITIAL block", (char*) 0);
892  }
893  }
894  }
895  }
896  if (use_sparse13) {
897  nrndae_init();
898  }
899 
900  init_net_events();
901  for (i = 0; i < nrn_nthread; ++i) {
902  nrn_ba(sorted_token, nrn_threads[i], AFTER_INITIAL);
903  }
904  nrn_fihexec(1); /* after INITIAL blocks, before fcurrent*/
905 
906  for (i = 0; i < nrn_nthread; ++i) {
907  nrn_deliver_events(nrn_threads + i); /* The INITIAL sent events at t=0 */
908  }
909  if (cvode_active_) {
910  cvode_finitialize();
911  nrn_record_init();
912  } else {
914  for (i = 0; i < nrn_nthread; ++i) {
915  setup_tree_matrix(sorted_token, nrn_threads[i]);
916  if (nrn_use_fast_imem) {
917  nrn_calc_fast_imem_fixedstep_init(nrn_threads + i);
918  }
919  }
921  nrn_record_init();
922  for (i = 0; i < nrn_nthread; ++i) {
923  fixed_record_continuous(sorted_token, nrn_threads[i]);
924  }
925  }
926  for (i = 0; i < nrn_nthread; ++i) {
927  nrn_deliver_events(nrn_threads + i); /* The record events at t=0 */
928  }
929 #if NRNMPI
930  nrn_spike_exchange(nrn_threads);
931 #endif
932  if (nrn_allthread_handle) {
933  (*nrn_allthread_handle)();
934  }
935 
936  nrn_fihexec(2); /* just before return */
937  nrn::Instrumentor::phase_end("finitialize");
938 }
939 
940 void finitialize(void) {
941  int setv;
942  double v = 0.0;
943  setv = 0;
944  if (ifarg(1)) {
945  v = *getarg(1);
946  setv = 1;
947  }
948  tstopunset;
949  nrn_finitialize(setv, v);
950  tstopunset;
951  hoc_retpushx(1.);
952 }
953 
954 
955 static void batch_close() {
956  if (batch_file) {
957  fclose(batch_file);
958  batch_file = 0;
959  }
960 }
961 
962 static void batch_out() {
963  if (batch_file) {
964  int i;
965  for (i = 0; i < batch_n; ++i) {
966  fprintf(batch_file, " %g", *batch_var[i]);
967  }
968  fprintf(batch_file, "\n");
969  }
970 }
971 
972 void batch_save(void) {
973  int i;
974  if (!ifarg(1)) {
975  batch_n = 0;
976  } else {
977  for (i = 1; ifarg(i); ++i) {
978  if (batch_size <= batch_n) {
979  batch_size += 20;
980  batch_var = (double**) erealloc(batch_var, batch_size * sizeof(double*));
981  }
982  batch_var[batch_n] = hoc_pgetarg(i);
983  ++batch_n;
984  }
985  }
986  hoc_retpushx(1.);
987 }
988 
989 void nrn_ba(neuron::model_sorted_token const& cache_token, NrnThread& nt, int bat) {
990  for (NrnThreadBAList* tbl = nt.tbl[bat]; tbl; tbl = tbl->next) {
991  nrn_bamech_t const f{tbl->bam->f};
992  Memb_list* const ml{tbl->ml};
993  // TODO move this loop into the translated MOD file code
994  for (int i = 0; i < ml->nodecount; ++i) {
995  f(ml->nodelist[i], ml->pdata[i], ml->_thread, &nt, ml, i, cache_token);
996  }
997  }
998 }
999 
1000 typedef int (*NonVintBlockItem)(int method, int size, double* pd1, double* pd2, int tid);
1001 /* list to store the nrn_nonvint_block functions */
1002 static std::vector<NonVintBlockItem> nonvint_block_list;
1003 
1004 int nrn_nonvint_block_exe(int method, int size, double* pd1, double* pd2, int tid) {
1005  /* execute all functions in nonvint_block_list and return the sum of the
1006  * return values
1007  */
1008  int rval, sum = 0;
1009 
1010  for (auto& func: nonvint_block_list) {
1011  rval = (*func)(method, size, pd1, pd2, tid);
1012  if (rval == -1) {
1013  hoc_execerror("nrn_nonvint_block error", 0);
1014  } else {
1015  sum += rval;
1016  }
1017  if (method == NONVINT_ODE_COUNT) {
1018  size += rval;
1019  }
1020  }
1021 
1022  return sum;
1023 }
1024 
1025 int set_nonvint_block(NonVintBlockItem func) {
1026  /* store new_nrn_nonvint_block functions in a list */
1027  nonvint_block_list.push_back(func);
1028 
1029  /* could this be set directly in nrn_nonvint_block_helper? */
1030  nrn_nonvint_block = nrn_nonvint_block_exe;
1031 
1032  return 0;
1033 }
1034 
1035 int unset_nonvint_block(NonVintBlockItem func) {
1036  // remove new_nrn_nonvint_block functions from the list
1037  // in c++-20 one could say std::erase(nonvint_block_list, func);
1038  auto n = nonvint_block_list.size();
1039  for (size_t i = 0; i < n; ++i) {
1040  if (func == nonvint_block_list[i]) {
1041  nonvint_block_list.erase(nonvint_block_list.begin() + i);
1042  break;
1043  }
1044  }
1045  if (nonvint_block_list.empty()) {
1046  nrn_nonvint_block = nullptr;
1047  }
1048  return 0;
1049 }
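/*
 Illustrative sketch (editorial addition, not part of this file): a callback
 with the NonVintBlockItem signature that contributes nothing, registered and
 removed with the functions above:
     static int my_block(int method, int size, double* pd1, double* pd2, int tid) {
         return 0;  // a real callback would, e.g., report extra ODEs for NONVINT_ODE_COUNT
     }
     // ... set_nonvint_block(my_block); ... unset_nonvint_block(my_block);
 Return values are summed by nrn_nonvint_block_exe and -1 signals an error.
 */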
1050 
1051 int nrn_nonvint_block_helper(int method, int size, double* pd1, double* pd2, int tid) {
1053  int rval = (*nrn_nonvint_block)(method, size, pd1, pd2, tid);
1054  if (rval == -1) {
1055  hoc_execerror("nrn_nonvint_block error", 0);
1056  }
1057  return rval;
1058 }
1059 
1060 // nrn_ensure_model_data_are_sorted_opaque() can be used in circumstances where
1061 // neuron:model_sorted_token const& is a forward ref and nrn_ensure_model_data_are_sorted() cannot
1062 // be used
1063 namespace neuron {
1064 opaque_model_sorted_token::opaque_model_sorted_token(model_sorted_token&& token)
1065  : m_ptr{std::make_unique<model_sorted_token>(std::move(token))} {}
1067 } // namespace neuron
1068 
1069 neuron::opaque_model_sorted_token nrn_ensure_model_data_are_sorted_opaque() {
1070  return nrn_ensure_model_data_are_sorted();
1071 }
Section ** secorder
Definition: solve.cpp:82
int section_count
Definition: solve.cpp:81
void setup_topology(void)
Definition: cabcode.cpp:1635
int tree_changed
Definition: cabcode.cpp:51
Node * node_exact(Section *sec, double x)
like node_index but give proper node when x is 0 or 1 as well as in between
Definition: cabcode.cpp:1800
char * gargstr(int narg)
Definition: code2.cpp:227
#define v
Definition: md1redef.h:11
#define sec
Definition: md1redef.h:20
#define nodecount
Definition: md1redef.h:39
#define id
Definition: md1redef.h:41
#define i
Definition: md1redef.h:19
short * nrn_is_artificial_
Definition: cell_group.cpp:19
void cvode_finitialize()
void nrn_update_2d(NrnThread *nt)
Definition: extcelln.cpp:122
double dt
Definition: fadvance.cpp:52
static void * nrn_ms_bksub_through_triang(NrnThread *)
Definition: fadvance.cpp:558
void(* nrnthread_vi_compute_)(NrnThread *nt)
Definition: fadvance.cpp:141
void(* nrn_multisplit_setup_)()
Definition: treeset.cpp:50
static std::vector< NonVintBlockItem > nonvint_block_list
Definition: fadvance.cpp:1002
static FILE * batch_file
Definition: fadvance.cpp:186
static double ** batch_var
Definition: fadvance.cpp:189
double chkarg(int, double low, double high)
Definition: code2.cpp:626
static void batch_out()
Definition: fadvance.cpp:962
void finitialize(void)
Definition: fadvance.cpp:940
void nrn_calc_fast_imem(NrnThread *_nt)
Definition: fadvance.cpp:610
static int step_group_end
Definition: fadvance.cpp:377
int unset_nonvint_block(NonVintBlockItem func)
Definition: fadvance.cpp:1035
static void nonvint(neuron::model_sorted_token const &, NrnThread &)
Definition: fadvance.cpp:737
void * nrn_multisplit_reduce_solve(NrnThread *)
static int batch_size
Definition: fadvance.cpp:187
void nrn_update_voltage(neuron::model_sorted_token const &sorted_token, NrnThread &nt)
Definition: fadvance.cpp:572
void nrn_fixed_step_group(neuron::model_sorted_token const &cache_token, int n)
Definition: fadvance.cpp:379
void fcurrent(void)
Definition: fadvance.cpp:644
void nrncvode_set_t(double t)
Definition: cvodestb.cpp:137
int set_nonvint_block(NonVintBlockItem func)
Definition: fadvance.cpp:1025
static void * nrn_ms_bksub(NrnThread *)
Definition: fadvance.cpp:545
double * nrn_mech_wtime_
Definition: treeset.cpp:38
#define CTADD
Definition: fadvance.cpp:78
void batch_save(void)
Definition: fadvance.cpp:972
void nrn_print_matrix(NrnThread *_nt)
Definition: fadvance.cpp:664
int nrn_nonvint_block_exe(int method, int size, double *pd1, double *pd2, int tid)
Definition: fadvance.cpp:1004
void batch_run(void)
Definition: fadvance.cpp:233
void nrn_daspk_init_step(double tt, double dteps, int upd)
Definition: fadvance.cpp:307
static int batch_n
Definition: fadvance.cpp:188
int nrn_nonvint_block_helper(int method, int size, double *pd1, double *pd2, int tid)
Definition: fadvance.cpp:1051
int cvode_active_
Definition: fadvance.cpp:144
void(* nrn_allthread_handle)()
Definition: fadvance.cpp:68
static int _upd
Definition: fadvance.cpp:298
void nrn_solve(NrnThread *)
Definition: solve.cpp:333
int state_discon_allowed_
Definition: init.cpp:108
double t
Definition: cvodeobj.cpp:57
int(* NonVintBlockItem)(int method, int size, double *pd1, double *pd2, int tid)
Definition: fadvance.cpp:1000
void fmatrix(void)
Definition: fadvance.cpp:707
static void nrn_fixed_step_thread(neuron::model_sorted_token const &, NrnThread &)
Definition: fadvance.cpp:457
void * nrn_multisplit_triang(NrnThread *)
static int step_group_n
Definition: fadvance.cpp:375
#define CTBEGIN
Definition: fadvance.cpp:77
static void * nrn_ms_reduce_solve(NrnThread *)
Definition: fadvance.cpp:541
void nrn_fixed_step(neuron::model_sorted_token const &cache_token)
Definition: fadvance.cpp:324
void nrn_calc_fast_imem_fixedstep_init(NrnThread *_nt)
Definition: fadvance.cpp:624
static int step_group_begin
Definition: fadvance.cpp:376
bool nrn_use_fast_imem
Definition: fadvance.cpp:147
void * nrn_multisplit_bksub(NrnThread *)
void nrn_fixed_step_lastpart(neuron::model_sorted_token const &cache_token, NrnThread &nt)
Definition: fadvance.cpp:502
void nrn_ba(neuron::model_sorted_token const &cache_token, NrnThread &nt, int bat)
Definition: fadvance.cpp:989
void verify_structure(void)
Definition: fadvance.cpp:792
void nrn_finitialize(int setv, double v)
Definition: fadvance.cpp:805
void nrn_extra_scatter_gather(int direction, int tid)
Definition: cvodeobj.cpp:506
static void dt2thread(double adt)
Definition: fadvance.cpp:282
static void * nrn_ms_treeset_through_triang(NrnThread *)
Definition: fadvance.cpp:525
#define NONVINT_ODE_COUNT
Definition: fadvance.cpp:74
void frecord_init(void)
Definition: fadvance.cpp:780
static void batch_close()
Definition: fadvance.cpp:955
void(* nrnthread_v_transfer_)(NrnThread *nt)
Definition: fadvance.cpp:139
double nrnmpi_wtime()
Definition: nrnmpi.cpp:175
void fadvance()
Definition: fadvance.cpp:152
int nrn_errno_check(int i)
Definition: fadvance.cpp:767
static void batch_open()
static void daspk_init_step_thread(neuron::model_sorted_token const &cache_token, NrnThread &nt)
Definition: fadvance.cpp:299
double hoc_epsilon
Definition: hoc_init.cpp:221
static void nrn_fixed_step_group_thread(neuron::model_sorted_token const &, NrnThread &)
Definition: fadvance.cpp:436
void(* nrnmpi_v_transfer_)()
Definition: fadvance.cpp:138
neuron::opaque_model_sorted_token nrn_ensure_model_data_are_sorted_opaque()
Definition: fadvance.cpp:1069
void nrn_fihexec(int type)
Definition: finithnd.cpp:33
int stoprun
Definition: fadvance.cpp:146
int hoc_errno_check(void)
Definition: math.cpp:257
char * hoc_gargstr(int)
double * hoc_val_pointer(const char *s)
Definition: code2.cpp:728
void hoc_retpushx(double x)
Definition: hocusr.cpp:154
double * hoc_pgetarg(int narg)
Definition: oc_ansi.h:253
double(* func)(double)
Definition: hoc_init.cpp:85
#define assert(ex)
Definition: hocassrt.h:24
#define getarg
Definition: hocdec.h:17
void long_difus_solve(neuron::model_sorted_token const &sorted_token, int method, NrnThread &nt)
Definition: ldifus.cpp:86
void(*)(Node *, Datum *, Datum *, NrnThread *, Memb_list *, std::size_t, neuron::model_sorted_token const &) nrn_bamech_t
Definition: membfunc.h:30
#define AFTER_SOLVE
Definition: membfunc.hpp:70
#define CAP
Definition: membfunc.hpp:60
#define BEFORE_INITIAL
Definition: membfunc.hpp:67
#define AFTER_INITIAL
Definition: membfunc.hpp:68
#define STR(q)
Definition: model.h:76
#define ITERATE(itm, lst)
Definition: model.h:18
nrn_random_play
Definition: extdef.h:10
const char * name
Definition: init.cpp:16
void move(Item *q1, Item *q2, Item *q3)
Definition: list.cpp:200
auto for_threads(NrnThread *threads, int num_threads)
Definition: multicore.h:133
double * nrn_classicalNodeA(Node *nd)
double * nrn_classicalNodeB(Node *nd)
static void phase_end(const char *name)
static void phase_begin(const char *name)
phase
Reading phase number.
Definition: nrn_setup.hpp:53
NrnThread * nrn_threads
Definition: multicore.cpp:56
void clear_event_queue()
Definition: cvodestb.cpp:47
void init_net_events()
Definition: cvodestb.cpp:53
void nrn_spike_exchange(NrnThread *nt)
int nrn_nthread
Definition: multicore.cpp:55
void fixed_play_continuous(NrnThread *nt)
Definition: cvodestb.cpp:81
void second_order_cur(NrnThread *_nt, int secondorder)
void nrn_thread_table_check()
Definition: multicore.cpp:168
int v_structure_change
Definition: nrnoc_aux.cpp:20
void hoc_execerror(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:39
void * erealloc(void *ptr, size_t size)
Definition: nrnoc_aux.cpp:94
void * ecalloc(size_t n, size_t size)
Definition: nrnoc_aux.cpp:85
void deliver_net_events(NrnThread *nt)
Definition: cvodestb.cpp:24
void nrn_deliver_events(NrnThread *nt)
Definition: cvodestb.cpp:32
void nrn_spike_exchange_init()
Definition: netpar.cpp:238
void nrn_multithread_job(F &&job, Args &&... args)
Definition: multicore.hpp:161
void nrn_play_init()
Definition: cvodestb.cpp:72
void hoc_warning(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:44
int diam_changed
Definition: nrnoc_aux.cpp:21
In mechanism libraries, cannot use auto const token = nrn_ensure_model_data_are_sorted(); because the...
Definition: tnode.hpp:17
int interleave_permute_type
Definition: cellorder.cpp:42
auto *const vec_rhs
Definition: cellorder.cpp:616
void solve_interleaved(int ith)
Solve the Hines matrices based on the interleave_permute_type (1 or 2).
static char * mechname
Definition: nocpout.cpp:137
void v_setup_vectors()
Definition: treeset.cpp:1596
nonvintblock_extern int(* nrn_nonvint_block)(int method, int length, double *pd1, double *pd2, int tid)
Definition: nonvintblock.h:30
#define nrn_nonvint_block_init(tid)
Definition: nonvintblock.h:40
#define nrn_nonvint_block_fixed_step_solve(tid)
Definition: nonvintblock.h:52
void setup_tree_matrix(neuron::model_sorted_token const &sorted_token, NrnThread &nt)
Definition: treeset.cpp:599
void nrn_seg_or_x_arg(int iarg, Section **psec, double *px)
Definition: point.cpp:170
void recalc_diam(void)
Definition: treeset.cpp:923
neuron::model_sorted_token nrn_ensure_model_data_are_sorted()
Ensure neuron::container::* data are sorted.
Definition: treeset.cpp:2182
#define tstopunset
Definition: nrnconf.h:45
void cvode_fadvance(double tstop)
Definition: cvodestb.cpp:100
void nrn_record_init()
Definition: cvodestb.cpp:70
void nrn_solver_prepare()
Definition: cvodestb.cpp:94
void fixed_record_continuous(neuron::model_sorted_token const &cache_token, NrnThread &nt)
Definition: cvodestb.cpp:88
void nrndae_init()
Definition: nrndae.cpp:56
void nrndae_update(NrnThread *_nt)
Definition: nrndae.cpp:34
int const size_t const size_t n
Definition: nrngsl.h:10
size_t q
size_t p
int ifarg(int)
Definition: code.cpp:1607
std::vector< Memb_func > memb_func
Definition: init.cpp:145
void nrn_capacity_current(neuron::model_sorted_token const &sorted_token, NrnThread *_nt, Memb_list *ml)
Definition: capac.cpp:61
std::vector< Memb_list > memb_list
Definition: init.cpp:146
int _ninits
Definition: init.cpp:1071
short * memb_order_
Definition: init.cpp:147
int n_memb_func
Definition: init.cpp:448
int nrnmpi_myid
#define NODEA(n)
Definition: section_fwd.hpp:52
#define NODEB(n)
Definition: section_fwd.hpp:53
int use_sparse13
Definition: treeset.cpp:58
#define NODERHS(n)
Definition: section_fwd.hpp:55
#define NODED(n)
Definition: section_fwd.hpp:54
int spGetSize(char *eMatrix, BOOLEAN External)
Definition: spalloc.cpp:638
#define NULL
Definition: spdefs.h:105
void spPrint(char *, int, int, int)
Definition: spoutput.cpp:126
A view into a set of mechanism instances.
Definition: nrnoc_ml.h:34
Definition: section.h:105
struct NrnThreadBAList * next
Definition: multicore.h:42
Represent main neuron object computed by single thread.
Definition: multicore.h:58
double * node_sav_d_storage()
Definition: multicore.cpp:1078
double _dt
Definition: multicore.h:60
NrnThreadMembList * tml
Definition: multicore.h:62
char * _sp13mat
Definition: multicore.h:94
int id
Definition: multicore.h:66
NrnThreadBAList * tbl[BEFORE_AFTER_SIZE]
Definition: multicore.h:103
double & actual_rhs(std::size_t row)
Definition: multicore.h:85
double cj
Definition: multicore.h:61
double * node_sav_rhs_storage()
Definition: multicore.cpp:1088
int _stop_stepping
Definition: multicore.h:67
double * node_rhs_storage()
Definition: multicore.cpp:1074
double * node_area_storage()
Definition: multicore.cpp:1059
int end
Definition: multicore.h:65
Node ** _v_node
Definition: multicore.h:90
double * node_voltage_storage()
Definition: multicore.cpp:1098
double _t
Definition: multicore.h:59
struct NrnThreadMembList * next
Definition: multicore.h:34
Memb_list * ml
Definition: multicore.h:35
opaque_model_sorted_token(model_sorted_token &&)
Definition: fadvance.cpp:1064
int Printf(const char *fmt, Args... args)
Definition: logger.hpp:18