NEURON
occvode.cpp
1 #include <../../nrnconf.h>
2 #include "hocdec.h"
3 #include "cabcode.h"
4 #include "nrn_ansi.h"
5 #include "nrndae_c.h"
6 #include "nrniv_mf.h"
7 #include "nrnoc2iv.h"
8 #include "nrndaspk.h"
9 #include "cvodeobj.h"
10 #include "netcvode.h"
11 #include "ivocvect.h"
12 #include "vrecitem.h"
13 #include "membfunc.h"
14 #include "nonvintblock.h"
15 #include "nrndigest.h"
16 
17 #include <cerrno>
18 #include <numeric>
19 
20 
21 #include "spmatrix.h"
22 extern double* sp13mat;
23 
24 #if 1 || NRNMPI
25 extern void (*nrnthread_v_transfer_)(NrnThread*);
26 extern void (*nrnmpi_v_transfer_)();
27 #endif
28 
29 extern void (*nrn_multisplit_setup_)();
30 extern void* nrn_multisplit_triang(NrnThread*);
31 extern void* nrn_multisplit_reduce_solve(NrnThread*);
32 extern void* nrn_multisplit_bksub(NrnThread*);
33 extern void nrn_multisplit_nocap_v();
34 extern void nrn_multisplit_nocap_v_part1(NrnThread*);
35 extern void nrn_multisplit_nocap_v_part2(NrnThread*);
36 extern void nrn_multisplit_nocap_v_part3(NrnThread*);
37 extern void nrn_multisplit_adjust_rhs(NrnThread*);
38 #if NRNMPI
39 extern void (*nrn_multisplit_solve_)();
40 #endif
41 
42 static Symbol* vsym; // for absolute tolerance
43 /*
44 CVODE expects dy/dt = f(y) and solves (I - gamma*J)*x = b with an approx to
45 J = df/dy.
46 
47 The NEURON fixed step method sets up C*dv/dt = F(v)
48 by first calculating F(v) and storing it on the right hand side of
49 the matrix equation ( see src/nrnoc/treeset.cpp nrn_rhs() ).
50 It then sets up the left hand side of the matrix equation using
51 nrn_set_cj(1./dt); setup1_tree_matrix(); setup2_tree_matrix();
52 to form
53 (C/dt - J(F))*dv = F(v)
54 After a nrn_solve() the answer, dv, is stored in the right hand side
55 vector.
56 
57 However, be aware that the cvode state vector y differs from the
58 fixed step vector y in two ways. 1) the cvode
59 state vector includes ALL states, including channel states.
60 2) the cvode state vector does NOT include the zero area nodes
61 (since the capacitance of those nodes is 0). Furthermore, cvode
62 cannot work with the extracellular mechanism (both because extracellular
63 capacitance is often 0 and because more than one dv/dt is involved
64 in some of the current balance equations) or LinearMechanism (same reasons).
65 In that case the current balance equations are of the differential
66 algebraic form c*dv/dt = f(v) where c is non-diagonal and may have empty rows.
67 The variable step method for these cases is handled by daspk.
68 
69 */
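A quick standalone check of the correspondence above (illustrative only, not part of occvode.cpp; the numbers are made up): for a single compartment c*dv/dt = f(v), the fixed step Newton matrix entry (C/dt - J(F)) equals C/dt times CVODE's (I - gamma*J) entry when y = v, J = f'(v)/c and gamma = dt.

#include <cassert>
#include <cmath>

int main() {
    const double c = 1.5, dt = 0.025;  // made-up capacitance and step size
    const double dfdv = -3.0;          // made-up dF/dv at the current point
    const double fixed_step = c / dt - dfdv;     // (C/dt - J(F)) entry
    const double cvode = 1.0 - dt * (dfdv / c);  // (I - gamma*J) entry with J = (dF/dv)/c
    assert(std::fabs(fixed_step - (c / dt) * cvode) < 1e-12);
    return 0;
}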
70 
71 // determine neq_ and vector of pointers to scatter/gather y
72 // as well as algebraic nodes (no_cap)
73 
74 bool Cvode::init_global() {
75 #if NRNMPI
76  if (!use_partrans_ && nrnmpi_numprocs > 1 && (nrnmpi_v_transfer_ || nrn_multisplit_solve_)) {
77  assert(nrn_nthread == 1); // we lack an NVector class for both
78  // threads and mpi together
79  // could be a lot better.
80  use_partrans_ = true;
81  } else
82 #endif
83  if (!structure_change_) {
84  return false;
85  }
86  if (ctd_[0].cv_memb_list_ == nullptr) {
87  neq_ = 0;
88  if (use_daspk_) {
89  return true;
90  }
91  if (nrn_nonvint_block_ode_count(0, 0)) {
92  return true;
93  }
94  return false;
95  }
96  return true;
97 }
98 
99 void Cvode::init_eqn() {
100  double vtol;
101 
102  CvMembList* cml;
103  int i, j, zneq, zneq_v, zneq_cap_v;
104  // printf("Cvode::init_eqn\n");
105  if (nthsizes_) {
106  delete[] nthsizes_;
107  nthsizes_ = 0;
108  }
109  neq_ = 0;
110  for (int id = 0; id < nctd_; ++id) {
111  CvodeThreadData& z = ctd_[id];
112  z.cmlcap_ = nullptr;
113  z.cmlext_ = nullptr;
114  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
115  if (cml->index == CAP) {
116  z.cmlcap_ = cml;
117  }
118  if (cml->index == EXTRACELL) {
119  z.cmlext_ = cml;
120  }
121  }
122  }
123  if (use_daspk_) {
124  daspk_init_eqn();
125  return;
126  }
127  for (NrnThread* _nt: for_threads(nrn_threads, nrn_nthread)) {
128  // for lvardt, this body done only once and for ctd_[0]
129  CvodeThreadData& z = ctd_[_nt->id];
130  // how many ode's are there? The first ones are the voltages of
131  // nodes with non-zero capacitance
132  zneq_cap_v = 0;
133  if (z.cmlcap_) {
134  for (auto& ml: z.cmlcap_->ml) {
135  zneq_cap_v += ml.nodecount;
136  }
137  }
138  zneq = zneq_cap_v;
139  z.neq_v_ = z.nonvint_offset_ = zneq;
140  // now add the membrane mechanism ode's to the count
141  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
142  if (auto const ode_count = memb_func[cml->index].ode_count; ode_count) {
143  auto const count = ode_count(cml->index);
144  for (auto& ml: cml->ml) {
145  zneq += ml.nodecount * count;
146  }
147  }
148  }
149  z.nonvint_extra_offset_ = zneq;
150  z.pv_.resize(z.nonvint_extra_offset_);
151  z.pvdot_.resize(z.nonvint_extra_offset_);
152  zneq += nrn_nonvint_block_ode_count(zneq, _nt->id);
153  z.nvsize_ = zneq;
154  z.nvoffset_ = neq_;
155  neq_ += zneq;
156 #if 0
157 printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize=%d\n",
158 nrnmpi_myid, id, z.neq_v_, z.nonvint_extra_offset_ - z.nonvint_offset_,
159 z.nvsize_ - z.nonvint_extra_offset_, z.nvsize_);
160 #endif
161  if (nth_) {
162  break;
163  } // lvardt
164  }
165 #if NRNMPI
166  if (use_partrans_) {
167  global_neq_ = nrnmpi_int_sum_reduce(neq_);
168  // printf("%d global_neq_=%d neq=%d\n", nrnmpi_myid, global_neq_, neq_);
169  }
170 #endif
171  atolvec_alloc(neq_);
172  for (int id = 0; id < nctd_; ++id) {
173  CvodeThreadData& z = ctd_[id];
174  // If lvardt, this is a cvode integrating a particular cell in a
175  // particular thread, i.e., nth_. But nctd_ = 1.
176  // If gvardt, this cvode is unique and nctd_ is nrn_nthread and
177  // the relevant thread is nrn_threads + id;
178  NrnThread* nt_;
179  if (nctd_ > 1) { // definitely gvardt
180  nt_ = nrn_threads + id;
181  } else if (nrn_nthread > 1) { // definitely lvardt
182  nt_ = nth_;
183  } else { // either way, certainly thread 0
184  nt_ = nrn_threads;
185  }
186  double* atv = n_vector_data(atolnvec_, id);
187  zneq_cap_v = 0;
188  if (z.cmlcap_) {
189  for (auto& ml: z.cmlcap_->ml) {
190  zneq_cap_v += ml.nodecount;
191  }
192  }
193  zneq = z.nvsize_;
194  zneq_v = zneq_cap_v;
195 
196  for (i = 0; i < zneq; ++i) {
197  atv[i] = ncv_->atol();
198  }
199  vtol = 1.;
200  if (!vsym) {
201  vsym = hoc_table_lookup("v", hoc_built_in_symlist);
202  }
203  if (vsym->extra) {
204  double x;
205  x = vsym->extra->tolerance;
206  if (x != 0 && x < vtol) {
207  vtol = x;
208  }
209  }
210  for (i = 0; i < zneq_cap_v; ++i) {
211  atv[i] *= vtol;
212  }
213 
214  // mark all nodes to help with marking only no_cap nodes
215  auto* const vec_rhs = nt_->node_rhs_storage();
216  for (int i = z.rootnode_begin_index_; i < z.rootnode_end_index_; ++i) {
217  vec_rhs[i] = 1.;
218  }
219  for (int i = z.vnode_begin_index_; i < z.vnode_end_index_; ++i) {
220  vec_rhs[i] = 1.;
221  }
222 
223  i = 0;
224  if (zneq_cap_v) {
225  for (auto& ml: z.cmlcap_->ml) {
226  for (int j = 0; j < ml.nodecount; ++j) {
227  auto* const node = ml.nodelist[j];
228  z.pv_[i] = static_cast<double*>(node->v_handle());
229  z.pvdot_[i] = static_cast<double*>(node->rhs_handle());
230  *z.pvdot_[i] = 0.; // only ones = 1 are no_cap
231  ++i;
232  }
233  }
234  }
235 
236  int n_vnode = (z.vnode_end_index_ - z.vnode_begin_index_) +
237  (z.rootnode_end_index_ - z.rootnode_begin_index_);
238  z.no_cap_indices_.resize(n_vnode - zneq_cap_v);
239  // possibly a few more than needed.
240  z.no_cap_child_indices_.resize(n_vnode - zneq_cap_v);
241  int nocap_index = 0;
242  int nocap_child_index = 0;
243  for (int i = z.rootnode_begin_index_; i < z.rootnode_end_index_; ++i) {
244  if (vec_rhs[i] > .5) {
245  z.no_cap_indices_[nocap_index++] = i;
246  }
247  }
248  for (int i = z.vnode_begin_index_; i < z.vnode_end_index_; ++i) {
249  if (vec_rhs[i] > .5) {
250  z.no_cap_indices_[nocap_index++] = i;
251  }
252  auto parent_i = nt_->_v_parent_index[i];
253  if (vec_rhs[parent_i] > .5) {
254  z.no_cap_child_indices_[nocap_child_index++] = i;
255  }
256  }
257  z.no_cap_indices_.resize(nocap_index);
258  z.no_cap_child_indices_.resize(nocap_child_index);
259 
260  // use the sentinel values in NODERHS to construct a new no cap membrane list
261  new_no_cap_memb(z, nullptr);
262 
263  // map the membrane mechanism ode state and dstate pointers
264  int ieq = zneq_v;
265  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
266  Memb_func& mf = memb_func[cml->index];
267  if (!mf.ode_count) {
268  continue;
269  }
270  // rather than change ode_map pv,pvdot args back to double*
271  // from data_handle<double>, use a small (single instance
272  // ode count) temporary data handle vector and do the
273  // static_cast here.
274  std::vector<neuron::container::data_handle<double>> pv, pvdot;
275  for (auto& ml: cml->ml) {
276  if (int n; (n = mf.ode_count(cml->index)) > 0) {
277  // Note: if mf.hoc_mech then all cvode related
278  // callbacks are NULL (including ode_count)
279  // See src/nrniv/hocmech.cpp. That won't change but
280  // if it does, hocmech.cpp must follow all the
281  // nrn_ode_..._t prototypes to avoid segfault
282  // with Apple M1.
283  pv.resize(n);
284  pvdot.resize(n);
285  for (j = 0; j < ml.nodecount; ++j) {
286  mf.ode_map(ml.prop[j], ieq, pv.data(), pvdot.data(), atv + ieq, cml->index);
287  for (auto k = 0; k < n; ++k) {
288  z.pv_[k + ieq] = static_cast<double*>(pv[k]);
289  z.pvdot_[k + ieq] = static_cast<double*>(pvdot[k]);
290  }
291  ieq += n;
292  }
293  }
294  }
295  }
296  nrn_nonvint_block_ode_abstol(z.nvsize_, atv, id);
297  }
298  // validate pv_ and pvdot_ pointer elements as non null.
299  for (int id = 0; id < nctd_; ++id) {
300  CvodeThreadData& z = ctd_[id];
301  nrn_assert(std::size_t(z.nonvint_extra_offset_) == z.pv_.size());
302  for (int i = 0; i < z.nonvint_extra_offset_; ++i) {
303  nrn_assert(z.pv_[i]);
304  nrn_assert(z.pvdot_[i]);
305  }
306  }
307 
308  structure_change_ = false;
309 }
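To summarize the layout that init_eqn produces for one thread's slice of the state vector: indices [0, nonvint_offset_) are voltages of nodes with capacitance, [nonvint_offset_, nonvint_extra_offset_) are mechanism ODE states mapped through pv_/pvdot_, and [nonvint_extra_offset_, nvsize_) belong to the nonvint block callback. A toy calculation with made-up counts (not taken from any model):

#include <cassert>

int main() {
    const int zneq_cap_v = 3;       // voltage equations (nodes with capacitance)
    const int mech_states = 2 * 4;  // ode_count(type) * nodecount for one mechanism
    const int nonvint_block = 5;    // extra states from nrn_nonvint_block_ode_count
    const int nonvint_offset = zneq_cap_v;                          // == neq_v_
    const int nonvint_extra_offset = nonvint_offset + mech_states;  // end of pv_/pvdot_ map
    const int nvsize = nonvint_extra_offset + nonvint_block;        // per-thread nvsize_
    assert(nonvint_extra_offset == 11 && nvsize == 16);
    return 0;
}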
310 
311 void Cvode::new_no_cap_memb(CvodeThreadData& z, NrnThread* _nt) {
312  delete_memb_list(z.no_cap_memb_);
313  z.no_cap_memb_ = nullptr;
314  CvMembList* ncm{};
315  for (auto* cml = z.cv_memb_list_; cml; cml = cml->next) {
316  const Memb_func& mf = memb_func[cml->index];
317  // only point processes with currents are possibilities
318  if (!mf.is_point || !mf.current) {
319  continue;
320  }
321  // count how many at no cap nodes
322  int n{};
323  for (auto& ml: cml->ml) {
324  for (auto i = 0; i < ml.nodecount; ++i) {
325  if (NODERHS(ml.nodelist[i]) > .5) {
326  ++n;
327  }
328  }
329  }
330  if (n == 0) {
331  continue;
332  }
333 
334  // keep same order
335  if (!z.no_cap_memb_) {
336  z.no_cap_memb_ = new CvMembList{cml->index};
337  ncm = z.no_cap_memb_;
338  } else {
339  ncm->next = new CvMembList{cml->index};
340  ncm = ncm->next;
341  }
342  ncm->next = nullptr;
343  ncm->index = cml->index;
344  // ncm is in non-contiguous mode
345  ncm->ml.reserve(n);
346  ncm->ml.clear();
347  for (auto& ml: cml->ml) {
348  for (auto i = 0; i < ml.nodecount; ++i) {
349  if (NODERHS(ml.nodelist[i]) > .5) {
350  auto& newml = ncm->ml.emplace_back(cml->index /* mechanism type */);
351  newml.nodecount = 1;
352  newml.nodelist = new Node* [1] { ml.nodelist[i] };
353  assert(newml.nodelist[0] == ml.nodelist[i]);
354  newml.nodeindices = new int[1]{ml.nodeindices[i]};
355  newml.prop = new Prop* [1] { ml.prop[i] };
356  if (!mf.hoc_mech) {
357  // Danger: this is not stable w.r.t. permutation
358  newml.set_storage_offset(ml.get_storage_offset() + i);
359  newml.pdata = new Datum* [1] { ml.pdata[i] };
360  }
361  newml._thread = ml._thread;
362  }
363  }
364  }
365  assert(ncm->ml.size() == std::size_t(n));
366  }
367 }
368 
369 void Cvode::daspk_init_eqn() {
370  // DASPK equation order is exactly the same order as the
371  // fixed step method for current balance (including
372  // extracellular nodes) and linear mechanism. Remaining ode
373  // equations are same order as for Cvode. Thus, daspk differs from
374  // cvode order primarily in that cap and no-cap nodes are not
375  // distinguished.
376  // note that only one thread is allowed for sparse right now.
377  NrnThread* _nt = nrn_threads;
378  CvodeThreadData& z = ctd_[0];
379  double vtol;
380  // printf("Cvode::daspk_init_eqn\n");
381  int i, j, in, ie, k, zneq;
382 
383  // how many equations are there?
384  neq_ = 0;
385  CvMembList* cml;
386  // start with all the equations for the fixed step method.
387  if (use_sparse13 == 0 || diam_changed != 0) {
388  recalc_diam();
389  }
390  zneq = spGetSize(_nt->_sp13mat, 0);
391  z.neq_v_ = z.nonvint_offset_ = zneq;
392  // now add the membrane mechanism ode's to the count
393  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
394  if (auto ode_count = memb_func[cml->index].ode_count; ode_count) {
395  zneq += std::accumulate(cml->ml.begin(),
396  cml->ml.end(),
397  0,
398  [](int total, auto& ml) { return total + ml.nodecount; }) *
399  ode_count(cml->index);
400  }
401  }
402  z.nonvint_extra_offset_ = zneq;
403  zneq += nrn_nonvint_block_ode_count(zneq, _nt->id);
404  z.nvsize_ = zneq;
405  z.nvoffset_ = neq_;
406  neq_ = z.nvsize_;
407  // printf("Cvode::daspk_init_eqn: neq_v_=%d neq_=%d\n", neq_v_, neq_);
408  z.pv_.resize(z.nonvint_extra_offset_);
409  z.pvdot_.resize(z.nonvint_extra_offset_);
410  atolvec_alloc(neq_);
411  double* atv = n_vector_data(atolnvec_, 0);
412  for (i = 0; i < neq_; ++i) {
413  atv[i] = ncv_->atol();
414  }
415  vtol = 1.;
416  if (!vsym) {
417  vsym = hoc_table_lookup("v", hoc_built_in_symlist);
418  }
419  if (vsym->extra) {
420  double x;
421  x = vsym->extra->tolerance;
422  if (x != 0 && x < vtol) {
423  vtol = x;
424  }
425  }
426  // deal with voltage and extracellular and linear circuit nodes
427  // for daspk the order is the same
429  if (use_sparse13) {
430  for (in = 0; in < _nt->end; ++in) {
431  Node* nd;
432  Extnode* nde;
433  nd = _nt->_v_node[in];
434  nde = nd->extnode;
435  i = nd->eqn_index_ - 1; // the sparse matrix index starts at 1
436  z.pv_[i] = static_cast<double*>(nd->v_handle());
437  z.pvdot_[i] = static_cast<double*>(nd->rhs_handle());
438  if (nde) {
439  for (ie = 0; ie < nlayer; ++ie) {
440  k = i + ie + 1;
441  z.pv_[k] = static_cast<double*>(
442  neuron::container::data_handle<double>{neuron::container::do_not_search, nde->v + ie});
443  z.pvdot_[k] = static_cast<double*>(
444  neuron::container::data_handle<double>{neuron::container::do_not_search,
445  nde->_rhs[ie]});
446  }
447  }
448  }
449  nrndae_dkmap(z.pv_, z.pvdot_);
450  for (i = 0; i < z.neq_v_; ++i) {
451  atv[i] *= vtol;
452  }
453  }
454 
455  // map the membrane mechanism ode state and dstate pointers
456  int ieq = z.neq_v_;
457  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
458  auto const& mf = memb_func[cml->index];
459  auto const ode_count = mf.ode_count;
460  if (!ode_count) {
461  continue;
462  }
463  auto const n = ode_count(cml->index);
464  if (n <= 0) {
465  continue;
466  }
467  auto const ode_map = mf.ode_map;
468  // ode_map uses data_handle. Do static_cast<double*> here
469  std::vector<neuron::container::data_handle<double>> pv(n), pvdot(n);
470  for (auto& ml: cml->ml) {
471  for (j = 0; j < ml.nodecount; ++j) {
472  assert(ode_map);
473  ode_map(ml.prop[j], ieq, pv.data(), pvdot.data(), atv + ieq, cml->index);
474  for (auto k = 0; k < n; ++k) {
475  z.pv_[k + ieq] = static_cast<double*>(pv[k]);
476  z.pvdot_[k + ieq] = static_cast<double*>(pvdot[k]);
477  }
478  ieq += n;
479  }
480  }
481  }
482  structure_change_ = false;
483 }
484 
485 double* Cvode::n_vector_data(N_Vector v, int tid) {
486  if (!v) {
487  return 0;
488  }
489  if (nctd_ > 1) {
490  N_Vector subvec = ((N_Vector*) N_VGetArrayPointer(v))[tid];
491  return N_VGetArrayPointer(subvec);
492  }
493  return N_VGetArrayPointer(v);
494 }
495 
496 extern void nrn_extra_scatter_gather(int, int);
497 
498 void Cvode::scatter_y(neuron::model_sorted_token const& sorted_token, double* y, int tid) {
499  CvodeThreadData& z = CTD(tid);
500  assert(std::size_t(z.nonvint_extra_offset_) == z.pv_.size());
501  for (int i = 0; i < z.nonvint_extra_offset_; ++i) {
502  *(z.pv_[i]) = y[i];
503  // printf("%d scatter_y %d %d %g\n", nrnmpi_myid, tid, i, y[i]);
504  }
505  for (CvMembList* cml = z.cv_memb_list_; cml; cml = cml->next) {
506  const Memb_func& mf = memb_func[cml->index];
507  if (mf.ode_synonym) {
508  for (auto& ml: cml->ml) {
509  mf.ode_synonym(sorted_token, nrn_threads[tid], ml, cml->index);
510  }
511  }
512  }
513  nrn_extra_scatter_gather(0, tid);
514 }
515 
516 static Cvode* gather_cv;
517 static N_Vector gather_vec;
518 static void* gather_y_thread(NrnThread* nt) {
519  Cvode* cv = gather_cv;
520  cv->gather_y(cv->n_vector_data(gather_vec, nt->id), nt->id);
521  return 0;
522 }
523 void Cvode::gather_y(N_Vector y) {
524  if (nth_) {
525  gather_y(N_VGetArrayPointer(y), nth_->id);
526  return;
527  }
528  gather_cv = this;
529  gather_vec = y;
530  nrn_multithread_job(gather_y_thread);
531 }
532 void Cvode::gather_y(double* y, int tid) {
533  CvodeThreadData& z = CTD(tid);
534  nrn_extra_scatter_gather(1, tid);
535  assert(std::size_t(z.nonvint_extra_offset_) == z.pv_.size());
536  for (int i = 0; i < z.nonvint_extra_offset_; ++i) {
537  y[i] = *(z.pv_[i]);
538  // printf("gather_y %d %d %g\n", tid, i, y[i]);
539  }
540 }
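scatter_y and gather_y simply copy through the pv_ pointer table built by init_eqn, so a scatter followed by a gather reproduces y exactly. A minimal standalone sketch of that pattern (plain doubles standing in for the model data; nothing here is NEURON API):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
    double v = -65.0, m = 0.05;       // stand-ins for a voltage and a channel state
    std::vector<double*> pv{&v, &m};  // plays the role of CvodeThreadData::pv_
    const double y[2] = {-70.0, 0.1};
    for (std::size_t i = 0; i < pv.size(); ++i) {
        *pv[i] = y[i];  // scatter: copy solver state into the model
    }
    double yback[2];
    for (std::size_t i = 0; i < pv.size(); ++i) {
        yback[i] = *pv[i];  // gather: copy the model back into solver state
    }
    assert(yback[0] == y[0] && yback[1] == y[1]);
    return 0;
}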
541 void Cvode::scatter_ydot(double* ydot, int tid) {
542  int i;
543  CvodeThreadData& z = CTD(tid);
544  for (i = 0; i < z.nonvint_extra_offset_; ++i) {
545  *(z.pvdot_[i]) = ydot[i];
546  // printf("scatter_ydot %d %d %g\n", tid, i, ydot[i]);
547  }
548 }
549 static void* gather_ydot_thread(NrnThread* nt) {
550  Cvode* cv = gather_cv;
551  cv->gather_ydot(cv->n_vector_data(gather_vec, nt->id), nt->id);
552  return 0;
553 }
554 void Cvode::gather_ydot(N_Vector y) {
555  if (nth_) {
556  gather_ydot(N_VGetArrayPointer(y), nth_->id);
557  return;
558  }
559  gather_cv = this;
560  gather_vec = y;
561  nrn_multithread_job(gather_ydot_thread);
562 }
563 void Cvode::gather_ydot(double* ydot, int tid) {
564  int i;
565  if (ydot) {
566  CvodeThreadData& z = CTD(tid);
567  for (i = 0; i < z.nonvint_extra_offset_; ++i) {
568  ydot[i] = *(z.pvdot_[i]);
569  // printf("%d gather_ydot %d %d %g\n", nrnmpi_myid, tid, i, ydot[i]);
570  }
571  }
572 }
573 
574 int Cvode::setup(N_Vector ypred, N_Vector fpred) {
575  // printf("Cvode::setup\n");
576  if (nth_) {
577  return 0;
578  }
579  ++jac_calls_;
580  CvodeThreadData& z = CTD(0);
581  double gamsave = nrn_threads->_dt;
582  nrn_threads->_dt = gam();
583  nrn_nonvint_block_jacobian(z.nvsize_, n_vector_data(ypred, 0), n_vector_data(fpred, 0), 0);
584  nrn_threads->_dt = gamsave;
585  return 0;
586 }
587 
588 int Cvode::solvex_thread(neuron::model_sorted_token const& sorted_token,
589  double* b,
590  double* y,
591  NrnThread* nt) {
592  // printf("Cvode::solvex_thread %d t=%g t_=%g\n", nt->id, nt->t, t_);
593  // printf("Cvode::solvex_thread %d %g\n", nt->id, gam());
594  // printf("\tenter b\n");
595  // for (int i=0; i < neq_; ++i) { printf("\t\t%d %g\n", i, b[i]);}
596  int i;
597  CvodeThreadData& z = CTD(nt->id);
598  nt->cj = 1. / gam();
599  nt->_dt = gam();
600  if (z.nvsize_ == 0) {
601  return 0;
602  }
603 #if NRN_DIGEST
604  if (nrn_digest_) {
605  nrn_digest_dbl_array("solvex enter b", nt->id, t_, b, z.nvsize_);
606  nrn_digest_dbl_array("solvex enter y", nt->id, t_, y, z.nvsize_);
607  }
608 #endif
609  lhs(sorted_token, nt); // special version for cvode.
610  scatter_ydot(b, nt->id);
611  if (z.cmlcap_) {
612  for (auto& ml: z.cmlcap_->ml) {
613  nrn_mul_capacity(sorted_token, nt, &ml);
614  }
615  }
616  auto* const vec_rhs = nt->node_rhs_storage();
617  for (auto i: z.no_cap_indices_) {
618  vec_rhs[i] = 0;
619  }
620  // solve it
621 #if NRNMPI
622  if (nrn_multisplit_solve_) {
623  (*nrn_multisplit_solve_)();
624  } else
625 #endif
626  {
627  triang(nt);
628  bksub(nt);
629  }
630  // for (i=0; i < v_node_count; ++i) {
631  // printf("%d rhs %d %g t=%g\n", nrnmpi_myid, i, VEC_RHS(i), t);
632  //}
633  if (ncv_->stiff() == 2) {
634  solvemem(sorted_token, nt);
635  } else {
636  // bug here should multiply by gam
637  }
638  gather_ydot(b, nt->id);
639  // printf("\texit b\n");
640  // for (i=0; i < neq_; ++i) { printf("\t\t%d %g\n", i, b[i]);}
641  nrn_nonvint_block_ode_solve(z.nvsize_, b, y, nt->id);
642 #if NRN_DIGEST
643  if (nrn_digest_) {
644  nrn_digest_dbl_array("solvex leave b", nt->id, t_, b, z.nvsize_);
645  }
646 #endif
647  return 0;
648 }
649 
650 int Cvode::solvex_thread_part1(double* b, NrnThread* nt) {
651  // printf("Cvode::solvex_thread %d t=%g t_=%g\n", nt->id, nt->t, t_);
652  // printf("Cvode::solvex_thread %d %g\n", nt->id, gam());
653  // printf("\tenter b\n");
654  // for (int i=0; i < neq_; ++i) { printf("\t\t%d %g\n", i, b[i]);}
655  int i;
656  CvodeThreadData& z = ctd_[nt->id];
657  nt->cj = 1. / gam();
658  nt->_dt = gam();
659  if (z.nvsize_ == 0) {
660  return 0;
661  }
662  auto const sorted_token = nrn_ensure_model_data_are_sorted();
663  lhs(sorted_token, nt); // special version for cvode.
664  scatter_ydot(b, nt->id);
665  if (z.cmlcap_) {
666  assert(z.cmlcap_->ml.size() == 1);
667  nrn_mul_capacity(sorted_token, nt, &z.cmlcap_->ml[0]);
668  }
669  auto* const vec_rhs = nt->node_rhs_storage();
670  for (auto i: z.no_cap_indices_) {
671  vec_rhs[i] = 0.;
672  }
673  // solve it
674  nrn_multisplit_triang(nt);
675  return 0;
676 }
677 int Cvode::solvex_thread_part2(NrnThread* nt) {
678  nrn_multisplit_reduce_solve(nt);
679  return 0;
680 }
681 int Cvode::solvex_thread_part3(double* b, NrnThread* nt) {
682  nrn_multisplit_bksub(nt);
683  // for (i=0; i < v_node_count; ++i) {
684  // printf("%d rhs %d %g t=%g\n", nrnmpi_myid, i, VEC_RHS(i), t);
685  //}
686  if (ncv_->stiff() == 2) {
687  solvemem(nrn_ensure_model_data_are_sorted(), nt);
688  } else {
689  // bug here should multiply by gam
690  }
691  gather_ydot(b, nt->id);
692  // printf("\texit b\n");
693  // for (i=0; i < neq_; ++i) { printf("\t\t%d %g\n", i, b[i]);}
694  return 0;
695 }
696 
697 void Cvode::solvemem(neuron::model_sorted_token const& sorted_token, NrnThread* nt) {
698  // all the membrane mechanism matrices
699  CvodeThreadData& z = CTD(nt->id);
700  CvMembList* cml;
701  for (cml = z.cv_memb_list_; cml; cml = cml->next) { // probably can start at 6 or hh
702  const Memb_func& mf = memb_func[cml->index];
703  if (auto const ode_matsol = mf.ode_matsol; ode_matsol) {
704  for (auto& ml: cml->ml) {
705  ode_matsol(sorted_token, nt, &ml, cml->index);
706  if (errno && nrn_errno_check(cml->index)) {
707  hoc_warning("errno set during ode jacobian solve", nullptr);
708  }
709  }
710  }
711  }
712  long_difus_solve(sorted_token, 2, *nt);
713 }
714 
715 void Cvode::fun_thread(neuron::model_sorted_token const& sorted_token,
716  double tt,
717  double* y,
718  double* ydot,
719  NrnThread* nt) {
720  CvodeThreadData& z = CTD(nt->id);
721 #if NRN_DIGEST
722  if (nrn_digest_) {
723  nrn_digest_dbl_array("y", nt->id, tt, y, z.nvsize_);
724  }
725 #endif
726  fun_thread_transfer_part1(sorted_token, tt, y, nt);
727  nrn_nonvint_block_ode_fun(z.nvsize_, y, ydot, nt->id);
728  fun_thread_transfer_part2(sorted_token, ydot, nt);
729 
730 #if NRN_DIGEST
731  if (nrn_digest_ && ydot) {
732  nrn_digest_dbl_array("ydot", nt->id, tt, ydot, z.nvsize_);
733  }
734 #endif
735 }
736 
737 void Cvode::fun_thread_transfer_part1(neuron::model_sorted_token const& sorted_token,
738  double tt,
739  double* y,
740  NrnThread* nt) {
741  CvodeThreadData& z = CTD(nt->id);
742  nt->_t = tt;
743 
744  // fix this!!!
745  nt->_dt = h(); // really does not belong here but dt is needed for events
746  if (nt->_dt == 0.) {
747  nt->_dt = 1e-8;
748  }
749 
750  // printf("%p fun %d %.15g %g\n", this, neq_, _t, _dt);
751  play_continuous_thread(tt, nt);
752  if (z.nvsize_ == 0) {
753  return;
754  }
755  scatter_y(sorted_token, y, nt->id);
756 #if NRNMPI
757  if (use_partrans_) {
758  nrnmpi_assert_opstep(opmode_, nt->_t);
759  }
760 #endif
761  nocap_v(sorted_token, nt); // vm at nocap nodes consistent with adjacent vm
762 }
763 
764 void Cvode::fun_thread_transfer_part2(neuron::model_sorted_token const& sorted_token,
765  double* ydot,
766  NrnThread* nt) {
767  CvodeThreadData& z = CTD(nt->id);
768  if (z.nvsize_ == 0) {
769  return;
770  }
771 #if 1 || NRNMPI
772  if (nrnthread_v_transfer_) {
773  (*nrnthread_v_transfer_)(nt);
774  }
775 #endif
776  before_after(sorted_token, z.before_breakpoint_, nt);
777  rhs(sorted_token, nt); // similar to nrn_rhs in treeset.cpp
778 #if NRNMPI
779  if (nrn_multisplit_solve_) { // non-zero area nodes need an adjustment
780  nrn_multisplit_adjust_rhs(nt);
781  }
782 #endif
783  do_ode(sorted_token, *nt);
784  // divide by cm and compute capacity current
785  if (z.cmlcap_) {
786  for (auto& ml: z.cmlcap_->ml) {
787  nrn_div_capacity(sorted_token, nt, &ml);
788  }
789  }
790  if (auto const vec_sav_rhs = nt->node_sav_rhs_storage(); vec_sav_rhs) {
791  auto* const vec_area = nt->node_area_storage();
792  for (int i = z.rootnode_begin_index_; i < z.rootnode_end_index_; ++i) {
793  vec_sav_rhs[i] *= vec_area[i] * 0.01; // 0.01 milliamp/cm2 * um2 is nanoamp
794  }
795  for (int i = z.vnode_begin_index_; i < z.vnode_end_index_; ++i) {
796  vec_sav_rhs[i] *= vec_area[i] * 0.01;
797  }
798  }
799  gather_ydot(ydot, nt->id);
800  before_after(sorted_token, z.after_solve_, nt);
801  // for (int i=0; i < z.neq_; ++i) { printf("\t%d %g %g\n", i, y[i], ydot?ydot[i]:-1e99);}
802 }
803 
804 void Cvode::fun_thread_ms_part1(double tt, double* y, NrnThread* nt) {
805  nt->_t = tt;
806 
807  // fix this!!!
808  nt->_dt = h(); // really does not belong here but dt is needed for events
809  if (nt->_dt == 0.) {
810  nt->_dt = 1e-8;
811  }
812 
813  // printf("%p fun %d %.15g %g\n", this, neq_, _t, _dt);
814  play_continuous_thread(tt, nt);
815  scatter_y(nrn_ensure_model_data_are_sorted(), y, nt->id);
816 #if NRNMPI
817  if (use_partrans_) {
818  nrnmpi_assert_opstep(opmode_, nt->_t);
819  }
820 #endif
821  nocap_v_part1(nt); // vm at nocap nodes consistent with adjacent vm
822 }
823 void Cvode::fun_thread_ms_part2(NrnThread* nt) {
824  nocap_v_part2(nt); // vm at nocap nodes consistent with adjacent vm
825 }
826 void Cvode::fun_thread_ms_part34(double* ydot, NrnThread* nt) {
827  fun_thread_ms_part3(nt);
828  fun_thread_ms_part4(ydot, nt);
829 }
830 void Cvode::fun_thread_ms_part3(NrnThread* nt) {
831  nocap_v_part3(nt); // should be by itself in fun_thread_part2_5 if
832  // following is true and a gap is in 0 area node
833 }
834 void Cvode::fun_thread_ms_part4(double* ydot, NrnThread* nt) {
835 #if 1 || NRNMPI
836  if (nrnthread_v_transfer_) {
837  (*nrnthread_v_transfer_)(nt);
838  }
839 #endif
840  CvodeThreadData& z = ctd_[nt->id];
841  if (z.nvsize_ == 0) {
842  return;
843  }
844  auto const sorted_token = nrn_ensure_model_data_are_sorted();
845  before_after(sorted_token, z.before_breakpoint_, nt);
846  rhs(sorted_token, nt); // similar to nrn_rhs in treeset.cpp
847  nrn_multisplit_adjust_rhs(nt);
848  do_ode(sorted_token, *nt);
849  // divide by cm and compute capacity current
850  assert(z.cmlcap_->ml.size() == 1);
851  nrn_div_capacity(sorted_token, nt, &z.cmlcap_->ml[0]);
852  gather_ydot(ydot, nt->id);
853  before_after(sorted_token, z.after_solve_, nt);
854  // for (int i=0; i < z.neq_; ++i) { printf("\t%d %g %g\n", i, y[i], ydot?ydot[i]:-1e99);}
855 }
856 
857 void Cvode::before_after(neuron::model_sorted_token const& sorted_token,
858  BAMechList* baml,
859  NrnThread* nt) {
860  for (auto* ba = baml; ba; ba = ba->next) {
861  nrn_bamech_t f = ba->bam->f;
862  for (auto* const ml: ba->ml) {
863  for (int i = 0; i < ml->nodecount; ++i) {
864  f(ml->nodelist[i], ml->pdata[i], ml->_thread, nt, ml, i, sorted_token);
865  }
866  }
867  }
868 }
869 
870 /*
871 v at nodes with capacitance is correct (from scatter v) however
872 v at no-cap nodes is out of date since the values are from the
873 previous call. v would merely be the weighted average of
874 the adjacent v's except for the possibility of membrane
875 currents at branch points. We thus need to calculate both i(v)
876 and di/dv at those zero area nodes so that we can solve the
877 algebraic equation (di/dv + a_j)*vmnew = - i(vmold) + a_j*v_j.
878 The simplest case is no membrane current and root or leaf. In that
879 case vmnew = v_j. The next simplest case is no membrane current.
880 In that case, vm is the weighted sum (via the axial coefficients)
881 of v_j.
882 For now we handle only the general case, when there are membrane currents.
883 This is done by constructing a list of membrane mechanisms that
884 contribute to the membrane current at the nocap nodes.
885 */
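In the simplest limit mentioned above (no membrane current, so i = 0 and di/dv = 0) the algebraic equation reduces to the axially weighted average of the neighboring voltages. A standalone numeric sketch of that limit (made-up coefficients, not values from any model):

#include <cassert>

int main() {
    const double a1 = 2.0, a2 = 5.0;      // axial coefficients toward two neighbors
    const double v1 = -65.0, v2 = -60.0;  // neighbor voltages (these nodes have capacitance)
    const double vmnew = (a1 * v1 + a2 * v2) / (a1 + a2);  // weighted average
    assert(vmnew > v1 && vmnew < v2);  // lies between the neighbors, pulled toward the larger a
    return 0;
}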
886 
887 void Cvode::nocap_v(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) {
888  int i;
889  CvodeThreadData& z = CTD(_nt->id);
890 
891  auto* const vec_rhs = _nt->node_rhs_storage();
892  auto* const vec_d = _nt->node_d_storage();
893  auto* const vec_v = _nt->node_voltage_storage();
894  for (auto i: z.no_cap_indices_) {
895  vec_rhs[i] = 0.;
896  vec_d[i] = 0.;
897  }
898 
899  // compute the i(vmold) and di/dv
900  rhs_memb(sorted_token, z.no_cap_memb_, _nt);
901  lhs_memb(sorted_token, z.no_cap_memb_, _nt);
902 
903  auto* const vec_b = _nt->node_b_storage();
904  for (auto i: z.no_cap_indices_) {
905  vec_rhs[i] += vec_d[i] * vec_v[i];
906  if (i >= z.rootnode_end_index_) {
907  auto const parent_i = _nt->_v_parent_index[i];
908  vec_rhs[i] -= vec_b[i] * vec_v[parent_i];
909  vec_d[i] -= vec_b[i];
910  }
911  }
912 
913  auto* const vec_a = _nt->node_a_storage();
914  for (auto i: z.no_cap_child_indices_) {
915  auto const parent_i = _nt->_v_parent_index[i];
916  vec_rhs[parent_i] -= vec_a[i] * vec_v[i];
917  vec_d[parent_i] -= vec_a[i];
918  }
919 
920 #if NRNMPI
921  if (nrn_multisplit_solve_) { // add up the multisplit equations
922  nrn_multisplit_nocap_v();
923  }
924 #endif
925 
926  for (auto i: z.no_cap_indices_) {
927  vec_v[i] = vec_rhs[i] / vec_d[i];
928  }
929  // no_cap v's are now consistent with adjacent v's
930 }
931 
932 void Cvode::nocap_v_part1(NrnThread* _nt) {
933  int i;
934  CvodeThreadData& z = ctd_[_nt->id];
935 
936  auto* const vec_d = _nt->node_d_storage();
937  auto* const vec_rhs = _nt->node_rhs_storage();
938  auto* const vec_v = _nt->node_voltage_storage();
939  auto* const vec_b = _nt->node_b_storage();
940  auto* const vec_a = _nt->node_a_storage();
941  for (auto i: z.no_cap_indices_) {
942  vec_d[i] = 0.;
943  vec_rhs[i] = 0.;
944  }
945 
946  // compute the i(vmold) and di/dv
947  auto const sorted_token = nrn_ensure_model_data_are_sorted();
948  rhs_memb(sorted_token, z.no_cap_memb_, _nt);
949  lhs_memb(sorted_token, z.no_cap_memb_, _nt);
950 
951  for (auto i: z.no_cap_indices_) { // parent axial current
952  vec_rhs[i] += vec_d[i] * vec_v[i];
953  if (i >= z.rootnode_end_index_) {
954  auto const parent_i = _nt->_v_parent_index[i];
955  vec_rhs[i] -= vec_b[i] * vec_v[parent_i];
956  vec_d[i] -= vec_b[i];
957  }
958  }
959 
960  for (auto i: z.no_cap_child_indices_) {
961  auto const parent_i = _nt->_v_parent_index[i];
962  vec_rhs[parent_i] -= vec_a[i] * vec_v[i];
963  vec_d[parent_i] -= vec_a[i];
964  }
965 
966  nrn_multisplit_nocap_v_part1(_nt);
967 }
968 void Cvode::nocap_v_part2(NrnThread* _nt) {
969  nrn_multisplit_nocap_v_part2(_nt);
970 }
971 void Cvode::nocap_v_part3(NrnThread* _nt) {
972  int i;
973  nrn_multisplit_nocap_v_part3(_nt);
974  CvodeThreadData& z = ctd_[_nt->id];
975 
976  auto* const vec_d = _nt->node_d_storage();
977  auto* const vec_rhs = _nt->node_rhs_storage();
978  auto* const vec_v = _nt->node_voltage_storage();
979  for (auto i: z.no_cap_indices_) {
980  vec_v[i] = vec_rhs[i] / vec_d[i];
981  }
982  // no_cap v's are now consistent with adjacent v's
983 }
984 
985 void Cvode::do_ode(neuron::model_sorted_token const& sorted_token, NrnThread& nt) {
986  // all the membrane mechanism ode's
987  CvodeThreadData& z = CTD(nt.id);
988  for (auto* cml = z.cv_memb_list_; cml; cml = cml->next) { // probably can start at 6 or hh
989  if (auto* const ode_spec = memb_func[cml->index].ode_spec; ode_spec) {
990  for (auto& ml: cml->ml) {
991  ode_spec(sorted_token, &nt, &ml, cml->index);
992  if (errno && nrn_errno_check(cml->index)) {
993  hoc_warning("errno set during ode evaluation", nullptr);
994  }
995  }
996  }
997  }
998  long_difus_solve(sorted_token, 1, nt);
999 }
1000 
1001 static Cvode* nonode_cv;
1002 static void nonode_thread(neuron::model_sorted_token const& sorted_token, NrnThread& nt) {
1003  nonode_cv->do_nonode(sorted_token, &nt);
1004 }
1005 void Cvode::do_nonode(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) {
1006  // all the hacked integrators, etc, in SOLVE procedure almost a verbatim copy of nonvint in
1007  // fadvance.cpp
1008  if (!_nt) {
1009  if (nrn_nthread > 1) {
1010  nonode_cv = this;
1011  nrn_multithread_job(sorted_token, nonode_thread);
1012  return;
1013  }
1014  _nt = nrn_threads;
1015  }
1016  CvodeThreadData& z = CTD(_nt->id);
1017  CvMembList* cml;
1018  for (cml = z.cv_memb_list_; cml; cml = cml->next) {
1019  const Memb_func& mf = memb_func[cml->index];
1020  if (!mf.state) {
1021  continue;
1022  }
1023  for (auto& ml: cml->ml) {
1024  if (!mf.ode_spec) {
1025  mf.state(sorted_token, _nt, &ml, cml->index);
1026  } else if (mf.singchan_) {
1027  mf.singchan_(_nt, &ml, cml->index);
1028  }
1029  }
1030  }
1031 }
1032 
1033 void Cvode::states(double* pd) {
1034  int i, id;
1035  for (id = 0; id < nctd_; ++id) {
1036  CvodeThreadData& z = ctd_[id];
1037  double* s = n_vector_data(y_, id);
1038  for (i = 0; i < z.nvsize_; ++i) {
1039  pd[i + z.nvoffset_] = s[i];
1040  }
1041  }
1042 }
1043 
1044 void Cvode::dstates(double* pd) {
1045  int i, id;
1046  for (id = 0; id < nctd_; ++id) {
1047  CvodeThreadData& z = ctd_[id];
1048  for (i = 0; i < z.nonvint_extra_offset_; ++i) {
1049  pd[i + z.nvoffset_] = *z.pvdot_[i];
1050  }
1051  nrn_nonvint_block_ode_fun(z.nvsize_, n_vector_data(y_, id), pd + z.nvoffset_, id);
1052  }
1053 }
1054 
1055 void Cvode::error_weights(double* pd) {
1056  int i, id;
1057  for (id = 0; id < nctd_; ++id) {
1058  CvodeThreadData& z = ctd_[id];
1059  double* s = n_vector_data(ewtvec(), id);
1060  for (i = 0; i < z.nvsize_; ++i) {
1061  pd[i + z.nvoffset_] = s[i];
1062  }
1063  }
1064 }
1065 
1066 void Cvode::acor(double* pd) {
1067  int i, id;
1068  for (id = 0; id < nctd_; ++id) {
1069  CvodeThreadData& z = ctd_[id];
1070  double* s = n_vector_data(acorvec(), id);
1071  for (i = 0; i < z.nvsize_; ++i) {
1072  pd[i + z.nvoffset_] = s[i];
1073  }
1074  }
1075 }
1076 
1077 void Cvode::delete_prl() {
1078  int i;
1079  for (i = 0; i < nctd_; ++i) {
1080  CvodeThreadData& z = ctd_[i];
1081  if (z.play_) {
1082  delete z.play_;
1083  }
1084  z.play_ = nullptr;
1085  if (z.record_) {
1086  delete z.record_;
1087  }
1088  z.record_ = nullptr;
1089  }
1090 }
1091 
1092 void Cvode::record_add(PlayRecord* pr) {
1093  CvodeThreadData& z = CTD(pr->ith_);
1094  if (!z.record_) {
1095  z.record_ = new std::vector<PlayRecord*>();
1096  z.record_->reserve(1);
1097  }
1098  z.record_->push_back(pr);
1099 }
1100 
1101 void Cvode::record_continuous() {
1102  if (nth_) { // lvardt
1103  record_continuous_thread(nth_);
1104  } else {
1105  auto const sorted_token = nrn_ensure_model_data_are_sorted();
1106  for (int i = 0; i < nrn_nthread; ++i) {
1107  NrnThread* nt = nrn_threads + i;
1108  CvodeThreadData& z = ctd_[i];
1109  if (z.before_step_) {
1110  before_after(sorted_token, z.before_step_, nt);
1111  }
1112  if (z.record_) {
1113  for (auto& item: *(z.record_)) {
1114  item->continuous(t_);
1115  }
1116  }
1117  }
1118  }
1119 }
1120 
1121 void Cvode::record_continuous_thread(NrnThread* nt) {
1122  CvodeThreadData& z = CTD(nt->id);
1123  if (z.before_step_) {
1124  before_after(nrn_ensure_model_data_are_sorted(), z.before_step_, nt);
1125  }
1126  if (z.record_) {
1127  for (auto& item: *(z.record_)) {
1128  item->continuous(t_);
1129  }
1130  }
1131 }
1132 
1133 void Cvode::play_add(PlayRecord* pr) {
1134  CvodeThreadData& z = CTD(pr->ith_);
1135  if (!z.play_) {
1136  z.play_ = new std::vector<PlayRecord*>();
1137  }
1138  z.play_->push_back(pr);
1139 }
1140 
1141 void Cvode::play_continuous(double tt) {
1142  if (nth_) { // lvardt
1143  play_continuous_thread(tt, nth_);
1144  } else {
1145  for (int i = 0; i < nrn_nthread; ++i) {
1146  CvodeThreadData& z = ctd_[i];
1147  if (z.play_) {
1148  for (auto& item: *(z.play_)) {
1149  item->continuous(tt);
1150  }
1151  }
1152  }
1153  }
1154 }
1155 void Cvode::play_continuous_thread(double tt, NrnThread* nt) {
1156  CvodeThreadData& z = CTD(nt->id);
1157  if (z.play_) {
1158  for (auto& item: *(z.play_)) {
1159  item->continuous(tt);
1160  }
1161  }
1162 }
Definition: cvodeobj.h:97
void fun_thread_transfer_part2(neuron::model_sorted_token const &, double *ydot, NrnThread *nt)
Definition: occvode.cpp:764
void fun_thread_ms_part2(NrnThread *nt)
Definition: occvode.cpp:823
int neq_
Definition: cvodeobj.h:245
void solvemem(neuron::model_sorted_token const &, NrnThread *)
Definition: occvode.cpp:697
void do_nonode(neuron::model_sorted_token const &, NrnThread *nt=0)
Definition: occvode.cpp:1005
double t_
Definition: cvodeobj.h:126
long int * nthsizes_
Definition: cvodeobj.h:243
void rhs(neuron::model_sorted_token const &, NrnThread *)
Definition: cvtrset.cpp:12
void rhs_memb(neuron::model_sorted_token const &, CvMembList *, NrnThread *)
Definition: cvtrset.cpp:66
int solvex_thread_part2(NrnThread *nt)
Definition: occvode.cpp:677
void fun_thread_transfer_part1(neuron::model_sorted_token const &, double t, double *y, NrnThread *nt)
Definition: occvode.cpp:737
void play_continuous_thread(double t, NrnThread *)
Definition: occvode.cpp:1155
int nctd_
Definition: cvodeobj.h:242
void states(double *)
Definition: occvode.cpp:1033
N_Vector atolnvec_
Definition: cvodeobj.h:232
int solvex_thread(neuron::model_sorted_token const &, double *b, double *y, NrnThread *nt)
Definition: occvode.cpp:588
int jac_calls_
Definition: cvodeobj.h:132
void dstates(double *)
Definition: occvode.cpp:1044
bool init_global()
Definition: occvode.cpp:74
void triang(NrnThread *)
Definition: cvtrset.cpp:139
void record_add(PlayRecord *)
Definition: occvode.cpp:1092
CvodeThreadData * ctd_
Definition: cvodeobj.h:240
void play_add(PlayRecord *)
Definition: occvode.cpp:1133
void gather_y(N_Vector)
Definition: occvode.cpp:523
void daspk_init_eqn()
Definition: occvode.cpp:369
int setup(N_Vector ypred, N_Vector fpred)
Definition: occvode.cpp:574
void play_continuous(double t)
Definition: occvode.cpp:1141
void record_continuous()
Definition: occvode.cpp:1101
void fun_thread_ms_part4(double *ydot, NrnThread *nt)
Definition: occvode.cpp:834
N_Vector y_
Definition: cvodeobj.h:231
void nocap_v_part2(NrnThread *)
Definition: occvode.cpp:968
void before_after(neuron::model_sorted_token const &, BAMechList *, NrnThread *)
Definition: occvode.cpp:857
void new_no_cap_memb(CvodeThreadData &, NrnThread *)
Definition: occvode.cpp:311
void fun_thread_ms_part34(double *ydot, NrnThread *nt)
Definition: occvode.cpp:826
void fun_thread_ms_part1(double t, double *y, NrnThread *nt)
Definition: occvode.cpp:804
void record_continuous_thread(NrnThread *)
Definition: occvode.cpp:1121
int solvex_thread_part3(double *b, NrnThread *nt)
Definition: occvode.cpp:681
void fun_thread(neuron::model_sorted_token const &, double t, double *y, double *ydot, NrnThread *nt)
Definition: occvode.cpp:715
bool structure_change_
Definition: cvodeobj.h:237
void nocap_v_part3(NrnThread *)
Definition: occvode.cpp:971
NetCvode * ncv_
Definition: cvodeobj.h:244
void fun_thread_ms_part3(NrnThread *nt)
Definition: occvode.cpp:830
N_Vector acorvec()
Definition: cvodeobj.cpp:1399
NrnThread * nth_
Definition: cvodeobj.h:241
void acor(double *)
Definition: occvode.cpp:1066
double * n_vector_data(N_Vector, int)
Definition: occvode.cpp:485
void bksub(NrnThread *)
Definition: cvtrset.cpp:155
void init_eqn()
Definition: occvode.cpp:99
N_Vector ewtvec()
Definition: cvodeobj.cpp:1391
double h()
Definition: cvodeobj.cpp:737
void nocap_v_part1(NrnThread *)
Definition: occvode.cpp:932
void delete_prl()
Definition: occvode.cpp:1077
void do_ode(neuron::model_sorted_token const &, NrnThread &)
Definition: occvode.cpp:985
double gam()
Definition: cvodeobj.cpp:729
void lhs_memb(neuron::model_sorted_token const &, CvMembList *, NrnThread *)
Definition: cvtrset.cpp:119
bool use_daspk_
Definition: cvodeobj.h:208
void lhs(neuron::model_sorted_token const &, NrnThread *)
Definition: cvtrset.cpp:86
void nocap_v(neuron::model_sorted_token const &, NrnThread *)
Definition: occvode.cpp:887
void atolvec_alloc(int)
Definition: cvodeobj.cpp:826
int solvex_thread_part1(double *b, NrnThread *nt)
Definition: occvode.cpp:650
void scatter_y(neuron::model_sorted_token const &, double *, int)
Definition: occvode.cpp:498
void scatter_ydot(double *, int)
Definition: occvode.cpp:541
void gather_ydot(N_Vector)
Definition: occvode.cpp:554
void error_weights(double *)
Definition: occvode.cpp:1055
int nonvint_extra_offset_
Definition: cvodeobj.h:92
int nonvint_offset_
Definition: cvodeobj.h:91
int vnode_begin_index_
Definition: cvodeobj.h:80
std::vector< double * > pv_
Definition: cvodeobj.h:87
int vnode_end_index_
Definition: cvodeobj.h:81
CvMembList * cv_memb_list_
Definition: cvodeobj.h:62
void delete_memb_list(CvMembList *)
Definition: netcvode.cpp:1326
std::vector< int > no_cap_indices_
Definition: cvodeobj.h:60
CvMembList * no_cap_memb_
Definition: cvodeobj.h:65
BAMechList * before_breakpoint_
Definition: cvodeobj.h:66
CvMembList * cmlext_
Definition: cvodeobj.h:64
BAMechList * after_solve_
Definition: cvodeobj.h:67
int rootnode_begin_index_
Definition: cvodeobj.h:78
std::vector< double * > pvdot_
Definition: cvodeobj.h:87
CvMembList * cmlcap_
Definition: cvodeobj.h:63
std::vector< PlayRecord * > * record_
Definition: cvodeobj.h:94
int rootnode_end_index_
Definition: cvodeobj.h:79
BAMechList * before_step_
Definition: cvodeobj.h:68
std::vector< PlayRecord * > * play_
Definition: cvodeobj.h:93
std::vector< int > no_cap_child_indices_
Definition: cvodeobj.h:61
void atol(double)
Definition: netcvode.cpp:4472
void stiff(int)
Definition: netcvode.cpp:4475
Symbol * hoc_table_lookup(const char *, Symlist *)
Definition: symbol.cpp:48
#define v
Definition: md1redef.h:11
#define id
Definition: md1redef.h:41
#define i
Definition: md1redef.h:19
#define CTD(i)
Definition: cvodeobj.h:53
int nrn_errno_check(int i)
Definition: fadvance.cpp:767
static RNG::key_type k
Definition: nrnran123.cpp:9
#define assert(ex)
Definition: hocassrt.h:24
static void ode_spec(neuron::model_sorted_token const &token, NrnThread *, Memb_list *ml, int type)
Definition: kschan.cpp:108
static int ode_count(int type)
Definition: kschan.cpp:93
static void ode_matsol(neuron::model_sorted_token const &token, NrnThread *nt, Memb_list *ml, int type)
Definition: kschan.cpp:113
static void ode_map(Prop *prop, int ieq, neuron::container::data_handle< double > *pv, neuron::container::data_handle< double > *pvdot, double *atol, int type)
Definition: kschan.cpp:98
void long_difus_solve(neuron::model_sorted_token const &sorted_token, int method, NrnThread &nt)
Definition: ldifus.cpp:86
void(*)(Node *, Datum *, Datum *, NrnThread *, Memb_list *, std::size_t, neuron::model_sorted_token const &) nrn_bamech_t
Definition: membfunc.h:30
#define CAP
Definition: membfunc.hpp:60
#define EXTRACELL
Definition: membfunc.hpp:61
printf
Definition: extdef.h:5
auto for_threads(NrnThread *threads, int num_threads)
Definition: multicore.h:133
void(* nrn_multisplit_solve_)()
Definition: solve.cpp:76
NrnThread * nrn_threads
Definition: multicore.cpp:56
void nrn_mul_capacity(NrnThread *, Memb_list *, int)
Definition: capac.cpp:154
void nrn_div_capacity(NrnThread *, Memb_list *, int)
Definition: capac.cpp:133
int nrn_nthread
Definition: multicore.cpp:55
void nrn_multithread_job(F &&job, Args &&... args)
Definition: multicore.hpp:161
void hoc_warning(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:44
int diam_changed
Definition: nrnoc_aux.cpp:21
constexpr do_not_search_t do_not_search
Definition: data_handle.hpp:11
auto *const vec_d
Definition: cellorder.cpp:615
auto *const vec_b
Definition: cellorder.cpp:614
auto *const vec_rhs
Definition: cellorder.cpp:616
#define nrn_nonvint_block_ode_fun(size, y, ydot, tid)
Definition: nonvintblock.h:61
#define nrn_nonvint_block_ode_count(offset, tid)
Definition: nonvintblock.h:55
#define nrn_nonvint_block_jacobian(size, ypred, ydot, tid)
Definition: nonvintblock.h:70
#define nrn_nonvint_block_ode_abstol(size, y, tid)
Definition: nonvintblock.h:73
#define nrn_nonvint_block_ode_solve(size, b, y, tid)
Definition: nonvintblock.h:66
void recalc_diam(void)
Definition: treeset.cpp:923
neuron::model_sorted_token nrn_ensure_model_data_are_sorted()
Ensure neuron::container::* data are sorted.
Definition: treeset.cpp:2182
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
Definition: nrn_assert.h:33
static Node * node(Object *)
Definition: netcvode.cpp:291
void nrndae_dkmap(std::vector< double * > &pv, std::vector< double * > &pvdot)
Definition: nrndae.cpp:92
int const size_t const size_t n
Definition: nrngsl.h:10
size_t j
s
Definition: multisend.cpp:521
std::vector< Memb_func > memb_func
Definition: init.cpp:145
static void pr(N_Vector x)
int nrnmpi_myid
void nrn_extra_scatter_gather(int, int)
Definition: cvodeobj.cpp:506
void(* nrn_multisplit_setup_)()
Definition: treeset.cpp:50
void(* nrnthread_v_transfer_)(NrnThread *)
Definition: fadvance.cpp:139
static void * gather_y_thread(NrnThread *nt)
Definition: occvode.cpp:518
void nrn_multisplit_nocap_v_part1(NrnThread *)
static N_Vector gather_vec
Definition: occvode.cpp:517
static void * gather_ydot_thread(NrnThread *nt)
Definition: occvode.cpp:549
void * nrn_multisplit_triang(NrnThread *)
void nrn_multisplit_adjust_rhs(NrnThread *)
void nrn_multisplit_nocap_v()
static void nonode_thread(neuron::model_sorted_token const &sorted_token, NrnThread &nt)
Definition: occvode.cpp:1002
double * sp13mat
static Symbol * vsym
Definition: occvode.cpp:42
void * nrn_multisplit_bksub(NrnThread *)
void nrn_multisplit_nocap_v_part2(NrnThread *)
static Cvode * nonode_cv
Definition: occvode.cpp:1001
static Cvode * gather_cv
Definition: occvode.cpp:516
void * nrn_multisplit_reduce_solve(NrnThread *)
void(* nrnmpi_v_transfer_)()
Definition: fadvance.cpp:138
void nrn_multisplit_nocap_v_part3(NrnThread *)
int use_sparse13
Definition: treeset.cpp:58
#define NODERHS(n)
Definition: section_fwd.hpp:55
#define nlayer
Definition: section_fwd.hpp:31
int spGetSize(char *eMatrix, BOOLEAN External)
Definition: spalloc.cpp:638
Symlist * hoc_built_in_symlist
Definition: symbol.cpp:28
BAMechList * next
Definition: cvodeobj.h:47
Wrapper for Memb_list in CVode related code.
Definition: cvodeobj.h:35
std::vector< Memb_list > ml
Definition: cvodeobj.h:41
int index
Definition: cvodeobj.h:42
CvMembList * next
Definition: cvodeobj.h:40
double * v
Definition: section_fwd.hpp:40
double ** _rhs
Definition: section_fwd.hpp:44
float tolerance
Definition: hocdec.h:100
void * hoc_mech
Definition: membfunc.h:87
nrn_ode_spec_t ode_spec
Definition: membfunc.h:77
int is_point
Definition: membfunc.h:86
nrn_ode_synonym_t ode_synonym
Definition: membfunc.h:79
nrn_ode_map_t ode_map
Definition: membfunc.h:76
nrn_ode_count_t ode_count
Definition: membfunc.h:75
nrn_cur_t current
Definition: membfunc.h:60
Pvmi singchan_
Definition: membfunc.h:80
nrn_ode_matsol_t ode_matsol
Definition: membfunc.h:78
nrn_state_t state
Definition: membfunc.h:62
Definition: section.h:105
int eqn_index_
Definition: section.h:187
auto v_handle()
Definition: section.h:153
Extnode * extnode
Definition: section.h:199
auto rhs_handle()
Definition: section.h:162
Represent main neuron object computed by single thread.
Definition: multicore.h:58
double _dt
Definition: multicore.h:60
int * _v_parent_index
Definition: multicore.h:89
double * node_a_storage()
Definition: multicore.cpp:1054
char * _sp13mat
Definition: multicore.h:94
int id
Definition: multicore.h:66
double cj
Definition: multicore.h:61
double * node_sav_rhs_storage()
Definition: multicore.cpp:1088
double * node_rhs_storage()
Definition: multicore.cpp:1074
double * node_area_storage()
Definition: multicore.cpp:1059
int end
Definition: multicore.h:65
double * node_d_storage()
Definition: multicore.cpp:1069
Node ** _v_node
Definition: multicore.h:90
double * node_voltage_storage()
Definition: multicore.cpp:1098
double * node_b_storage()
Definition: multicore.cpp:1064
double _t
Definition: multicore.h:59
Definition: section.h:231
Definition: model.h:47
HocSymExtension * extra
Definition: hocdec.h:131
Non-template stable handle to a generic value.