NEURON
nonlinz.cpp
#include <../../nrnconf.h>
#include <stdio.h>
#include <math.h>
#include <InterViews/resource.h>
#include "nonlinz.h"
#include "nrniv_mf.h"
#include "nrnoc2iv.h"
#include "nrnmpi.h"
#include "membfunc.h"

#include <Eigen/Sparse>
#include <complex>

using namespace std::complex_literals;

extern void v_setup_vectors();
extern int nrndae_extra_eqn_count();
extern Symlist* hoc_built_in_symlist;
extern void (*nrnthread_v_transfer_)(NrnThread*);

extern void pargap_jacobi_rhs(std::vector<std::complex<double>>&,
                              const std::vector<std::complex<double>>&);
extern void pargap_jacobi_setup(int mode);
class NonLinImpRep {
  public:
    NonLinImpRep();
    void delta(double);

    // Functions to fill the matrix
    void didv();
    void dids();
    void dsdv();
    void dsds();

    int gapsolve();

    // Matrix containing the nonlinear system to solve.
    Eigen::SparseMatrix<std::complex<double>> m_{};
    // The solver of the matrix using the LU decomposition method.
    Eigen::SparseLU<Eigen::SparseMatrix<std::complex<double>>> lu_{};
    int scnt_;  // structure_change
    int n_v_, n_ext_, n_lin_, n_ode_, neq_v_, neq_;
    std::vector<neuron::container::data_handle<double>> pv_, pvdot_;
    std::vector<std::complex<double>> v_;
    std::vector<double> deltavec_;  // just like cvode.atol*cvode.atolscale for ode's
    double delta_;                  // slightly more efficient and easier for v.
    void current(int, Memb_list*, int);
    void ode(int, Memb_list*);

    double omega_;
    int iloc_;  // current injection site of last solve
    float* vsymtol_{};
    int maxiter_{500};
};
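
// How the pieces below fit together (informal summary): compute() assembles in
// m_ a frequency-domain linearization of the model about its present operating
// point.  Rows/columns 0 .. neq_v_-1 are the voltage (plus extracellular and
// LinearMechanism) equations in fixed-step order; the remaining n_ode_
// rows/columns are the mechanism states mapped by delta().  didv() and dids()
// fill the voltage rows (di/dv and di/ds terms plus the jwC capacitive term);
// dsdv() and dsds() fill the state rows, whose diagonal carries the jw term so
// that each linearized state equation reads, roughly,
// jw*ds = (df/dv)*dv + (df/ds)*ds.  compute() LU-factors m_ and solve()
// back-substitutes for each new stimulus location.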

NonLinImp::~NonLinImp() {
    delete rep_;
}

double NonLinImp::transfer_amp(int curloc, int vloc) {
    if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) {
        hoc_execerror(
            "current injection site change not allowed with both gap junctions and nhost > 1", 0);
    }
    if (curloc != rep_->iloc_) {
        solve(curloc);
    }
    return std::abs(rep_->v_[vloc]);
}
double NonLinImp::input_amp(int curloc) {
    if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) {
        hoc_execerror("not allowed with both gap junctions and nhost>1", 0);
    }
    if (curloc != rep_->iloc_) {
        solve(curloc);
    }
    if (curloc < 0) {
        return 0.0;
    }
    return std::abs(rep_->v_[curloc]);
}
double NonLinImp::transfer_phase(int curloc, int vloc) {
    if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) {
        hoc_execerror(
            "current injection site change not allowed with both gap junctions and nhost > 1", 0);
    }
    if (curloc != rep_->iloc_) {
        solve(curloc);
    }
    return std::arg(rep_->v_[vloc]);
}
double NonLinImp::input_phase(int curloc) {
    if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) {
        hoc_execerror("not allowed with both gap junctions and nhost>1", 0);
    }
    if (curloc != rep_->iloc_) {
        solve(curloc);
    }
    if (curloc < 0) {
        return 0.0;
    }
    return std::arg(rep_->v_[curloc]);
}
double NonLinImp::ratio_amp(int clmploc, int vloc) {
    if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && clmploc != rep_->iloc_) {
        hoc_execerror("not allowed with both gap junctions and nhost>1", 0);
    }
    if (clmploc < 0) {
        return 0.0;
    }
    if (clmploc != rep_->iloc_) {
        solve(clmploc);
    }
    return std::abs(rep_->v_[vloc] * std::conj(rep_->v_[clmploc]) / std::norm(rep_->v_[clmploc]));
}
void NonLinImp::compute(double omega, double deltafac, int maxiter) {
    v_setup_vectors();
    nrn_rhs(nrn_ensure_model_data_are_sorted(), *nrn_threads);
    if (rep_ && rep_->scnt_ != structure_change_cnt) {
        delete rep_;
        rep_ = nullptr;
    }
    if (!rep_) {
        rep_ = new NonLinImpRep();
    }
    rep_->maxiter_ = maxiter;
    if (rep_->neq_ == 0) {
        return;
    }
    if (nrndae_extra_eqn_count() > 0) {
        hoc_execerror("Impedance calculation with LinearMechanism not implemented", 0);
    }
    if (nrn_threads->_ecell_memb_list) {
        hoc_execerror("Impedance calculation with extracellular not implemented", 0);
    }

    rep_->omega_ = 1000. * omega;
    rep_->delta(deltafac);

    rep_->m_.setZero();

    // fill matrix
    rep_->didv();
    rep_->dsds();
#if 1  // when 0 equivalent to standard method
    rep_->dids();
    rep_->dsdv();
#endif

    // Now that the matrix is filled we can compress it (mandatory for SparseLU)
    rep_->m_.makeCompressed();

    // Factorize the matrix so it is ready to solve
    rep_->lu_.compute(rep_->m_);
    switch (rep_->lu_.info()) {
    case Eigen::Success:
        // Everything fine
        break;
    case Eigen::NumericalIssue:
        hoc_execerror(
            "Eigen Sparse LU factorization failed with Eigen::NumericalIssue, please check the "
            "input matrix:",
            rep_->lu_.lastErrorMessage().c_str());
        break;
    case Eigen::NoConvergence:
        hoc_execerror(
            "Eigen Sparse LU factorization reports Eigen::NoConvergence after calling compute():",
            rep_->lu_.lastErrorMessage().c_str());
        break;
    case Eigen::InvalidInput:
        hoc_execerror(
            "Eigen Sparse LU factorization failed with Eigen::InvalidInput, the input matrix seems "
            "invalid:",
            rep_->lu_.lastErrorMessage().c_str());
        break;
    }

    rep_->iloc_ = -2;
}
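
// Typical use (informal sketch, not part of this file): the hoc Impedance
// class in impedanc.cpp drives these methods, e.g.
//
//     objref imp
//     imp = new Impedance()
//     soma imp.loc(0.5)        // current injection site
//     imp.compute(10, 1)       // 10 Hz; a nonzero second arg selects this
//                              // extended (dstate/dt) calculation
//     dend imp.transfer(0.5)   // |v(dend)/i(soma)|
//
// compute() factors the matrix once; the transfer/input queries end up in
// transfer_amp()/input_amp() etc., which call solve() only when the injection
// site changes.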

int NonLinImp::solve(int curloc) {
    int rval = 0;
    NrnThread* _nt = nrn_threads;
    if (!rep_) {
        hoc_execerror("Must call Impedance.compute first", 0);
    }
    if (rep_->iloc_ != curloc) {
        rep_->iloc_ = curloc;
        rep_->v_ = std::vector<std::complex<double>>(rep_->neq_);
        if (curloc >= 0) {
            rep_->v_[curloc] = 1.e2 / NODEAREA(_nt->_v_node[curloc]);
        }
        if (nrnthread_v_transfer_) {
            rval = rep_->gapsolve();
        } else {
            auto v = Eigen::Map<Eigen::Vector<std::complex<double>, Eigen::Dynamic>>(
                rep_->v_.data(), rep_->v_.size());
            v = rep_->lu_.solve(v);
        }
    }
    return rval;
}
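
// Note on the stimulus scaling in solve(): NODEAREA is in um2 and rhs current
// densities are in mA/cm2, so 1e2/NODEAREA is the density produced by a 1 nA
// oscillatory current injected at curloc; with v_ in mV the magnitudes
// reported by transfer_amp()/input_amp() are then impedances in megohms.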

// too bad it is not easy to reuse the cvode/daspk structures. Most of
// the mapping is already done there.

NonLinImpRep::NonLinImpRep() {
    NrnThread* _nt = nrn_threads;

    Symbol* vsym = hoc_table_lookup("v", hoc_built_in_symlist);
    if (vsym->extra) {
        vsymtol_ = &vsym->extra->tolerance;
    }
    // the equation order is the same order as for the fixed step method
    // for current balance. Remaining ode equation order is
    // defined by the memb_list.

    // how many equations
    n_v_ = _nt->end;
    n_ext_ = 0;
    if (_nt->_ecell_memb_list) {
        n_ext_ = _nt->_ecell_memb_list->nodecount * nlayer;
    }
    n_lin_ = nrndae_extra_eqn_count();
    n_ode_ = 0;
    for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
        Memb_list* ml = tml->ml;
        int i = tml->index;
        nrn_ode_count_t s = memb_func[i].ode_count;
        if (s) {
            int cnt = (*s)(i);
            n_ode_ += cnt * ml->nodecount;
        }
    }
    neq_v_ = n_v_ + n_ext_ + n_lin_;
    neq_ = neq_v_ + n_ode_;
    if (neq_ == 0) {
        return;
    }
    m_.resize(neq_, neq_);
    pv_.resize(neq_);
    pvdot_.resize(neq_);
    v_.resize(neq_);
    deltavec_.resize(neq_);

    for (int i = 0; i < n_v_; ++i) {
        // utilize nd->eqn_index in case of use_sparse13 later
        Node* nd = _nt->_v_node[i];
        pv_[i] = nd->v_handle();
        pvdot_[i] = nd->rhs_handle();
    }
    scnt_ = structure_change_cnt;
}
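
// Illustrative index layout built by the constructor and delta() (hypothetical
// model: 4 voltage nodes, no extracellular or LinearMechanism, one mechanism
// with 3 ode states instantiated on 2 of those nodes):
//
//     0..3   voltage equations (pv_[i] = nd->v_handle(), pvdot_[i] = rhs)
//     4..6   states of the instance on the first node (mapped via ode_map)
//     7..9   states of the instance on the second node
//
// giving neq_v_ = 4 and neq_ = 10; v_, pv_, pvdot_ and deltavec_ all share
// this indexing.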

void NonLinImpRep::delta(double deltafac) {  // also defines pv_,pvdot_ map for ode
    int i, nc, cnt, ieq;
    NrnThread* nt = nrn_threads;
    for (i = 0; i < neq_; ++i) {
        deltavec_[i] = deltafac;  // all v's wasted but no matter.
    }
    ieq = neq_v_;
    for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) {
        Memb_list* ml = tml->ml;
        i = tml->index;
        nc = ml->nodecount;
        if (nrn_ode_count_t s = memb_func[i].ode_count; s && (cnt = s(i)) > 0) {
            nrn_ode_map_t ode_map = memb_func[i].ode_map;
            for (auto j = 0; j < nc; ++j) {
                ode_map(ml->prop[j],
                        ieq,
                        pv_.data() + ieq,
                        pvdot_.data() + ieq,
                        deltavec_.data() + ieq,
                        i);
                ieq += cnt;
            }
        }
    }
    delta_ = (vsymtol_ && (*vsymtol_ != 0.)) ? *vsymtol_ : 1.;
    delta_ *= deltafac;
}

void NonLinImpRep::didv() {
    int i, j, ip;
    Node* nd;
    NrnThread* _nt = nrn_threads;
    // d2v/dv2 terms
    for (i = _nt->ncell; i < n_v_; ++i) {
        nd = _nt->_v_node[i];
        ip = _nt->_v_parent[i]->v_node_index;
        m_.coeffRef(ip, i) += NODEA(nd);
        m_.coeffRef(i, ip) += NODEB(nd);
        m_.coeffRef(i, i) -= NODEB(nd);
        m_.coeffRef(ip, ip) -= NODEA(nd);
    }
    // jwC term
    Memb_list* mlc = _nt->tml->ml;
    int n = mlc->nodecount;
    for (i = 0; i < n; ++i) {
        j = mlc->nodelist[i]->v_node_index;
        m_.coeffRef(j, j) += .001 * mlc->data(i, 0) * omega_ * 1i;
    }
    // di/dv terms
    // Because there may be several point processes of the same type
    // at the same location, we have to be careful to neither increment
    // nd->v multiple times nor count the rhs multiple times.
    // So we can't take advantage of vectorized point processes.
    // To avoid this we do each mechanism item separately.
    // We assume there is no interaction between
    // separate locations. Note that interactions such as gap junctions
    // would not be handled in any case without computing a full jacobian,
    // i.e. calling nrn_rhs varying every state one at a time (that would
    // give the d2v/dv2 terms as well), but the expense is unwarranted.
    for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
        i = tml->index;
        if (i == CAP) {
            continue;
        }
        if (!memb_func[i].current) {
            continue;
        }
        Memb_list* ml = tml->ml;
        for (j = 0; j < ml->nodecount; ++j) {
            Node* nd = ml->nodelist[j];
            double x2;
            // zero rhs
            // save v
            NODERHS(nd) = 0;
            double x1 = NODEV(nd);
            // v+dv
            nd->v() = x1 + delta_;
            current(i, ml, j);
            // save rhs
            // zero rhs
            // restore v
            x2 = NODERHS(nd);
            NODERHS(nd) = 0;
            nd->v() = x1;
            current(i, ml, j);
            // conductance
            // add to matrix
            m_.coeffRef(nd->v_node_index, nd->v_node_index) -= (x2 - NODERHS(nd)) / delta_;
        }
    }
}
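
// The di/dv entries above are one-sided finite differences: the rhs is zeroed
// before each call so current() leaves only that instance's contribution in
// NODERHS, and (x2 - NODERHS(nd))/delta_ approximates d(rhs)/dv at the
// operating point.  delta_ (set in NonLinImpRep::delta from the hoc tolerance
// of "v", if any, times deltafac) trades truncation error against round-off.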

void NonLinImpRep::dids() {
    // same strategy as didv but now the innermost loop is over
    // every state but just for that compartment
    // outer loop over ode mechanisms in same order as we created the pv_ map
    // so we can easily keep track of the equation index ieq
    int ieq, i, in, is, iis;
    NrnThread* nt = nrn_threads;
    ieq = neq_ - n_ode_;
    for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) {
        Memb_list* ml = tml->ml;
        i = tml->index;
        if (memb_func[i].ode_count && ml->nodecount) {
            int nc = ml->nodecount;
            nrn_ode_count_t s = memb_func[i].ode_count;
            int cnt = (*s)(i);
            if (memb_func[i].current) {
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    // zero rhs
                    NODERHS(nd) = 0;
                    // compute rhs
                    current(i, ml, in);
                    // save rhs
                    v_[in].imag(NODERHS(nd));
                    // each state incremented separately and restored
                    for (iis = 0; iis < cnt; ++iis) {
                        is = ieq + in * cnt + iis;
                        // save s
                        v_[is].real(*pv_[is]);
                        // increment s and zero rhs
                        *pv_[is] += deltavec_[is];
                        NODERHS(nd) = 0;
                        current(i, ml, in);
                        *pv_[is] = v_[is].real();  // restore s
                        double g = (NODERHS(nd) - v_[in].imag()) / deltavec_[is];
                        if (g != 0.) {
                            m_.coeffRef(nd->v_node_index, is) = -g;
                        }
                    }
                    // don't know if this is necessary but make sure the last
                    // call is with respect to the original states
                    current(i, ml, in);
                }
            }
            ieq += cnt * nc;
        }
    }
}

void NonLinImpRep::dsdv() {
    int ieq, i, in, is, iis;
    NrnThread* nt = nrn_threads;
    ieq = neq_ - n_ode_;
    for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) {
        Memb_list* ml = tml->ml;
        i = tml->index;
        if (memb_func[i].ode_count && ml->nodecount) {
            int nc = ml->nodecount;
            nrn_ode_count_t s = memb_func[i].ode_count;
            int cnt = (*s)(i);
            if (memb_func[i].current) {
                // zero rhs, save v
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                        *pvdot_[is] = 0.;
                    }
                    v_[in].real(NODEV(nd));
                }
                // increment v only once in case there are multiple
                // point processes at the same location
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    auto const v = nd->v();
                    if (v_[in].real() == v) {
                        nd->v() = v + delta_;
                    }
                }
                // compute rhs. this is the rhs(v+dv)
                ode(i, ml);
                // save rhs, restore v, and zero rhs
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                        v_[is].imag(*pvdot_[is]);
                        *pvdot_[is] = 0;
                    }
                    nd->v() = v_[in].real();
                }
                // compute the rhs(v)
                ode(i, ml);
                // fill the ds/dv elements
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                        double ds = (v_[is].imag() - *pvdot_[is]) / delta_;
                        if (ds != 0.) {
                            m_.coeffRef(is, nd->v_node_index) = -ds;
                        }
                    }
                }
            }
            ieq += cnt * nc;
        }
    }
}

void NonLinImpRep::dsds() {
    int ieq, i, in, is, iis, ks, kks;
    NrnThread* nt = nrn_threads;
    // jw term
    for (i = neq_v_; i < neq_; ++i) {
        m_.coeffRef(i, i) += omega_ * 1i;
    }
    ieq = neq_v_;
    for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) {
        Memb_list* ml = tml->ml;
        i = tml->index;
        if (memb_func[i].ode_count && ml->nodecount) {
            int nc = ml->nodecount;
            nrn_ode_count_t s = memb_func[i].ode_count;
            int cnt = (*s)(i);
            // zero rhs, save s
            for (in = 0; in < ml->nodecount; ++in) {
                for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                    *pvdot_[is] = 0.;
                    v_[is].real(*pv_[is]);
                }
            }
            // compute rhs. this is the rhs(s)
            ode(i, ml);
            // save rhs
            for (in = 0; in < ml->nodecount; ++in) {
                for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                    v_[is].imag(*pvdot_[is]);
                }
            }
            // iterate over the states
            for (kks = 0; kks < cnt; ++kks) {
                // zero rhs, increment s(ks)
                for (in = 0; in < ml->nodecount; ++in) {
                    ks = ieq + in * cnt + kks;
                    for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                        *pvdot_[is] = 0.;
                    }
                    *pv_[ks] += deltavec_[ks];
                }
                ode(i, ml);
                // store element and restore s
                // fill the ds/ds elements
                for (in = 0; in < ml->nodecount; ++in) {
                    Node* nd = ml->nodelist[in];
                    ks = ieq + in * cnt + kks;
                    for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) {
                        double ds = (*pvdot_[is] - v_[is].imag()) / deltavec_[is];
                        if (ds != 0.) {
                            m_.coeffRef(is, ks) = -ds;
                        }
                        *pv_[ks] = v_[ks].real();
                    }
                }
                // perhaps not necessary but ensures the last computation is with
                // the base states.
                ode(i, ml);
            }
            ieq += cnt * nc;
        }
    }
}
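
// In dsds() each state k is perturbed simultaneously in every instance of the
// mechanism (the kks loop), which is safe because instances at different
// locations are assumed not to interact; the stored entries are
// -d(ds_is/dt)/d(s_ks) from the same one-sided differences used in didv() and
// dids(), and the jw loop at the top makes each state diagonal jw - df/ds.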

void NonLinImpRep::current(int im, Memb_list* ml, int in) {  // assume there is in fact a current
                                                             // method
    // fake a 1 element memb_list
    Memb_list mfake{im};
    mfake.nodeindices = ml->nodeindices + in;
    mfake.nodelist = ml->nodelist + in;
    mfake.set_storage_offset(ml->get_storage_offset());
    mfake.pdata = ml->pdata + in;
    mfake.prop = ml->prop ? ml->prop + in : nullptr;
    mfake.nodecount = 1;
    mfake._thread = ml->_thread;
    memb_func[im].current(nrn_ensure_model_data_are_sorted(), nrn_threads, &mfake, im);
}

void NonLinImpRep::ode(int im, Memb_list* ml) {  // assume there is in fact an ode method
    memb_func[im].ode_spec(nrn_ensure_model_data_are_sorted(), nrn_threads, ml, im);
}

// This function computes a solution of a converging system by iteration.
// The value returned is the number of iterations needed to reach a precision of "tol" (1e-9).
int NonLinImpRep::gapsolve() {
    // On entry, v_ contains the complex b for A*x = b.
    // On return v_ contains the complex solution, x.
    // m_ is the factored matrix for the trees without gap junctions.
    // Jacobi method (easy for parallel)
    // A = D + R
    // D*x_(k+1) = (b - R*x_(k))
    // D is m_ (and includes the gap junction contribution to the diagonal)
    // R is the off diagonal matrix of the gaps.

    // one and only one stimulus
#if NRNMPI
    if (nrnmpi_numprocs > 1 && nrnmpi_int_sum_reduce(iloc_ >= 0 ? 1 : 0) != 1) {
        if (nrnmpi_myid == 0) {
            hoc_execerror("there can be one and only one impedance stimulus", 0);
        }
    }
#endif

    pargap_jacobi_setup(0);  // 0 means 'setup'

    std::vector<std::complex<double>> x_old(neq_);
    std::vector<std::complex<double>> x(neq_);
    std::vector<std::complex<double>> b(v_);

    // iterate till the change in x is small
    double tol = 1e-9;
    double delta{};

    int success = 0;
    int iter;

    for (iter = 1; iter <= maxiter_; ++iter) {
        if (neq_) {
            auto b_ = Eigen::Map<Eigen::Vector<std::complex<double>, Eigen::Dynamic>>(b.data(),
                                                                                      b.size());
            auto x_ = Eigen::Map<Eigen::Vector<std::complex<double>, Eigen::Dynamic>>(x.data(),
                                                                                      x.size());
            x_ = lu_.solve(b_);
        }

        // if any change in x > tol, then do another iteration.
        success = 1;
        delta = 0.0;
        // Take the difference between the previous result (x_old) and the current result (x).
        // If all differences are < tol stop the loop, otherwise continue to iterate.
        for (int i = 0; i < neq_; ++i) {
            auto diff = x[i] - x_old[i];
            double err = std::abs(diff.real()) + std::abs(diff.imag());
            if (err > tol) {
                success = 0;
            }
            delta = std::max(err, delta);
        }
#if NRNMPI
        if (nrnmpi_numprocs > 1) {
            success = nrnmpi_int_sum_reduce(success) / nrnmpi_numprocs;
        }
#endif
        if (success) {
            v_ = x;
            break;
        }

        // setup for next iteration
        x_old = x;
        b = v_;
        pargap_jacobi_rhs(b, x_old);
    }

    pargap_jacobi_setup(1);  // 1 means 'tear down'

    if (!success) {
        char buf[256];
        Sprintf(buf,
                "Impedance calculation did not converge in %d iterations. Max state change on the "
                "last iteration was %g (iterations stop when the change is below %g)\n",
                maxiter_,
                delta,
                tol);
        hoc_execerror(buf, nullptr);
    }
    return iter;
}
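
// Illustration of the Jacobi splitting used in gapsolve(), with made-up
// numbers (not taken from any model): for A = D + R with
//
//     D = [4 0; 0 5],  R = [0 1; 2 0],  b = [1; 2]
//
// the iteration x_(k+1) = D^-1 * (b - R*x_(k)) starting from x_0 = 0 gives
//
//     x_1 = [0.250, 0.400]
//     x_2 = [0.150, 0.300]
//     x_3 = [0.175, 0.340]
//     ...
//
// converging to the exact solution [1/6, 1/3].  Here D is the factored m_
// (tree equations plus the gap-junction diagonal) solved via lu_, and the
// R*x contribution is applied by pargap_jacobi_rhs(), which also exchanges
// the gap-junction coupling values across ranks when running in parallel.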