NEURON
netcvode.cpp
Go to the documentation of this file.
1 #include <../../nrnconf.h>
2 
3 // define to 0 if do not wish use_min_delay_ to ever be 1
4 #define USE_MIN_DELAY 1
5 
6 #include <nrnmpi.h>
7 #include "cabcode.h"
8 #include "classreg.h"
9 #include "code.h"
10 #include "parse.hpp"
11 #include "cvodeobj.h"
12 #include "hoclist.h"
13 #include "pool.hpp"
14 #include "tqueue.hpp"
15 #include "ocobserv.h"
16 #include "nrnneosm.h"
17 #include "datapath.h"
18 #include "objcmd.h"
19 #include "kssingle.h"
20 #include "ocnotify.h"
21 #include "utils/enumerate.h"
22 #if HAVE_IV
23 #include "ivoc.h"
24 #include "glinerec.h"
25 #include "ocjump.h"
26 #endif
27 #include "vrecitem.h"
28 #include "oclist.h"
29 #define PROFILE 0
30 #include "htlist.h"
31 #include "ivocvect.h"
32 #include "netcon.h"
33 #include "netcvode.h"
34 #include "nrn_ansi.h"
36 #include "nrniv_mf.h"
37 #include "nrnste.h"
38 #include "profile.h"
39 #include "utils/profile/profiler_interface.h"
40 #include "utils/formatting.hpp"
41 
#include <array>
#include <cerrno>
#include <cstdlib>
#include <ctime>
#include <regex>
#include <string>
#include <unordered_set>
#include <utility>
49 
50 typedef void (*ReceiveFunc)(Point_process*, double*, double);
51 
52 #define lvardtloop(i, j) \
53  for (i = 0; i < nrn_nthread; ++i) \
54  for (j = 0; j < p[i].nlcv_; ++j)
55 
56 #define NVI_SUCCESS 0
57 #define PP2NT(pp) ((NrnThread*) ((pp)->_vnt))
58 #define PP2t(pp) (PP2NT(pp)->_t)
59 // classical and when DiscreteEvent::deliver is already in the right thread
60 // via a future thread instance of NrnNetItem with its own tqe.
61 #define POINT_RECEIVE(type, tar, w, f) (*pnt_receive[type])(tar, w, f)
62 // when global tqe is managed by master thread and the correct thread
63 // needs to be fired to execute the NET_RECEIVE block.
64 //#define POINT_RECEIVE(type, tar, w, f) ns->point_receive(type, tar, w, f)
65 
66 #include "membfunc.h"
67 extern void single_event_run();
70 extern double t, dt;
71 extern void nrn_cvfun(double t, double* y, double* ydot);
72 extern void nrn_cleanup_presyn(PreSyn*);
73 #define nt_dt nrn_threads->_dt
74 #define nt_t nrn_threads->_t
75 extern short* nrn_is_artificial_; // should be bool but not using that type in c
76 extern short* nrn_artcell_qindex_;
78 void nrn_pending_selfqueue(double tt, NrnThread*);
79 static void all_pending_selfqueue(double tt);
80 static void* pending_selfqueue(NrnThread*);
81 extern int nrn_use_daspk_;
83 extern int nrn_modeltype();
85 extern std::vector<PlayRecord*>* net_cvode_instance_prl();
86 extern void nrn_use_busywait(int);
88 extern void (*nrnthread_v_transfer_)(NrnThread*);
89 Object* (*nrnpy_seg_from_sec_x)(Section*, double);
90 extern "C" void nrnthread_get_trajectory_requests(int tid,
91  int& bsize,
92  int& n_pr,
93  void**& vpr,
94  int& n_trajec,
95  int*& types,
96  int*& indices,
97  double**& pvars,
98  double**& varrays);
99 extern "C" void nrnthread_trajectory_values(int tid, int n_pr, void** vpr, double t);
100 extern "C" void nrnthread_trajectory_return(int tid,
101  int n_pr,
102  int bsize,
103  int vecsz,
104  void** vpr,
105  double t);
107 #if NRN_MUSIC
108 #include "nrnmusicapi.h"
109 #endif
110 
111 extern int nrn_fornetcon_cnt_;
112 extern int* nrn_fornetcon_index_;
113 extern int* nrn_fornetcon_type_;
114 
115 // for use in mod files
117  return nc->delay_;
118 }
119 void nrn_netcon_set_delay(NetCon* nc, double d) {
120  nc->delay_ = d;
121 }
122 int nrn_netcon_weight(NetCon* nc, double** pw) {
123  *pw = nc->weight_;
124  return nc->cnt_;
125 }
126 double nrn_event_queue_stats(double* stats) {
127 #if COLLECT_TQueue_STATISTICS
129  return (stats[0] - stats[2]);
130 #else
131  return -1.;
132 #endif
133 }
135  if (nc->src_) {
136  return nc->src_->threshold_;
137  } else {
138  return 0;
139  }
140 }
141 void nrn_netcon_set_thresh(NetCon* nc, double th) {
142  if (nc->src_) {
143  nc->src_->threshold_ = th;
144  }
145 }
146 
147 void nrn_netcon_event(NetCon* nc, double td) {
148  nc->chktar();
149  net_cvode_instance->event(td, nc, PP2NT(nc->target_));
150 }
151 
153  nc->chktar();
154  return nc->target_;
155 }
156 
157 int nrn_netcon_info(NetCon* nc, double** pw, Point_process** target, double** th, double** del) {
158  *target = (nc->target_) ? nc->target_ : (Point_process*) 0;
159  *th = (nc->src_) ? &(nc->src_->threshold_) : (double*) 0;
160  *del = &nc->delay_;
161  *pw = nc->weight_;
162  return nc->cnt_;
163 }
164 
166  return ps->dil_.size();
167 }
168 void* nrn_presyn_netcon(PreSyn* ps, int i) {
169  return ps->dil_[i];
170 }
171 
172 void ncs2nrn_integrate(double tstop);
173 extern void (*nrn_allthread_handle)();
176 }
177 
178 #if USENCS
179 // As a subroutine for NCS, NEURON simulates the cells of a subnet
180 // (usually a single cell) and NCS manages whatever intercellular
181 // NetCon connections were specified by the hoc cvode method
182 // Cvode.ncs_netcons(List nc_inlist, List nc_outlist) . The former list
183 // is normally in one to one correspondence with all the synapses. These
184 // NetCon objects have those synapses as targets and nullptr sources.
185 // The latter list is normally in one to one correspondence with
186 // the cells of the subnet. Those NetCon objects have sources of the form
187 // cell.axon.v(1) and nullptr targets.
188 // Note that the program that creates the hoc file knows
189 // how to tell NCS that a particular integer corresponds to a particular
190 // NetCon
191 
192 // NCS tells NEURON what time to integrate to. (Using any integration method)
193 // Probably the step size will be the minimum delay between output spike
194 // and its effect on any synapse.
195 // void ncs2nrn_integrate(double tstop);
196 
197 // NCS gives synaptic events to NEURON but tdeliver must be >= t
198 void ncs2nrn_inputevent(int netcon_input_index, double tdeliver);
199 
200 // and NEURON notifies NCS if cell ouputs a spike during integrate
201 extern void nrn2ncs_outputevent(int netcon_output_index, double firetime);
202 
203 // netcon_input_index specifies the NetCon object
204 // in the following list. The hoc file sets this up via cvode.ncs_netcons
205 static NetConPList* ncs2nrn_input_;
206 
207 // the netcon_ouput_index is specified in a NetCon field called
208 // nrn2ncs_output_index_
209 
210 // helper functions
211 void nrn2ncs_netcons();
212 #endif
213 #if NRNMPI
214 extern void nrn2ncs_outputevent(int netcon_output_index, double firetime);
215 #endif
216 
217 #if NRNMPI
218 extern void nrn_multisend_send(PreSyn*, double t);
219 extern bool use_multisend_;
220 extern void nrn_multisend_advance();
221 #endif
222 
224 
226 
227 #if NRNMPI
228 // for compressed info during spike exchange
229 extern bool nrn_use_localgid_;
230 extern void nrn_outputevent(unsigned char, double);
231 #endif
232 
234  double** argslist;
235  int size;
236 };
237 
238 static unsigned long deliver_cnt_, net_event_cnt_;
241 unsigned long NetCon::netcon_send_active_;
242 unsigned long NetCon::netcon_send_inactive_;
243 unsigned long NetCon::netcon_deliver_;
244 unsigned long SelfEvent::selfevent_send_;
245 unsigned long SelfEvent::selfevent_move_;
246 unsigned long SelfEvent::selfevent_deliver_;
247 unsigned long WatchCondition::watch_send_;
248 unsigned long WatchCondition::watch_deliver_;
249 unsigned long ConditionEvent::init_above_;
250 unsigned long ConditionEvent::send_qthresh_;
252 unsigned long ConditionEvent::abandon_;
253 unsigned long ConditionEvent::eq_abandon_;
256 unsigned long ConditionEvent::abandon_above_;
257 unsigned long ConditionEvent::abandon_below_;
258 unsigned long PreSyn::presyn_send_mindelay_;
259 unsigned long PreSyn::presyn_send_direct_;
260 unsigned long PreSyn::presyn_deliver_netcon_;
261 unsigned long PreSyn::presyn_deliver_direct_;
262 unsigned long PreSyn::presyn_deliver_ncsend_;
265 unsigned long HocEvent::hocevent_send_;
266 unsigned long HocEvent::hocevent_deliver_;
267 unsigned long KSSingle::singleevent_deliver_;
268 unsigned long KSSingle::singleevent_move_;
269 
271  return net_cvode_instance->event_queue(nt);
272 }
273 
274 std::vector<PreSyn*>* net_cvode_instance_psl() {
275  return net_cvode_instance->psl_;
276 }
277 
278 std::vector<PlayRecord*>* net_cvode_instance_prl() {
280 }
281 
282 void nrn_use_daspk(int b) {
283  if (net_cvode_instance) {
284  net_cvode_instance->use_daspk(b == 1);
285  }
286 }
287 
288 double NetCvode::eps_;
289 
290 static Node* node(Object*);
291 Node* node(Object* ob) {
292  return ob2pntproc(ob)->node;
293 }
294 
297 
299  PlayRecordEvent* pre = new PlayRecordEvent();
300  pre->plr_ = plr_;
301  return pre;
302 }
304  nc->event(tt, plr_->event(), nrn_threads + plr_->ith_);
305 }
307  fprintf(f, "%d\n", PlayRecordEventType);
308  fprintf(f, "%d %d\n", plr_->type(), net_cvode_instance->playrec_item(plr_));
309 }
310 
312  char buf[100];
313  int type, plr_index;
314  nrn_assert(fgets(buf, 100, f));
315  sscanf(buf, "%d %d\n", &type, &plr_index);
316  PlayRecord* plr = net_cvode_instance->playrec_item(plr_index);
317  assert(plr && plr->type() == type);
318  return plr->event()->savestate_save();
319 }
320 
322  return new PlayRecordSave(this);
323 }
324 
326  PlayRecordSave* prs = nullptr;
327  int type, index;
328  char buf[100];
329  nrn_assert(fgets(buf, 100, f));
330  nrn_assert(sscanf(buf, "%d %d\n", &type, &index) == 2);
332  assert(plr->type() == type);
333  switch (type) {
335  prs = new VecRecordDiscreteSave(plr);
336  break;
337  case VecRecordDtType:
338  prs = new VecRecordDtSave(plr);
339  break;
340  case VecPlayStepType:
341  prs = new VecPlayStepSave(plr);
342  break;
344  prs = new VecPlayContinuousSave(plr);
345  break;
346  default:
347  // whenever there is no subclass specific data to save
348  prs = new PlayRecordSave(plr);
349  break;
350  }
351  prs->savestate_read(f);
352  return prs;
353 }
354 
356  pr_ = plr;
358  assert(prl_index_ >= 0);
359 }
363 }
364 
366  if (!src_) {
367  hoc_execerror(hoc_object_name(obj_), "source is missing");
368  }
369 }
371  if (!target_) {
372  hoc_execerror(hoc_object_name(obj_), "target is missing");
373  }
374 }
375 
377  Object* ob = ((ObjObservable*) o)->object();
378  // printf("%s disconnect from ", hoc_object_name(obj_));
379  if (target_->ob == ob) {
380  // printf("target %s\n", hoc_object_name(target_->ob));
381  target_ = nullptr;
382  active_ = 0;
383  }
384 }
385 
386 #if 0 // way of printing dinfo
387 printf("NetCon from %s to ",
388 d->src_->osrc_ ? hoc_object_name(d->src_->osrc_) : secname(d->src_->ssrc_));
389 printf("%s ", hoc_object_name(d->target_->ob));
390 printf(" weight index=%d\n", l);
391 #endif
392 
394  public:
396  double max_;
397  double amax_;
398 };
399 
402  double t_;
403 };
404 
405 typedef std::vector<WatchCondition*> WatchList;
407 typedef std::vector<TQItem*> TQList;
408 
409 // allows marshalling of all items in the event queue that need to be
410 // removed to avoid duplicates due to frecord_init after finitialize
412 
414 
415 static PreSyn* unused_presyn; // holds the NetCons with no source
416 
417 static double nc_preloc(void* v) { // user must pop section stack after call
418  NetCon* d = (NetCon*) v;
419  Section* s = nullptr;
420  if (d->src_) {
421  s = d->src_->ssrc_;
422  }
423  if (s) {
424  nrn_pushsec(s);
425  // This is a special handle, not just a pointer.
426  auto const& v = d->src_->thvar_;
427  nrn_parent_info(s); // make sure parentnode exists
428  // there is no efficient search for the location of
429  // an arbitrary variable. Search only for v at 0 - 1.
430  // Otherwise return .5 .
431  if (v == s->parentnode->v_handle()) {
432  return nrn_arc_position(s, s->parentnode);
433  }
434  for (int i = 0; i < s->nnode; ++i) {
435  if (v == s->pnode[i]->v_handle()) {
436  return nrn_arc_position(s, s->pnode[i]);
437  }
438  }
439  return -2.; // not voltage
440  } else {
441  return -1.;
442  }
443 }
444 
445 static Object** nc_preseg(void* v) { // user must pop section stack after call
446  NetCon* d = (NetCon*) v;
447  Section* s = NULL;
448  Object* obj = NULL;
449  double x = -1.;
450  if (d->src_) {
451  s = d->src_->ssrc_;
452  }
453  if (s && nrnpy_seg_from_sec_x) {
454  auto const& v = d->src_->thvar_;
455  nrn_parent_info(s); // make sure parentnode exists
456  // there is no efficient search for the location of
457  // an arbitrary variable. Search only for v at 0 - 1.
458  // Otherwise return NULL.
459  if (v == s->parentnode->v_handle()) {
460  x = nrn_arc_position(s, s->parentnode);
461  }
462  for (int i = 0; i < s->nnode; ++i) {
463  if (v == s->pnode[i]->v_handle()) {
464  x = nrn_arc_position(s, s->pnode[i]);
465  continue;
466  }
467  }
468  // perhaps should search for v
469  if (x >= 0) {
470  obj = (*nrnpy_seg_from_sec_x)(s, x);
471  --obj->refcount;
472  }
473  }
474  return hoc_temp_objptr(obj);
475 }
476 
477 static double nc_postloc(void* v) { // user must pop section stack after call
478  NetCon* d = (NetCon*) v;
479  if (d->target_ && d->target_->sec) {
480  nrn_pushsec(d->target_->sec);
481  return nrn_arc_position(d->target_->sec, d->target_->node);
482  } else {
483  return -1.;
484  }
485 }
486 
487 static Object** nc_postseg(void* v) { // user must pop section stack after call
488  NetCon* d = (NetCon*) v;
489  Object* obj = NULL;
490  if (d->target_ && d->target_->sec && nrnpy_seg_from_sec_x) {
491  double x = nrn_arc_position(d->target_->sec, d->target_->node);
492  obj = (*nrnpy_seg_from_sec_x)(d->target_->sec, x);
493  --obj->refcount;
494  }
495  return hoc_temp_objptr(obj);
496 }
497 
498 static Object** nc_syn(void* v) {
499  NetCon* d = (NetCon*) v;
500  Object* ob = nullptr;
501  if (d->target_) {
502  ob = d->target_->ob;
503  }
504  return hoc_temp_objptr(ob);
505 }
506 
507 static Object** nc_pre(void* v) {
508  NetCon* d = (NetCon*) v;
509  Object* ob = nullptr;
510  if (d->src_) {
511  ob = d->src_->osrc_;
512  }
513  return hoc_temp_objptr(ob);
514 }
515 
516 static Object** newoclist(int i, OcList*& o) {
517  Object** po;
518  if (ifarg(i) && hoc_is_object_arg(i)) {
519  po = hoc_objgetarg(i);
520  check_obj_type(*po, "List");
521  o = (OcList*) ((*po)->u.this_pointer);
522  } else {
523  o = new OcList();
524  o->ref();
525  Symbol* sl = hoc_lookup("List");
526  po = hoc_temp_objvar(sl, o);
527  }
528  return po;
529 }
530 
531 static Object** nc_prelist(void* v) {
532  NetCon* d = (NetCon*) v;
533  OcList* o;
534  Object** po = newoclist(1, o);
535  if (d->src_) {
536  for (const auto& nc: d->src_->dil_) {
537  if (nc->obj_) {
538  o->append(nc->obj_);
539  }
540  }
541  }
542  return po;
543 }
544 
545 static Object** nc_synlist(void* v) {
546  NetCon* d = (NetCon*) v;
547  OcList* o;
548  Object** po = newoclist(1, o);
550  for (const PreSyn* ps: *net_cvode_instance->psl_) {
551  for (const auto& nc: ps->dil_) {
552  if (nc->obj_ && nc->target_ == d->target_) {
553  o->append(nc->obj_);
554  }
555  }
556  }
557  return po;
558 }
559 
560 static Object** nc_postcelllist(void* v) {
561  NetCon* d = (NetCon*) v;
562  OcList* o;
563  Object** po = newoclist(1, o);
564  Object* cell = nullptr;
565  if (d->target_ && d->target_->sec) {
566  cell = nrn_sec2cell(d->target_->sec);
567  }
568  if (cell && net_cvode_instance->psl_)
569  for (const PreSyn* ps: *net_cvode_instance->psl_) {
570  for (const auto& nc: ps->dil_) {
571  if (nc->obj_ && nc->target_ && nrn_sec2cell_equals(nc->target_->sec, cell)) {
572  o->append(nc->obj_);
573  }
574  }
575  }
576  return po;
577 }
578 
579 static Object** nc_precelllist(void* v) {
580  NetCon* d = (NetCon*) v;
581  OcList* o;
582  Object** po = newoclist(1, o);
583  Object* cell = nullptr;
584  if (d->src_ && d->src_->ssrc_) {
585  cell = nrn_sec2cell(d->src_->ssrc_);
586  }
587  if (cell && net_cvode_instance->psl_)
588  for (PreSyn* ps: *net_cvode_instance->psl_) {
589  for (const auto& nc: ps->dil_) {
590  if (nc->obj_ && nc->src_ && ps->ssrc_ && nrn_sec2cell_equals(ps->ssrc_, cell)) {
591  o->append(nc->obj_);
592  }
593  }
594  }
595  return po;
596 }
597 
598 static Object** nc_precell(void* v) {
599  NetCon* d = (NetCon*) v;
600  if (d->src_ && d->src_->ssrc_) {
601  return hoc_temp_objptr(nrn_sec2cell(d->src_->ssrc_));
602  } else {
603  return hoc_temp_objptr(0);
604  }
605 }
606 
607 static Object** nc_postcell(void* v) {
608  NetCon* d = (NetCon*) v;
609  Object* ob = nullptr;
610  if (d->target_ && d->target_->sec) {
611  ob = nrn_sec2cell(d->target_->sec);
612  }
613  return hoc_temp_objptr(ob);
614 }
615 
616 static double nc_setpost(void* v) {
617  NetCon* d = (NetCon*) v;
618  Object* otar = nullptr;
619  if (ifarg(1)) {
620  otar = *hoc_objgetarg(1);
621  }
622  if (otar && !is_point_process(otar)) {
623  hoc_execerror("argument must be a point process or NULLobject", 0);
624  }
625  Point_process* tar = nullptr;
626  if (otar) {
627  tar = ob2pntproc(otar);
628  }
629  if (d->target_ && d->target_ != tar) {
630 #if DISCRETE_EVENT_OBSERVER
632 #endif
633  d->target_ = nullptr;
634  }
635  int cnt = 1;
636  if (tar) {
637  cnt = pnt_receive_size[tar->prop->_type];
638  d->target_ = tar;
639 #if DISCRETE_EVENT_OBSERVER
640  ObjObservable::Attach(otar, d);
641 #endif
642  } else {
643  d->active_ = false;
644  }
645  if (d->cnt_ != cnt) {
646  d->cnt_ = cnt;
647  delete[] std::exchange(d->weight_, new double[d->cnt_]);
648  }
649  return 0.;
650 }
651 
652 static double nc_valid(void* v) {
653  NetCon* d = (NetCon*) v;
655  if (d->src_ && d->target_) {
656  return 1.;
657  }
658  return 0.;
659 }
660 
661 static double nc_active(void* v) {
662  NetCon* d = (NetCon*) v;
663  bool a = d->active_;
664  if (d->target_ && ifarg(1)) {
665  d->active_ = bool(chkarg(1, 0, 1));
666  }
668  return double(a);
669 }
670 
671 // for threads, revised net_send to use absolute time (in the
672 // mod file we add the thread time when we call it).
673 // And we can no longer check with respect to minimum time in chkarg
674 static double nc_event(void* v) {
675  NetCon* d = (NetCon*) v;
676  double td = chkarg(1, -1e20, 1e20);
677  if (d->active_ == 0) {
678  return 0.0;
679  }
680  d->chktar();
681  NrnThread* nt = PP2NT(d->target_);
682  const auto nrn_thread_not_initialized_for_nc_target = nt && nt >= nrn_threads &&
683  nt < (nrn_threads + nrn_nthread);
684  nrn_assert(nrn_thread_not_initialized_for_nc_target);
685  if (ifarg(2)) {
686  double flag = *getarg(2);
687  Point_process* pnt = d->target_;
688  int type = pnt->prop->_type;
689  if (!nrn_is_artificial_[type]) {
690  hoc_execerror("Can only send fake self-events to ARTIFICIAL_CELLs", 0);
691  }
692  auto* pq = pnt->prop->dparam + nrn_artcell_qindex_[type];
693  nrn_net_send(pq, d->weight_, pnt, td, flag);
694  } else {
695  net_cvode_instance->event(td, d, PP2NT(d->target_));
696  }
697  return (double) d->active_;
698 }
699 static double nc_record(void* v) {
700  NetCon* d = (NetCon*) v;
701  d->chksrc();
702  if (ifarg(1)) {
703  if (ifarg(2)) {
704  int recid = d->obj_->index;
705  if (ifarg(3)) {
706  recid = (int) (*getarg(3));
707  }
708  d->src_->record(vector_arg(1), vector_arg(2), recid);
709  } else if (hoc_is_str_arg(1)) {
710  d->src_->record_stmt(gargstr(1));
711  } else if (is_vector_arg(1)) {
712  d->src_->record(vector_arg(1));
713  } else {
714  d->src_->record_stmt(*hoc_objgetarg(1));
715  }
716  } else {
717  d->src_->record(nullptr);
718  }
719  return 0;
720 }
721 
722 static double nc_srcgid(void* v) {
723  NetCon* d = (NetCon*) v;
725  if (d->src_) {
726  return (double) d->src_->gid_;
727  }
728  return -1.;
729 }
730 
731 static Object** nc_get_recordvec(void* v) {
732  NetCon* d = (NetCon*) v;
733  Object* ob = nullptr;
734  if (d->src_ && d->src_->tvec_) {
735  ob = d->src_->tvec_->obj_;
736  }
737  return hoc_temp_objptr(ob);
738 }
739 
740 static double nc_wcnt(void* v) {
741  NetCon* d = (NetCon*) v;
742  return d->cnt_;
743 }
744 
745 static Member_func members[] = {{"active", nc_active},
746  {"valid", nc_valid},
747  {"preloc", nc_preloc},
748  {"postloc", nc_postloc},
749  {"setpost", nc_setpost},
750  {"event", nc_event},
751  {"record", nc_record},
752  {"srcgid", nc_srcgid},
753  {"wcnt", nc_wcnt},
754  {"delay", 0}, // these four changed below
755  {"weight", 0},
756  {"threshold", 0},
757  {"x", 0},
758  {nullptr, nullptr}};
759 
760 static Member_ret_obj_func omembers[] = {{"syn", nc_syn},
761  {"pre", nc_pre},
762  {"precell", nc_precell},
763  {"postcell", nc_postcell},
764  {"preseg", nc_preseg},
765  {"postseg", nc_postseg},
766  {"prelist", nc_prelist},
767  {"synlist", nc_synlist},
768  {"precelllist", nc_precelllist},
769  {"postcelllist", nc_postcelllist},
770  {"get_recordvec", nc_get_recordvec},
771  {nullptr, nullptr}};
772 
773 static void steer_val(void* v) {
774  NetCon* d = (NetCon*) v;
775  Symbol* s = hoc_spop();
776  if (strcmp(s->name, "delay") == 0) {
777  d->chksrc();
778  hoc_pushpx(&d->delay_);
779  d->src_->use_min_delay_ = 0;
780  } else if (strcmp(s->name, "weight") == 0) {
781  int index = 0;
782  if (hoc_stack_type_is_ndim()) {
783  s->arayinfo->sub[0] = d->cnt_;
784  index = hoc_araypt(s, SYMBOL);
785  }
786  hoc_pushpx(d->weight_ + index);
787  } else if (strcmp(s->name, "x") == 0) {
788  static double dummy = 0.;
789  d->chksrc();
790  if (d->src_->thvar_) {
791  hoc_push(d->src_->thvar_);
792  } else {
793  dummy = 0.;
794  hoc_pushpx(&dummy);
795  }
796  } else if (strcmp(s->name, "threshold") == 0) {
797  d->chksrc();
798  hoc_pushpx(&d->src_->threshold_);
799  }
800 }
801 
802 static void* cons(Object* o) {
803  NetCon* d;
804  if (!net_cvode_instance) {
805  hoc_execerror("CVode instance must exist", 0);
806  }
807  // source, target, threshold, delay, magnitude
808  Object *osrc = nullptr, *otar;
809  Section* srcsec = nullptr;
811  if (hoc_is_object_arg(1)) {
812  osrc = *hoc_objgetarg(1);
813  if (osrc && !is_point_process(osrc)) {
814  hoc_execerror("if arg 1 is an object it must be a point process or NULLObject", 0);
815  }
816  } else {
817  psrc = hoc_hgetarg<double>(1);
818  srcsec = chk_access();
819  }
820  otar = *hoc_objgetarg(2);
821  if (otar && !is_point_process(otar)) {
822  hoc_execerror("arg 2 must be a point process or NULLobject", 0);
823  }
824  double thresh = -1.e9; // sentinal value. default is 10 if new PreSyn
825  double delay = 1.;
826  double weight = 0.;
827 
828  if (ifarg(3)) {
829  thresh = *getarg(3);
830  delay = chkarg(4, 0, 1e15);
831  weight = *getarg(5);
832  }
833  d = net_cvode_instance->install_deliver(psrc, srcsec, osrc, otar, thresh, delay, weight);
834  d->obj_ = o;
835  return (void*) d;
836 }
837 
838 static void destruct(void* v) {
839  delete static_cast<NetCon*>(v);
840 }
841 
842 void NetCon_reg() {
843  class2oc("NetCon", cons, destruct, members, omembers, nullptr);
844  Symbol* nc = hoc_lookup("NetCon");
845  nc->u.ctemplate->steer = steer_val;
846  Symbol* s;
847  s = hoc_table_lookup("delay", nc->u.ctemplate->symtable);
848  s->type = VAR;
849  s->arayinfo = nullptr;
850  s = hoc_table_lookup("x", nc->u.ctemplate->symtable);
851  s->type = VAR;
852  s->arayinfo = nullptr;
853  s = hoc_table_lookup("threshold", nc->u.ctemplate->symtable);
854  s->type = VAR;
855  s->arayinfo = nullptr;
856  s = hoc_table_lookup("weight", nc->u.ctemplate->symtable);
857  s->type = VAR;
858  s->arayinfo = new Arrayinfo;
859  s->arayinfo->refcount = 1;
860  s->arayinfo->a_varn = nullptr;
861  s->arayinfo->nsub = 1;
862  s->arayinfo->sub[0] = 1;
863 }
864 
// Translate a hoc-style pattern into regex syntax: '<'/'>' become
// '['/']' (character-class delimiters) while literal '[' and ']' are
// backslash-escaped. Returns a pointer to storage owned by this function
// that is overwritten by the next call (not thread safe).
static char* escape_bracket(const char* s) {
    // The previous implementation wrote into a fixed 256-byte buffer with
    // no bounds check; each '['/']' expands to two bytes, so patterns of
    // ~128+ characters overflowed. A std::string grows as needed.
    static std::string b;
    b.clear();
    for (const char* p = s; *p; ++p) {
        switch (*p) {
        case '<':
            b += '[';
            break;
        case '>':
            b += ']';
            break;
        case '[':
        case ']':
            b += '\\';
            b += *p;
            break;
        default:
            b += *p;
            break;
        }
    }
    return b.data();  // non-const data() is C++17
}
893 
894 static std::regex get_regex(int id) {
895  std::string s(gargstr(id));
896  if (s.empty()) {
897  return std::regex(".*");
898  } else {
899  try {
900  return std::regex(escape_bracket(s.data()));
901  } catch (std::regex_error&) {
902  hoc_execerror_fmt("Argument {} is not a valid regular expression '{}'.", id, s);
903  }
904  }
905 }
906 
908  // interface to cvode.netconlist(precell, postcell, target, [list])
909  OcList* o;
910 
911  Object** po = newoclist(4, o);
912 
913  Object *opre = nullptr, *opost = nullptr, *otar = nullptr;
914  std::regex spre, spost, star;
915 
916  if (hoc_is_object_arg(1)) {
917  opre = *hoc_objgetarg(1);
918  } else {
919  spre = get_regex(1);
920  }
921  if (hoc_is_object_arg(2)) {
922  opost = *hoc_objgetarg(2);
923  } else {
924  spost = get_regex(2);
925  }
926  if (hoc_is_object_arg(3)) {
927  otar = *hoc_objgetarg(3);
928  } else {
929  star = get_regex(3);
930  }
931 
932  if (psl_) {
933  for (PreSyn* ps: *psl_) {
934  bool b = false;
935  if (ps->ssrc_) {
936  Object* precell = nrn_sec2cell(ps->ssrc_);
937  if (opre) {
938  b = precell == opre;
939  } else {
940  std::string s(hoc_object_name(precell));
941  b = std::regex_search(s, spre);
942  }
943  } else if (ps->osrc_) {
944  Object* presyn = ps->osrc_;
945  if (opre) {
946  b = presyn == opre;
947  } else {
948  std::string s(hoc_object_name(presyn));
949  b = std::regex_search(s, spre);
950  }
951  }
952  if (b) {
953  for (const auto& d: ps->dil_) {
954  Object* postcell = nullptr;
955  Object* target = nullptr;
956  if (d->target_) {
957  Point_process* p = d->target_;
958  target = p->ob;
959  if (p->sec) {
960  postcell = nrn_sec2cell(p->sec);
961  }
962  }
963  if (opost) {
964  b = postcell == opost;
965  } else {
966  std::string s(hoc_object_name(postcell));
967  b = std::regex_search(s, spost);
968  }
969  if (b) {
970  if (otar) {
971  b = target == otar;
972  } else {
973  std::string s(hoc_object_name(target));
974  b = std::regex_search(s, star);
975  }
976  if (b) {
977  o->append(d->obj_);
978  }
979  }
980  }
981  }
982  }
983  }
984  return po;
985 }
986 
987 #define ITE_SIZE 10
989  tpool_ = new TQItemPool(1000, 1);
990  // tqe_ accessed only by thread i so no locking
991  tqe_ = new TQueue(tpool_, 0);
992  sepool_ = new SelfEventPool(1000, 1);
993  selfqueue_ = nullptr;
994  psl_thr_ = nullptr;
995  tq_ = nullptr;
996  lcv_ = nullptr;
998  ite_cnt_ = 0;
1000  immediate_deliver_ = -1e100;
1002  nlcv_ = 0;
1003  MUTCONSTRUCT(1)
1004 }
1005 
1007  delete[] std::exchange(inter_thread_events_, nullptr);
1008  if (psl_thr_) {
1010  }
1011  delete std::exchange(tq_, nullptr);
1012  delete std::exchange(tqe_, nullptr);
1013  delete std::exchange(tpool_, nullptr);
1014  if (selfqueue_) {
1016  delete std::exchange(selfqueue_, nullptr);
1017  }
1018  delete std::exchange(sepool_, nullptr);
1019  if (lcv_) {
1020  for (int i = 0; i < nlcv_; ++i) {
1022  }
1023  delete[] std::exchange(lcv_, nullptr);
1024  }
1025  MUTDESTRUCT
1026 }
1027 
1029  // bin_event(td, db, nt);
1030  MUTLOCK
1031 #if PRINT_EVENT
1033  Printf("interthread send td=%.15g DE type=%d thread=%d target=%d %s\n",
1034  td,
1035  db->type(),
1036  nt->id,
1037  (db->type() == 2) ? PP2NT(((NetCon*) db)->target_)->id : -1,
1038  (db->type() == 2) ? hoc_object_name(((NetCon*) (db))->target_->ob) : "?");
1039  }
1040 #endif
1041  if (ite_cnt_ >= ite_size_) {
1042  ite_size_ *= 2;
1044  for (int i = 0; i < ite_cnt_; ++i) {
1045  in[i].de_ = inter_thread_events_[i].de_;
1046  in[i].t_ = inter_thread_events_[i].t_;
1047  }
1048  delete[] std::exchange(inter_thread_events_, in);
1049  }
1051  ite.de_ = db;
1052  ite.t_ = td;
1053  // race since each NetCvodeThreadData has its own lock and enqueueing_
1054  // is a NetCvode instance variable. enqueuing_ is not logically
1055  // needed but can avoid a nrn_multithread_job call in allthread_least_t
1056  // which does nothing if there are no interthread events.
1057  // int& b = net_cvode_instance->enqueueing_;
1058  // if (!b) { b = 1; }
1059  MUTUNLOCK
1060  // have decided to lock net_cvode_instance and set it
1062 }
1063 
1065  int i;
1066  MUTLOCK
1067  for (i = 0; i < ite_cnt_; ++i) {
1069 #if PRINT_EVENT
1071  Printf("interthread enqueue td=%.15g DE type=%d thread=%d target=%d %s\n",
1072  ite.t_,
1073  ite.de_->type(),
1074  nt->id,
1075  (ite.de_->type() == 2) ? PP2NT(((NetCon*) (ite.de_))->target_)->id : -1,
1076  (ite.de_->type() == 2) ? hoc_object_name(((NetCon*) (ite.de_))->target_->ob)
1077  : "?");
1078  }
1079 #endif
1080  nc->bin_event(ite.t_, ite.de_, nt);
1081  }
1082  ite_cnt_ = 0;
1083  MUTUNLOCK
1084 }
1085 
1086 NetCvode::NetCvode(bool single) {
1087  use_long_double_ = 0;
1088  empty_ = true; // no equations (only artificial cells).
1089  MUTCONSTRUCT(0);
1090  maxorder_ = 5;
1091  maxstep_ = 1e9;
1092  minstep_ = 0.;
1093  rtol_ = 0.;
1094  atol_ = 1e-3;
1095  jacobian_ = 0;
1096  stiff_ = 2;
1097  mst_ = nullptr;
1098  condition_order_ = 1;
1099  null_event_ = new DiscreteEvent();
1100  eps_ = 100. * UNIT_ROUNDOFF;
1101  print_event_ = 0;
1102  nrn_use_fifo_queue_ = false;
1103  single_ = single;
1104  nrn_use_daspk_ = false;
1105  gcv_ = nullptr;
1107  pcnt_ = 0;
1108  p = nullptr;
1109  p_construct(1);
1110  // eventually these should not have to be thread safe
1111  pst_ = nullptr;
1112  pst_cnt_ = 0;
1113  psl_ = nullptr;
1114  // for parallel network simulations hardly any presyns have
1115  // a threshold and it can be very inefficient to check the entire
1116  // presyn list for thresholds during the fixed step method.
1117  // So keep a threshold list.
1118  unused_presyn = nullptr;
1119  structure_change_cnt_ = -1;
1120  fornetcon_change_cnt_ = -2;
1121  matrix_change_cnt_ = -1;
1122  playrec_change_cnt_ = 0;
1123  alloc_list();
1124  prl_ = new std::vector<PlayRecord*>();
1125  prl_->reserve(10);
1126  fixed_play_ = new std::vector<PlayRecord*>();
1127  fixed_play_->reserve(10);
1128  fixed_record_ = new std::vector<PlayRecord*>();
1129  fixed_record_->reserve(10);
1130  vec_event_store_ = nullptr;
1131  if (!record_init_items_) {
1132  record_init_items_ = new TQList();
1133  }
1134  // re_init(t);
1135 }
1136 
1138  MUTDESTRUCT
1139  if (net_cvode_instance == (NetCvode*) this) {
1140  net_cvode_instance = nullptr;
1141  }
1142  delete_list();
1143  p_construct(0);
1144  // and should also iterate and delete the MaxStateItem
1145  delete std::exchange(mst_, nullptr);
1146  if (psl_) {
1147  for (PreSyn* ps: *psl_) {
1148  std::for_each(ps->dil_.rbegin(), ps->dil_.rend(), [](NetCon*& d) {
1149  d->src_ = nullptr;
1150  delete std::exchange(d, nullptr);
1151  });
1152  delete ps;
1153  }
1154  delete std::exchange(psl_, nullptr);
1155  }
1156  delete std::exchange(pst_, nullptr);
1157  delete std::exchange(fixed_play_, nullptr);
1158  delete std::exchange(fixed_record_, nullptr);
1159  for (auto& item: *prl_) {
1160  delete item;
1161  }
1162  delete std::exchange(prl_, nullptr);
1163  unused_presyn = nullptr;
1164  wl_list_.clear();
1165  delete std::exchange(allthread_hocevents_, nullptr);
1166 }
1167 
1169  return !single_;
1170 }
1171 
1173  return (cvode_active_ && localstep());
1174 }
1175 
1176 void NetCvode::localstep(bool b) {
1177  // due to possibility of gap junctions and until the complete matrix
1178  // is analysed for block structure localstep and daspk are incompatible
1179  b = (nrn_modeltype() == 1 ? b : false); // localstep doesn't work yet with DAE's
1180 
1181  if (!b != single_) {
1182  delete_list();
1183  single_ = !b;
1185  use_sparse13 = 0;
1186  nrn_use_daspk_ = false;
1187  re_init(nt_t);
1188  }
1189 }
1190 
1192  return (gcv_ != 0) ? gcv_->use_daspk_ : false;
1193 }
1194 
1195 void NetCvode::use_daspk(bool b) {
1196  b = (nrn_modeltype() == 2 ? true : b); // not optional if algebraic
1197  if (gcv_ && b != gcv_->use_daspk_) {
1198  delete_list();
1199  single_ = (b ? true : single_);
1201  nrn_use_daspk_ = b;
1202  // printf("NetCvode::use_daspk nrn_use_daspk=%d\n", nrn_use_daspk_);
1203  if (use_sparse13 != nrn_use_daspk_) {
1205  diam_changed = 1;
1206  }
1207  re_init(nt_t);
1208  }
1209 }
1210 
1211 // Append new BAMechList item to arg
1212 BAMechList::BAMechList(BAMechList** first) { // preserve the list order
1213  next = nullptr;
1214  BAMechList* last;
1215  if (*first) {
1216  for (last = *first; last->next; last = last->next) {
1217  }
1218  last->next = this;
1219  } else {
1220  *first = this;
1221  }
1222 }
1223 
1225  BAMechList *b, *bn;
1226  for (b = *first; b; b = bn) {
1227  bn = b->next;
1228  delete b;
1229  }
1230  *first = nullptr;
1231 }
1232 
1234  cv_memb_list_ = nullptr;
1235  cmlcap_ = nullptr;
1236  cmlext_ = nullptr;
1237  no_cap_memb_ = nullptr;
1238  before_breakpoint_ = nullptr;
1239  after_solve_ = nullptr;
1240  before_step_ = nullptr;
1241  psl_th_ = nullptr;
1242  watch_list_ = nullptr;
1243  nvoffset_ = 0;
1244  nvsize_ = 0;
1245  neq_v_ = nonvint_offset_ = 0;
1247  record_ = nullptr;
1248  play_ = nullptr;
1249 }
1251  if (no_cap_memb_) {
1253  }
1254  if (watch_list_) {
1256  delete watch_list_;
1257  }
1258 }
1259 
1261  int i, j;
1262  wl_list_.clear();
1263  wl_list_.resize(nrn_nthread);
1264  if (gcv_) {
1265  delete_list(gcv_);
1266  delete std::exchange(gcv_, nullptr);
1267  }
1268  for (i = 0; i < pcnt_; ++i) {
1269  NetCvodeThreadData& d = p[i];
1270  if (d.lcv_) {
1271  for (j = 0; j < d.nlcv_; ++j) {
1272  delete_list(d.lcv_ + j);
1273  }
1274  delete[] std::exchange(d.lcv_, nullptr);
1275  d.nlcv_ = 0;
1276  }
1277  delete std::exchange(d.tq_, nullptr);
1278  }
1279  empty_ = true;
1280 }
1281 
1283  del_cv_memb_list(cvode);
1284  Cvode& cv = *cvode;
1285  cv.delete_prl();
1286  delete[] std::exchange(cv.ctd_, nullptr);
1287 }
1288 
1290  if (gcv_) {
1292  }
1293  for (int i = 0; i < pcnt_; ++i) {
1294  NetCvodeThreadData& d = p[i];
1295  for (int j = 0; j < d.nlcv_; ++j) {
1296  del_cv_memb_list(d.lcv_ + j);
1297  }
1298  }
1299 }
1301  if (!cvode) {
1302  return;
1303  }
1304  Cvode& cv = *cvode;
1305  for (int j = 0; j < cv.nctd_; ++j) {
1306  CvodeThreadData& z = cv.ctd_[j];
1307  if (z.psl_th_) {
1308  z.psl_th_->clear();
1309  delete std::exchange(z.psl_th_, nullptr);
1310  }
1311  if (cvode != gcv_) {
1312  z.delete_memb_list(std::exchange(z.cv_memb_list_, nullptr));
1313  } else {
1314  CvMembList *cml, *cmlnext;
1315  for (cml = std::exchange(z.cv_memb_list_, nullptr); cml; cml = cmlnext) {
1316  cmlnext = cml->next;
1317  delete cml;
1318  }
1319  }
1323  }
1324 }
1325 
1327  CvMembList *cml, *cmlnext;
1328  for (cml = cmlist; cml; cml = cmlnext) {
1329  cmlnext = cml->next;
1330  for (auto& ml: cml->ml) {
1331  delete[] std::exchange(ml.nodelist, nullptr);
1332  delete[] std::exchange(ml.nodeindices, nullptr);
1333  delete[] std::exchange(ml.prop, nullptr);
1334  if (!memb_func[cml->index].hoc_mech) {
1335  delete[] std::exchange(ml.pdata, nullptr);
1336  }
1337  }
1338  delete cml;
1339  }
1340 }
1341 
// Distribute threshold-watched PreSyn objects onto the CvodeThreadData
// psl_th_ lists of the integrator instance(s) that must poll them.
// Only PreSyn with thvar_ set participate (artificial cells and presyns
// for gids not on this cpu have no threshold to check).
// cellnum: per-node cell index (used only in the lvardt branch; may be
//          null for the global-step branch) — TODO confirm against callers.
// tid:     thread id being processed (lvardt branch only).
void NetCvode::distribute_dinfo(int* cellnum, int tid) {
    int j;
    // printf("distribute_dinfo %d\n", pst_cnt_);
    if (psl_) {
        for (PreSyn* ps: *psl_) {
            // printf("\tPreSyn %s\n", ps->osrc_ ? hoc_object_name(ps->osrc_):secname(ps->ssrc_));
            if (ps->thvar_) {  // artcells and presyns for gid's not on this cpu have no threshold
                // check
                NrnThread* nt;
                Cvode* cvsrc;
                CvodeThreadData* z;
                // cvode instances poll which presyns
                if (single_) {
                    // Global step: pick the source's thread, then the matching
                    // CvodeThreadData slot of the single global Cvode (gcv_).
                    if (ps->osrc_) {
                        // source is a point process (object)
                        nt = (NrnThread*) ob2pntproc(ps->osrc_)->_vnt;
                    } else if (ps->ssrc_) {
                        // source is a section voltage
                        nt = ps->ssrc_->pnode[0]->_nt;
                    } else {
                        nt = nrn_threads;
                    }
                    cvsrc = gcv_;
                    z = cvsrc->ctd_ + nt->id;
                    if (!z->psl_th_) {
                        z->psl_th_ = new PreSynList();
                        // pst_cnt_ is an upper bound on list size, so one
                        // reservation avoids reallocations.
                        z->psl_th_->reserve(pst_cnt_);
                    }
                    z->psl_th_->push_back(ps);
                } else {
                    // Local variable step: find the source node index j and
                    // thread, then the per-cell Cvode via cellnum[j].
                    if (ps->osrc_) {
                        j = node(ps->osrc_)->v_node_index;
                        nt = (NrnThread*) ob2pntproc(ps->osrc_)->_vnt;
                    } else if (ps->ssrc_) {
                        j = ps->ssrc_->pnode[0]->v_node_index;
                        nt = ps->ssrc_->pnode[0]->_nt;
                    } else {
                        j = 0;
                        nt = nrn_threads;
                    }
                    // Only handle presyns whose source lives on this thread.
                    if (tid == nt->id) {
                        cvsrc = p[tid].lcv_ + cellnum[j];
                        z = cvsrc->ctd_;
                        if (nt == cvsrc->nth_) {
                            if (!z->psl_th_) {
                                z->psl_th_ = new PreSynList();
                            }
                            z->psl_th_->push_back(ps);
                        }
                    }
                }
            }
        }
    }
}
1395 
1397  int i;
1398  set_CVRhsFn();
1399  wl_list_.clear();
1400  wl_list_.resize(nrn_nthread);
1401  if (single_) {
1402  gcv_ = new Cvode();
1403  Cvode& cv = *gcv_;
1404  cv.ncv_ = this;
1405  cv.nctd_ = nrn_nthread;
1406  cv.ctd_ = new CvodeThreadData[cv.nctd_];
1407  } else {
1408  for (int id = 0; id < nrn_nthread; ++id) {
1409  NrnThread& nt = nrn_threads[id];
1410  NetCvodeThreadData& d = p[id];
1411  d.nlcv_ = nt.ncell;
1412  d.lcv_ = new Cvode[d.nlcv_];
1413  d.tq_ = new TQueue(d.tpool_);
1414  for (i = 0; i < d.nlcv_; ++i) {
1415  TQItem* ti = d.tq_->insert(0., d.lcv_ + i);
1416  d.lcv_[i].tqitem_ = ti;
1417  Cvode& cv = d.lcv_[i];
1418  cv.nth_ = &nt;
1419  cv.ncv_ = this;
1420  cv.nctd_ = 1;
1421  cv.ctd_ = new CvodeThreadData[cv.nctd_];
1422  }
1423  }
1424  }
1425  empty_ = false;
1426 }
1427 
1429  int i;
1430  CvMembList* cml;
1431  if (tree_changed) {
1432  setup_topology();
1433  }
1434  if (v_structure_change) {
1435  v_setup_vectors();
1436  }
1438  return false;
1439  }
1440  if (diam_changed) { // need to guarantee that the matrix is allocated
1441  recalc_diam(); // for the present method
1442  }
1444  matrix_change_cnt_ = -1;
1445  playrec_change_cnt_ = 0;
1446  // We copy Memb_list* into cml->ml below. At the moment this CVode code
1447  // generates its own complicated set of Memb_list* that operate in
1448  // list-of-handles mode instead of referring to contiguous sets of values.
1449  // This is a shame, as it forces that list-of-handles mode to exist.
1450  // Possible alternatives could include:
1451  // - making the sorting algorithm more sophisticated so that the values that
1452  // are going to be processed together are contiguous -- this might be a
1453  // bit intricate, but it shouldn't be *too* had to assert that we get the
1454  // right answer -- and it's the only way of making sure the actual compute
1455  // kernels are cache efficient / vectorisable.
1456  // - changing the type used by this code to not be Memb_list but rather some
1457  // Memb_list_with_list_of_handles type and adding extra glue to the code
1458  // generation so that we can call into translated MOD file code using that
1459  // type
1460  // - Using a list of Memb_list with size 1 instead of a single Memb_list
1461  // that holds a list of handles?
1462  auto const cache_token = nrn_ensure_model_data_are_sorted();
1463  if (single_) {
1464  if (!gcv_ || gcv_->nctd_ != nrn_nthread) {
1465  delete_list();
1466  alloc_list();
1467  }
1468  del_cv_memb_list();
1469  Cvode& cv = *gcv_;
1470  distribute_dinfo(nullptr, 0);
1472  CvodeThreadData& z = cv.ctd_[_nt->id];
1473 
1474  z.rootnode_begin_index_ = 0;
1475  z.rootnode_end_index_ = _nt->ncell;
1476  z.vnode_begin_index_ = _nt->ncell;
1477  z.vnode_end_index_ = _nt->end;
1478 
1479  CvMembList* last = 0;
1480  for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
1481  i = tml->index;
1482  const Memb_func& mf = memb_func[i];
1483  Memb_list* ml = tml->ml;
1484  if (ml->nodecount && (i == CAP || mf.current || mf.ode_count || mf.ode_matsol ||
1485  mf.ode_spec || mf.state)) {
1486  // maintain same order (not reversed) for
1487  // singly linked list built below
1488  cml = new CvMembList{i};
1489  if (!z.cv_memb_list_) {
1490  z.cv_memb_list_ = cml;
1491  } else {
1492  last->next = cml;
1493  }
1494  last = cml;
1495  cml->next = nullptr;
1496  auto const mech_offset = cache_token.thread_cache(_nt->id).mechanism_offset.at(
1497  i);
1498  assert(mech_offset != neuron::container::invalid_row);
1499  assert(cml->ml.size() == 1);
1500  cml->ml[0].set_storage_offset(mech_offset);
1501  cml->ml[0].nodecount = ml->nodecount;
1502  // assumes cell info grouped contiguously
1503  cml->ml[0].nodelist = ml->nodelist;
1504  cml->ml[0].nodeindices = ml->nodeindices;
1505  assert(ml->prop);
1506  cml->ml[0].prop = ml->prop; // used for ode_map even when hoc_mech = false
1507  if (!mf.hoc_mech) {
1508  cml->ml[0].pdata = ml->pdata;
1509  }
1510  cml->ml[0]._thread = ml->_thread;
1511  }
1512  }
1516  // Every point process, but not artificial cells, cause at least a retreat.
1517  // All point processes, but not artificial cells,
1518  // have the global cvode as its nvi field
1519  for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
1520  i = tml->index;
1521  const Memb_func& mf = memb_func[i];
1522  if (mf.is_point && !nrn_is_artificial_[i]) {
1523  Memb_list* ml = tml->ml;
1524  int j;
1525  for (j = 0; j < ml->nodecount; ++j) {
1526  auto& datum = mf.hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1];
1527  auto* pp = datum.get<Point_process*>();
1528  pp->nvi_ = gcv_;
1529  }
1530  }
1531  }
1532  }
1533  } else { // lvardt
1534  bool b = false;
1535  if (gcv_) {
1536  b = true;
1537  }
1538  if (!b)
1539  for (i = 0; i < pcnt_; ++i) {
1540  if (p[i].nlcv_ != nrn_threads[i].ncell) {
1541  b = true;
1542  }
1543  }
1544  if (b) {
1545  delete_list();
1546  alloc_list();
1547  }
1548  del_cv_memb_list();
1549  // each node has a cell number
1550  for (int id = 0; id < nrn_nthread; ++id) {
1551  NrnThread* _nt = nrn_threads + id;
1552  NetCvodeThreadData& d = p[id];
1553  if (_nt->end == 0) {
1554  continue;
1555  }
1556  std::vector<int> cellnum(_nt->end);
1557  for (i = 0; i < _nt->ncell; ++i) {
1558  cellnum[i] = i;
1559  }
1560  for (i = _nt->ncell; i < _nt->end; ++i) {
1561  cellnum[i] = cellnum[_nt->_v_parent[i]->v_node_index];
1562  }
1563 
1564  for (i = 0; i < _nt->ncell; ++i) {
1565  auto& z = d.lcv_[i].ctd_[0];
1567  z.rootnode_end_index_ = i + 1;
1568  // start counting these
1569  z.vnode_begin_index_ = 0;
1570  z.vnode_end_index_ = 0;
1571  }
1572  for (i = _nt->ncell; i < _nt->end; ++i) {
1573  auto& z = d.lcv_[cellnum[i]].ctd_[0];
 1574  // valid only if cell contiguity (except root) is satisfied.
1575  if (!z.vnode_begin_index_) {
1576  z.vnode_begin_index_ = i;
1577  }
1578  if (z.vnode_end_index_ > 0) { // verify contiguity constraint
1579  assert(z.vnode_end_index_ == i);
1580  }
1581  z.vnode_end_index_ = i + 1;
1582  }
1583  // divide the memb_list info into per cell info
1584  // count
1585  std::vector<CvMembList*> last(_nt->ncell);
1586 
1587  // Need to determine if a Memb_list needs to be included
1588  // in CvMembList because there is a BEFORE/AFTER
1589  // statement that needs to be handled.
1590  std::unordered_set<int> ba_candidate;
1591  {
1592  constexpr std::array batypes{BEFORE_STEP, BEFORE_BREAKPOINT, AFTER_SOLVE};
1593  for (auto const bat: batypes) {
1594  for (BAMech* bam = bamech_[bat]; bam; bam = bam->next) {
1595  ba_candidate.insert(bam->type);
1596  }
1597  }
1598  }
1599 
1600  // Modified to also count the nodes and set the offsets for
1601  // each CvMembList.ml[contig_region].
1602  // The sum of the ml[i].nodecount must equal the mechanism
1603  // nodecount for the cell and each ml[i] data must be contiguous.
1604  // Ideally the node permutation would be such that each cell
1605  // is contiguous. So only needing a ml[0]. This is now mostly
1606  // the case with the default permutation. The root nodes are
1607  // all at the beginning, and thereafter all the cell nodes are
1608  // contiguous. This results in a
1609  // CvMembList.ml.size() == 1 almost always, with an exception of
1610  // size() == 2 only for extracellular and for POINT_PROCESSes
1611  // located both in the root node and other cell nodes.
1612 
1613  for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
1614  i = tml->index;
1615  const Memb_func& mf = memb_func[i];
1616  Memb_list* ml = tml->ml;
1617  if (ml->nodecount && (mf.current || mf.ode_count || mf.ode_matsol || mf.ode_spec ||
1618  mf.state || i == CAP || ba_candidate.count(i) == 1)) {
1619  // maintain same order (not reversed) for
1620  // singly linked list built below
1621  int j;
1622  for (j = 0; j < ml->nodecount; ++j) {
1623  auto offset = ml->get_storage_offset() + j;
1624  // for each Memb_list instance constructed, keep
1625  // track of its initial storage offset (i.e. offset)
1626 
1627  int inode = ml->nodelist[j]->v_node_index;
1628  int icell = cellnum[inode];
1629  Cvode& cv = d.lcv_[icell];
1630  CvodeThreadData& z = cv.ctd_[0];
1631 
1632  // Circumstances for creating a new CvMembList
1633  // or (due to non-contiguity of a cell),
1634  // appending a Memb_list instance to cml->ml
1635  if (!z.cv_memb_list_) { // initialize the first
1636  cml = new CvMembList{i};
1637  z.cv_memb_list_ = cml;
1638  cml->next = nullptr;
1639  last[icell] = cml;
1640  assert(cml->ml.size() == 1);
1641  assert(cml->ml[0].nodecount == 0);
1642  } else if (last[icell]->index != i) { // initialize next
1643  cml = new CvMembList{i};
1644  last[icell]->next = cml;
1645  cml->next = nullptr;
1646  last[icell] = cml;
1647  assert(cml->ml.size() == 1);
1648  assert(cml->ml[0].nodecount == 0);
1649  } else { // if non-contiguous, append Memb_list
1650  cml = last[icell];
1651  auto& cvml = cml->ml.back();
1652  auto cvml_offset = cvml.get_storage_offset() + cvml.nodecount;
1653  if (cvml_offset != offset) {
1654  // not contiguous, add another Memb_list
1655  // instance to cml->ml
1656  cml->ml.emplace_back(cml->index);
1657  assert(cml->ml.back().nodecount == 0);
1658  // For the default node permutation, cell
1659  // nodes are contiguous except for rootnode.
1660  assert(cml->ml.size() < 3);
1661  }
1662  }
1663 
1664  auto& cvml = cml->ml.back();
1665  if (cvml.nodecount == 0) { // first time for this Memb_List
1666  cvml.set_storage_offset(offset);
1667  }
1668  // Increment count of last Memb_list in cml->ml.
1669  ++cvml.nodecount;
1670  }
1671  }
1672  }
1673 
1674  std::vector<CvMembList*> cvml(d.nlcv_);
1675  for (i = 0; i < d.nlcv_; ++i) {
1676  cvml[i] = d.lcv_[i].ctd_[0].cv_memb_list_; // whole cell in thread
1677  }
1678  // fill pointers (and nodecount)
1679  // now list order is from 0 to n_memb_func
1680  for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
1681  i = tml->index;
1682  const Memb_func& mf = memb_func[i];
1683  Memb_list* ml = tml->ml;
1684  if (ml->nodecount && (mf.current || mf.ode_count || mf.ode_matsol || mf.ode_spec ||
1685  mf.state || i == CAP || ba_candidate.count(i) == 1)) {
1686  int increment = 1; // newml.nodecount is handled in the newml loop below
1687  for (int j = 0; j < ml->nodecount; j += increment) {
1688  int icell = cellnum[ml->nodelist[j]->v_node_index];
1689  if (cvml[icell]->index != i) {
1690  cvml[icell] = cvml[icell]->next;
1691  assert(cvml[icell] && cvml[icell]->index == i);
1692  }
1693  auto& cml = cvml[icell];
1694  increment = 1;
1695  for (auto& newml: cml->ml) {
1696  if (!newml.nodelist) {
1697  auto nodecount = newml.nodecount;
1698  // do nodecount of these for ml and then
1699  // skip forward by nodecount in the outer
1700  // ml->nodecount j loop (i.e. a contiguity
1701  // region)
1702  increment = nodecount;
1703  newml.nodelist = new Node*[nodecount];
1704  newml.nodeindices = new int[nodecount];
1705  newml.prop = new Prop*[nodecount];
1706  if (!mf.hoc_mech) {
1707  newml.pdata = new Datum*[nodecount];
1708  }
1709  for (int k = 0; k < nodecount; ++k) {
1710  newml.nodelist[k] = ml->nodelist[j + k];
1711  newml.nodeindices[k] = ml->nodeindices[j + k];
1712  assert(cellnum[newml.nodeindices[k]] ==
1713  cellnum[ml->nodeindices[j]]);
1714  newml.prop[k] = ml->prop[j + k];
1715  if (!mf.hoc_mech) {
1716  newml.pdata[k] = ml->pdata[j + k];
1717  }
1718  }
1719  newml._thread = ml->_thread;
1720  break;
1721  }
1722  }
1723  }
1724  }
1725  }
1726  // do the above for the BEFORE/AFTER functions
1727  fill_local_ba(cellnum.data(), d);
1728 
1729  distribute_dinfo(cellnum.data(), id);
1730  // If a point process is not an artificial cell, fill its nvi_ field.
 1731  // artificial cells have no integrator
1732  for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) {
1733  i = tml->index;
1734  const Memb_func& mf = memb_func[i];
1735  if (mf.is_point) {
1736  Memb_list* ml = tml->ml;
1737  int j;
1738  for (j = 0; j < ml->nodecount; ++j) {
1739  auto& datum = mf.hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1];
1740  auto* pp = datum.get<Point_process*>();
1741  if (nrn_is_artificial_[i] == 0) {
1742  int inode = ml->nodelist[j]->v_node_index;
1743  pp->nvi_ = d.lcv_ + cellnum[inode];
1744  } else {
1745  pp->nvi_ = nullptr;
1746  }
1747  }
1748  }
1749  }
1750  }
1751  }
1752  return true;
1753 }
1754 
1755 void NetCvode::fill_global_ba(NrnThread* nt, int bat, BAMechList** baml) {
1756  NrnThreadBAList* tbl;
1757  for (tbl = nt->tbl[bat]; tbl; tbl = tbl->next) {
1758  BAMechList* ba = new BAMechList(baml);
1759  ba->bam = tbl->bam;
1760  ba->ml.push_back(tbl->ml);
1761  }
1762 }
1763 
1766  fill_local_ba_cnt(AFTER_SOLVE, celnum, d);
1767  fill_local_ba_cnt(BEFORE_STEP, celnum, d);
1768 }
1769 
1770 void NetCvode::fill_local_ba_cnt(int bat, int* celnum, NetCvodeThreadData& d) {
1771  BAMech* bam;
1772  for (bam = bamech_[bat]; bam; bam = bam->next) {
1773  for (int icv = 0; icv < d.nlcv_; ++icv) {
1774  Cvode* cv = d.lcv_ + icv;
1775  assert(cv->nctd_ == 1);
1776  for (CvMembList* cml = cv->ctd_[0].cv_memb_list_; cml; cml = cml->next) {
1777  if (cml->index == bam->type) {
1778  BAMechList* bl = cvbml(bat, bam, cv);
1779  bl->bam = bam;
1780  for (auto& ml: cml->ml) {
1781  bl->ml.push_back(&ml);
1782  }
1783  }
1784  }
1785  }
1786  }
1787 }
1788 
1789 BAMechList* NetCvode::cvbml(int bat, BAMech* bam, Cvode* cv) {
1790  BAMechList** pbml;
1791  BAMechList* ba;
1792  if (bat == BEFORE_BREAKPOINT) {
1793  pbml = &cv->ctd_->before_breakpoint_;
1794  } else if (bat == AFTER_SOLVE) {
1795  pbml = &cv->ctd_->after_solve_;
1796  } else {
1797  pbml = &cv->ctd_->before_step_;
1798  }
1799  if (!*pbml) {
1800  ba = new BAMechList(pbml);
1801  } else {
1802  for (ba = *pbml; ba; ba = ba->next) {
1803  if (ba->bam->type == bam->type) {
1804  return ba;
1805  }
1806  }
1807  ba = new BAMechList(pbml);
1808  }
1809  ba->bam = bam;
1810  return ba;
1811 }
1812 
1813 /*
1814 The path through NetCvode::solve path is determined by the case dimensions of
1815 (single step, integrate to tout) and (no model, global dt, localdt).
1816 For threads there is one more constraint--- do not allow t0_ to pass
1817 by the minimum interthread netcon delay integration interval barrier
1818 without making sure all the interthread events to be delivered before
1819 that barrier are on the thread queue. For classical mpi spike exchange
1820 this was ensured by a NetParEvent which provided a synchronization barrier.
1821 We generalized this to a NetParEvent per thread and a thread barrier but
1822 there was a consequent loss of the possibility of sequential threads since
1823 in that context a barrier makes no sense (each job must run to completion).
1824 Hence the desire to avoid barriers and handle the interval barrier explicity
1825 here to make sure NetCvode::enqueue is called frequently enough.
1826 (Actually, it is called very frequently since it is called by deliver_events,
1827 so it is only required that the interval between the least and greatest
1828 integrator t0_ be less than the maximum).
1829 
1830 Without a thread barrier in NetParEvent,
1831 this mostly affects the "integrate to tout" case. But it also affects
1832 the single step case in that we are not allowed to handle any
1833 events with delivery time > barrier time. Since a microstep does the
1834 earliest next thing, we need to check that before the call.
1835 
1836 Can we deal with multisplit conveniently without a thread barrier.
1837 Yes if we split up the process into its three phases of triangularization,
1838 reduced tree solve, back substitution. No if we retain a single
1839 integration step quantum for the thread.
1840 Without barriers, we end up with finer and finer grain nrn_multithread_job.
1841 
1842 It is sounding more and more reasonable to sacrifice the debugging convenience
1843 of sequential threads in favor of allowing thread barriers.
1844 ...but... a look at the nrn_fixed_step and nrn_multisplit_solve shows
1845 that it would certainly be easy to split into the proper
1846 nrn_multithread_job groups since it is already functionally decomposed
1847 in that way. And we may want to limit NetParEvent to serve only as
1848 interprocessor spike exchange signals for the main thread.
1849 ...but... a look at cvode in a multisplit context
1850 means there must be one global cvode instance
1851 and when f and jacob is called it would divide there and do the scatter
1852 gather etc. Of course, with separate cvode instances with multisplit we
1853 still have to have a proper Vector library which does the corresponding
1854 mpi reduce functions. Conceptually much simpler with barrier and maybe
1855 impossible without. If we went with one global cvode it would be
1856 possible. And cvode overhead seems independent of number of equations
1857 so confining it to the main thread may not be bad.
1858 
 1859 The principal multithread domain we design for below is the few event,
1860 multisplit case. That is, there is one cvode instance managed by thread 0
1861 and all events go onto one queue. However, an event is delivered according
 1862 to its proper delivery thread. Note that the possibility of 0 delay
1863 events is allowed. There is nothing that can be done at the minimum
1864 delay integration interval level since each step divides into a dozen or so
1865 multithread jobs.
1866 
1867 Threads and lvardt.
1868 Consider the global cv (used to be p.lcv_[0]) as gcv_ which has ctd[nrn_nthread]
1869 instances of separate CvodeThreadData and works with multisplit.
1870 Now consider the lvardt method (used to be supported by p.lcv_[i]) as
1871 NetCvodeThreadData instances of lcv_[i] and of course move tq_ into
1872 the NetCvodeThreadData. Then each of the p.lcv_[i] would have Cvode.nctd_
1873 equal 1 in which p.lcv_[i].ctd_[0] is the proper CvodeThreadData. Then
1874 Cvode would never look at nrn_nthread to determine the CvodeThreadData
1875 but only the Cvode.nctd_. And if Cvode.nctd_ > 1 which would be
1876 possible only for gcv_, only then would nt->id be used to get the proper
1877 CvodeThreadData. We presume for now that if lvardt is used with nrn_nthread > 1
1878 then the usual minimum delay interval requirements hold for interthread
1879 events.
1880 
1881 Inter-Thread-Events (possibility of 0 delay).
1882 
1883 The fixed step method is that for each step, the sequence of actions
1884 for each thread is:
1885 1) at time tbegin: check thresholds, if source and target on same thread
1886  put directly onto queue. If different threads (PreSyn only), do an
1887  interthread send which puts the event into the target thread
1888  inter_thread_events list. Finally we deliver all events up to tbegin+.5dt,
1889  this first transfers all the threads inter-thread-events onto the queue.
1890 2) at time tbegin + .5dt: v(tbegin) -> v(tend)
1891 3) at time tend: integrate states one step using v(tend)
1892 4) deliver events up to but not past tend (first transfers inter-thread-events
1893  onto queue).
1894 Therefore, a zero-delay inter-thread-event from an artificial cell
1895 generated in step 4 may or may not be delivered
1896 in the time step it is generated. And zero-delay threshold detection interthread
1897 events will be delivered in the time step but either in step 1 or step 4 (ie.
1898 beginning or end of the time step). For this reason, if there are zero-delay
1899 NetCon, then both the source and target should be on the same thread. In practice
1900 we should force them onto thread 0 or at least the source onto the
1901 target thread.
1902 
1903 What about cvode?
1904 Global variable time step method.
1905 As mentioned above, all events, when global cvode active, go onto the thread 0 event
1906 queue (interthread via the thread 0 inter_thread_events list)
1907 and from there are delivered in the context of the proper thread
1908 for cache efficiency (by means of nrn_onethread_job.) Again, as mentioned,
1909 the design side is for few events. With this in mind, there does not seem
1910 any impediment to 0 delay events. But note that an event with source and
1911 target on thread 2 (e.g. a self event) goes onto the thread 0 inter_thread_events
1912 list (in the context of thread 2), then in the context of thread 0 is transferred
1913 from the thread 0 inter_thread_events list to the thread 0 queue, then
1914 in the context of thread 0, the event is taken off the queue and delivered
1915 in the context of thread 2 (by means of nrn_onethread_job). (See why we
1916 are thinking few events? A bothersome aspect is that after every event
1917 delivery least_t has to enqueue onto the thread 0 queue the nthread
1918 inter_thread_events lists.
1919 The one queue idea for global variable time step is unsound because of the
1920 problem that multiple threads call the queue methods and so cause excessive
1921 cache line sharing. And if we avoided this through the use of the
1922 inter_thread_events list then consider a 0 delay NetCon
1923 event between artificial cells on thread 2 and thread 3.
1924 thread 0 in deliver_events_when_threads, least event for thread 2
1925 thread 2 (NET_RECEIVE -> net_send -> PreSyn::send -> p[3].interthead_send
1926 thread 0 enqueue onto thread 0 queue all inter_thread_events lists
1927 thread 0 least event for thread 3
1928 thread 3 (NET_RECEIVE ...)
1929 Actually, there are real problems. E.g ConditionEvent::condition is
1930 in the context of some thread and must remove an event on the thread 0 queue.
1931 That is a recipe for horrendous cacheline sharing and is the whole reason
1932 we designed inter_thread_events in the first place. Also the call to
1933 enqueue_thread0() is multiplying alarmingly (every call before least_t())
1934 and the reason for the privileged thread 0 queue has more or less disappeared.
1935 Instead we only need an effective reduce to find the minimum of the least_t()
1936 in each thread. Expensive but no more expensive than the single queue and without
 1937 the cache-line sharing. And allows 0 delay events. And keeps a kind of uniformity
1938 between fixed step, global cvode, and the future lvardt in that each thread
1939 has its own event queue.
1940 Far reaching Local variable time step method change.
1941 gcv_ if it exists, refers to the global variable time step method
1942 and there are nthread CvodeThreadData instances in ctd_. If gcv_ is 0 and
1943 !empty_, then each NetCvodeThreadData manages an array of nlcv_
1944 Cvode instances in the lcv_ array. Each Cvode instance has only one ctd_
1945 Thus for global step the CvodeThreadData for a thread is gcv_->ctd_[i] whereas
1946 for lvardt it is p[i].lcv_[jcell_in_thread_i].ctd_[0].
1947 */
1948 
1949 int NetCvode::solve(double tout) {
1950  if (nrn_nthread > 1) {
1951  return solve_when_threads(tout); // more or less a copy of below
1952  }
1953  NrnThread* nt = nrn_threads;
1954  int err = NVI_SUCCESS;
1955  if (empty_) {
1956  if (tout >= 0.) {
1957  while (p[0].tqe_->least_t() <= tout && stoprun == 0) {
1958  deliver_least_event(nt);
1959  if (nrn_allthread_handle) {
1960  (*nrn_allthread_handle)();
1961  }
1962  }
1963  if (stoprun == 0) {
1964  nt_t = tout;
1965  }
1966  } else {
1967  if (p[0].tqe_->least()) {
1968  nt_t = p[0].tqe_->least_t();
1969  deliver_events(p[0].tqe_->least_t(), nt);
1970  } else {
1971  nt_t += 1e6;
1972  }
1973  if (nrn_allthread_handle) {
1974  (*nrn_allthread_handle)();
1975  }
1976  }
1977  } else if (single_) {
1978  if (tout >= 0.) {
1979  while (gcv_->t_ < tout || p[0].tqe_->least_t() < tout) {
1980  err = global_microstep();
1981  if (nrn_allthread_handle) {
1982  (*nrn_allthread_handle)();
1983  }
1984  if (err != NVI_SUCCESS || stoprun) {
1985  return err;
1986  }
1987  }
1988  retreat(tout, gcv_);
1990  } else {
1991  // advance or initialized
1992  double tc = gcv_->t_;
1993  initialized_ = false;
1994  while (gcv_->t_ <= tc && !initialized_) {
1995  err = global_microstep();
1996  if (nrn_allthread_handle) {
1997  (*nrn_allthread_handle)();
1998  }
1999  if (err != NVI_SUCCESS || stoprun) {
2000  return err;
2001  }
2002  }
2003  }
2004  } else if (!gcv_) { // lvardt
2005  auto const cache_token = nrn_ensure_model_data_are_sorted();
2006  if (tout >= 0.) {
2007  time_t rt = time(nullptr);
2008  // int cnt = 0;
2009  TQueue* tq = p[0].tq_;
2010  TQueue* tqe = p[0].tqe_;
2011  NrnThread* nt = nrn_threads;
2012  while (tq->least_t() < tout || tqe->least_t() <= tout) {
2013  err = local_microstep(cache_token, *nt);
2014  if (nrn_allthread_handle) {
2015  (*nrn_allthread_handle)();
2016  }
2017  if (err != NVI_SUCCESS || stoprun) {
2018  return err;
2019  }
2020 #if HAVE_IV
2021  if (hoc_usegui) {
2022  if (rt < time(nullptr)) {
2023  // if (++cnt > 10000) {
2024  // cnt = 0;
2025  Oc oc;
2026  oc.notify();
2027  single_event_run();
2028  rt = time(nullptr);
2029  }
2030  }
2031 #endif
2032  }
2033  int n = p[0].nlcv_;
2034  Cvode* lcv = p[0].lcv_;
2035  for (int i = 0; i < n; ++i) {
2036  local_retreat(tout, lcv + i);
2037  lcv[i].record_continuous();
2038  }
2039  } else {
2040  // an fadvance is not every microstep but
2041  // only when all the discontinuities at te take place or
2042  // tc increases.
2043  TQueue* tq = p[0].tq_;
2044  double tc = tq->least_t();
2045  double te = p[0].tqe_->least_t();
2046  while (tq->least_t() <= tc && p[0].tqe_->least_t() <= te) {
2047  err = local_microstep(cache_token, *nrn_threads);
2048  if (nrn_allthread_handle) {
2049  (*nrn_allthread_handle)();
2050  }
2051  if (err != NVI_SUCCESS || stoprun) {
2052  return err;
2053  }
2054  }
2055  // But make sure t is not past the least time.
2056  // fadvance and local step do not coexist seamlessly.
2057  nt_t = tq->least_t();
2058  if (te < nt_t) {
2059  nt_t = te;
2060  }
2061  }
2062  } else {
2063  nt_t += 1e9;
2064  }
2065  return err;
2066 }
2067 
2069  TQItem* q = p[nt->id].tqe_->least();
2070  DiscreteEvent* de = (DiscreteEvent*) q->data_;
2071  double tt = q->t_;
2072  p[nt->id].tqe_->remove(q);
2073 #if PRINT_EVENT
2074  if (print_event_) {
2075  de->pr("deliver", tt, this);
2076  }
2077 #endif
2079  de->deliver(tt, this, nt);
2080 }
2081 
2082 bool NetCvode::deliver_event(double til, NrnThread* nt) {
2083  TQItem* q;
2084  if ((q = p[nt->id].tqe_->atomic_dq(til)) != 0) {
2085  DiscreteEvent* de = (DiscreteEvent*) q->data_;
2086  double tt = q->t_;
2087  p[nt->id].tqe_->release(q);
2088 #if PRINT_EVENT
2089  if (print_event_) {
2090  de->pr("deliver", tt, this);
2091  }
2092 #endif
2094  de->deliver(tt, this, nt);
2095  return true;
2096  } else {
2097  return false;
2098  }
2099 }
2100 
2102  auto* const nt = &ntr;
2103  int err = NVI_SUCCESS;
2104  int i = nt->id;
2105  if (p[i].tqe_->least_t() <= p[i].tq_->least_t()) {
2106  deliver_least_event(nt);
2107  } else {
2108  TQItem* q = p[i].tq_->least();
2109  Cvode* cv = (Cvode*) q->data_;
2110  err = cv->handle_step(sorted_token, this, 1e100);
2111  p[i].tq_->move_least(cv->t_);
2112  }
2113  return err;
2114 }
2115 
2117  NrnThread* nt = nrn_threads;
2118  int err = NVI_SUCCESS;
2119  double tt = p[0].tqe_->least_t();
2120  double tdiff = tt - gcv_->t_;
2121  if (tdiff <= 0) {
2122  // since events do not internally retreat with the
2123  // global step, we should already be at the event time
2124  // if this is too strict, we could use eps(list_->t_).
2125  assert(tdiff == 0.0 || (gcv_->tstop_begin_ <= tt && tt <= gcv_->tstop_end_));
2126  deliver_events(tt, nt);
2127  } else {
2129  }
2130  if (p[0].tqe_->least_t() < gcv_->t_) {
2131  gcv_->interpolate(p[0].tqe_->least_t());
2132  }
2133  return err;
2134 }
2135 
2136 int Cvode::handle_step(neuron::model_sorted_token const& sorted_token, NetCvode* ns, double te) {
2137  int err = NVI_SUCCESS;
2138  // first order correct condition evaluation goes here
2139  if (ns->condition_order() == 1) {
2140  if (ns->gcv_) { // global step
2141  for (int i = 0; i < nctd_; ++i) {
2142  nrn_threads[i]._t = t_; // for global step could be assert
2143  }
2144  check_deliver();
2145  // done if the check puts a 0 delay event on queue
2146  if (nctd_ > 1) {
2147  int tid;
2148  if (ns->allthread_least_t(tid) <= t_) {
2149  return err;
2150  }
2151  } else {
2152  if (ns->p[0].tqe_->least_t() <= t_) {
2153  return err;
2154  }
2155  }
2156  } else { // lvardt so in a specific thread
2157  // for localstep method t is for a different cvode.fun call
2158  nth_->_t = t_;
2160  if (ns->p[nth_->id].tqe_->least_t() <= t_) {
2161  return err;
2162  }
2163  }
2164  }
2165  if (initialize_) {
2166  err = init(t_);
2167  if (ns->gcv_) {
2168  ns->initialized_ = true;
2169  }
2170  // second order correct condition evaluation goes here
2171  if (ns->condition_order() == 2) {
2173  }
2174  } else if (te <= tn_) {
2175  err = interpolate(te);
2176  } else if (t_ < tn_) {
2177  err = interpolate(tn_);
2178  } else {
2180  err = advance_tn(sorted_token);
2181  // second order correct condition evaluation goes here
2182  if (ns->condition_order() == 2) {
2184  }
2185  }
2186  return err;
2187 }
2188 
2189 void nrn_net_move(Datum* v, Point_process* pnt, double tt) {
2190  auto* const q = v->get<TQItem*>();
2191  if (!q) {
2192  hoc_execerror("No event with flag=1 for net_move in ", hoc_object_name(pnt->ob));
2193  }
2194  // printf("net_move tt=%g %s *v=%p\n", tt, hoc_object_name(pnt->ob), *v);
2195  if (tt < PP2t(pnt)) {
2196  SelfEvent* se = (SelfEvent*) q->data_;
2197  char buf[100];
2198  Sprintf(buf, "net_move tt-nt_t = %g", tt - PP2t(pnt));
2199  se->pr(buf, tt, net_cvode_instance);
2200  assert(0);
2201  hoc_execerror("net_move tt < t", 0);
2202  }
2203  net_cvode_instance->move_event(q, tt, PP2NT(pnt));
2204 }
2205 
2206 void artcell_net_move(Datum* v, Point_process* pnt, double tt) {
2207  if (nrn_use_selfqueue_) {
2208  auto* const q = v->get<TQItem*>();
2209  if (!q) {
2210  hoc_execerror("No event with flag=1 for net_move in ", hoc_object_name(pnt->ob));
2211  }
2212  NrnThread* nt = PP2NT(pnt);
2214  // printf("artcell_net_move t=%g qt_=%g tt=%g %s *v=%p\n", nt->_t, q->t_, tt,
2215  // hoc_object_name(pnt->ob), *v);
2216  if (tt < nt->_t) {
2217  SelfEvent* se = (SelfEvent*) q->data_;
2218  char buf[100];
2219  Sprintf(buf, "artcell_net_move tt-nt_t = %g", tt - nt->_t);
2220  se->pr(buf, tt, net_cvode_instance);
2221  hoc_execerror("net_move tt < t", 0);
2222  }
2223  q->t_ = tt;
2224  if (tt < p.immediate_deliver_) {
2225  // printf("artcell_net_move_ %s immediate %g %g %g\n", hoc_object_name(pnt->ob),
2226  // PP2t(pnt), tt, p.immediate_deliver_);
2227  SelfEvent* se = (SelfEvent*) q->data_;
2228  se->deliver(tt, net_cvode_instance, nt);
2229  }
2230  } else {
2231  nrn_net_move(v, pnt, tt);
2232  }
2233 }
2234 
2235 void NetCvode::move_event(TQItem* q, double tnew, NrnThread* nt) {
2236  int tid = nt->id;
2238 #if PRINT_EVENT
2239  if (print_event_) {
2240  SelfEvent* se = (SelfEvent*) q->data_;
2241  Printf("NetCvode::move_event self event target %s t=%g, old=%g new=%g\n",
2242  hoc_object_name(se->target_->ob),
2243  nt->_t,
2244  q->t_,
2245  tnew);
2246  }
2247 #endif
2248  p[tid].tqe_->move(q, tnew);
2249 }
2250 
2252  p[tid].tqe_->remove(q);
2253 }
2254 
2255 // for threads, revised net_send to use absolute time (in the
2256 // mod file we add the thread time when we call it).
2257 void nrn_net_send(Datum* v, double* weight, Point_process* pnt, double td, double flag) {
2259  NrnThread* nt = PP2NT(pnt);
2261  SelfEvent* se = p.sepool_->alloc();
2262  se->flag_ = flag;
2263  se->target_ = pnt;
2264  se->weight_ = weight;
2265  se->movable_ = v; // needed for SaveState
2267  ++p.unreffed_event_cnt_;
2268  if (td < nt->_t) {
2269  char buf[100];
2270  Sprintf(buf, "net_send td-t = %g", td - nt->_t);
2271  se->pr(buf, td, net_cvode_instance);
2272  abort();
2273  hoc_execerror("net_send delay < 0", 0);
2274  }
2275  TQItem* q;
2276  q = net_cvode_instance->event(td, se, nt);
2277  if (flag == 1.0) {
2278  *v = q;
2279  }
2280  // printf("net_send %g %s %g %p\n", td, hoc_object_name(pnt->ob), flag, *v);
2281 }
2282 
2283 void artcell_net_send(Datum* v, double* weight, Point_process* pnt, double td, double flag) {
2284  if (nrn_use_selfqueue_ && flag == 1.0) {
2286  NrnThread* nt = PP2NT(pnt);
2288  SelfEvent* se = p.sepool_->alloc();
2289  se->flag_ = flag;
2290  se->target_ = pnt;
2291  se->weight_ = weight;
2292  se->movable_ = v; // needed for SaveState
2294  ++p.unreffed_event_cnt_;
2295  if (td < nt->_t) {
2296  char buf[100];
2297  Sprintf(buf, "net_send td-t = %g", td - nt->_t);
2298  se->pr(buf, td, net_cvode_instance);
2299  hoc_execerror("net_send delay < 0", 0);
2300  }
2301  TQItem* q = p.selfqueue_->insert(se);
2302  q->t_ = td;
2303  *v = q;
2304  // printf("artcell_net_send %g %s %g %p\n", td, hoc_object_name(pnt->ob), flag, v);
2305  if (q->t_ < p.immediate_deliver_) {
2306  // printf("artcell_net_send_ %s immediate %g %g %g\n", hoc_object_name(pnt->ob),
2307  // nt->_t, q->t_, p.immediate_deliver_);
2308  SelfEvent* se = (SelfEvent*) q->data_;
2309  p.selfqueue_->remove(q);
2310  se->deliver(td, net_cvode_instance, nt);
2311  }
2312  } else {
2313  nrn_net_send(v, weight, pnt, td, flag);
2314  }
2315 }
2316 
2317 // Deprecated overloads for backwards compatibility
2318 void artcell_net_send(void* v, double* weight, Point_process* pnt, double td, double flag) {
2319  artcell_net_send(static_cast<Datum*>(v), weight, pnt, td, flag);
2320 }
2321 
2322 void nrn_net_send(void* v, double* weight, Point_process* pnt, double td, double flag) {
2323  nrn_net_send(static_cast<Datum*>(v), weight, pnt, td, flag);
2324 }
2325 
2326 void net_event(Point_process* pnt, double time) {
2328  PreSyn* ps = (PreSyn*) pnt->presyn_;
2329  if (ps) {
2330  if (time < PP2t(pnt)) {
2331  char buf[100];
2332  Sprintf(buf, "net_event time-t = %g", time - PP2t(pnt));
2333  ps->pr(buf, time, net_cvode_instance);
2334  hoc_execerror("net_event time < t", 0);
2335  }
2336  ps->send(time, net_cvode_instance, ps->nt_);
2337  }
2338 }
2339 
2341  double (*c)(Point_process*),
2342  int i,
2343  Point_process* pnt,
2344  int r,
2345  double flag) {
2346  auto* wl = d->get<WatchList*>();
2347  auto* wc = d[i].get<WatchCondition*>();
2348  if (!wl || !wc) {
2349  // When c is NULL, i.e. called from CoreNEURON,
2350  // we never get here because we made sure
2351  // _nrn_watch_allocated for this has been called earlier from
2352  // within the translated mod file.
2353  _nrn_watch_allocate(d, c, i, pnt, flag);
2354  // d[0] and d[i] have now been updated
2355  wl = d->get<WatchList*>();
2356  wc = d[i].get<WatchCondition*>();
2357  }
2358  if (r == 0) {
2359  for (auto wc1: *wl) {
2360  wc1->Remove();
2361  if (wc1->qthresh_) { // is it on the queue?
2362  net_cvode_instance->remove_event(wc1->qthresh_, PP2NT(pnt)->id);
2363  wc1->qthresh_ = nullptr;
2364  }
2365  }
2366  wl->clear();
2367  }
2368  wl->push_back(wc);
2369  wc->activate(flag); // nr_flag_ (NetReceive flag) not flag_ for above threshold.
2370 }
2371 
2372 /*
2373 An example of a call to _nrn_watch_activate from within the NET_RECEIVE
2374 block of a translated mod file is
2375 _nrn_watch_activate(_watch_array, _watch1_cond, 1, _pnt, _watch_rm++, 2.0);
2376 
2377 _watch_array begins at _ppvar+first_index_of_watch_information and the number
2378  of indices is 1 more than the number of watch statements. Each of those
2379  (starting at index 1) is a pointer to a WatchCondition.
2380  The 0th is a pointer to a HTList (HeadTailList) of the active WatchConditions.
2381  Third arg is index of the WatchCondition to be activated.
2382  The _watch_rm when 0 means, empty the HTList before adding the
2383  WatchCondition to the HTList, when > 0, just add (It is possible for
2384  several WatchCondition to be active at the same time. That is useful when
2385  the conceptual condition is for a variable to be within a specific range
2386  or when multiple variables need to be watched at the same time.
2387  The last arg is the flag value used by the NET_RECEIVE block to activate
2388  a new set of WATCH statements. Note that that particular flag does not
2389  have to result in the activation of a new set. In that case, the old set
2390  stays active.
2391 
2392 Note. At time of writing this note,
2393  _watch_array[1:] are only created the first time that 'i' WatchCondition
2394  is activated. And at that time the 2nd callback arg is stored in the
2395  WatchCondition. So, at the moment, return from CoreNEURON cannot count on
2396  all WatchConditions being in existence. Given that the 2nd callback arg
2397  is only known to the translated mod file, the most straightforward solution
2398  is for nocmodl to generate
2399  an "initialization" function that calls all possible _nrn_watch_activate.
2400  That could be done at the Point_process* pnt creation time (or after
2401  transfer of WATCH info to CoreNEURON (using structure_change_cnt_).)
2402  It could even be done as necessary when CoreNEURON sends back activated
2403  WatchCondition info when the WatchCondition* slot on the NEURON side
2404  is NULL. Then the "initialization" function could call a stripped down
2405  version of _nrn_watch_activate for only its NULL slots.
2406 
2407 Here are some more notes about WatchCondition, HTList, HTListList, and
2408  WatchList.
2409  The (WatchList*)d->_pvoid is used only in _nrn_watch_activate to
2410  iterate over its previously activated list in order to remove all those from
2411  its HTList.
2412  WatchCondition is a subclass of ConditionEvent and HTList
2413  Because of the latter, we can say
2414  WatchCondition.Remove() and HTList.append(WatchCondition)
2415  The former can be called any number of times. When it is called it becomes
2416  a singleton WatchCondition (only in itself as an HTList).
2417  The latter can be called any number of times. Whereever the
2418  WatchCondition is located before calling HTList.append ---
2419  singleton, in another list, or already in this list --- it will end up
2420  as the htlist.Last()
2421 
2422 */
2423 
2424 /** Introduced so corenrn->nrn can request the mod file to make sure
2425  * all WatchCondition are allocated. When that is the case then
2426  * corenrn can call _nrn_watch_activate with all args filled out
2427  * because the allocated WatchCondition has double (*c_)(Point_process)
2428  * and flag_ filled in.
2429  **/
2431  double (*c)(Point_process*),
2432  int i,
2433  Point_process* pnt,
2434  double flag) {
2435  if (!d->get<WatchList*>()) {
2436  *d = new WatchList;
2437  }
2438  if (!d[i].get<WatchCondition*>()) {
2439  WatchCondition* wc = new WatchCondition(pnt, c);
2440  wc->c_ = c;
2441  wc->nrflag_ = flag;
2442  d[i] = wc;
2443  // Simplify transfer to CoreNEURON
2444  // To avoid searching for the beginning of _watch_array after
2445  // transfer to CoreNEURON, compute the offset with respect to
2446  // dparam. That, of course, assumes NEURON and CoreNEURON
2447  // have same pdata arrangement.
2448  wc->watch_index_ = i + (d - pnt->prop->dparam);
2449  }
2450 }
2451 
2452 /** Watch info corenrn->nrn transfer requires all activated
2453  * WatchCondition be deactivated prior to mirroring the activation
2454  * that exists on the corenrn side.
2455  **/
2457  assert(net_cvode_instance->wl_list_.size() == (size_t) nrn_nthread);
2458  for (auto& htlists_of_thread: net_cvode_instance->wl_list_) {
2459  for (auto* wl: htlists_of_thread) {
2460  wl->RemoveAll();
2461  }
2462  }
2463  // not necessary to empty the WatchList in the Point_process dparam array
2464  // as that will happen when _nrn_watch_activate is called with an r
2465  // arg of 0.
2466 }
2467 
2468 /** Called by Point_process destructor in translated mod file **/
2469 void _nrn_free_watch(Datum* d, int offset, int n) {
2470  int i;
2471  int nn = offset + n;
2472  if (auto* d_offset = d[offset].get<WatchList*>(); d_offset) {
2473  delete d_offset;
2474  d[offset] = nullptr;
2475  }
2476  for (i = offset + 1; i < nn; ++i) {
2477  if (auto* wc = d[i].get<WatchCondition*>(); wc) {
2478  wc->Remove();
2479  delete wc;
2480  d[i] = nullptr;
2481  }
2482  }
2483 }
2484 
2486  // not destroyed when vector destroyed.
2487  // should resize to 0 or remove before storing, just keeps incrementing
2488  if (vec_event_store_) {
2489  vec_event_store_ = nullptr;
2490  }
2491  if (ifarg(1)) {
2493  }
2494 }
2495 
2496 #define fifo_event event
2497 
2499  if (nrn_use_bin_queue_) {
2500 #if PRINT_EVENT
2501  if (print_event_) {
2502  db->pr("binq send", td, this);
2503  }
2504  if (vec_event_store_) {
2505  assert(0);
2506  Vect* x = vec_event_store_;
2507  x->push_back(nt_t);
2508  x->push_back(td);
2509  }
2510 #endif
2511  return p[nt->id].tqe_->enqueue_bin(td, db);
2512  } else {
2513 #if PRINT_EVENT
2514  if (print_event_) {
2515  db->pr("send", td, this);
2516  }
2517 #endif
2518  return p[nt->id].tqe_->insert(td, db);
2519  }
2520 }
2521 
2523 #if PRINT_EVENT
2524  if (print_event_) {
2525  db->pr("send", td, this);
2526  }
2527  if (vec_event_store_) {
2528  Vect* x = vec_event_store_;
2529  x->push_back(nt_t);
2530  x->push_back(td);
2531  }
2532 #endif
2533  return p[nt->id].tqe_->insert(td, db);
2534 }
2535 
2536 void NetCvode::null_event(double tt) {
2537  assert(0);
2538  NrnThread* nt = nrn_threads;
2539  if (tt - nt->_t < 0) {
2540  return;
2541  }
2542  event(tt, null_event_, nt);
2543 }
2544 
2545 void NetCvode::hoc_event(double tt, const char* stmt, Object* ppobj, int reinit, Object* pyact) {
2546  if (!ppobj && tt - nt_t < 0) {
2547  return;
2548  }
2549  {
2550  NrnThread* nt = nrn_threads;
2551  if (nrn_nthread > 1 && (!cvode_active_ || localstep())) {
2552  if (ppobj) {
2553  int i = PP2NT(ob2pntproc(ppobj))->id;
2554  p[i].interthread_send(tt, HocEvent::alloc(stmt, ppobj, reinit, pyact), nt + i);
2555  nrn_interthread_enqueue(nt + i);
2556  } else {
2557  HocEvent* he = HocEvent::alloc(stmt, nullptr, 0, pyact);
2558  // put on each queue. The first thread to execute the deliver
2559  // for he will set the nrn_allthread_handle
2560  // callback which will cause all threads to rejoin at the
2561  // end of the current fixed step or, for var step methods,
2562  // after all events at this time are delivered. It is up
2563  // to the callers of the multithread_job functions
2564  // to do the right thing.
2565  for (int i = 0; i < nrn_nthread; ++i) {
2566  p[i].interthread_send(tt, he, nt + i);
2567  }
2569  }
2570  } else {
2571  event(tt, HocEvent::alloc(stmt, ppobj, reinit, pyact), nt);
2572  }
2573  }
2574 }
2575 
2577  nrn_allthread_handle = nullptr;
2578  t = nt_t;
2579  while (!allthread_hocevents_->empty()) {
2580  HocEvent* he = (*allthread_hocevents_)[0];
2581  allthread_hocevents_->erase(allthread_hocevents_->begin());
2582  he->allthread_handle();
2583  }
2584 }
2585 
2586 void NetCvode::allthread_handle(double tt, HocEvent* he, NrnThread* nt) {
2587  // printf("allthread_handle tt=%g nt=%d nt_t=%g\n", tt, nt->id, nt->_t);
2588  nt->_stop_stepping = 1;
2589  if (is_local()) {
2590  int i, n = p[nt->id].nlcv_;
2591  Cvode* lcv = p[nt->id].lcv_;
2592  if (n)
2593  for (i = 0; i < n; ++i) {
2594  local_retreat(tt, lcv + i);
2595  if (!he->stmt()) {
2596  lcv[i].record_continuous();
2597  }
2598  }
2599  else {
2600  nt->_t = tt;
2601  }
2602  } else if (!he->stmt() && cvode_active_ && gcv_) {
2603  assert(MyMath::eq2(tt, gcv_->t_, NetCvode::eps(tt)));
2605  }
2606  if (nt->id == 0) {
2608  allthread_hocevents_->push_back(he);
2609  nt->_t = tt;
2610  }
2611  if (cvode_active_ && gcv_ && nrnmpi_numprocs > 1) {
2612  assert(nrn_nthread == 1);
2613  return;
2614  }
2615  // deliver any other events at this time (in particular, a possible NetParEvent)
2616  // to guarantee consistency of the NetParEvent for all threads
2617  // Otherwise, if some threads do a NetParEvent and others not, then
2618  // the interthread enqueue can put an earlier event onto the thread queue
2619  // than the last delivered event
2620  deliver_events(tt, nt);
2621 }
2622 
2623 #if 0
2624 struct PPArgs {
2625  int type;
2626  Point_process* pp;
2627  double* w;
2628  double f;
2629 };
2630 
2631 static PPArgs* ppargs;
2632 
2633 static void point_receive_job(NrnThread* nt) {
2634  PPArgs* p = ppargs + nt->id;
2635  (*pnt_receive[p->_type])(p->pp, p->w, p->f);
2636 }
2637 
2638 void NetCvode::point_receive(int type, Point_process* pp, double* w, double f) {
2639  // this is the master thread. need to execute the thread associated
2640  // with the pp.
2641  int id = PP2NT(pp)->id;
2642  if (id == 0) { // execute on this, the master thread
2643  (*pnt_receive[type])(pp, w, f);
2644  }else{
2645  // marshall the args
2646  PPArgs* p = ppargs + id;
2647  p->_type = type;
2648  p->pp = pp;
2649  p->w = w;
2650  p->f = f;
2651  nrn_onethread_job(id, point_receive_job);
2652  }
2653  // global queue with different threads putting things in
2654  // means no guarantee that something goes into the queue
2655  // to be delivered earlier than something last extracted.
2656  // so return to calling main thread only after the
2657  // worker thread is done. Too bad...
2658  // this needs to be worked on. Only thread id is executing.
2660 }
2661 #endif
2662 
2664  int i;
2696 
2697  // SelfEvents need to be "freed". Other kinds of DiscreteEvents may
2698  // already have gone out of existence so the tqe_ may contain many
2699  // invalid item data pointers
2701  allthread_hocevents_->clear();
2702  nrn_allthread_handle = nullptr;
2703  if (!MUTCONSTRUCTED) {
2704  MUTCONSTRUCT(1);
2705  }
2706  enqueueing_ = 0;
2707  for (i = 0; i < nrn_nthread; ++i) {
2708  NetCvodeThreadData& d = p[i];
2709  delete std::exchange(d.tqe_, new TQueue(p[i].tpool_));
2710  d.unreffed_event_cnt_ = 0;
2711  if (d.sepool_) {
2712  d.sepool_->free_all();
2713  }
2714  d.immediate_deliver_ = -1e100;
2715  d.ite_cnt_ = 0;
2716  if (nrn_use_selfqueue_) {
2717  if (!d.selfqueue_) {
2718  d.selfqueue_ = new SelfQueue(d.tpool_, 0);
2719  } else {
2720  d.selfqueue_->remove_all();
2721  }
2722  }
2723  d.tqe_->nshift_ = -1;
2724  d.tqe_->shift_bin(nt_t - 0.5 * nt_dt);
2725  }
2726  // I don't believe this is needed anymore since cvode not needed
2727  // til delivery.
2728  if (cvode_active_) { // in case there is a net_send from INITIAL cvode
2729  // then this needs to be done before INITIAL blocks are called
2730  init_global();
2731  }
2732 }
2733 
2734 // Frees the allocated memory for the SelfEvent pool and TQItemPool after cleaning them
2736  clear_events();
2737  for (int i = 0; i < nrn_nthread; ++i) {
2738  NetCvodeThreadData& d = p[i];
2739  delete std::exchange(d.sepool_, nullptr);
2740  delete std::exchange(d.selfqueue_, nullptr);
2741  delete std::exchange(d.tqe_, nullptr);
2742  if (d.tpool_) {
2743  d.tpool_->free_all();
2744  delete std::exchange(d.tpool_, nullptr);
2745  }
2746  }
2747 }
2748 
2750  int i, j;
2751  for (i = 0; i < nrn_nthread; ++i) {
2752  p[i].tqe_->nshift_ = -1;
2753  // first bin starts 1/2 time step early because per time step
2754  // binq delivery during simulation from deliver_net_events,
2755  // after delivering all events in the current bin, shifts to
2756  // nt->_t + 0.5*nt->_dt where nt->_t is a multiple of dt.
2757  p[i].tqe_->shift_bin(nt_t - 0.5 * nt_dt);
2758  }
2759  if (psl_) {
2760  for (PreSyn* ps: *psl_) {
2761  ps->init();
2762  ps->flag_ = false;
2763  NetConPList& dil = ps->dil_;
2764  ps->use_min_delay_ = 0;
2765 #if USE_MIN_DELAY
2766  // also decide what to do about use_min_delay_
2767  // the rule for now is to use it if all delays are
2768  // the same and there are more than 2
2769  {
2770  if (dil.size() > 2) {
2771  ps->use_min_delay_ = 1;
2772  ps->delay_ = dil[0]->delay_;
2773  }
2774  }
2775 #endif // USE_MIN_DELAY
2776 
2777  for (const auto& d: dil) {
2778  if (ps->use_min_delay_ && ps->delay_ != d->delay_) {
2779  ps->use_min_delay_ = false;
2780  }
2781  }
2782  }
2783  }
2784  // iterate over all NetCon in creation order to call
2785  // NETRECEIVE INITIAL blocks.
2786  static hoc_List* nclist = NULL;
2787  if (!nclist) {
2788  Symbol* sym = hoc_lookup("NetCon");
2789  nclist = sym->u.ctemplate->olist;
2790  }
2791  hoc_Item* q = nullptr;
2792  ITERATE(q, nclist) {
2793  Object* obj = OBJ(q);
2794  auto* d = static_cast<NetCon*>(obj->u.this_pointer);
2795  if (d->target_) {
2796  int type = d->target_->prop->_type; // somehow prop is non-deterministically-null here
2797  if (pnt_receive_init[type]) {
2798  (*pnt_receive_init[type])(d->target_, d->weight_, 0);
2799  } else {
2800  // not the first
2801  for (j = d->cnt_ - 1; j > 0; --j) {
2802  d->weight_[j] = 0.;
2803  }
2804  }
2805  }
2806  }
2807  if (gcv_) {
2808  for (int j = 0; j < nrn_nthread; ++j) {
2809  if (gcv_->ctd_[j].watch_list_) {
2811  }
2812  }
2813  } else {
2814  for (int j = 0; j < nrn_nthread; ++j) {
2815  NetCvodeThreadData& d = p[j];
2816  for (i = 0; i < d.nlcv_; ++i) {
2817  if (d.lcv_[i].ctd_[0].watch_list_) {
2818  d.lcv_[i].ctd_[0].watch_list_->RemoveAll();
2819  }
2820  }
2821  }
2822  }
2823 }
2824 
2826  double md = 1e9;
2827  for (const auto& d: dil_) {
2828  if (md > d->delay_) {
2829  md = d->delay_;
2830  }
2831  }
2832  return md;
2833 }
2834 
2835 void NetCvode::deliver_events(double til, NrnThread* nt) {
2836  // printf("deliver_events til %20.15g\n", til);
2837  p[nt->id].enqueue(this, nt);
2838  while (deliver_event(til, nt)) {
2839  ;
2840  }
2841 }
2842 
2843 static IvocVect* peqvec; // if not nullptr then the sorted times on the event queue.
2844 static void peq(const TQItem*, int);
2845 static void peq(const TQItem* q, int) {
2846  if (peqvec) {
2847  peqvec->push_back(q->t_);
2848  } else {
2849  DiscreteEvent* d = (DiscreteEvent*) q->data_;
2850  d->pr("", q->t_, net_cvode_instance);
2851  }
2852 }
2853 
2855  // dangerous since many events can go out of existence after
2856  // a simulation and before NetCvode::clear at the next initialization
2857  if (ifarg(1)) {
2858  peqvec = vector_arg(1);
2859  peqvec->resize(0);
2860  }
2861  p[0].tqe_->forall_callback(peq);
2862  peqvec = nullptr;
2863 }
2864 
2865 static int event_info_type_;
2868 static OcList* event_info_list_; // netcon or point_process
2869 
2870 static void event_info_callback(const TQItem*, int);
2871 static void event_info_callback(const TQItem* q, int) {
2872  DiscreteEvent* d = (DiscreteEvent*) q->data_;
2873  switch (d->type()) {
2874  case NetConType:
2875  if (event_info_type_ == NetConType) {
2876  auto* nc = static_cast<NetCon*>(d);
2878  event_info_list_->append(nc->obj_);
2879  }
2880  break;
2881  case SelfEventType:
2883  auto* se = static_cast<SelfEvent*>(d);
2885  event_info_flagvec_->push_back(se->flag_);
2886  event_info_list_->append(se->target_->ob);
2887  }
2888  break;
2889  case PreSynType:
2890  if (event_info_type_ == NetConType) {
2891  auto* ps = static_cast<PreSyn*>(d);
2892  for (const auto& nc: reverse(ps->dil_)) {
2893  double td = nc->delay_ - ps->delay_;
2894  event_info_tvec_->push_back(q->t_ + td);
2895  event_info_list_->append(nc->obj_);
2896  }
2897  }
2898  break;
2899  }
2900 }
2901 
2903  // dangerous since many events can go out of existence after
2904  // a simulation and before NetCvode::clear at the next initialization
2905  int i = 1;
2906  event_info_type_ = (int) chkarg(i++, 2, 3);
2912  }
2913  Object* o = *hoc_objgetarg(i++);
2914  check_obj_type(o, "List");
2918 }
2919 
2920 void DiscreteEvent::send(double tt, NetCvode* ns, NrnThread* nt) {
2922  ns->event(tt, this, nt);
2923 }
2924 
2925 void DiscreteEvent::deliver(double tt, NetCvode* ns, NrnThread* nt) {
2927 }
2928 
2930  return nrn_threads;
2931 }
2932 
2935 }
2936 
2937 void DiscreteEvent::pr(const char* s, double tt, NetCvode* ns) {
2938  Printf("%s DiscreteEvent %.15g\n", s, tt);
2939 }
2940 
2941 void NetCon::send(double tt, NetCvode* ns, NrnThread* nt) {
2942  if (active_ && target_) {
2943  assert(PP2NT(target_) == nt);
2945  ns->bin_event(tt, this, PP2NT(target_));
2946  } else {
2948  }
2949 }
2950 
2951 void NetCon::deliver(double tt, NetCvode* ns, NrnThread* nt) {
2952  assert(target_);
2953  int type = target_->prop->_type;
2954  std::string ss("net-receive-");
2955  ss += memb_func[type].sym->name;
2956  nrn::Instrumentor::phase p_get_pnt_receive(ss.c_str());
2957  if (PP2NT(target_) != nt) {
2958  Printf("NetCon::deliver nt=%d target=%d\n", nt->id, PP2NT(target_)->id);
2959  }
2960  assert(PP2NT(target_) == nt);
2961  Cvode* cv = (Cvode*) target_->nvi_;
2963  auto& datum = target_->prop->dparam[nrn_artcell_qindex_[type]];
2964  TQItem* q;
2965  while ((q = datum.get<TQItem*>()) && q->t_ < tt) {
2966  double t1 = q->t_;
2967  auto* const se = static_cast<SelfEvent*>(ns->p[nt->id].selfqueue_->remove(q));
2968  // printf("%d NetCon::deliver %g , earlier selfevent at %g\n", nrnmpi_myid, tt, q->t_);
2969  se->deliver(t1, ns, nt);
2970  }
2971  }
2972  if (cvode_active_ && cv) {
2973  ns->local_retreat(tt, cv);
2974  cv->set_init_flag();
2975  } else {
2976  // no interpolation necessary for local step method and ARTIFICIAL_CELL
2977  nt->_t = tt;
2978  }
2979 
2980  // printf("NetCon::deliver t=%g tt=%g %s\n", t, tt, hoc_object_name(target_->ob));
2983  if (errno) {
2984  if (nrn_errno_check(type)) {
2985  hoc_warning("errno set during NetCon deliver to NET_RECEIVE", (char*) 0);
2986  }
2987  }
2988 }
2989 
2991  return PP2NT(target_);
2992 }
2993 
2994 void NetCon::pgvts_deliver(double tt, NetCvode* ns) {
2995  assert(target_);
2996  int type = target_->prop->_type;
2999  if (errno) {
3000  if (nrn_errno_check(type)) {
3001  hoc_warning("errno set during NetCon deliver to NET_RECEIVE", (char*) 0);
3002  }
3003  }
3004 }
3005 
3006 void NetCon::pr(const char* s, double tt, NetCvode* ns) {
3007  Printf("%s %s", s, hoc_object_name(obj_));
3008  if (src_) {
3009  Printf(" src=%s", src_->osrc_ ? hoc_object_name(src_->osrc_) : secname(src_->ssrc_));
3010  } else {
3011  Printf(" src=nullptr");
3012  }
3013  Printf(" target=%s %.15g\n", (target_ ? hoc_object_name(target_->ob) : "nullptr"), tt);
3014 }
3015 
3016 void PreSyn::send(double tt, NetCvode* ns, NrnThread* nt) {
3017  int i;
3018  record(tt);
3019 #ifndef USENCS
3020  if (use_min_delay_) {
3022  for (i = 0; i < nrn_nthread; ++i) {
3023  if (nt->id == i) {
3024  ns->bin_event(tt + delay_, this, nt);
3025  } else {
3026  ns->p[i].interthread_send(tt + delay_, this, nrn_threads + i);
3027  }
3028  }
3029  } else {
3031  for (const auto& d: dil_) {
3032  if (d->active_ && d->target_) {
3033  NrnThread* n = PP2NT(d->target_);
3034  if (nt == n) {
3035  ns->bin_event(tt + d->delay_, d, n);
3036  } else {
3037  ns->p[n->id].interthread_send(tt + d->delay_, d, n);
3038  }
3039  }
3040  }
3041  }
3042 #endif // ndef USENCS
3043 #if USENCS || NRNMPI
3044  if (output_index_ >= 0) {
3045 #if NRNMPI
3046  if (use_multisend_) {
3047  nrn_multisend_send(this, tt);
3048  } else {
3049  if (nrn_use_localgid_) {
3050  nrn_outputevent(localgid_, tt);
3051  } else
3052 #endif // NRNMPI
3054 #if NRNMPI
3055  }
3056 #endif // NRNMPI
3057 #if NRN_MUSIC
3058  if (music_port_) {
3059  nrnmusic_injectlist(music_port_, tt);
3060  }
3061 #endif // NRN_MUSIC
3062  }
3063 #endif // USENCS || NRNMPI
3064 }
3065 
3066 void PreSyn::deliver(double tt, NetCvode* ns, NrnThread* nt) {
3067  if (qthresh_) {
3068  // the thread is the one that owns the PreSyn
3069  assert(nt == nt_);
3070  qthresh_ = nullptr;
3071  // printf("PreSyn::deliver %s condition event tt=%20.15g\n", ssrc_?secname(ssrc_):"", tt);
3073  // If local variable time step and send is recorded,
3074  // tt will be recorded correctly. But if the recording results
3075  // in a callback, the user might inquire about other variables
3076  // as well, so perhaps we need to interpolate first.
3077  if (!ns->gcv_ && stmt_) {
3078  int i = nt->id;
3079  TQItem* q = ns->p[i].tq_->least();
3080  Cvode* cv = (Cvode*) q->data_;
3081  if (tt < cv->t_) {
3082  if (int err = cv->handle_step(nrn_ensure_model_data_are_sorted(), ns, tt);
3083  err != NVI_SUCCESS) {
3084  Printf("warning: cv->handle_step failed with error %d", err);
3085  }
3086  ns->p[i].tq_->move_least(cv->t_);
3087  }
3088  }
3089  send(tt, ns, nt);
3090  return;
3091  }
3092  // the thread is the one that owns the targets
3094  for (const auto& d: dil_) {
3095  if (d->active_ && d->target_ && PP2NT(d->target_) == nt) {
3096  double dtt = d->delay_ - delay_;
3097  if (dtt == 0.) {
3100  d->deliver(tt, ns, nt);
3101  } else if (dtt < 0.) {
3102  hoc_execerror("internal error: Source delay is > NetCon delay", 0);
3103  } else {
3105  ns->event(tt + dtt, d, nt);
3106  }
3107  }
3108  }
3109 }
3110 
3111 // used by bbsavestate since during restore, some NetCon spikes may
3112 // have already been delivered while others need to be delivered in
3113 // the future. Not implemented fof qthresh_ case. No statistics.
3114 void PreSyn::fanout(double td, NetCvode* ns, NrnThread* nt) {
3115  for (const auto& d: dil_) {
3116  if (d->active_ && d->target_ && PP2NT(d->target_) == nt) {
3117  double dtt = d->delay_ - delay_;
3118  ns->bin_event(td + dtt, d, nt);
3119  }
3120  }
3121 }
3122 
3124  return nt_;
3125 }
3126 
3127 void PreSyn::pgvts_deliver(double tt, NetCvode* ns) {
3128  NrnThread* nt = 0;
3129  assert(0);
3130  if (qthresh_) {
3131  qthresh_ = nullptr;
3132  // printf("PreSyn::deliver %s condition event tt=%20.15g\n", ssrc_?secname(ssrc_):"", tt);
3134  send(tt, ns, nt);
3135  return;
3136  }
3138  for (const auto& d: dil_) {
3139  if (d->active_ && d->target_) {
3140  double dtt = d->delay_ - delay_;
3141  if (dtt < 0.) {
3142  hoc_execerror("internal error: Source delay is > NetCon delay", 0);
3143  } else {
3145  ns->event(tt + dtt, d, nt);
3146  }
3147  }
3148  }
3149 }
3150 
3151 void PreSyn::pr(const char* s, double tt, NetCvode* ns) {
3152  Printf("%s", s);
3153  Printf(" PreSyn src=%s", osrc_ ? hoc_object_name(osrc_) : secname(ssrc_));
3154  Printf(" %.15g\n", tt);
3155 }
3156 
3159 
3161  // pr("savestate_save", 0, net_cvode_instance);
3162  SelfEvent* se = new SelfEvent();
3163  se->flag_ = flag_;
3164  se->target_ = target_;
3165  se->weight_ = weight_;
3166  se->movable_ = movable_;
3167  return se;
3168 }
3169 
3171  // pr("savestate_restore", tt, nc);
3173 }
3174 
3176  SelfEvent* se = new SelfEvent();
3177  char buf[300];
3178  char ppname[200];
3179  int ppindex, ncindex, moff, pptype;
3180  double flag;
3181  nrn_assert(fgets(buf, 300, f));
3182  nrn_assert(
3183  sscanf(buf, "%s %d %d %d %d %lf\n", ppname, &ppindex, &pptype, &ncindex, &moff, &flag) ==
3184  6);
3185  se->target_ = SelfEvent::index2pp(pptype, ppindex);
3186  se->weight_ = nullptr;
3187  if (ncindex >= 0) {
3188  NetCon* nc = NetConSave::index2netcon(ncindex);
3189  se->weight_ = nc->weight_;
3190  }
3191  se->flag_ = flag;
3192  se->movable_ = (moff >= 0) ? (se->target_->prop->dparam + moff) : nullptr;
3193  return se;
3194 }
3195 
// Lazily-built table mapping (type + n_memb_func * object index) to
// Point_process*; populated and used by SelfEvent::index2pp for SaveState.
std::unique_ptr<SelfEventPPTable> SelfEvent::sepp_;
3197 
3199  // code the type and object index together
3200  if (!sepp_) {
3201  int i;
3202  sepp_.reset(new SelfEventPPTable());
3203  sepp_->reserve(211);
3204  // should only be the ones that call net_send
3205  for (i = 0; i < n_memb_func; ++i)
3206  if (pnt_receive[i]) {
3208  hoc_Item* q;
3209  ITERATE(q, hl) {
3210  Object* o = OBJ(q);
3211  (*sepp_)[i + n_memb_func * o->index] = ob2pntproc(o);
3212  }
3213  }
3214  }
3215  const auto& iter = sepp_->find(type + n_memb_func * oindex);
3216  nrn_assert(iter != sepp_->end());
3217  return iter->second;
3218 }
3219 
3221  sepp_.reset();
3222 }
3223 
3225  fprintf(f, "%d\n", SelfEventType);
3226  int const moff = movable_ ? (movable_ - target_->prop->dparam) : -1;
3227  int ncindex = -1;
3228  // find the NetCon index for weight_
3229  if (weight_) {
3231  assert(nc);
3232  ncindex = nc->obj_->index;
3233  }
3234 
3235  fprintf(f,
3236  "%s %d %d %d %d %g\n",
3237  target_->ob->ctemplate->sym->name,
3238  target_->ob->index,
3239  target_->prop->_type,
3240  ncindex,
3241  moff,
3242  flag_);
3243 }
3244 
// Deliver this SelfEvent at time tt in thread nt. For artificial cells using
// the self-queue optimization, first flush any earlier pending events on that
// queue; then either retreat/flag-reinit the target's local Cvode instance
// (variable step) or simply set the thread time, and finally run NET_RECEIVE.
void SelfEvent::deliver(double tt, NetCvode* ns, NrnThread* nt) {
    Cvode* cv = (Cvode*) target_->nvi_;
    int type = target_->prop->_type;
    assert(nt == PP2NT(target_));
    if (nrn_use_selfqueue_ && nrn_is_artificial_[type]) { // handle possible earlier flag=1 self
        // event
        if (flag_ == 1.0) {
            // this event is itself the movable flag=1 event; clear the slot
            *movable_ = nullptr;
        }
        TQItem* q;
        // deliver, in time order, every self-queue event at or before tt
        while ((q = movable_->get<TQItem*>()) != 0 && q->t_ <= tt) {
            // printf("handle earlier %g selfqueue event from within %g SelfEvent::deliver\n",
            // q->t_, tt);
            double t1 = q->t_;
            SelfEvent* se = (SelfEvent*) ns->p[nt->id].selfqueue_->remove(q);
            PP2t(target_) = t1;
            se->call_net_receive(ns);
        }
    }
    if (cvode_active_ && cv) {
        // local variable time step: move the target's integrator back to tt
        ns->local_retreat(tt, cv);
        cv->set_init_flag();
    } else {
        PP2t(target_) = tt;
    }
    // printf("SelfEvent::deliver t=%g tt=%g %s\n", PP2t(target), tt, hoc_object_name(target_->ob));
    call_net_receive(ns);
}
3273 
3275  return PP2NT(target_);
3276 }
3277 
// Parallel-global-variable-time-step delivery: just execute NET_RECEIVE; any
// integrator bookkeeping was already performed by NetCvode::pgvts_cvode.
void SelfEvent::pgvts_deliver(double tt, NetCvode* ns) {
    call_net_receive(ns);
}
3284  if (errno) {
3285  if (nrn_errno_check(target_->prop->_type)) {
3286  hoc_warning("errno set during SelfEvent deliver to NET_RECEIVE", (char*) 0);
3287  }
3288  }
3289  NetCvodeThreadData& nctd = ns->p[PP2NT(target_)->id];
3290  --nctd.unreffed_event_cnt_;
3291  nctd.sepool_->hpfree(this);
3292 }
3293 
3294 void SelfEvent::pr(const char* s, double tt, NetCvode* ns) {
3295  Printf("%s", s);
3296  Printf(" SelfEvent target=%s %.15g flag=%g\n", hoc_object_name(target_->ob), tt, flag_);
3297 }
3298 
3300  plr_->frecord_init(q);
3301 }
3302 
3303 void PlayRecordEvent::deliver(double tt, NetCvode* ns, NrnThread* nt) {
3304  if (plr_->cvode_ && plr_->cvode_->nth_) {
3305  assert(nt == plr_->cvode_->nth_);
3306  ns->local_retreat(tt, plr_->cvode_);
3307  }
3309  plr_->deliver(tt, ns);
3310 }
3311 
3313  return nrn_threads + plr_->ith_;
3314 }
3315 
// Debug print: label and time, then delegate detail printing to the
// PlayRecord target itself.
void PlayRecordEvent::pr(const char* s, double tt, NetCvode* ns) {
    Printf("%s PlayRecordEvent %.15g ", s, tt);
    plr_->pr();
}
3320 
3323 
3325  stmt_ = nullptr;
3326  ppobj_ = nullptr;
3327  reinit_ = 0;
3328 }
3329 
3331  if (stmt_) {
3332  delete stmt_;
3333  }
3334 }
3335 
// Debug print: label, the statement this event will execute (empty when the
// event carries no statement), and the time.
void HocEvent::pr(const char* s, double tt, NetCvode* ns) {
    Printf("%s HocEvent %s %.15g\n", s, stmt_ ? stmt_->name() : "", tt);
}
3339 
// Pool-allocate a HocEvent carrying either a Python callable (pyact, takes
// precedence) or a HOC statement string (stmt). ppobj and reinit control
// threaded / variable-step delivery in HocEvent::deliver. The pool itself is
// created lazily; the check-lock-recheck sequence below keeps creation safe
// when several threads race to allocate the first event.
HocEvent* HocEvent::alloc(const char* stmt, Object* ppobj, int reinit, Object* pyact) {
    if (!hepool_) {
        nrn_hoc_lock();
        if (!hepool_) {
            hepool_ = new HocEventPool(100, 1);
        }
        nrn_hoc_unlock();
    }
    HocEvent* he = hepool_->alloc();
    he->stmt_ = nullptr;
    he->ppobj_ = ppobj;
    he->reinit_ = reinit;
    if (pyact) {
        // Python callable takes precedence over a HOC statement string
        he->stmt_ = new HocCommand(pyact);
    } else if (stmt) {
        he->stmt_ = new HocCommand(stmt);
    }
    return he;
}
3359 
3361  if (stmt_) {
3362  delete stmt_;
3363  stmt_ = nullptr;
3364  }
3365  hepool_->hpfree(this);
3366 }
3367 
3369  if (stmt_) {
3370  delete stmt_;
3371  stmt_ = nullptr;
3372  }
3373 }
3374 
// Execute this event's statement at time tt. Without a POINT_PROCESS context
// (ppobj_ == nullptr) the event must be handled by all threads, so it is
// forwarded to NetCvode::allthread_handle. Otherwise the relevant integrator
// is retreated / flagged for reinit as needed, global t is set, and the
// statement executes — under the HOC interpreter lock when multiple threads
// or the local variable step method are in use. The event is returned to the
// pool on every path through hefree().
void HocEvent::deliver(double tt, NetCvode* nc, NrnThread* nt) {
    extern double t;
    if (!ppobj_) {
        nc->allthread_handle(tt, this, nt);
        return;
    }
    if (stmt_) {
        if (nrn_nthread > 1 || nc->is_local()) {
            // NOTE(review): this inner !ppobj_ check is unreachable — the
            // !ppobj_ case already returned above.
            if (!ppobj_) {
                hoc_execerror(
                    "multiple threads and/or local variable time step method require an "
                    "appropriate POINT_PROCESS arg to CVode.event to safely execute:",
                    stmt_->name());
            }
            Cvode* cv = (Cvode*) ob2pntproc(ppobj_)->nvi_;
            if (cv && cvode_active_) {
                nc->local_retreat(tt, cv);
                if (reinit_) {
                    cv->set_init_flag();
                }
                nt->_t = cv->t_;
            }
            nrn_hoc_lock(); // released after the statement executes below
            t = tt;
        } else if (cvode_active_ && reinit_) {
            // global variable step: retreat the single instance to tt
            nc->retreat(tt, nc->gcv_);
            assert(MyMath::eq(tt, nc->gcv_->t_, NetCvode::eps(tt)));
            assert(tt == nt->_t);
            nc->gcv_->set_init_flag();
            t = tt;
        } else {
            t = nt_t = tt;
        }
        stmt_->execute(false);
        if (nrn_nthread > 1 || nc->is_local()) {
            nrn_hoc_unlock();
        }
    }
    hefree(); // return this event to the pool
}
3415 
3417  if (stmt_) {
3418  stmt_->execute(false);
3419  } else {
3420  tstopset;
3421  }
3422  hefree();
3423 }
3424 
// Parallel-global-time-step delivery: same as normal delivery, on thread 0.
void HocEvent::pgvts_deliver(double tt, NetCvode* nc) {
    deliver(tt, nc, nrn_threads);
}
3428 
3430  if (hepool_) {
3431  hepool_->free_all();
3432  }
3433 }
3434 
3436  // pr("HocEvent::savestate_save", 0, net_cvode_instance);
3437  HocEvent* he = new HocEvent();
3438  if (stmt_) {
3439  if (stmt_->pyobject()) {
3440  he->stmt_ = new HocCommand(stmt_->pyobject());
3441  } else {
3442  he->stmt_ = new HocCommand(stmt_->name(), stmt_->object());
3443  }
3444  he->reinit_ = reinit_;
3445  he->ppobj_ = ppobj_;
3446  }
3447  return he;
3448 }
3449 
3451  // pr("HocEvent::savestate_restore", tt, nc);
3452  HocEvent* he = alloc(nullptr, nullptr, 0);
3453  NrnThread* nt = nrn_threads;
3454  if (stmt_) {
3455  if (stmt_->pyobject()) {
3456  he->stmt_ = new HocCommand(stmt_->pyobject());
3457  } else {
3458  he->stmt_ = new HocCommand(stmt_->name(), stmt_->object());
3459  }
3460  he->reinit_ = reinit_;
3461  he->ppobj_ = ppobj_;
3462  if (ppobj_) {
3463  nt = (NrnThread*) ob2pntproc(ppobj_)->_vnt;
3464  }
3465  }
3466  nc->event(tt, he, nt);
3467 }
3468 
3470  HocEvent* he = new HocEvent();
3471  int have_stmt, have_obj, index;
3472  char stmt[256], objname[100], buf[200];
3473  Object* obj = nullptr;
3474  // nrn_assert(fscanf(f, "%d %d\n", &have_stmt, &have_obj) == 2);
3475  nrn_assert(fgets(buf, 200, f));
3476  nrn_assert(sscanf(buf, "%d %d\n", &have_stmt, &have_obj) == 2);
3477  if (have_stmt) {
3478  nrn_assert(fgets(stmt, 256, f));
3479  stmt[strlen(stmt) - 1] = '\0';
3480  if (have_obj) {
3481  // nrn_assert(fscanf(f, "%s %d\n", objname, &index) == 1);
3482  nrn_assert(fgets(buf, 200, f));
3483  nrn_assert(sscanf(buf, "%s %d\n", objname, &index) == 1);
3484  obj = hoc_name2obj(objname, index);
3485  }
3486  he->stmt_ = new HocCommand(stmt, obj);
3487  }
3488  return he;
3489 }
3490 
3492  fprintf(f, "%d\n", HocEventType);
3493  fprintf(f, "%d %d\n", stmt_ ? 1 : 0, (stmt_ && stmt_->object()) ? 1 : 0);
3494  if (stmt_) {
3495  fprintf(f, "%s\n", stmt_->name());
3496  if (stmt_->object()) {
3497  fprintf(f, "%s %d\n", stmt_->object()->ctemplate->sym->name, stmt_->object()->index);
3498  }
3499  }
3500 }
3501 
// Move a single (local variable time step) Cvode instance backward to time t
// so an event at t can be delivered. When the owning thread has an event
// queue, the instance is interpolated to t and its queue item repositioned;
// otherwise t must already coincide with the instance's time or lie inside
// its tstop interval.
void NetCvode::local_retreat(double t, Cvode* cv) {
    if (!cvode_active_) {
        return;
    }
    TQueue* tq = p[cv->nth_ ? cv->nth_->id : 0].tq_;
    if (tq) {
#if PRINT_EVENT
        if (print_event_) {
            Printf("microstep local retreat from %g (cvode_%p is at %g) for event onset=%g\n",
                   cv->tqitem_->t_,
                   fmt::ptr(cv),
                   cv->t_,
                   t);
        }
#endif
        cv->interpolate(t);
        tq->move(cv->tqitem_, t);
#if PRINT_EVENT
        if (print_event_ > 1) {
            Printf("after target solve time for %p is %g , dt=%g\n",
                   fmt::ptr(cv),
                   cv->time(),
                   nt_dt);
        }
#endif
    } else {
        assert(t == cv->t_ || (cv->tstop_begin_ <= t && t <= cv->tstop_end_));
    }
}
3531 
// Like local_retreat, but always interpolates the instance back to t and only
// repositions the queue item when the thread actually has an event queue.
void NetCvode::retreat(double t, Cvode* cv) {
    if (!cvode_active_) {
        return;
    }
    TQueue* tq = p[cv->nth_ ? cv->nth_->id : 0].tq_;
#if PRINT_EVENT
    if (print_event_) {
        Printf("microstep retreat from %g (cvode_%p is at %g) for event onset=%g\n",
               tq ? cv->tqitem_->t_ : cv->t_,
               fmt::ptr(cv),
               cv->t_,
               t);
    }
#endif
    cv->interpolate(t);
    if (tq) {
        tq->move(cv->tqitem_, t);
    }
#if PRINT_EVENT
    if (print_event_ > 1) {
        Printf("after target solve time for %p is %g , dt=%g\n", fmt::ptr(cv), cv->time(), dt);
    }
#endif
}
3556 
3557 // parallel global variable time-step
3558 int NetCvode::pgvts(double tstop) {
3559  int err = NVI_SUCCESS;
3560  double tt = nt_t;
3561  while (tt < tstop && !stoprun && err == NVI_SUCCESS) {
3562  err = pgvts_event(tt);
3563  }
3564  return err;
3565 }
3566 
3567 // parallel global variable time-step event handling
3568 // return is what cvode call to make and the value of tt to make it at
3569 // in response to the next global event. We try to do only one
3570 // allreduce for a given event. Since all processes have to stay together
3571 // with respect to cvode, we have to factor out those calls from the
3572 // classical DiscreteEvent::deliver methods. I.e. deliver can only
3573 // deliver an event, it cannot interpolate, etc.
3574 // Assume events are sparse and handle them one at a time.
// parallel global variable time-step event handling
// return is what cvode call to make and the value of tt to make it at
// in response to the next global event. We try to do only one
// allreduce for a given event. Since all processes have to stay together
// with respect to cvode, we have to factor out those calls from the
// classical DiscreteEvent::deliver methods. I.e. deliver can only
// deliver an event, it cannot interpolate, etc.
// Assume events are sparse and handle them one at a time.
int NetCvode::pgvts_event(double& tt) {
    int op, err, init;
    DiscreteEvent* de;
    assert(gcv_);
    // all ranks agree on the earliest action (and its cvode op) via allreduce
    de = pgvts_least(tt, op, init);
    err = pgvts_cvode(tt, op);
    if (init) {
        gcv_->set_init_flag();
    }
    if (de) { // handle the event and others just like it
        de->pgvts_deliver(tt, this);
        // drain subsequent events at exactly tt that need the same cvode op
        while (p[0].tqe_->least_t() == tt) {
            TQItem* q = p[0].tqe_->least();
            de = (DiscreteEvent*) q->data_;
            int i1;
            if (de->pgvts_op(i1) == op && i1 == init) {
                p[0].tqe_->remove(q);
                de->pgvts_deliver(tt, this);
            } else {
                break;
            }
        }
    }
    if (nrn_allthread_handle) {
        (*nrn_allthread_handle)();
    }
    return err;
}
3603 
// Decide, collectively across all MPI ranks, the next action for the parallel
// global time step. Outputs: tt (time of the action), op (cvode operation —
// see pgvts_cvode: 1 advance, 2 interpolate, 3 initialize; 4 marks a
// NetParEvent), init (whether set_init_flag is needed). Returns the local
// event to deliver, or nullptr when this rank's candidate lost the reduction.
// The local queue item is removed only when this rank's event will be handled.
DiscreteEvent* NetCvode::pgvts_least(double& tt, int& op, int& init) {
    DiscreteEvent* de = nullptr;
#if NRNMPI
    TQItem* q = nullptr;
    if (gcv_->initialize_ && p[0].tqe_->least_t() > gcv_->t_) {
        // pending initialization precedes any queued event
        tt = gcv_->t_;
        op = 3;
        init = 0;
    } else if (gcv_->tn_ < p[0].tqe_->least_t()) {
        // integrator can advance before the next event is due
        tt = gcv_->tn_;
        op = 1;
        init = 0;
    } else {
        // If there are several events at the same time we need the
        // highest priority (in particular, NetParEvent last).
        // This is due to the fact that NetParEvent.deliver
        // handles all the events at that time so there better not
        // be any after it on the queue.
        q = p[0].tqe_->least();
        if (q) {
            de = (DiscreteEvent*) q->data_;
            tt = q->t_;
            op = de->pgvts_op(init);
            if (op == 4) { // is there another event at the same time?
                TQItem* q2 = p[0].tqe_->second_least(tt);
                if (q2) {
                    q = q2;
                    de = (DiscreteEvent*) q2->data_;
                    op = de->pgvts_op(init);
                    assert(op != 4);
                    // printf("%d Type %d event after NetParEvent with deliver %g and t=%g\n",
                    // nrnmpi_myid, de->type(), tt, gcv_->t_);
                }
            }
        } else {
            // empty queue: propose advancing far into the future
            tt = 1e20;
            op = 1;
            init = 0;
        }
    }
    // remember the local proposal, then reduce across ranks
    double ts = tt;
    int ops = op;
    if (nrnmpi_pgvts_least(&tt, &op, &init)) {
        if (q) {
            p[0].tqe_->remove(q);
        }
    } else if (op == 4) { // NetParEvent need to be done all together
        p[0].tqe_->remove(q);
    } else if (ts == tt && q && ops == op) { // safe to do this event as well
        p[0].tqe_->remove(q);
    } else {
        // this rank's candidate lost; keep it queued for a later round
        de = nullptr;
    }
#endif
    return de;
}
3660 
3661 int NetCvode::pgvts_cvode(double tt, int op) {
3662  int err = NVI_SUCCESS;
3663  // this is the only place where we can enter cvode
3664  switch (op) {
3665  case 1: // advance
3666  if (condition_order() == 1) {
3667  gcv_->check_deliver();
3668  }
3671  if (condition_order() == 2) {
3673  }
3674  break;
3675  case 2: // interpolate
3676  err = gcv_->interpolate(tt);
3677  break;
3678  case 3: // initialize
3679  err = gcv_->init(tt);
3680  initialized_ = true;
3681  if (condition_order() == 2) {
3683  }
3684  break;
3685  }
3686  return err;
3687 }
3688 
3690 #if NRNMPI
3691  if (gcv_) {
3692  return gcv_->use_partrans_;
3693  } else {
3694  return 0;
3695  }
3696 #endif
3697  return 0;
3698 }
3699 
3700 void ncs2nrn_integrate(double tstop) {
3701  double ts;
3702  nrn_use_busywait(1); // just a possibility
3703  auto const cache_token = nrn_ensure_model_data_are_sorted();
3704  if (cvode_active_) {
3705 #if NRNMPI
3707  net_cvode_instance->pgvts(tstop);
3708  t = nt_t;
3709  dt = nt_dt;
3710  } else
3711 #endif
3712  {
3713  net_cvode_instance->solve(tstop);
3714  t = nt_t;
3715  dt = nt_dt;
3716  }
3717  } else {
3718 #if 1
3719  int n = (int) ((tstop - nt_t) / dt + 1e-9);
3720  if (n > 3 && !nrnthread_v_transfer_) {
3721  nrn_fixed_step_group(cache_token, n);
3722  } else
3723 #endif
3724  {
3725 #if NRNMPI && !defined(USENCS)
3726  ts = tstop - dt;
3727  assert(nt_t <= tstop);
3728  // It may very well be the case that we do not advance at all
3729  while (nt_t <= ts) {
3730 #else
3731  ts = tstop - .5 * dt;
3732  while (nt_t < ts) {
3733 #endif
3734  nrn_fixed_step(cache_token);
3735  if (stoprun) {
3736  break;
3737  }
3738  }
3739  }
3740  }
3741  // handle all the pending flag=1 self events
3742  for (int i = 0; i < nrn_nthread; ++i) {
3743  assert(nrn_threads[i]._t == nt_t);
3744  }
3746  nrn_use_busywait(0); // certainly not
3747 }
3748 
3750  return p[nt->id].tqe_;
3751 }
3752 
3754 static void* pending_selfqueue(NrnThread* nt) {
3756  return 0;
3757 }
3758 
// Deliver every event on thread nt's self-queue whose time is <= tt, leaving
// the thread's clock unchanged on return. immediate_deliver_ marks the window
// during which such events may be delivered directly instead of queued.
void nrn_pending_selfqueue(double tt, NrnThread* nt) {
    NetCvodeThreadData& nctd = net_cvode_instance->p[nt->id];
    double ts = nt->_t; // save; se->deliver below sets the thread time
    // net_cvode_instance->deliver_events(nctd.immediate_deliver_, nt);
    SelfQueue* sq = nctd.selfqueue_;
    TQItem *q1, *q2;
    nctd.immediate_deliver_ = tt;
    for (q1 = sq->first(); q1; q1 = q2) {
        if (q1->t_ <= tt) {
            SelfEvent* se = (SelfEvent*) q1->data_;
            // printf("ncs2nrn_integrate %g SelfEvent for %s at %g\n", tstop,
            // hoc_object_name(se->target_->ob), q1->t_);
            se->deliver(q1->t_, net_cvode_instance, nt);
            // could it add another self-event?, check before removal
            q2 = sq->next(q1);
            sq->remove(q1);
        } else {
            q2 = sq->next(q1);
        }
    }
    assert(nctd.tqe_->least_t() >= tt);
    nt->_t = ts; // restore the thread time saved on entry
    nctd.immediate_deliver_ = -1e100;
}
3783 
3784 // only the main thread can calls this
3785 static void all_pending_selfqueue(double tt) {
3786  if (nrn_use_selfqueue_) {
3788  // for (int i=0; i < nrn_nthread; ++i) { assert(nrn_threads[i]._t == nt_t);}
3791  }
3792 }
3793 
3794 #if USENCS
3795 
// Queue delivery of registered NCS input NetCon i at time tdeliver on the
// main thread.
void ncs2nrn_inputevent(int i, double tdeliver) {
    NrnThread* nt = nrn_threads;
    net_cvode_instance->event(tdeliver, ncs2nrn_input_->item(i), nt);
}
3800 
3801 // hoc tells us which are the input NetCons and which are the
3802 // output NetCons
3803 
// Register NCS NetCons from hoc. arg1: List of input NetCons — replaces any
// previous registration, keeping hoc reference counts balanced. arg2: List of
// output NetCons, whose source PreSyn gets its output_index_ set to the list
// position.
void nrn2ncs_netcons() {
    int i;
    Object* o;
    NetCon* nc;
    o = *hoc_objgetarg(1);
    check_obj_type(o, "List");
    OcList* list = (OcList*) (o->u.this_pointer);
    if (ncs2nrn_input_) {
        // drop references to the previously registered input NetCons
        for (i = 0; i < ncs2nrn_input_->count(); ++i) {
            hoc_obj_unref(ncs2nrn_input_->item(i)->obj_);
        }
        ncs2nrn_input_->remove_all();
    } else {
        ncs2nrn_input_ = new NetConPList(list->count());
    }
    for (i = 0; i < list->count(); ++i) {
        hoc_obj_ref(list->object(i)); // hold a reference while registered
        nc = (NetCon*) (list->object(i)->u.this_pointer);
        ncs2nrn_input_->append(nc);
    }

    // output NetCons: record each one's position in its source PreSyn
    o = *hoc_objgetarg(2);
    check_obj_type(o, "List");
    list = (OcList*) (o->u.this_pointer);
    for (i = 0; i < list->count(); ++i) {
        nc = (NetCon*) (list->object(i)->u.this_pointer);
        assert(nc->src_);
        nc->src_->output_index_ = i;
    }
}
3834 
3835 #endif // USENCS
3836 
3838  int id, j, ii = 0;
3839  if (gcv_) {
3840  gcv_->statistics();
3841  } else {
3842  lvardtloop(id, j) {
3843  if (i < 0 || ii++ == i) {
3844  p[id].lcv_[j].statistics();
3845  }
3846  }
3847  }
3848  Printf("NetCon active=%lu (not sent)=%lu delivered=%lu\n",
3852  Printf(
3853  "Condition O2 thresh detect=%lu via init=%lu effective=%lu abandoned=%lu (unnecesarily=%lu "
3854  "init+=%lu init-=%lu above=%lu below=%lu)\n",
3864  Printf("PreSyn send: mindelay=%lu direct=%lu\n",
3867  Printf("PreSyn deliver: O2 thresh=%lu NetCon=%lu (send=%lu deliver=%lu)\n",
3872  Printf("SelfEvent send=%lu move=%lu deliver=%lu\n",
3876  Printf("Watch send=%lu deliver=%lu\n",
3879  Printf("PlayRecord send=%lu deliver=%lu\n",
3882  Printf("HocEvent send=%lu deliver=%lu\n",
3885  Printf("SingleEvent deliver=%lu move=%lu\n",
3888  Printf("DiscreteEvent send=%lu deliver=%lu\n",
3891  Printf("%lu total events delivered net_event=%lu\n", deliver_cnt_, net_event_cnt_);
3892  Printf("Discrete event TQueue\n");
3893  p[0].tqe_->statistics();
3894  if (p[0].tq_) {
3895  Printf("Variable step integrator TQueue\n");
3896  p[0].tq_->statistics();
3897  }
3898 }
3899 
3901  Vect* v = vector_arg(1);
3902  v->resize(11);
3903  double* d = vector_vec(v);
3904  int i, j, n = 0;
3905  if (gcv_) {
3906  n += gcv_->neq_;
3907  } else {
3908  lvardtloop(i, j) {
3909  n += p[i].lcv_[j].neq_;
3910  }
3911  }
3912  d[0] = n;
3913  Symbol* nc = hoc_lookup("NetCon");
3914  d[1] = nc->u.ctemplate->count;
3915  d[2] = deliver_cnt_;
3916  d[3] = NetCon::netcon_deliver_;
3921  // should do all threads
3922  p[0].tqe_->spike_stat(d + 8);
3923 }
3924 
3926  int i, j;
3928  if (nrn_modeltype() == 0) {
3929  delete_list();
3930  } else {
3931  init_global();
3932  if (cvode_active_) {
3934  structure_change();
3936  }
3937  if (gcv_) {
3939  gcv_->init_prepare();
3940  // since there may be Vector.play events and INITIAL send events
3941  // at time 0 before actual initialization of integrators.
3942  gcv_->can_retreat_ = false;
3943  } else {
3944  lvardtloop(i, j) {
3945  Cvode& cv = p[i].lcv_[j];
3947  cv.init_prepare();
3948  cv.can_retreat_ = false;
3949  }
3950  }
3951  }
3952  }
3954  playrec_setup();
3955  }
3956 }
3957 
3958 void NetCvode::re_init(double t) {
3959  int i, j;
3960  if (nrn_modeltype() == 0) {
3961  if (gcv_) {
3962  gcv_->t_ = t;
3963  gcv_->tn_ = t;
3964  } else {
3965  lvardtloop(i, j) {
3966  Cvode& cv = p[i].lcv_[j];
3967  cv.t_ = t;
3968  cv.tn_ = t;
3969  }
3970  }
3971  return;
3972  }
3973  double dtsav = nt_dt;
3974  solver_prepare();
3975  if (gcv_) {
3976  gcv_->stat_init();
3977  gcv_->init(t);
3978  if (condition_order() == 2) {
3980  }
3981  } else {
3982  lvardtloop(i, j) {
3983  Cvode& cv = p[i].lcv_[j];
3984  cv.stat_init();
3985  cv.init(t);
3986  cv.tqitem_->t_ = t;
3987  if (condition_order() == 2) {
3988  cv.evaluate_conditions();
3989  }
3990  }
3991  }
3992  nt_dt = dtsav;
3993 }
3994 
3996  NrnThreadMembList* tml;
3998  return;
3999  }
4001  if (nrn_fornetcon_cnt_ == 0) {
4002  return;
4003  }
4004  int i, j;
4005  // initialize a map from type to dparam index, -1 means no FOR_NETCONS statement
4006  std::vector<int> t2i(n_memb_func, -1);
4007  // create ForNetConsInfo in all the relevant point processes
4008  // and fill in the t2i map.
4009  for (i = 0; i < nrn_fornetcon_cnt_; ++i) {
4010  int index = nrn_fornetcon_index_[i];
4011  int type = nrn_fornetcon_type_[i];
4012  t2i[type] = index;
4013  if (nrn_is_artificial_[type]) {
4014  auto* const m = &memb_list[type];
4015  for (j = 0; j < m->nodecount; ++j) {
4016  // Save ForNetConsInfo* as void* to avoid needing to expose the
4017  // definition of ForNetConsInfo to translated MOD file code
4018  void** v = &(m->pdata[j][index].literal_value<void*>());
4020  ForNetConsInfo* fnc = new ForNetConsInfo;
4021  *v = fnc;
4022  fnc->argslist = 0;
4023  fnc->size = 0;
4024  }
4025  } else {
4027  for (tml = nt->tml; tml; tml = tml->next)
4028  if (tml->index == type) {
4029  Memb_list* m = tml->ml;
4030  for (j = 0; j < m->nodecount; ++j) {
4031  void** v = &(m->pdata[j][index].literal_value<void*>());
4033  ForNetConsInfo* fnc = new ForNetConsInfo;
4034  *v = fnc;
4035  fnc->argslist = 0;
4036  fnc->size = 0;
4037  }
4038  }
4039  }
4040  }
4041  // two loops over all netcons. one to count, one to fill in argslist
4042  // count
4043  if (psl_)
4044  for (const PreSyn* ps: *psl_) {
4045  const NetConPList& dil = ps->dil_;
4046  for (const auto& d1: dil) {
4047  Point_process* pnt = d1->target_;
4048  if (pnt && t2i[pnt->prop->_type] > -1) {
4049  auto* fnc = static_cast<ForNetConsInfo*>(
4050  pnt->prop->dparam[t2i[pnt->prop->_type]].get<void*>());
4051  assert(fnc);
4052  fnc->size += 1;
4053  }
4054  }
4055  }
4056 
4057  // allocate argslist space and initialize for another count
4058  for (i = 0; i < nrn_fornetcon_cnt_; ++i) {
4059  int index = nrn_fornetcon_index_[i];
4060  int type = nrn_fornetcon_type_[i];
4061  if (nrn_is_artificial_[type]) {
4062  auto* const m = &memb_list[type];
4063  for (j = 0; j < m->nodecount; ++j) {
4064  auto* fnc = static_cast<ForNetConsInfo*>(m->pdata[j][index].get<void*>());
4065  if (fnc->size > 0) {
4066  fnc->argslist = new double*[fnc->size];
4067  fnc->size = 0;
4068  }
4069  }
4070  } else {
4072  for (tml = nt->tml; tml; tml = tml->next)
4073  if (tml->index == nrn_fornetcon_type_[i]) {
4074  Memb_list* m = tml->ml;
4075  for (j = 0; j < m->nodecount; ++j) {
4076  auto* fnc = static_cast<ForNetConsInfo*>(
4077  m->pdata[j][index].get<void*>());
4078  if (fnc->size > 0) {
4079  fnc->argslist = new double*[fnc->size];
4080  fnc->size = 0;
4081  }
4082  }
4083  }
4084  }
4085  }
4086  // fill in argslist and count again
4087  if (psl_) {
4088  for (const PreSyn* ps: *psl_) {
4089  const NetConPList& dil = ps->dil_;
4090  for (const auto& d1: dil) {
4091  Point_process* pnt = d1->target_;
4092  if (pnt && t2i[pnt->prop->_type] > -1) {
4093  auto* fnc = static_cast<ForNetConsInfo*>(
4094  pnt->prop->dparam[t2i[pnt->prop->_type]].get<void*>());
4095  fnc->argslist[fnc->size] = d1->weight_;
4096  fnc->size += 1;
4097  }
4098  }
4099  }
4100  }
4101 }
4102 
4103 int _nrn_netcon_args(void* v, double*** argslist) {
4104  auto* fnc = static_cast<ForNetConsInfo*>(v);
4105  assert(fnc);
4106  *argslist = fnc->argslist;
4107  return fnc->size;
4108 }
4109 
4110 void _nrn_free_fornetcon(void** v) {
4111  if (auto* fnc = static_cast<ForNetConsInfo*>(*v); fnc) {
4112  delete[] std::exchange(fnc->argslist, nullptr);
4113  delete fnc;
4114  *v = nullptr;
4115  }
4116 }
4117 
4118 void record_init_clear(const TQItem* q, int) {
4119  DiscreteEvent* d = (DiscreteEvent*) q->data_;
4120  d->frecord_init((TQItem*) q);
4121 }
4122 
4124  if (!prl_->empty()) {
4125  // there may be some events on the queue descended from
4126  // finitialize that need to be removed
4127  record_init_items_->clear();
4129  for (auto tq: *record_init_items_) {
4130  p[0].tqe_->remove(tq);
4131  }
4132  record_init_items_->clear();
4133  }
4134  for (auto& item: *prl_) {
4135  item->record_init();
4136  }
4137 }
4138 
4140  for (auto& item: *prl_) {
4141  item->play_init();
4142  }
4143 }
4144 
4145 #if 0 // never used, existed in Version 6.2
4146 int NetCvode::cellindex() {
4147  Section* sec = chk_access();
4148  int i, j, ii;
4149  if (single_) {
4150  return 0;
4151  } else {
4152  ii = 0;
4153  lvardtloop(i, j) {
4154  NrnThread* nt_ = nrn_threads + i;
4155  int inode = p[i].lcv_[j].ctd_[0].vnode_begin_index_;
4156  if (sec == nt_->_v_node[inode]->sec) {
4157  return ii;
4158  }
4159  ii++;
4160  }
4161  }
4162  hoc_execerror(secname(sec), " is not the root section for any local step cvode instance");
4163  return 0;
4164 }
4165 #endif
4166 
4168  Vect* v = vector_arg(1);
4169  if (!cvode_active_) {
4170  v->resize(0);
4171  return;
4172  }
4173  int n{};
4174  if (gcv_) {
4175  n += gcv_->neq_;
4176  } else {
4177  int i, j;
4178  lvardtloop(i, j) {
4179  n += p[i].lcv_[j].neq_;
4180  }
4181  }
4182  v->resize(n);
4183  double* vp{vector_vec(v)};
4184  if (gcv_) {
4185  gcv_->states(vp);
4186  } else {
4187  int i{}, j{}, k{};
4188  lvardtloop(i, j) {
4189  p[i].lcv_[j].states(vp + k);
4190  k += p[i].lcv_[j].neq_;
4191  }
4192  }
4193 }
4194 
4196  int i, j, k, n;
4197  Vect* v = vector_arg(1);
4198  if (!cvode_active_) {
4199  v->resize(0);
4200  return;
4201  }
4202  double* vp;
4203  n = 0;
4204  if (gcv_) {
4205  n += gcv_->neq_;
4206  } else {
4207  lvardtloop(i, j) {
4208  n += p[i].lcv_[j].neq_;
4209  }
4210  }
4211  v->resize(n);
4212  vp = vector_vec(v);
4213  k = 0;
4214  if (gcv_) {
4215  gcv_->dstates(vp);
4216  } else {
4217  lvardtloop(i, j) {
4218  p[i].lcv_[j].dstates(vp + k);
4219  k += p[i].lcv_[j].neq_;
4220  }
4221  }
4222 }
4223 
4224 void nrn_cvfun(double t, double* y, double* ydot) {
4227 }
4228 
4229 double nrn_hoc2fixed_step(void*) {
4231  return 0.;
4232 }
4233 
// hoc interface: evaluate the global-step right-hand side. arg1 is the time,
// arg2 the state vector y, arg3 receives ydot (resized to match y). Errors
// out unless the global variable step method is active, the state vector size
// matches the equation count, and only one thread is in use. Returns 0.
double nrn_hoc2fun(void* v) {
    NetCvode* d = (NetCvode*) v;
    double tt = *getarg(1);
    Vect* s = vector_arg(2);
    Vect* ds = vector_arg(3);
    if (!d->gcv_) {
        hoc_execerror("not global variable time step", 0);
    }
    if (s->size() != size_t(d->gcv_->neq_)) {
        hoc_execerror("size of state vector != number of state equations", 0);
    }
    if (nrn_nthread > 1) {
        hoc_execerror("only one thread allowed", 0);
    }
    ds->resize(s->size());
    nrn_cvfun(tt, vector_vec(s), vector_vec(ds));
    return 0.;
}
4252 
4253 double nrn_hoc2scatter_y(void* v) {
4254  NetCvode* d = (NetCvode*) v;
4255  Vect* s = vector_arg(1);
4256  if (!d->gcv_) {
4257  hoc_execerror("not global variable time step", 0);
4258  }
4259  if (s->size() != size_t(d->gcv_->neq_)) {
4260  hoc_execerror("size of state vector != number of state equations", 0);
4261  }
4262  if (nrn_nthread > 1) {
4263  hoc_execerror("only one thread allowed", 0);
4264  }
4266  return 0.;
4267 }
4268 
// hoc interface: resize arg1 to the number of equations and fill it with the
// global cvode instance's current state vector y. Returns the vector size.
// Errors out unless the global variable step method is active with one thread.
double nrn_hoc2gather_y(void* v) {
    NetCvode* d = (NetCvode*) v;
    Vect* s = vector_arg(1);
    if (!d->gcv_) {
        hoc_execerror("not global variable time step", 0);
    }
    if (nrn_nthread > 1) {
        hoc_execerror("only one thread allowed", 0);
    }
    s->resize(d->gcv_->neq_);
    d->gcv_->gather_y(vector_vec(s), 0);
    return s->size();
}
4282 
4284  int i, j, k, n;
4285  Vect* v = vector_arg(1);
4286  if (!cvode_active_) {
4287  v->resize(0);
4288  return;
4289  }
4290  double* vp;
4291  n = 0;
4292  if (gcv_) {
4293  n += gcv_->neq_;
4294  } else {
4295  lvardtloop(i, j) {
4296  n += p[i].lcv_[j].neq_;
4297  }
4298  }
4299  v->resize(n);
4300  vp = vector_vec(v);
4301  k = 0;
4302  if (gcv_) {
4303  gcv_->error_weights(vp);
4304  } else {
4305  lvardtloop(i, j) {
4306  p[i].lcv_[j].error_weights(vp + k);
4307  k += p[i].lcv_[j].neq_;
4308  }
4309  }
4310 }
4311 
4313  int i, j, k, n;
4314  Vect* v = vector_arg(1);
4315  if (!cvode_active_) {
4316  v->resize(0);
4317  return;
4318  }
4319  double* vp;
4320  n = 0;
4321  if (gcv_) {
4322  n += gcv_->neq_;
4323  } else {
4324  lvardtloop(i, j) {
4325  n += p[i].lcv_[j].neq_;
4326  }
4327  }
4328  v->resize(n);
4329  vp = vector_vec(v);
4330  k = 0;
4331  if (gcv_) {
4332  gcv_->acor(vp);
4333  } else {
4334  lvardtloop(i, j) {
4335  p[i].lcv_[j].acor(vp + k);
4336  k += p[i].lcv_[j].neq_;
4337  }
4338  }
4339 }
4340 
4341 /** @brief Create a lookup table for variable names.
4342  *
4343  * This is only created on-demand because it involves building a lookup table
4344  * of pointers, some of which are obtained from data_handles (and are therefore
4345  * unstable). Eventually the operator<< of data_handle might provide the
4346  * necessary functionality and this could be dropped completely.
4347  */
4349  int n{};
4350  if (gcv_) {
4351  n += gcv_->neq_;
4352  } else {
4353  int i, j;
4354  lvardtloop(i, j) {
4355  n += p[i].lcv_[j].neq_;
4356  }
4357  }
4358  HocDataPaths hdp{2 * n, style};
4359  if (gcv_) {
4360  for (int it = 0; it < nrn_nthread; ++it) {
4361  CvodeThreadData& z = gcv_->ctd_[it];
4362  for (int j = 0; j < z.nonvint_extra_offset_; ++j) {
4363  hdp.append(static_cast<double*>(z.pv_[j]));
4364  }
4365  }
4366  } else {
4367  int i, it;
4368  lvardtloop(it, i) {
4369  auto const neq = p[it].lcv_[i].ctd_[0].nvsize_;
4370  auto& pv = p[it].lcv_[i].ctd_[0].pv_;
4371  for (int j = 0; j < neq; ++j) {
4372  hdp.append(static_cast<double*>(pv[j]));
4373  }
4374  }
4375  }
4376  hdp.search();
4377  return hdp;
4378 }
4379 
// Return a display name for cvode state variable index `is`. style == 2
// resolves via the symbol table (sym2name); any other style returns the full
// data path, or "unknown" when the pointer cannot be resolved. Errors out
// when cvode is inactive or `is` is out of range.
std::string NetCvode::statename(int is, int style) {
    if (!cvode_active_) {
        hoc_execerror("Cvode is not active", 0);
    }
    // total equation count across the global or all local instances
    auto const n = [&]() {
        int n{};
        if (gcv_) {
            n += gcv_->neq_;
        } else {
            int i, j;
            lvardtloop(i, j) {
                n += p[i].lcv_[j].neq_;
            }
        }
        return n;
    }();
    if (is >= n) {
        hoc_execerror("Cvode::statename argument out of range", nullptr);
    }
    // resolve one state handle to a name via the on-demand lookup table
    auto const impl = [hdp = create_hdp(style), style, this](auto& handle) -> std::string {
        auto* const raw_ptr = static_cast<double*>(handle);
        if (style == 2) {
            auto* sym = hdp.retrieve_sym(raw_ptr);
            assert(sym);
            return sym2name(sym);
        } else {
            std::string s = hdp.retrieve(raw_ptr);
            return !s.empty() ? s.c_str() : "unknown";
        }
    };
    // walk the per-thread (global) or per-instance (local) state segments
    // until the one containing index `is` is found
    int j{};
    if (gcv_) {
        for (int it = 0; it < nrn_nthread; ++it) {
            CvodeThreadData& z = gcv_->ctd_[it];
            if (j + z.nvoffset_ + z.nvsize_ > is) {
                return impl(z.pv_[is - j]);
            }
            j += z.nvsize_;
        }
    } else {
        int it, i;
        lvardtloop(it, i) {
            if (j + p[it].lcv_[i].neq_ > is) {
                CvodeThreadData& z = p[it].lcv_[i].ctd_[0];
                return impl(z.pv_[is - j]);
            }
            j += p[it].lcv_[i].neq_;
        }
    }
    return "unknown";
}
4431 
// Map a state symbol to a display name. Point-process range variables are
// rendered as "Mechanism.var" into a static buffer.
// NOTE(review): the static buffer means the returned pointer is overwritten
// by the next such call and is not thread safe — callers must copy promptly.
const char* NetCvode::sym2name(Symbol* sym) {
    if (sym->type == RANGEVAR && sym->u.rng.type > 1 && memb_func[sym->u.rng.type].is_point) {
        static char buf[200];
        Sprintf(buf, "%s.%s", memb_func[sym->u.rng.type].sym->name, sym->name);
        return buf;
    } else {
        return sym->name;
    }
}
4441 
4443  std::vector<char> buf(strlen(name) + 1);
4444  strcpy(buf.data(), name);
4445  char* cp;
4446  for (cp = buf.data(); *cp; ++cp) {
4447  if (*cp == '.') {
4448  *cp = '\0';
4449  ++cp;
4450  break;
4451  }
4452  }
4454  if (!sym) {
4456  }
4457  if (sym && *cp == '\0' && (sym->type == RANGEVAR || strcmp(sym->name, "Vector") == 0)) {
4458  return sym;
4459  } else if (sym && sym->type == TEMPLATE && *cp != '\0') {
4460  sym = hoc_table_lookup(cp, sym->u.ctemplate->symtable);
4461  if (sym) {
4462  return sym;
4463  }
4464  }
4465  hoc_execerror(name, "must be in form rangevar or Template.var");
4466  return nullptr;
4467 }
4468 
// Set the relative tolerance used by the variable step integrators.
void NetCvode::rtol(double x) {
    rtol_ = x;
}
// Set the absolute tolerance used by the variable step integrators.
void NetCvode::atol(double x) {
    atol_ = x;
}
// Set the stiffness option. Switching between 0 and nonzero requires the
// underlying cvode memory of every instance to be freed so it is rebuilt
// with the new setting.
void NetCvode::stiff(int x) {
    if ((stiff_ == 0) != (x == 0)) { // need to free if change between 0 and nonzero
        if (gcv_) {
            gcv_->free_cvodemem();
        } else {
            int i, j;
            lvardtloop(i, j) {
                p[i].lcv_[j].free_cvodemem();
            }
        }
    }
    stiff_ = x;
}
4488 void NetCvode::maxorder(int x) {
4489  maxorder_ = x;
4490  if (gcv_) {
4491  gcv_->free_cvodemem();
4493  } else {
4494  int i, j;
4495  lvardtloop(i, j) {
4496  p[i].lcv_[j].free_cvodemem();
4497  p[i].lcv_[j].maxorder(maxorder_);
4498  }
4499  }
4500 }
4502  int o = 0;
4503  if (gcv_) {
4504  o = gcv_->order();
4505  } else {
4506  int i, j, i2 = 0;
4507  lvardtloop(i, j) {
4508  if (ii == i2++) {
4509  o = p[i].lcv_[j].order();
4510  }
4511  }
4512  }
4513  return o;
4514 }
// Set the minimum step size and propagate it to the global instance or to
// every local variable step instance.
void NetCvode::minstep(double x) {
    minstep_ = x;
    if (gcv_) {
        gcv_->minstep(minstep_);
    } else {
        int i, j;
        lvardtloop(i, j) {
            p[i].lcv_[j].minstep(minstep_);
        }
    }
}
// Set the maximum step size and propagate it to the global instance or to
// every local variable step instance.
void NetCvode::maxstep(double x) {
    maxstep_ = x;
    if (gcv_) {
        gcv_->maxstep(maxstep_);
    } else {
        int i, j;
        lvardtloop(i, j) {
            p[i].lcv_[j].maxstep(maxstep_);
        }
    }
}
4537 void NetCvode::jacobian(int x) {
4538  jacobian_ = x;
4539 }
4541  if (gcv_) {
4542  gcv_->structure_change_ = true;
4543  } else {
4544  int i, j;
4545  lvardtloop(i, j) {
4546  p[i].lcv_[j].structure_change_ = true;
4547  }
4548  }
4549 }
4550 
4552  Section* ssrc,
4553  Object* osrc,
4554  Object* target,
4555  double threshold,
4556  double delay,
4557  double magnitude) {
4558  PreSyn* ps = nullptr;
4560  if (ssrc) {
4561  consist_sec_pd("NetCon", ssrc, dsrc);
4562  }
4563  if (!pst_) {
4564  pst_ = new PreSynTable(1000);
4565  pst_cnt_ = 0;
4566  }
4567  if (!psl_) {
4568  psl_ = new std::vector<PreSyn*>();
4569  }
4570  if (osrc) {
4571  assert(!dsrc);
4572  char buf[256];
4573  if (hoc_table_lookup("x", osrc->ctemplate->symtable)) {
4574  Point_process* pp = ob2pntproc(osrc);
4575  assert(pp && pp->prop);
4576  if (!pnt_receive[pp->prop->_type]) { // only if no NET_RECEIVE block
4577  Sprintf(buf, "%s.x", hoc_object_name(osrc));
4578  psrc = hoc_val_handle(buf);
4579  }
4580  }
4581  } else {
4582  psrc = dsrc;
4583  }
4584  if (psrc) {
4585  auto psti = pst_->find(psrc);
4586  if (psti == pst_->end()) {
4587  ps = new PreSyn(psrc, osrc, ssrc);
4588  psl_->push_back(ps);
4589  (*pst_)[psrc] = ps;
4590  ++pst_cnt_;
4591  } else {
4592  ps = psti->second;
4593  }
4594  if (threshold != -1e9) {
4595  ps->threshold_ = threshold;
4596  }
4597  } else if (osrc) {
4598  Point_process* pnt = ob2pntproc(osrc);
4599  if (pnt->presyn_) {
4600  ps = (PreSyn*) pnt->presyn_;
4601  } else {
4602  ps = new PreSyn(psrc, osrc, ssrc);
4603  if (threshold != -1e9) {
4604  ps->threshold_ = threshold;
4605  }
4606  psl_->push_back(ps);
4607  pnt->presyn_ = ps;
4608  }
4609  } else if (target) { // no source so use the special presyn
4610  if (!unused_presyn) {
4611  unused_presyn = new PreSyn({}, nullptr, nullptr);
4612  psl_->push_back(unused_presyn);
4613  }
4614  ps = unused_presyn;
4615  }
4616  ps_thread_link(ps);
4617  NetCon* d = new NetCon(ps, target);
4618  d->delay_ = delay;
4619  d->weight_[0] = magnitude;
4621  return d;
4622 }
4623 
4625  if (!psl_) {
4626  psl_ = new std::vector<PreSyn*>();
4627  }
4628  psl_->push_back(ps);
4629 }
4630 
4632  if (ps == unused_presyn) {
4633  unused_presyn = nullptr;
4634  }
4635  if (psl_) {
4636  auto it = std::find(psl_->begin(), psl_->end(), ps);
4637  if (it != psl_->end()) {
4638  psl_->erase(it);
4639  }
4640  }
4641  if (ps->hi_th_) {
4642  hoc_l_delete(ps->hi_th_);
4643  ps->hi_th_ = nullptr;
4644  }
4645  if (ps->thvar_) {
4646  --pst_cnt_;
4647  pst_->erase(ps->thvar_);
4648  ps->thvar_ = {};
4649  }
4650  if (gcv_) {
4651  for (int it = 0; it < gcv_->nctd_; ++it) {
4652  PreSynList* psl = gcv_->ctd_[it].psl_th_;
4653  if (psl)
4654  for (size_t j = 0; j < psl->size(); ++j) {
4655  if ((*psl)[j] == ps) {
4656  psl->erase(psl->begin() + j);
4657  return;
4658  }
4659  }
4660  }
4661  } else {
4662  int i, j;
4663  lvardtloop(i, j) {
4664  PreSynList* psl = p[i].lcv_[j].ctd_[0].psl_th_;
4665  if (psl)
4666  for (size_t j = 0; j < psl->size(); ++j) {
4667  if ((*psl)[j] == ps) {
4668  psl->erase(psl->begin() + j);
4669  return;
4670  }
4671  }
4672  }
4673  }
4674 }
4675 
4678 
4680  // pr("savestate_save", 0, net_cvode_instance);
4681  if (this != null_event_) {
4682  pr("savestate_save", 0, net_cvode_instance);
4683  hoc_execerror("DiscreteEvent::savestate_save:", " is not the null_event_");
4684  }
4685  return new DiscreteEvent();
4686 }
4687 
4689  // pr("savestate_restore", tt, nc);
4690  Printf("null_event_ onto queue\n");
4691  nc->null_event(tt);
4692 }
4693 
4695  return new DiscreteEvent();
4696 }
4697 
4699  fprintf(f, "%d\n", DiscreteEventType);
4700 }
4701 
4702 NetCon::NetCon(PreSyn* src, Object* target) {
4704  obj_ = nullptr;
4705  src_ = src;
4706  delay_ = 1.0;
4707  if (src_) {
4708  src_->dil_.push_back(this);
4709  src_->use_min_delay_ = 0;
4710  }
4711  if (target == nullptr) {
4712  target_ = nullptr;
4713  active_ = false;
4714  cnt_ = 1;
4715  weight_ = new double[cnt_];
4716  weight_[0] = 0.0;
4717  return;
4718  }
4719  target_ = ob2pntproc(target);
4720  active_ = true;
4721 #if DISCRETE_EVENT_OBSERVER
4722  ObjObservable::Attach(target, this);
4723 #endif
4724  if (!pnt_receive[target_->prop->_type]) {
4725  hoc_execerror("No NET_RECEIVE in target PointProcess:", hoc_object_name(target));
4726  }
4728  weight_ = nullptr;
4729  if (cnt_) {
4730  weight_ = new double[cnt_];
4731  for (int i = 0; i < cnt_; ++i) {
4732  weight_[i] = 0.0;
4733  }
4734  }
4735 }
4736 
4738  // printf("~NetCon\n");
4740  rmsrc();
4741  if (cnt_) {
4742  delete[] weight_;
4743  }
4744 #if DISCRETE_EVENT_OBSERVER
4745  if (target_) {
4747  }
4748 #endif
4749 }
4750 
4752  if (src_) {
4753  for (size_t i = 0; i < src_->dil_.size(); ++i) {
4754  if (src_->dil_[i] == this) {
4755  src_->dil_.erase(src_->dil_.begin() + i);
4756  if (src_->dil_.size() == 0 && src_->tvec_ == NULL && src_->idvec_ == NULL) {
4757  if (src_->output_index_ == -1) {
4758  delete std::exchange(src_, nullptr);
4759  }
4760  }
4761  break;
4762  }
4763  }
4764  }
4765  src_ = nullptr;
4766 }
4767 
4769  rmsrc();
4770  src_ = p;
4771  if (src_) {
4772  src_->dil_.push_back(this);
4773  src_->use_min_delay_ = 0;
4774  }
4775 }
4776 
4778  // pr("savestate_save", 0, net_cvode_instance);
4779  return new NetConSave(this);
4780 }
4781 
4783  netcon_ = netcon;
4784 }
4786 
4788  // netcon_->pr("savestate_restore", tt, nc);
4789  NrnThread* nt;
4790  if (netcon_ && netcon_->target_) {
4791  nt = PP2NT(netcon_->target_);
4792  // printf(" on thread %d\n", nt->id);
4793  } else {
4794  nt = nrn_threads;
4795  }
4796  nc->event(tt, netcon_, nt);
4797 }
4798 
4800  int index;
4801  char buf[200];
4802  // fscanf(f, "%d\n", &index);
4803  nrn_assert(fgets(buf, 200, f));
4804  sscanf(buf, "%d\n", &index);
4806  assert(nc);
4807  return new NetConSave(nc);
4808 }
4809 
4811  fprintf(f, "%d\n", NetConType);
4812  fprintf(f, "%d\n", netcon_->obj_->index);
4813 }
4814 
4817 
4819  delete std::exchange(wtable_, nullptr);
4820  delete std::exchange(idxtable_, nullptr);
4821 }
4822 
4824  NetCon* nc;
4825  if (!wtable_) {
4826  hoc_Item* q;
4827  Symbol* sym = hoc_lookup("NetCon");
4828  wtable_ = new NetConSaveWeightTable(2 * sym->u.ctemplate->count);
4829  ITERATE(q, sym->u.ctemplate->olist) {
4830  Object* obj = OBJ(q);
4831  nc = (NetCon*) obj->u.this_pointer;
4832  if (nc->weight_) {
4833  (*wtable_)[nc->weight_] = nc;
4834  }
4835  }
4836  }
4837  auto wti = wtable_->find(pd);
4838  if (wti != wtable_->end()) {
4839  nc = wti->second;
4840  assert(nc->weight_ == pd);
4841  return nc;
4842  } else {
4843  return nullptr;
4844  }
4845 }
4846 
4848  NetCon* nc;
4849  if (!idxtable_) {
4850  hoc_Item* q;
4851  Symbol* sym = hoc_lookup("NetCon");
4852  idxtable_ = new NetConSaveIndexTable(2 * sym->u.ctemplate->count);
4853  ITERATE(q, sym->u.ctemplate->olist) {
4854  Object* obj = OBJ(q);
4855  nc = (NetCon*) obj->u.this_pointer;
4856  if (nc->weight_) {
4857  (*idxtable_)[obj->index] = nc;
4858  }
4859  }
4860  }
4861  auto idxti = idxtable_->find(id);
4862  if (idxti != idxtable_->end()) {
4863  nc = idxti->second;
4864  assert(nc->obj_->index == id);
4865  return nc;
4866  } else {
4867  return nullptr;
4868  }
4869 }
4870 
4873 }
4874 
4876  if (!ps) {
4877  return;
4878  }
4879  ps->nt_ = nullptr;
4880  if (!v_structure_change) { // PP2NT etc are correct
4881  if (ps->osrc_) {
4882  ps->nt_ = PP2NT(ob2pntproc(ps->osrc_));
4883  } else if (ps->ssrc_) {
4884  ps->nt_ = ps->ssrc_->prop->dparam[9].get<NrnThread*>();
4885  }
4886  }
4887  if (!ps->nt_) { // premature, reorder_secorder() not called yet
4888  return;
4889  }
4890  if (ps->thvar_) {
4891  int i = ps->nt_->id;
4892  if (!p[i].psl_thr_) {
4893  p[i].psl_thr_ = hoc_l_newlist();
4894  }
4895  ps->hi_th_ = hoc_l_insertvoid(p[i].psl_thr_, ps);
4896  }
4897 }
4898 
4900  int i;
4901  // first, opportunistically create p[]
4903  // iterate over all threshold PreSyn and fill the NrnThread field
4904  for (i = 0; i < nrn_nthread; ++i) {
4905  if (p[i].psl_thr_) {
4906  hoc_l_freelist(&p[i].psl_thr_);
4907  }
4908  }
4909  if (psl_) {
4910  for (PreSyn* ps: *psl_) {
4911  ps_thread_link(ps);
4912  }
4913  }
4914 }
4915 
4917  int i;
4918  if (pcnt_ != n) {
4919  delete[] std::exchange(p, nullptr);
4920  if (n > 0) {
4921  p = new NetCvodeThreadData[n];
4922  }
4923  pcnt_ = n;
4924  }
4925  for (i = 0; i < n; ++i) {
4926  p[i].unreffed_event_cnt_ = 0;
4927  }
4928 }
4929 
4931  : thvar_{std::move(src)} {
4932  // printf("Presyn %x %s\n", (long)this, osrc?hoc_object_name(osrc):"nullptr");
4934  hi_index_ = -1;
4935  hi_th_ = nullptr;
4936  flag_ = false;
4937  valthresh_ = 0;
4938  osrc_ = osrc;
4939  ssrc_ = ssrc;
4940  threshold_ = 10.;
4941  use_min_delay_ = 0;
4942  tvec_ = nullptr;
4943  idvec_ = nullptr;
4944  stmt_ = nullptr;
4945  gid_ = -1;
4946  nt_ = nullptr;
4947  if (thvar_) {
4948  if (osrc) {
4949  nt_ = PP2NT(ob2pntproc(osrc));
4950  } else if (ssrc) {
4951  nt_ = ssrc->prop->dparam[9].get<NrnThread*>();
4952  }
4953  }
4954  if (osrc_ && !thvar_) {
4955  nt_ = PP2NT(ob2pntproc(osrc));
4956  }
4957 #if 1 || USENCS || NRNMPI
4958  output_index_ = -1;
4959 #endif
4960 #if NRNMPI
4961  bgp.multisend_send_ = 0;
4962 #endif
4963 #if NRN_MUSIC
4964  music_port_ = 0;
4965 #endif
4966 #if DISCRETE_EVENT_OBSERVER
4967  if (thvar_) {
4969  } else if (osrc_) {
4971  }
4972 #endif
4973 }
4974 
4977  // printf("~PreSyn %p\n", this);
4978  nrn_cleanup_presyn(this);
4979  delete std::exchange(stmt_, nullptr);
4980 #if DISCRETE_EVENT_OBSERVER
4981  if (tvec_) {
4983  tvec_ = nullptr;
4984  }
4985  if (idvec_) {
4987  idvec_ = nullptr;
4988  }
4989 #endif
4990  if (thvar_ || osrc_) {
4991 #if DISCRETE_EVENT_OBSERVER
4993 #endif
4994  if (!thvar_) {
4995  // even if the point process section was deleted earlier
4997  if (pnt) {
4998  pnt->presyn_ = nullptr;
4999  }
5000  }
5001  }
5002  for (const auto& d: dil_) {
5003  d->src_ = nullptr;
5004  }
5006 }
5007 
5009  // pr("savestate_save", 0, net_cvode_instance);
5010  return new PreSynSave(this);
5011 }
5012 
5014  presyn_ = presyn;
5015 }
5017 
5019  // presyn_->pr("savestate_restore", tt, nc);
5020  nc->event(tt, presyn_, presyn_->nt_);
5021 }
5022 
5024  PreSyn* ps = nullptr;
5025  char buf[200];
5026  int index, tid;
5027  nrn_assert(fgets(buf, 200, f));
5028  nrn_assert(sscanf(buf, "%d %d\n", &index, &tid) == 2);
5030  assert(ps);
5031  ps->nt_ = nrn_threads + tid;
5032  return new PreSynSave(ps);
5033 }
5034 
5036  fprintf(f, "%d\n", PreSynType);
5037  fprintf(f, "%ld %d\n", presyn_->hi_index_, presyn_->nt_ ? presyn_->nt_->id : 0);
5038 }
5039 
5041 
5043  delete std::exchange(idxtable_, nullptr);
5044 }
5045 
5047  if (!idxtable_) {
5049  for (auto&& [index, ps]: enumerate(*net_cvode_instance->psl_)) {
5050  assert(ps->hi_index_ == index);
5051  (*idxtable_)[ps->hi_index_] = ps;
5052  }
5053  }
5054  auto idxti = idxtable_->find(id);
5055  if (idxti != idxtable_->end()) {
5056  PreSyn* ps = idxti->second;
5057  assert(ps->hi_index_ == id);
5058  return ps;
5059  } else {
5060  return nullptr;
5061  }
5062 }
5063 
5065  qthresh_ = nullptr;
5066  if (tvec_) {
5067  tvec_->resize(0);
5068  }
5069  if (idvec_) {
5070  idvec_->resize(0);
5071  }
5072 }
5073 
5074 void PreSyn::record_stmt(const char* stmt) {
5075  delete std::exchange(stmt_, nullptr);
5076  if (strlen(stmt) > 0) {
5077  stmt_ = new HocCommand(stmt);
5078  }
5079 }
5080 
5082  delete std::exchange(stmt_, nullptr);
5083  if (pyact) {
5084  stmt_ = new HocCommand(pyact);
5085  }
5086 }
5087 
5088 void PreSyn::record(IvocVect* vec, IvocVect* idvec, int rec_id) {
5089 #if DISCRETE_EVENT_OBSERVER
5090  if (tvec_) {
5092  }
5093  if (idvec_) {
5095  }
5096 #endif
5097  tvec_ = vec;
5098  idvec_ = idvec;
5099  rec_id_ = rec_id;
5100 #if DISCRETE_EVENT_OBSERVER
5101  if (tvec_) {
5103  }
5104  if (idvec_) {
5106  tvec_->mutconstruct(1);
5107  }
5108 #endif
5109 }
5110 
5111 void PreSyn::record(double tt) {
5112  if (tvec_) {
5113  // need to lock the vector if shared by other PreSyn
5114  // since we get here in the thread that manages the
5115  // threshold detection (or net_event from NET_RECEIVE).
5116  if (idvec_) {
5117  tvec_->lock();
5118  }
5119  tvec_->push_back(tt);
5120  if (idvec_) {
5122  tvec_->unlock();
5123  }
5124  }
5125  if (stmt_) {
5126  if (nrn_nthread > 1) {
5127  nrn_hoc_lock();
5128  }
5129  t = tt;
5130  stmt_->execute(false);
5131  if (nrn_nthread > 1) {
5132  nrn_hoc_unlock();
5133  }
5134  }
5135 }
5136 
5138  // printf("PreSyn::disconnect %s\n", hoc_object_name(((ObjObservable*)o)->object()));
5139  if (tvec_ && tvec_->obj_ == ((ObjObservable*) o)->object()) {
5140  tvec_ = nullptr;
5141  }
5142  if (idvec_ && idvec_->obj_ == ((ObjObservable*) o)->object()) {
5143  idvec_ = nullptr;
5144  }
5145  if (dil_.size() == 0 && tvec_ == nullptr && idvec_ == nullptr && output_index_ == -1) {
5146  delete this;
5147  }
5148 }
5149 
5150 void PreSyn::update(Observable* o) { // should be disconnect
5151  // printf("PreSyn::update\n");
5152  for (const auto& d: dil_) {
5153 #if 0 // osrc_ below is invalid
5154 if (d->obj_) {
5155  printf("%s disconnect from ", hoc_object_name(d->obj_));
5156  printf("source %s\n", osrc_ ? hoc_object_name(osrc_) : secname(ssrc_));
5157 }
5158 #endif
5159  d->src_ = nullptr;
5160  }
5161  if (tvec_) {
5162 #if DISCRETE_EVENT_OBSERVER
5164 #endif
5165  tvec_ = nullptr;
5166  }
5167  if (idvec_) {
5168 #if DISCRETE_EVENT_OBSERVER
5170 #endif
5171  idvec_ = nullptr;
5172  }
5174  thvar_ = {};
5175  osrc_ = nullptr;
5176  delete this;
5177 }
5178 
5179 void ConditionEvent::check(NrnThread* nt, double tt, double teps) {
5180  if (value() > 0.0) {
5181  if (flag_ == false) {
5182  flag_ = true;
5183  valthresh_ = 0.;
5184  send(tt + teps, net_cvode_instance, nt);
5185  }
5186  } else {
5187  flag_ = false;
5188  }
5189 }
5190 
5192  qthresh_ = NULL;
5193  valold_ = 0.0;
5194 }
5196 
5197 void ConditionEvent::condition(Cvode* cv) { // logic for high order threshold detection
5198  // printf("ConditionEvent::condition f=%d t=%20.15g v=%20.15g\n", flag_, t, value());
5199  NrnThread* nt = thread();
5200  if (qthresh_) { // the threshold event has not
5201  // been handled. i.e. the cell must have retreated to
5202  // a time not later than the threshold time.
5203  assert(nt->_t <= qthresh_->t_);
5204  abandon_statistics(cv);
5205  // abandon the event
5208  qthresh_ = nullptr;
5209  valthresh_ = 0.;
5210  flag_ = false;
5211  }
5212 
5213  double val = value();
5214  if (flag_ == false && val >= 0.0) { // above threshold
5215  flag_ = true;
5216  valthresh_ = 0.;
5217  if (cv->t0_ == cv->tn_) { // inited
5218  // means immediate threshold event now.
5219  // no need for qthresh since there is
5220  // no question of abandoning it so instead
5221  // of a qthresh it is a send event.
5223  send(nt->_t, net_cvode_instance, nt);
5224  } else { // crossed somewhere in the told to t interval
5226  // reset the flag_ when the value goes lower than
5227  // valold since value() when qthresh_ handled
5228  // may in fact be below 0.0
5229  valthresh_ = valold_;
5230  double th = -valold_ / (val - valold_);
5231  th = th * nt->_t + (1. - th) * told_;
5232  assert(th >= cv->t0_ && th <= cv->tn_);
5233  qthresh_ = net_cvode_instance->event(th, this, nt);
5234  }
5235  } else if (flag_ == true && valold_ < valthresh_ && val < valthresh_) {
5236  // below threshold
5237  // previous step crossed in negative direction
5238  // and there was not any retreat or initialization
5239  // to give spurious crossing.
5240  flag_ = false;
5241  }
5242  valold_ = val;
5243  told_ = nt->_t;
5244 }
5245 
5247  double val = stet_->value();
5248  return val;
5249 }
5250 
5252 #if 1
5253  // printf("ConditionEvent::condition %s t=%20.15g abandon event at %20.15g\n",
5254  // ssrc_?secname(ssrc_):"", t, qthresh_->t_);
5255  if (nt_t == qthresh_->t_) { // it is not clear whether
5256  // this could happen and if it does it may
5257  // take fastidiousness to
5258  // an extreme
5260  Printf("abandon when t == qthresh_->t_ = %20.15g\n", nt_t);
5261  }
5262  if (cv->t0_ == cv->tn_) { // inited
5263  if (value() > 0.0) { // above threshold
5265  } else {
5267  }
5268  } else {
5269  if (value() > 0.0) { // above threshold
5271  } else {
5273  }
5274  }
5275 #endif
5276 }
5277 
5279  pnt_ = pnt;
5280  c_ = c;
5281  watch_index_ = 0; // For transfer, will be a small positive integer.
5282 }
5283 
5285  Remove();
5286 }
5287 
5288 // A WatchCondition but with different deliver
5290  : WatchCondition(pnt, c) {}
5291 
5292 void WatchCondition::activate(double flag) {
5293  Cvode* cv = NULL;
5294  int id = 0;
5295  qthresh_ = nullptr;
5296  flag_ = (value() >= -hoc_epsilon) ? true : false;
5297  valthresh_ = 0.;
5298  nrflag_ = flag;
5299  if (!pnt_) { // possible for StateTransitionEvent
5300  // but only if 1 thread and no lvardt
5301  assert(nrn_nthread == 1);
5302  assert(net_cvode_instance->localstep() == false);
5303  cv = net_cvode_instance->gcv_;
5304  } else {
5305  cv = (Cvode*) pnt_->nvi_;
5306  }
5307  assert(cv);
5308  id = (cv->nctd_ > 1) ? thread()->id : 0;
5309  auto*& wl = cv->ctd_[id].watch_list_;
5310  if (!wl) {
5311  wl = new HTList(nullptr);
5312  net_cvode_instance->wl_list_[id].push_back(wl);
5313  }
5314  Remove();
5315  wl->Append(this);
5316 }
5317 
5319  fprintf(stderr, "WATCH condition with flag=%g for %s\n", nrflag_, hoc_object_name(pnt_->ob));
5320 }
5321 
5323  fprintf(stderr, "PreSyn threshold for %s\n", osrc_ ? hoc_object_name(osrc_) : secname(ssrc_));
5324 }
5325 
5326 void WatchCondition::send(double tt, NetCvode* nc, NrnThread* nt) {
5327  qthresh_ = nc->event(tt, this, nt);
5329 }
5330 
5331 void WatchCondition::deliver(double tt, NetCvode* ns, NrnThread* nt) {
5332  if (qthresh_) {
5333  qthresh_ = nullptr;
5335  }
5336  Cvode* cv = (Cvode*) pnt_->nvi_;
5337  int type = pnt_->prop->_type;
5338  if (cvode_active_ && cv) {
5339  ns->local_retreat(tt, cv);
5340  cv->set_init_flag();
5341  } else {
5342  PP2t(pnt_) = tt;
5343  }
5345  POINT_RECEIVE(type, pnt_, nullptr, nrflag_);
5346  if (errno) {
5347  if (nrn_errno_check(type)) {
5348  hoc_warning("errno set during WatchCondition deliver to NET_RECEIVE", (char*) 0);
5349  }
5350  }
5351 }
5352 
5354  int dest,
5357  std::unique_ptr<HocCommand> hc) {
5358  STETransition& st = states_[src].add_transition(pnt_);
5359  st.dest_ = dest;
5360  st.var1_ = std::move(var1);
5361  st.var2_ = std::move(var2);
5362  st.hc_ = std::move(hc);
5363  st.ste_ = this;
5364  st.var1_is_time_ = (static_cast<double*>(st.var1_) == &t);
5365 }
5366 
5368  if (var1_is_time_) {
5370  &stec_->thread()->_t};
5371  }
5372  if (stec_->qthresh_) { // is it on the queue
5373  net_cvode_instance->remove_event(stec_->qthresh_, stec_->thread()->id);
5374  stec_->qthresh_ = NULL;
5375  }
5376  stec_->activate(0);
5377 }
5378 
5380  if (stec_->qthresh_) { // is it on the queue
5381  net_cvode_instance->remove_event(stec_->qthresh_, stec_->thread()->id);
5382  stec_->qthresh_ = nullptr;
5383  }
5384  stec_->Remove();
5385 }
5386 
5387 void STECondition::deliver(double tt, NetCvode* ns, NrnThread* nt) {
5388  if (qthresh_) {
5389  qthresh_ = nullptr;
5391  }
5392  if (!pnt_) {
5393  assert(nrn_nthread == 1 && ns->localstep() == false);
5394  if (cvode_active_) {
5395  Cvode* cv = ns->gcv_;
5396  ns->local_retreat(tt, cv);
5397  cv->set_init_flag();
5398  } else {
5399  nt->_t = tt;
5400  }
5401  } else {
5402  Cvode* cv = (Cvode*) pnt_->nvi_;
5403  if (cvode_active_ && cv) {
5404  ns->local_retreat(tt, cv);
5405  cv->set_init_flag();
5406  } else {
5407  PP2t(pnt_) = tt;
5408  }
5409  }
5411  t = tt;
5412  stet_->event();
5413 }
5414 
5416  return PP2NT(pnt_);
5417 }
5418 
5420  if (pnt_) {
5421  return PP2NT(pnt_);
5422  } else {
5423  assert(nrn_nthread == 1);
5424  return nrn_threads;
5425  }
5426 }
5427 
5429  assert(0);
5430  if (qthresh_) {
5431  qthresh_ = nullptr;
5433  }
5434  int type = pnt_->prop->_type;
5436  POINT_RECEIVE(type, pnt_, nullptr, nrflag_);
5437  if (errno) {
5438  if (nrn_errno_check(type)) {
5439  hoc_warning("errno set during WatchCondition deliver to NET_RECEIVE", (char*) 0);
5440  }
5441  }
5442 }
5443 
5445  assert(0);
5446  if (qthresh_) {
5447  qthresh_ = nullptr;
5449  }
5450  int type = pnt_->prop->_type;
5452  stet_->event();
5453  if (errno) {
5454  if (nrn_errno_check(type)) {
5455  hoc_warning("errno set during STECondition pgvts_deliver to NET_RECEIVE", (char*) 0);
5456  }
5457  }
5458 }
5459 
// Debug helper: print a caller-supplied label followed by a one-line
// description of this WATCH condition (owning point process, event time tt,
// and the flag value it delivers). The NetCvode* argument is unused here.
void WatchCondition::pr(const char* s, double tt, NetCvode* ns) {
    Printf("%s", s);
    Printf(" WatchCondition %s %.15g flag=%g\n", hoc_object_name(pnt_->ob), tt, nrflag_);
}
5464 
5465 static Cvode* eval_cv;
5466 static void* eval_cond(NrnThread* nt) {
5468  return 0;
5469 }
5471  if (!nt) {
5472  if (nrn_nthread > 1 && nctd_ > 1) {
5473  eval_cv = this;
5475  return;
5476  }
5477  nt = nrn_threads;
5478  }
5479  CvodeThreadData& z = CTD(nt->id);
5480  if (z.psl_th_) {
5481  // originally was a reverse iteration, but I thing that was
5482  // just to avoid continually calling count() on the List.
5483  for (auto ps: *(z.psl_th_)) {
5484  ps->condition(this);
5485  }
5486  }
5487  if (z.watch_list_) {
5488  for (HTList* item = z.watch_list_->First(); item != z.watch_list_->End();
5489  item = item->Next()) {
5490  ((WatchCondition*) item)->condition(this);
5491  }
5492  }
5493 }
5494 
// Thread-pool trampoline: forwards each worker thread to the Cvode instance
// stashed in the file-static eval_cv (set by the multi-threaded branch of
// Cvode::check_deliver just before fanning out).
static void* chk_deliv(NrnThread* nt) {
    eval_cv->check_deliver(nt);
    return 0;
}
5500  if (!nt) {
5501  if (nrn_nthread > 1) {
5502  eval_cv = this;
5504  return;
5505  }
5506  nt = nrn_threads;
5507  }
5508  CvodeThreadData& z = CTD(nt->id);
5509  if (z.psl_th_) {
5510  // originally was a reverse iteration, but I thing that was
5511  // just to avoid continually calling count() on the List.
5512  for (auto ps: *(z.psl_th_)) {
5513  ps->check(nt, nt->_t);
5514  }
5515  }
5516  if (z.watch_list_) {
5517  for (HTList* item = z.watch_list_->First(); item != z.watch_list_->End();
5518  item = item->Next()) {
5519  ((WatchCondition*) item)->check(nt, nt->_t);
5520  }
5521  }
5522 }
5523 
5525  NrnThread& nt) {
5526  nrn_ba(cache_token, nt, BEFORE_STEP);
5527  for (auto& pr: *fixed_record_) {
5528  if (pr->ith_ == nt.id) {
5529  pr->continuous(nt._t);
5530  }
5531  }
5532 }
5533 
5535  for (auto& pr: *fixed_play_) {
5536  if (pr->ith_ == nt->id) {
5537  pr->continuous(nt->_t);
5538  }
5539  }
5540 }
5541 
5542 // nrnthread_get_trajectory_requests helper for buffered trajectories
5543 // also for per time step return (no Vector and varrays is NULL)
5544 // if bsize > 0 then CoreNEURON will write that number of values to the vectors.
5545 // If CoreNEURON has a start time > 0, (current value of t),
5546 // the amount to augment the vector depends
5547 // on the vector's current size in the current NEURON context and the
5548 // varray[i_trajec] double* will be determined by that current size.
5549 // However determination of whether to do per time step return or buffering
5550 // can be specified on the NEURON side.
5552  int bsize,
5553  IvocVect* v,
5555  int i_pr,
5556  PlayRecord* pr,
5557  void** vpr,
5558  int i_trajec,
5559  int* types,
5560  int* indices,
5561  double** pvars,
5562  double** varrays) {
5563  int err = 0; // success
5564  if (bsize > 0) {
5565  int cur_size = v->size();
5566  if (v->buffer_size() < bsize + cur_size) {
5567  v->buffer_size(bsize + cur_size);
5568  }
5569  // nrnthread_trajectory_values will resize to correct size.
5570  v->resize(bsize + cur_size);
5571  varrays[i_trajec] = vector_vec(v) + cur_size; // begin filling here
5572  } else {
5573  // Danger, think this through better
5574  pvars[i_trajec] = static_cast<double*>(pd);
5575  }
5576  vpr[i_pr] = pr;
5577  if (static_cast<double const*>(pd) == &nt._t) {
5578  types[i_trajec] = 0;
5579  indices[i_trajec] = 0;
5580  } else {
5581  err = nrn_dblpntr2nrncore(pd, nt, types[i_trajec], indices[i_trajec]);
5582  if (err) {
5583  Fprintf(stderr,
5584  "Pointer %p of PlayRecord type %d ignored because not a Range Variable",
5585  fmt::ptr(static_cast<double*>(pd)),
5586  pr->type());
5587  }
5588  }
5589  return err;
5590 }
5591 
5592 // Transfer of trajectories from CoreNEURON to NEURON.
5593 // bsize == 0: Most flexible interactivity, transfer on a per time step basis.
5594 // bsize > 0: Greatest performance, fill the entire trajectory arrays on
5595 // the CoreNEURON side and transferring at the end of a CoreNEURON run.
5596 // Here, we ensure the arrays have at least bsize size (i.e. at least tstop/dt)
5597 // beyond their current size and CoreNEURON will start filling from the
5598 // current fill time, h.t, location of the arrays. I.e. starting at CoreNEURON's
5599 // start time. (Multiple calls to psolve append to these arrays.)
5600 // n_pr refers to the number of PlayRecord instances in the vpr array.
5601 // n_trajec refers to the number of trajectories to be recorded on the
5602 // CoreNEURON side and is the size of the types, indices, and varrays.
5603 // n_pr is different from n_trajec when one of the GLineRecord instances has
5604 // a gl_->name that is an expression that contains several range variables.
5605 // Per time step transfer now uses pvars so CoreNEURON scatters values
5606 // to pvars instead of collecting in double array.
5608  int& bsize,
5609  int& n_pr,
5610  void**& vpr,
5611  int& n_trajec,
5612  int*& types,
5613  int*& indices,
5614  double**& pvars,
5615  double**& varrays) {
5616  if (bsize > 0) { // but would NEURON rather use per time step mode
5618  bsize = 0;
5619  }
5620  }
5621  n_pr = 0;
5622  n_trajec = 0;
5623  types = NULL;
5624  indices = NULL;
5625  vpr = NULL;
5626  varrays = NULL;
5627  pvars = NULL;
5628  if (tid < nrn_nthread) {
5629  NrnThread& nt = nrn_threads[tid];
5630  auto* fr = net_cvode_instance->fixed_record_;
5631  // allocate
5632  for (auto& pr: *fr) {
5633  if (pr->ith_ == tid) {
5634  if (pr->type() == TvecRecordType || pr->type() == YvecRecordType) {
5635  n_pr++;
5636  n_trajec++;
5637 #if HAVE_IV
5638  } else if (pr->type() == GLineRecordType) {
5639  n_pr++;
5640  if (!pr->pd_) {
5641  GLineRecord* glr = (GLineRecord*) pr;
5642  assert(glr->gl_->expr_);
5643  glr->fill_pd();
5644  if (pr->pd_) {
5645  n_trajec++;
5646  } else {
5647  n_trajec += glr->pd_and_vec_.size();
5648  }
5649  } else {
5650  n_trajec++;
5651  }
5652  } else if (pr->type() == GVectorRecordType) {
5653  GVectorRecord* gvr = (GVectorRecord*) pr;
5654  if (gvr->count()) {
5655  bsize = 0;
5656  n_pr++;
5657  for (int j = 0; j < gvr->count(); ++j) {
5658  if (gvr->pdata(j)) {
5659  n_trajec++;
5660  }
5661  }
5662  }
5663 #endif // HAVE_IV
5664  }
5665  }
5666  }
5667  if (n_pr == 0) {
5668  return;
5669  }
5670  vpr = new void*[n_pr];
5671  types = new int[n_trajec];
5672  indices = new int[n_trajec];
5673  if (bsize > 0) {
5674  varrays = new double*[n_trajec];
5675  } else {
5676  pvars = new double*[n_trajec];
5677  } // if both varrays and pvars are NULL then CoreNEURON will return values every time step
5678  // everything allocated, start over and fill
5679  n_pr = 0;
5680  n_trajec = 0;
5681  for (auto& pr: *fr) {
5682  int err = 0;
5683  if (pr->ith_ == tid) {
5684  if (1) { // buffered or per time step value return
5685  if (pr->type() == TvecRecordType) {
5686  IvocVect* v = ((TvecRecord*) pr)->t_;
5687  err = trajec_buffered(nt,
5688  bsize,
5689  v,
5692  n_pr++,
5693  pr,
5694  vpr,
5695  n_trajec++,
5696  types,
5697  indices,
5698  pvars,
5699  varrays);
5700  if (err) {
5701  n_pr--;
5702  n_trajec--;
5703  }
5704  } else if (pr->type() == YvecRecordType) {
5705  IvocVect* v = ((YvecRecord*) pr)->y_;
5706  err = trajec_buffered(nt,
5707  bsize,
5708  v,
5709  pr->pd_,
5710  n_pr++,
5711  pr,
5712  vpr,
5713  n_trajec++,
5714  types,
5715  indices,
5716  pvars,
5717  varrays);
5718  if (err) {
5719  n_pr--;
5720  n_trajec--;
5721  }
5722 #if HAVE_IV
5723  } else if (pr->type() == GLineRecordType) {
5724  GLineRecord* glr = (GLineRecord*) pr;
5725  if (pr->pd_) { // glr->gl_->name is an expression resolved to a double*
5726  if (bsize && !glr->v_) {
5727  glr->v_ = new IvocVect(bsize);
5728  }
5729  IvocVect* v = glr->v_;
5730  err = trajec_buffered(nt,
5731  bsize,
5732  v,
5733  pr->pd_,
5734  n_pr++,
5735  pr,
5736  vpr,
5737  n_trajec++,
5738  types,
5739  indices,
5740  pvars,
5741  varrays);
5742  if (err) {
5743  n_pr--;
5744  n_trajec--;
5745  }
5746  } else { // glr->gl_->name expression involves several range variables
5747  int n = n_trajec;
5748  for (auto&& [pd, v]: glr->pd_and_vec_) {
5749  assert(pd);
5750  if (bsize && v == nullptr) {
5751  v = new IvocVect(bsize);
5752  }
5753  // TODO avoid the conversion?
5754  err = trajec_buffered(nt,
5755  bsize,
5756  v,
5758  n_pr,
5759  pr,
5760  vpr,
5761  n_trajec++,
5762  types,
5763  indices,
5764  pvars,
5765  varrays);
5766  if (err) {
5767  break;
5768  }
5769  }
5770  n_pr++;
5771  if (err) { // single error removes entire GLineRecord
5772  n_pr--;
5773  n_trajec = n;
5774  }
5775  }
5776  } else if (pr->type() == GVectorRecordType) {
5777  GVectorRecord* gvr = (GVectorRecord*) pr;
5778  if (gvr->count()) {
5779  int n = n_trajec;
5780  for (int j = 0; j < gvr->count(); ++j) {
5781  if (gvr->pdata(j)) {
5782  err = trajec_buffered(nt,
5783  bsize,
5784  NULL,
5785  gvr->pdata(j),
5786  n_pr,
5787  pr,
5788  vpr,
5789  n_trajec++,
5790  types,
5791  indices,
5792  pvars,
5793  varrays);
5794  if (err) {
5795  break;
5796  }
5797  }
5798  }
5799  n_pr++;
5800  if (err) { // single error removes entire GVectorRecord
5801  n_pr--;
5802  n_trajec = n;
5803  }
5804  }
5805 #endif // HAVE_IV
5806  }
5807  }
5808  }
5809  }
5810  if (n_trajec == 0) { // if errors reduced to 0, clean up
5811  assert(n_pr == 0);
5812  delete[] std::exchange(types, nullptr);
5813  delete[] std::exchange(indices, nullptr);
5814  delete[] std::exchange(vpr, nullptr);
5815  delete[] std::exchange(varrays, nullptr);
5816  delete[] std::exchange(pvars, nullptr);
5817  }
5818 #if 0
5819  printf("nrnthread_get_trajectory_requests tid=%d bsize=%d n_pr=%d n_trajec=%d\n", tid, bsize, n_pr, n_trajec);
5820  int i_trajec = 0;
5821  double* pd;
5822  for (int i=0; i < n_pr; ++i) {
5823  PlayRecord* pr = (PlayRecord*)vpr[i];
5824  pd = pr->pd_;
5825  if (pd) {
5826  printf(" %d %d prtype=%d %p type=%d index=%d\n", i, i_trajec, pr->type(), pd, types[i_trajec], indices[i_trajec]);
5827  i_trajec++;
5828  }else{
5829  assert(pr->type() == GLineRecordType);
5830  GLineRecord* glr = (GLineRecord*)pr;
5831  GLineRecordEData& ed = glr->pd_and_vec_;
5832  i_trajec += ed.size();
5833  }
5834  }
5835 #endif
5836  }
5837 }
5838 
// Per-time-step trajectory callback from CoreNEURON: advance this thread's
// clock to tt and invoke each PlayRecord's continuous(tt) so the recorded
// values are scattered back into NEURON. tid < 0 is a no-op sentinel.
void nrnthread_trajectory_values(int tid, int n_pr, void** vpr, double tt) { //, int n_trajec,
    // double* values) {
    if (tid < 0) {
        return;
    }
    if (tid < nrn_nthread) {
#if HAVE_IV
        ObjectContext obc(NULL);  // in case GLineRecord with expression.
#endif  // HAVE_IV
        // Keep thread time (and global t for thread 0) consistent with the
        // CoreNEURON time before evaluating any recorded expressions.
        nrn_threads[tid]._t = tt;
        if (tid == 0) {
            t = tt;
        }
        int flush = 0;  // only set/used in graphical (HAVE_IV) builds
        for (int i = 0; i < n_pr; ++i) {
            PlayRecord* pr = (PlayRecord*) vpr[i];
            pr->continuous(tt);
#if HAVE_IV
            if (pr->type() == GVectorRecordType) {  // see the movie
                flush = 1;
            }
        }
        // Graph data changed: redraw so the animation tracks the run.
        if (flush) {
            Oc oc;
            oc.run("screen_update()\n");
        }
#else
        // NB: the for-loop's closing brace is inside the #if/#else above so
        // the function compiles with or without InterViews graphics support.
        }
#endif  // HAVE_IV
    }
}
5870 
5871 // CoreNEURON received pointers to the data of each Vector (resized so that
5872 // the data is long enough to hold the trajectory (tstop/dt). On calling
5873 // this, each Vector needs only be resized to the actual vecsz.
5874 // Note that we have not bothered to fill the variables on the hoc side with
5875 // the last values, though we did fill t.
5876 // In the case where glr->pd_ = NULL, the GraphLine is an expression that
5877 // cannot be interpreted as a pointer and the expression involves one or
5878 // more range variables that individually were resolved into pointers. In
5879 // this case in glr->plot, the relevant Vector elements are copied into those
5880 // pointers and the expression is then evaluated and plotted.
5881 void nrnthread_trajectory_return(int tid, int n_pr, int bsize, int vecsz, void** vpr, double tt) {
5882  if (tid < 0) {
5883  return;
5884  }
5885  if (tid < nrn_nthread) {
5886  nrn_threads[tid]._t = tt;
5887  if (tid == 0) {
5888  t = tt;
5889  }
5890  for (int i = 0; i < n_pr; ++i) {
5891  PlayRecord* pr = (PlayRecord*) vpr[i];
5892  IvocVect* v = NULL;
5893  if (pr->type() == TvecRecordType) {
5894  v = ((TvecRecord*) pr)->t_;
5895  // reserved bsize but only used vecsz. (need non-zeroing resize).
5896  v->resize(v->size() - (bsize - vecsz)); // do not zero
5897  } else if (pr->type() == YvecRecordType) {
5898  v = ((YvecRecord*) pr)->y_;
5899  v->resize(v->size() - (bsize - vecsz)); // do not zero
5900 #if HAVE_IV
5901  } else if (pr->type() == GLineRecordType) {
5902  GLineRecord* glr = (GLineRecord*) pr;
5903  glr->plot(vecsz, tt);
5904 #endif // HAVE_IV
5905  } else {
5906  assert(0);
5907  }
5908  }
5909  }
5910 }
5911 
5912 // factored this out from deliver_net_events so we can
5913 // stay in the cache
5914 void NetCvode::check_thresh(NrnThread* nt) { // for default method
5915  nrn::Instrumentor::phase p_check_threshold("check-threshold");
5916  hoc_Item* pth = p[nt->id].psl_thr_;
5917 
5918  if (pth) { /* only look at ones with a threshold */
5919  hoc_Item* q1;
5920  ITERATE(q1, pth) {
5921  PreSyn* ps = (PreSyn*) VOIDITM(q1);
5922  // only the ones for this thread
5923  if (ps->nt_ == nt) {
5924  if (ps->thvar_) {
5925  ps->check(nt, nt->_t, 1e-10);
5926  }
5927  }
5928  }
5929  }
5930 
5931  for (auto* wl: wl_list_[nt->id]) {
5932  for (HTList* item = wl->First(); item != wl->End(); item = item->Next()) {
5933  WatchCondition* wc = static_cast<WatchCondition*>(item);
5934  wc->check(nt, nt->_t);
5935  }
5936  }
5937 }
5938 
5939 /** In nrncore_callbacks.cpp **/
5941  void (*cb)(int, int, int, int, int));
5942 extern "C" {
5943 void nrn2core_transfer_WATCH(void (*cb)(int, int, int, int, int));
5944 }
5945 void nrn2core_transfer_WATCH(void (*cb)(int, int, int, int, int)) {
// Walk every thread's WATCH-condition lists and report each WatchCondition
// to CoreNEURON through the callback cb.
5946  // should be revisited for possible simplification since wl_list now
5947  // segregated by threads.
5948  for (auto& htlists_of_thread: net_cvode_instance->wl_list_) {
5949  for (HTList* wl: htlists_of_thread) {
5950  for (HTList* item = wl->First(); item != wl->End(); item = item->Next()) {
5951  WatchCondition* wc = static_cast<WatchCondition*>(item);
// NOTE(review): original source line 5952 is missing from this extraction;
// it presumably passed wc to cb (or a helper taking wc and cb) -- confirm
// against the repository before relying on this listing.
5953  }
5954  }
5955  }
5956 }
5957 
// Fixed-step event delivery for one thread: deliver every event due in the
// half-open window up to nt->_t + dt/2, draining the bin queue (if enabled)
// until no further events appear, then restore the entry time tsav.
5958 void NetCvode::deliver_net_events(NrnThread* nt) { // for default method
5959  TQItem* q;
5960  double tm, tsav;
5961 #if NRNMPI
5962  if (use_multisend_) {
// NOTE(review): original source line 5963 is missing from this extraction;
// it presumably advanced the multisend machinery here -- confirm against
// the repository.
5964  }
5965 #endif
5966  int tid = nt->id;
5967  tsav = nt->_t;
5968  tm = nt->_t + 0.5 * nt->_dt;
5969 tryagain:
5970  // one of the events on the main queue may be a NetParEvent
5971  // which due to dt round off error can result in an event
5972  // placed on the bin queue to be delivered now, which
5973  // can put 0 delay events on to the main queue. So loop til
5974  // no events. The alternative would be to deliver an idt=0 event
5975  // immediately but that would very much change the sequence
5976  // with respect to what is being done here and it is unclear
5977  // how to fix the value of t there. This can be a do while loop
5978  // but I do not want to affect the case of not using a bin queue.
5979 
5980  if (nrn_use_bin_queue_) {
5981  // it was noticed on binq + compressed spike exchange +
5982  // threads that a transferred event may be languishing in
5983  // the interthread event buffer. Perhaps this is better done
5984  // as a multithread job at the end of nrn_spike_exchange
5985  // instead of every time step --- but here we are
5986  // already in a multithread job, so what is the overhead of
5987  // starting such a small one in nrn_spike_exchange.
5988 #if NRNMPI
5989  extern bool nrn_use_compress_;
5990  if (nrn_use_compress_ && nrn_nthread > 1) {
5991  p[tid].enqueue(this, nt);
5992  }
5993 #endif
// Drain the current bin: each event is released from the queue before
// delivery, and delivery may enqueue new 0-delay events on the main queue.
5994  while ((q = p[tid].tqe_->dequeue_bin()) != 0) {
5995  DiscreteEvent* db = (DiscreteEvent*) q->data_;
5996 #if PRINT_EVENT
5997  if (print_event_) {
5998  db->pr("binq deliver", nt_t, this);
5999  }
6000 #endif
6001  p[tid].tqe_->release(q);
6002  db->deliver(nt->_t, this, nt);
6003  }
6004  // assert(int(tm/nt->_dt)%1000 == p[tid].tqe_->nshift_);
6005  }
6006 
6007  deliver_events(tm, nt);
6008 
6009  if (nrn_use_bin_queue_) {
// A delivered event may have produced new work (see comment at tryagain).
6010  if (p[tid].tqe_->top()) {
6011  goto tryagain;
6012  }
6013  p[tid].tqe_->shift_bin(tm);
6014  }
6015  nt->_t = tsav;
6016 }
6017 
6018 void NetCvode::playrec_add(PlayRecord* pr) { // called by PlayRecord constructor
6019  // printf("NetCvode::playrec_add %p\n", pr);
6020  playrec_change_cnt_ = 0;
6021  prl_->push_back(pr);
6022 }
6023 
6024 void NetCvode::playrec_remove(PlayRecord* pr) { // called by PlayRecord destructor
6025  // printf("NetCvode::playrec_remove %p\n", pr);
// Invalidate the play/record bookkeeping so it is rebuilt before next use.
6026  playrec_change_cnt_ = 0;
6027  erase_first(*prl_, pr);
// NOTE(review): original source lines 6028-6029 are missing from this
// extraction; they presumably erased pr from the fixed-step play/record
// lists as well -- confirm against the repository.
6030 }
6031 
6033  for (const auto&& [i, e]: enumerate(*prl_)) {
6034  if (e == pr) {
6035  return i;
6036  }
6037  }
6038  return -1;
6039 }
6040 
6042  return prl_->at(i);
6043 }
6044 
6046  for (auto& item: *prl_) {
6047  if (item->uses(v)) {
6048  return item;
6049  }
6050  }
6051  return nullptr;
6052 }
6053 
6055  : pd_{std::move(pd)} {
6056  // printf("PlayRecord::PlayRecord %p\n", this);
6057  cvode_ = nullptr;
6058  ith_ = 0;
6059  if (pd_) {
6061  }
6062  ppobj_ = ppobj;
6063  if (ppobj_) {
6065  }
6067 }
6068 
6070  // printf("PlayRecord::~PlayRecord %p\n", this);
6072  if (ppobj_) {
6074  }
6076 }
6077 
6079  // printf("PlayRecord::disconnect %ls\n", (long)this);
6080  delete this;
6081 }
6082 
6084  cvode_ = cv;
6085  if (cv) {
6086  cv->record_add(this);
6087  }
6088  net_cvode_instance->fixed_record_->push_back(this);
6089 }
6090 
6092  cvode_ = cv;
6093  if (cv) {
6094  cv->play_add(this);
6095  }
6096  net_cvode_instance->fixed_play_->push_back(this);
6097 }
6098 
6100  Printf("PlayRecord\n");
6101 }
6102 
6104  : PlayRecord(sec->pnode[0]->v_handle(), ppobj) {
6105  // printf("TvecRecord\n");
6106  t_ = t;
6107  ObjObservable::Attach(t_->obj_, this);
6108 }
6109 
6111  // printf("~TvecRecord\n");
6112  ObjObservable::Detach(t_->obj_, this);
6113 }
6114 
6116  // printf("%s TvecRecord disconnect\n", hoc_object_name(t_->obj_));
6117  delete this;
6118 }
6119 
6121  record_add(cv);
6122 }
6123 
6125  t_->resize(0);
6126 }
6127 
6128 void TvecRecord::continuous(double tt) {
6129  t_->push_back(tt);
6130 }
6131 
6133  : PlayRecord(std::move(dh), ppobj) {
6134  // printf("YvecRecord\n");
6135  y_ = y;
6136  ObjObservable::Attach(y_->obj_, this);
6137 }
6138 
6140  // printf("~YvecRecord\n");
6141  ObjObservable::Detach(y_->obj_, this);
6142 }
6143 
6145  // printf("%s YvecRecord disconnect\n", hoc_object_name(y_->obj_));
6146  delete this;
6147 }
6148 
6150  record_add(cv);
6151 }
6152 
6154  if (!pd_) {
6155  hoc_execerr_ext("%s recording from invalid data reference.", hoc_object_name(y_->obj_));
6156  }
6157  y_->resize(0);
6158 }
6159 
6160 void YvecRecord::continuous(double tt) {
6161  y_->push_back(*pd_);
6162 }
6163 
6165  IvocVect* y,
6166  IvocVect* t,
6167  Object* ppobj)
6168  : PlayRecord(std::move(dh), ppobj) {
6169  // printf("VecRecordDiscrete\n");
6170  y_ = y;
6171  t_ = t;
6172  ObjObservable::Attach(y_->obj_, this);
6173  ObjObservable::Attach(t_->obj_, this);
6174  e_ = new PlayRecordEvent();
6175  e_->plr_ = this;
6176 }
6177 
6179  // printf("~VecRecordDiscrete\n");
6180  ObjObservable::Detach(y_->obj_, this);
6181  ObjObservable::Detach(t_->obj_, this);
6182  delete e_;
6183 }
6184 
6186  return new VecRecordDiscreteSave(this);
6187 }
6188 
6190  : PlayRecordSave(prl) {
6191  cursize_ = ((VecRecordDiscrete*) pr_)->y_->size();
6192 }
6195  check();
6197  vrd->y_->resize(cursize_);
6198  assert(size_t(cursize_) <= vrd->t_->size());
6199 }
6201  fprintf(f, "%d\n", cursize_);
6202 }
6204  char buf[100];
6205  nrn_assert(fgets(buf, 100, f));
6206  nrn_assert(sscanf(buf, "%d\n", &cursize_) == 1);
6207 }
6208 
6210  // printf("%s VecRecordDiscrete disconnect\n", hoc_object_name(y_->obj_));
6211  delete this;
6212 }
6213 
6215  record_add(cv);
6216 }
6217 
6219  y_->resize(0);
6220  if (t_->size() > 0) {
6222  }
6223 }
6224 
6226  record_init_items_->push_back(q);
6227 }
6228 
6229 void VecRecordDiscrete::deliver(double tt, NetCvode* nc) {
6230  y_->push_back(*pd_);
6231  assert(MyMath::eq(t_->elem(y_->size() - 1), tt, 1e-8));
6232  if (y_->size() < t_->size()) {
6233  e_->send(t_->elem(y_->size()), nc, nrn_threads);
6234  }
6235 }
6236 
6238  IvocVect* y,
6239  double dt,
6240  Object* ppobj)
6241  : PlayRecord(std::move(pd), ppobj) {
6242  // printf("VecRecordDt\n");
6243  y_ = y;
6244  dt_ = dt;
6245  ObjObservable::Attach(y_->obj_, this);
6246  e_ = new PlayRecordEvent();
6247  e_->plr_ = this;
6248 }
6249 
6251  // printf("~VecRecordDt\n");
6252  ObjObservable::Detach(y_->obj_, this);
6253  delete e_;
6254 }
6255 
6257  return new VecRecordDtSave(this);
6258 }
6259 
6261  : PlayRecordSave(prl) {}
6264  check();
6265 }
6266 
6268  // printf("%s VecRecordDt disconnect\n", hoc_object_name(y_->obj_));
6269  delete this;
6270 }
6271 
6273  record_add(cv);
6274 }
6275 
6277  y_->resize(0);
6279 }
6280 
6282  record_init_items_->push_back(q);
6283 }
6284 
6285 void VecRecordDt::deliver(double tt, NetCvode* nc) {
6286  auto* const ptr = static_cast<double*>(pd_);
6287  if (ptr == &t) {
6288  y_->push_back(tt);
6289  } else {
6290  y_->push_back(*ptr);
6291  }
6292  e_->send(tt + dt_, nc, nrn_threads);
6293 }
6294 
6296  auto const pd = hoc_hgetarg<double>(1);
6297  consist_sec_pd("Cvode.record", chk_access(), pd);
6298  IvocVect* y = vector_arg(2);
6299  IvocVect* t = vector_arg(3);
6300  delete playrec_uses(y);
6301  bool discrete = ((ifarg(4) && (int) chkarg(4, 0, 1) == 1) ? true : false);
6302  if (discrete) {
6303  new VecRecordDiscrete(pd, y, t);
6304  } else {
6305  delete playrec_uses(t);
6306  new TvecRecord(chk_access(), t);
6307  new YvecRecord(pd, y);
6308  }
6309 }
6310 
6312  while (auto* const pr = playrec_uses(vector_arg(1))) {
6313  delete pr;
6314  }
6315 }
6316 
6318  long i, j;
6319  fixed_record_->clear();
6320  fixed_play_->clear();
6321  if (gcv_) {
6322  gcv_->delete_prl();
6323  } else {
6324  lvardtloop(i, j) {
6325  p[i].lcv_[j].delete_prl();
6326  }
6327  }
6328  std::vector<PlayRecord*> to_delete{};
6329  for (auto& pr: *prl_) {
6330  if (!pr->pd_) {
6331  // Presumably the recorded value was invalidated elsewhere, e.g. it
6332  // was a voltage of a deleted node, or a range variable of a deleted
6333  // mechanism instance
6334  to_delete.push_back(pr);
6335  continue;
6336  }
6337  bool b = false;
6338  if (single_) {
6339  pr->install(gcv_);
6340  b = true;
6341  } else {
6342  if (pr->ppobj_ && ob2pntproc(pr->ppobj_)->nvi_) {
6343  pr->install((Cvode*) ob2pntproc(pr->ppobj_)->nvi_);
6344  b = true;
6345  } else {
6346  lvardtloop(i, j) {
6347  Cvode& cv = p[i].lcv_[j];
6348  if (cv.is_owner(pr->pd_)) {
6349  pr->install(&cv);
6350  b = true;
6351  break;
6352  }
6353  }
6354  }
6355  }
6356  if (b == false) {
6357  hoc_execerror("We were unable to associate a PlayRecord item with a RANGE variable",
6358  nullptr);
6359  }
6360  // and need to know the thread owners
6361  if (pr->ppobj_) {
6362  i = PP2NT(ob2pntproc(pr->ppobj_))->id;
6363  } else {
6364  i = owned_by_thread(pr->pd_);
6365  }
6366  if (i < 0) {
6367  hoc_execerror("We were unable to associate a PlayRecord item with a thread", nullptr);
6368  }
6369  pr->ith_ = i;
6370  }
6371  for (auto* pr: to_delete) {
6372  // Destructor should de-register things
6373  delete pr;
6374  }
6376 }
6377 
6378 // is a pointer to range variable in this cell
6380  for (int it = 0; it < nrn_nthread; ++it) {
6381  CvodeThreadData& z = CTD(it);
6382  NrnThread* nt_ = nrn_threads + it;
6383  // ugly start but include root in single for loop
6384  for (int i = -1; i < z.vnode_end_index_; ++i) {
6385  int in = (i == -1) ? z.rootnode_begin_index_ : i;
6386  if (i == -1) {
6387  i = z.vnode_begin_index_ - 1; // ready for ++i on next iteration
6388  }
6389  Node* nd = nt_->_v_node[in];
6390  if (handle == nd->v_handle()) {
6391  return true;
6392  }
6393  auto* pd = static_cast<double const*>(handle);
6394  Prop* p;
6395  for (p = nd->prop; p; p = p->next) {
6396  if (p->owns(handle)) {
6397  return true;
6398  }
6399  }
6400  if (nd->extnode) {
6401  if (pd >= nd->extnode->v && pd < (nd->extnode->v + nlayer)) {
6402  return true;
6403  }
6404  }
6405  // will need to check the linear mechanisms when there is a cvode
6406  // specific list of them and IDA is allowed for local step method.
6407  }
6408  if (nth_) {
6409  break;
6410  } // lvardt
6411  }
6412  return false;
6413 }
6414 
6416  if (nrn_nthread == 1) {
6417  return 0;
6418  }
6419  int in, it;
6420  for (it = 0; it < nrn_nthread; ++it) {
6421  NrnThread& nt = nrn_threads[it];
6422  int i1 = 0;
6423  int i3 = nt.end;
6424  for (in = i1; in < i3; ++in) {
6425  Node* nd = nt._v_node[in];
6426  if (handle == nd->v_handle()) {
6427  return it;
6428  }
6429  for (Prop* p = nd->prop; p; p = p->next) {
6430  if (p->owns(handle)) {
6431  return it;
6432  }
6433  }
6434  if (nd->extnode) {
6435  auto* pd = static_cast<double const*>(handle);
6436  if (pd >= nd->extnode->v && pd < (nd->extnode->v + nlayer)) {
6437  return it;
6438  }
6439  }
6440  // will need to check the line mechanisms when there is a cvode
6441  // specific list of them and IDA is allowed for local step method.
6442  }
6443  }
6444  return -1;
6445 }
6446 
6447 void NetCvode::consist_sec_pd(const char* msg,
6448  Section* sec,
6450  int in;
6451  Node* nd;
6452  for (in = -1; in < sec->nnode; ++in) {
6453  if (in == -1) {
6454  nd = sec->parentnode; // in case &v(0)
6455  if (!nd) {
6456  continue;
6457  }
6458  } else {
6459  nd = sec->pnode[in];
6460  }
6461  if (nd->v_handle() == handle) {
6462  return;
6463  }
6464  Prop* p;
6465  auto* const pd = static_cast<double const*>(handle);
6466  for (p = nd->prop; p; p = p->next) {
6467  if (p->owns(handle)) {
6468  return;
6469  }
6470  }
6471  if (nd->extnode) {
6472  if (pd >= nd->extnode->v && pd < (nd->extnode->v + nlayer)) {
6473  return;
6474  }
6475  }
6476  // will need to check the linear mechanisms when there is a cvode
6477  // specific list of them and IDA is allowed for local step method.
6478  }
6479  hoc_execerror(msg,
6480  " pointer not associated with currently accessed section\n\
6481 Use section ... (&var(x)...) intead of ...(&section.var(x)...)\n");
6482 }
6483 
6485  if (hoc_is_double_arg(1)) {
6486  int on = (int) chkarg(1, 0, 2);
6487  int i, j;
6488  if (on == 2) {
6489  maxstate_analyse();
6490  } else {
6491  if (gcv_) {
6492  gcv_->activate_maxstate(on ? true : false);
6493  } else {
6494  lvardtloop(i, j) {
6495  p[i].lcv_[j].activate_maxstate(on ? true : false);
6496  }
6497  }
6498  }
6499  return 0.;
6500  } else if (hoc_is_str_arg(1)) {
6501  Symbol* sym = name2sym(gargstr(1));
6502  double dummy;
6503  double* pamax = &dummy;
6504  if (ifarg(2)) {
6505  pamax = hoc_pgetarg(2);
6506  }
6507  return maxstate_analyse(sym, pamax);
6508  } else {
6509  int i, j, n;
6510  Vect* v = vector_arg(1);
6511  if (!cvode_active_) {
6512  v->resize(0);
6513  return 0.;
6514  }
6515  double* vp;
6516  n = 0;
6517  if (gcv_) {
6518  n += gcv_->neq_;
6519  } else {
6520  lvardtloop(i, j) {
6521  n += p[i].lcv_[j].neq_;
6522  }
6523  }
6524  v->resize(n);
6525  vp = vector_vec(v);
6526  int getacor = 0;
6527  if (ifarg(2)) {
6528  getacor = (int) chkarg(2, 0, 1);
6529  }
6530  j = 0;
6531  if (gcv_) {
6532  if (gcv_->maxstate_) {
6533  if (getacor) {
6534  gcv_->maxacor(vp);
6535  } else {
6536  gcv_->maxstate(vp);
6537  }
6538  }
6539  } else {
6540  lvardtloop(i, j) {
6541  Cvode& cv = p[i].lcv_[j];
6542  if (cv.maxstate_) {
6543  if (getacor) {
6544  cv.maxacor(vp + j);
6545  } else {
6546  cv.maxstate(vp + j);
6547  }
6548  }
6549  j += cv.neq_;
6550  }
6551  }
6552  return 0.;
6553  }
6554 }
6556  int j, n;
6557  Symbol* sym;
6558  double* ms;
6559  double* ma;
6560  n = z.nvsize_;
6561  ms = cv.n_vector_data(cv.maxstate_, it);
6562  ma = cv.n_vector_data(cv.maxacor_, it);
6563  auto const hdp = create_hdp(2);
6564  for (j = 0; j < n; ++j) {
6565  sym = hdp.retrieve_sym(static_cast<double*>(z.pv_[j]));
6566  auto msti = mst_->find((void*) sym);
6567  MaxStateItem* msi;
6568  if (msti == mst_->end()) {
6569  msi = new MaxStateItem();
6570  msi->sym_ = sym;
6571  msi->max_ = -1e9;
6572  msi->amax_ = -1e9;
6573  (*mst_)[(void*) sym] = msi;
6574  } else {
6575  msi = msti->second;
6576  }
6577  if (msi->max_ < ms[j]) {
6578  msi->max_ = ms[j];
6579  }
6580  if (msi->amax_ < ma[j]) {
6581  msi->amax_ = ma[j];
6582  }
6583  }
6584 }
6585 
6587  int i, it, j;
6588  Symbol* sym;
6589  if (!mst_) {
6590  int n = 0;
6591  for (sym = hoc_built_in_symlist->first; sym; sym = sym->next) {
6592  ++n;
6593  }
6594  mst_ = new MaxStateTable(3 * n);
6595  }
6596  for (auto ti: *mst_) {
6597  MaxStateItem* msi = ti.second;
6598  msi->max_ = -1e9;
6599  msi->amax_ = -1e9;
6600  }
6601  if (empty_) {
6602  return;
6603  }
6604  statename(0, 2);
6605  if (gcv_) {
6606  for (it = 0; it < nrn_nthread; ++it) {
6607  maxstate_analyze_1(it, *gcv_, gcv_->ctd_[it]);
6608  }
6609  } else {
6610  lvardtloop(i, j) {
6611  Cvode& cv = p[i].lcv_[j];
6612  maxstate_analyze_1(i, cv, cv.ctd_[0]);
6613  }
6614  }
6615 }
6616 
6617 double NetCvode::maxstate_analyse(Symbol* sym, double* pamax) {
6618  if (mst_) {
6619  auto msti = mst_->find((void*) sym);
6620  if (msti != mst_->end()) {
6621  *pamax = msti->second->amax_;
6622  return msti->second->max_;
6623  }
6624  }
6625  *pamax = -1e9;
6626  return -1e9;
6627 }
6628 
6629 static double lvardt_tout_;
6630 
6631 static void lvardt_integrate(neuron::model_sorted_token const& token, NrnThread& ntr) {
// Per-thread worker for the local-variable-timestep method: microstep this
// thread's queues until both pass lvardt_tout_, then retreat every local
// Cvode instance to exactly lvardt_tout_ and record.
6632  auto* const nt = &ntr;
6633  size_t err = NVI_SUCCESS;
6634  int id = nt->id;
// NOTE(review): original source line 6635 is missing from this extraction;
// it presumably declared nc (used below), e.g. the global NetCvode instance
// -- confirm against the repository.
6636  NetCvodeThreadData& p = nc->p[id];
6637  TQueue* tq = p.tq_;
6638  TQueue* tqe = p.tqe_;
6639  double tout = lvardt_tout_;
6640  nt->_stop_stepping = 0;
// Step while the cvode queue is strictly before tout or the event queue
// still has events at or before tout.
6641  while (tq->least_t() < tout || tqe->least_t() <= tout) {
6642  err = nc->local_microstep(token, ntr);
6643  if (nt->_stop_stepping) {
6644  nt->_stop_stepping = 0;
6645  return;
6646  }
6647  if (err != NVI_SUCCESS || stoprun) {
6648  return;
6649  }
6650  }
6651  int n = p.nlcv_;
6652  Cvode* lcv = p.lcv_;
// With local cvode instances, interpolate each back to tout and record its
// continuous variables; with none, just set the thread time.
// (Note the brace-less if whose else follows the loop.)
6653  if (n)
6654  for (int i = 0; i < n; ++i) {
6655  nc->retreat(tout, lcv + i);
6656  lcv[i].record_continuous();
6657  }
6658  else {
6659  nt->_t = tout;
6660  }
6661 }
6662 
6664  int err = NVI_SUCCESS;
6665  int tid;
6666  double til;
6667  nrn_use_busywait(1); // just a possibility
6668  auto const cache_token = nrn_ensure_model_data_are_sorted();
6669  if (empty_) {
6670  if (tout >= 0.) {
6671  while (nt_t < tout && !stoprun) {
6673  if (nrn_allthread_handle) {
6674  (*nrn_allthread_handle)();
6675  }
6676  }
6677  if (stoprun == 0) {
6678  nt_t = tout;
6679  }
6680  } else {
6681  if ((til = allthread_least_t(tid)) < 1e10) {
6683  } else {
6684  nt_t += 1e6;
6685  }
6686  if (nrn_allthread_handle) {
6687  (*nrn_allthread_handle)();
6688  }
6689  }
6690  } else if (gcv_) {
6691  if (tout >= 0.) {
6692  while (gcv_->t_ < tout || allthread_least_t(tid) < tout) {
6694  if (nrn_allthread_handle) {
6695  (*nrn_allthread_handle)();
6696  }
6697  if (err != NVI_SUCCESS || stoprun) {
6698  return err;
6699  }
6700  }
6701  retreat(tout, gcv_);
6703  } else {
6704  // advance or initialized
6705  double tc = gcv_->t_;
6706  initialized_ = false;
6707  while (gcv_->t_ <= tc && !initialized_) {
6709  if (nrn_allthread_handle) {
6710  (*nrn_allthread_handle)();
6711  }
6712  if (err != NVI_SUCCESS || stoprun) {
6713  return err;
6714  }
6715  }
6716  }
6717  } else { // lvardt
6718  if (tout >= 0.) {
6719  // Each thread could integrate independently to tout
6720  // as long as no thread got more than
6721  // a minimum delay interval ahead of any other.
6722  // For now just integrate by min delay intervals.
6723  lvardt_tout_ = tout;
6724  while (nt_t < tout) {
6725  nrn_multithread_job(cache_token, lvardt_integrate);
6726  if (nrn_allthread_handle) {
6727  (*nrn_allthread_handle)();
6728  }
6729  if (err != NVI_SUCCESS || stoprun) {
6730  return err;
6731  }
6732  int tid;
6733  allthread_least_t(tid);
6734  }
6735  } else {
6736  // nthread>1 is more or less purposeless if we mean
6737  // that only the least cvode of all threads advances.
6738  // (which is required if minimum delay = 0)
6739  if (nrn_nthread > 1) {
6740  hoc_execerror("Lvardt method from fadvance()",
6741  "presently limited to single thread.");
6742  }
6743  }
6744  }
6745  nrn_use_busywait(0);
6746  t = nt_t;
6747  dt = nt_dt;
6748  return err;
6749 }
6750 
// Multithread-job body: pop the earliest event from this thread's event
// queue and deliver it at its scheduled time.
6751 static void* deliver_for_thread(NrnThread* nt) {
// NOTE(review): original source line 6752 is missing from this extraction;
// it presumably declared nc (used below), e.g. the global NetCvode instance
// -- confirm against the repository.
6753  NetCvodeThreadData& d = nc->p[nt->id];
6754  TQItem* q = d.tqe_->least();
6755  DiscreteEvent* de = (DiscreteEvent*) q->data_;
6756  double tt = q->t_;
6757  d.tqe_->remove(q);
6758 #if PRINT_EVENT
6759  if (nc->print_event_) {
6760  de->pr("deliver", tt, nc);
6761  }
6762 #endif
6763  de->deliver(tt, nc, nt);
6764  return 0;
6765 }
6766 
6768  // printf("deliver_events til %20.15g\n", til);
6769  int tid;
6770  while (allthread_least_t(tid) <= til) {
6773  if (stoprun || nrn_allthread_handle) {
6774  return;
6775  }
6776  }
6777 }
6778 
6780  int err = NVI_SUCCESS;
6781  int tid;
6782  double tt = allthread_least_t(tid);
6783  double tdiff = tt - gcv_->t_;
6784  if (tdiff <= 0) {
6785  // since events do not internally retreat with the
6786  // global step, we should already be at the event time
6787  // if this is too strict, we could use eps(list_->t_).
6788  // if (tdiff != 0.0) { printf("tdiff=%g\n", tdiff); }
6789  assert(tdiff == 0.0 || (gcv_->tstop_begin_ <= tt && tt <= gcv_->tstop_end_));
6791  } else {
6793  }
6794  if ((tt = allthread_least_t(tid)) < gcv_->t_) {
6795  gcv_->interpolate(tt);
6796  }
6797  return err;
6798 }
6799 
6802  return 0;
6803 }
6804 
6806  MUTLOCK
6807  enqueueing_ = 1;
6808  MUTUNLOCK
6809 }
6810 
6812  // reduce (take minimum) of p[i].tqe_->least_t()
6813  double tt, min = 1e50;
6814  // setting enqueueing_ in interthread_send was a race. Logically it is not
6815  // needed. It is not clear if higher performance would result in having
6816  // a MUTEX for the NetCvode instance but that is the current implementation
6817  // instead of commenting out the enqueuing related lines.
6818  if (enqueueing_) {
6820  enqueueing_ = 0;
6821  }
6822  for (int id = 0; id < pcnt_; ++id) {
6823  tt = p[id].tqe_->least_t();
6824  if (tt < min) {
6825  tid = id;
6826  min = tt;
6827  }
6828  }
6829  return min;
6830 }
ReceiveFunc * pnt_receive
Definition: init.cpp:155
Section * chk_access()
Definition: cabcode.cpp:449
const char * secname(Section *sec)
name of section (for use in error messages)
Definition: cabcode.cpp:1674
void nrn_pushsec(Section *sec)
Definition: cabcode.cpp:130
void nrn_parent_info(Section *s)
Definition: cabcode.cpp:1589
double nrn_arc_position(Section *sec, Node *node)
Definition: cabcode.cpp:1755
void setup_topology(void)
Definition: cabcode.cpp:1635
int nrn_sec2cell_equals(Section *sec, Object *obj)
Definition: cabcode.cpp:267
int tree_changed
Definition: cabcode.cpp:51
static unsigned long abandon_
Definition: netcon.h:206
static unsigned long deliver_qthresh_
Definition: netcon.h:212
bool flag_
Definition: netcon.h:202
virtual void check(NrnThread *, double sendtime, double teps=0.0)
Definition: netcvode.cpp:5179
void condition(Cvode *)
Definition: netcvode.cpp:5197
static unsigned long send_qthresh_
Definition: netcon.h:205
virtual double value()
Definition: netcon.h:192
virtual ~ConditionEvent()
Definition: netcvode.cpp:5195
void abandon_statistics(Cvode *)
Definition: netcvode.cpp:5251
static unsigned long init_above_
Definition: netcon.h:204
static unsigned long abandon_above_
Definition: netcon.h:210
static unsigned long abandon_init_above_
Definition: netcon.h:208
double told_
Definition: netcon.h:199
static unsigned long eq_abandon_
Definition: netcon.h:207
double valthresh_
Definition: netcon.h:200
double valold_
Definition: netcon.h:199
static unsigned long abandon_below_
Definition: netcon.h:211
TQItem * qthresh_
Definition: netcon.h:201
static unsigned long abandon_init_below_
Definition: netcon.h:209
Definition: cvodeobj.h:97
int neq_
Definition: cvodeobj.h:245
void free_cvodemem()
Definition: cvodeobj.cpp:1031
double t_
Definition: cvodeobj.h:126
void maxstep(double)
Definition: cvodeobj.cpp:1019
double tstop_begin_
Definition: cvodeobj.h:249
virtual int handle_step(neuron::model_sorted_token const &, NetCvode *, double)
Definition: netcvode.cpp:2136
void init_prepare()
Definition: cvodeobj.cpp:865
int nctd_
Definition: cvodeobj.h:242
void states(double *)
Definition: occvode.cpp:1033
void dstates(double *)
Definition: occvode.cpp:1044
TQItem * tqitem_
Definition: cvodeobj.h:267
void record_add(PlayRecord *)
Definition: occvode.cpp:1092
CvodeThreadData * ctd_
Definition: cvodeobj.h:240
void play_add(PlayRecord *)
Definition: occvode.cpp:1133
void stat_init()
Definition: cvodeobj.cpp:859
void gather_y(N_Vector)
Definition: occvode.cpp:523
void record_continuous()
Definition: occvode.cpp:1101
void statistics()
Definition: cvodeobj.cpp:1407
N_Vector maxstate_
Definition: cvodeobj.h:233
void check_deliver(NrnThread *nt=0)
Definition: netcvode.cpp:5499
void fun_thread(neuron::model_sorted_token const &, double t, double *y, double *ydot, NrnThread *nt)
Definition: occvode.cpp:715
bool structure_change_
Definition: cvodeobj.h:237
void evaluate_conditions(NrnThread *nt=0)
Definition: netcvode.cpp:5470
bool can_retreat_
Definition: cvodeobj.h:128
NetCvode * ncv_
Definition: cvodeobj.h:244
void maxstate(double *)
Definition: cvodeobj.cpp:956
double time() const
Definition: cvodeobj.h:118
int order()
Definition: cvodeobj.cpp:984
void maxorder(int)
Definition: cvodeobj.cpp:997
NrnThread * nth_
Definition: cvodeobj.h:241
virtual int init(double t)
Definition: cvodeobj.cpp:1189
void acor(double *)
Definition: occvode.cpp:1066
double * n_vector_data(N_Vector, int)
Definition: occvode.cpp:485
virtual int interpolate(double t)
Definition: cvodeobj.cpp:1222
void activate_maxstate(bool)
Definition: cvodeobj.cpp:899
void delete_prl()
Definition: occvode.cpp:1077
void set_init_flag()
Definition: cvodeobj.cpp:780
bool use_daspk_
Definition: cvodeobj.h:208
bool is_owner(neuron::container::data_handle< double > const &)
Definition: netcvode.cpp:6379
void maxacor(double *)
Definition: cvodeobj.cpp:969
void scatter_y(neuron::model_sorted_token const &, double *, int)
Definition: occvode.cpp:498
double t0_
Definition: cvodeobj.h:126
double tn_
Definition: cvodeobj.h:126
N_Vector maxacor_
Definition: cvodeobj.h:234
bool initialize_
Definition: cvodeobj.h:127
void error_weights(double *)
Definition: occvode.cpp:1055
void minstep(double)
Definition: cvodeobj.cpp:1008
virtual int advance_tn(neuron::model_sorted_token const &)
Definition: cvodeobj.cpp:1117
virtual ~CvodeThreadData()
Definition: netcvode.cpp:1250
int nonvint_extra_offset_
Definition: cvodeobj.h:92
int nonvint_offset_
Definition: cvodeobj.h:91
PreSynList * psl_th_
Definition: cvodeobj.h:83
int vnode_begin_index_
Definition: cvodeobj.h:80
std::vector< double * > pv_
Definition: cvodeobj.h:87
int vnode_end_index_
Definition: cvodeobj.h:81
CvMembList * cv_memb_list_
Definition: cvodeobj.h:62
void delete_memb_list(CvMembList *)
Definition: netcvode.cpp:1326
CvMembList * no_cap_memb_
Definition: cvodeobj.h:65
BAMechList * before_breakpoint_
Definition: cvodeobj.h:66
CvMembList * cmlext_
Definition: cvodeobj.h:64
BAMechList * after_solve_
Definition: cvodeobj.h:67
int rootnode_begin_index_
Definition: cvodeobj.h:78
CvMembList * cmlcap_
Definition: cvodeobj.h:63
std::vector< PlayRecord * > * record_
Definition: cvodeobj.h:94
int rootnode_end_index_
Definition: cvodeobj.h:79
HTList * watch_list_
Definition: cvodeobj.h:84
BAMechList * before_step_
Definition: cvodeobj.h:68
std::vector< PlayRecord * > * play_
Definition: cvodeobj.h:93
virtual int type()
Definition: netcon.h:72
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:4679
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:2937
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:4698
virtual void deliver(double t, NetCvode *, NrnThread *)
Definition: netcvode.cpp:2925
static unsigned long discretevent_send_
Definition: netcon.h:81
static unsigned long discretevent_deliver_
Definition: netcon.h:84
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:4694
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:2933
virtual void send(double deliverytime, NetCvode *, NrnThread *)
Definition: netcvode.cpp:2920
virtual ~DiscreteEvent()
Definition: netcvode.cpp:4677
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:4688
virtual int pgvts_op(int &i)
Definition: netcon.h:65
virtual void frecord_init(TQItem *)
Definition: netcon.h:81
virtual NrnThread * thread()
Definition: netcvode.cpp:2929
void plot(int, double)
void fill_pd()
GraphLine * gl_
Definition: glinerec.h:29
IvocVect * v_
Definition: glinerec.h:30
GLineRecordEData pd_and_vec_
Definition: glinerec.h:34
neuron::container::data_handle< double > pdata(int)
Symbol * expr_
Definition: graph.h:365
Definition: htlist.h:34
void Remove()
Definition: htlist.cpp:75
void Remove(HTList *)
Definition: htlist.cpp:69
HTList * First()
Definition: htlist.h:66
HTList * Next()
Definition: htlist.h:75
HTList(void *=NULL)
Definition: htlist.cpp:41
HTList * End()
Definition: htlist.h:72
void RemoveAll()
Definition: htlist.cpp:84
Object * object()
Definition: objcmd.h:30
Object * pyobject()
Definition: objcmd.h:33
int execute(bool notify=true)
Definition: objcmd.cpp:94
const char * name()
Definition: objcmd.cpp:73
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:3469
HocCommand * stmt_
Definition: netcon.h:383
static HocEvent * alloc(const char *stmt, Object *, int, Object *pyact=nullptr)
Definition: netcvode.cpp:3340
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:3450
static void reclaim()
Definition: netcvode.cpp:3429
virtual void allthread_handle()
Definition: netcvode.cpp:3416
static unsigned long hocevent_deliver_
Definition: netcon.h:380
virtual ~HocEvent()
Definition: netcvode.cpp:3330
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3375
void clear()
Definition: netcvode.cpp:3368
int reinit_
Definition: netcon.h:385
HocCommand * stmt()
Definition: netcon.h:367
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:3491
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:3336
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:3435
Object * ppobj_
Definition: netcon.h:384
static HocEventPool * hepool_
Definition: netcon.h:387
void hefree()
Definition: netcvode.cpp:3360
static unsigned long hocevent_send_
Definition: netcon.h:379
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:3425
void mutconstruct(int)
Definition: ivocvect.h:90
void lock()
Definition: ivocvect.h:92
size_t size() const
Definition: ivocvect.h:42
void unlock()
Definition: ivocvect.h:95
Object * obj_
Definition: ivocvect.h:101
void resize(size_t n)
Definition: ivocvect.h:46
void push_back(double v)
Definition: ivocvect.h:80
double & elem(int n)
Definition: ivocvect.h:26
static unsigned long singleevent_deliver_
Definition: kssingle.h:88
static unsigned long singleevent_move_
Definition: kssingle.h:89
double amax_
Definition: netcvode.cpp:397
double max_
Definition: netcvode.cpp:396
Symbol * sym_
Definition: netcvode.cpp:395
T * alloc()
Definition: pool.hpp:89
void free_all()
Definition: pool.hpp:115
void hpfree(T *)
Definition: pool.hpp:105
static bool eq2(double x, double y, double e)
Definition: mymath.h:66
static bool eq(T x, T y, T e)
Definition: mymath.h:63
Definition: netcon.h:87
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:3006
double * weight_
Definition: netcon.h:116
virtual int type()
Definition: netcon.h:101
virtual NrnThread * thread()
Definition: netcvode.cpp:2990
virtual ~NetCon()
Definition: netcvode.cpp:4737
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:2951
void replace_src(PreSyn *)
Definition: netcvode.cpp:4768
bool active_
Definition: netcon.h:119
void chktar()
Definition: netcvode.cpp:370
static unsigned long netcon_send_active_
Definition: netcon.h:121
virtual void disconnect(Observable *)
Definition: netcvode.cpp:376
int cnt_
Definition: netcon.h:118
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:4777
virtual void send(double sendtime, NetCvode *, NrnThread *)
Definition: netcvode.cpp:2941
double delay_
Definition: netcon.h:113
NetCon(PreSyn *src, Object *target)
Definition: netcvode.cpp:4702
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:4799
Point_process * target_
Definition: netcon.h:115
Object * obj_
Definition: netcon.h:117
void rmsrc()
Definition: netcvode.cpp:4751
static unsigned long netcon_deliver_
Definition: netcon.h:123
void chksrc()
Definition: netcvode.cpp:365
PreSyn * src_
Definition: netcon.h:114
static unsigned long netcon_send_inactive_
Definition: netcon.h:122
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:2994
NetCon * netcon_
Definition: netcon.h:135
static NetCon * weight2netcon(double *)
Definition: netcvode.cpp:4823
NetConSave(NetCon *)
Definition: netcvode.cpp:4782
virtual ~NetConSave()
Definition: netcvode.cpp:4785
static NetCon * index2netcon(long)
Definition: netcvode.cpp:4847
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:4787
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:4810
static NetConSaveWeightTable * wtable_
Definition: netcon.h:142
static NetConSaveIndexTable * idxtable_
Definition: netcon.h:143
static void invalid()
Definition: netcvode.cpp:4818
std::vector< PreSyn * > * psl_
Definition: netcvode.h:246
TQItem * bin_event(double tdeliver, DiscreteEvent *, NrnThread *)
Definition: netcvode.cpp:2498
void dstates()
Definition: netcvode.cpp:4195
double state_magnitudes()
Definition: netcvode.cpp:6484
void ps_thread_link(PreSyn *)
Definition: netcvode.cpp:4875
void set_CVRhsFn()
Definition: cvodeobj.cpp:1038
std::string statename(int, int style=1)
Definition: netcvode.cpp:4380
int stiff_
Definition: netcvode.h:229
void fornetcon_prepare()
Definition: netcvode.cpp:3995
void states()
Definition: netcvode.cpp:4167
void deliver_events(double til, NrnThread *)
Definition: netcvode.cpp:2835
double allthread_least_t(int &tid)
Definition: netcvode.cpp:6811
int pgvts_event(double &tt)
Definition: netcvode.cpp:3575
NetCvodeThreadData * p
Definition: netcvode.h:249
int playrec_change_cnt_
Definition: netcvode.h:237
void play_init()
Definition: netcvode.cpp:4139
BAMechList * cvbml(int, BAMech *, Cvode *)
Definition: netcvode.cpp:1789
HocEventList * allthread_hocevents_
Definition: netcvode.h:262
void deliver_least_event(NrnThread *)
Definition: netcvode.cpp:2068
static double eps(double x)
Definition: netcvode.h:137
Cvode * gcv_
Definition: netcvode.h:243
void local_retreat(double, Cvode *)
Definition: netcvode.cpp:3502
bool is_local()
Definition: netcvode.cpp:1172
IvocVect * vec_event_store_
Definition: netcvode.h:239
void check_thresh(NrnThread *)
Definition: netcvode.cpp:5914
virtual ~NetCvode()
Definition: netcvode.cpp:1137
void playrec_add(PlayRecord *)
Definition: netcvode.cpp:6018
PreSynTable * pst_
Definition: netcvode.h:235
int maxorder_
Definition: netcvode.h:229
void vec_remove()
Definition: netcvode.cpp:6311
void p_construct(int)
Definition: netcvode.cpp:4916
std::vector< PlayRecord * > * prl_
Definition: netcvode.h:238
double atol_
Definition: netcvode.h:158
int pst_cnt_
Definition: netcvode.h:236
double rtol()
Definition: netcvode.h:151
void retreat(double, Cvode *)
Definition: netcvode.cpp:3532
bool empty_
Definition: netcvode.h:193
int stiff()
Definition: netcvode.h:160
MaxStateTable * mst_
Definition: netcvode.h:226
int maxorder()
Definition: netcvode.h:164
int owned_by_thread(neuron::container::data_handle< double > const &)
Definition: netcvode.cpp:6415
std::vector< PlayRecord * > * playrec_list()
Definition: netcvode.h:124
void init_events()
Definition: netcvode.cpp:2749
DiscreteEvent * pgvts_least(double &tt, int &op, int &init)
Definition: netcvode.cpp:3604
void statistics(int)
Definition: netcvode.cpp:3837
HocDataPaths create_hdp(int style)
Create a lookup table for variable names.
Definition: netcvode.cpp:4348
HTListList wl_list_
Definition: netcvode.h:247
void del_cv_memb_list()
Definition: netcvode.cpp:1289
std::vector< PlayRecord * > * fixed_record_
Definition: netcvode.h:130
void deliver_net_events(NrnThread *)
Definition: netcvode.cpp:5958
void fixed_record_continuous(neuron::model_sorted_token const &, NrnThread &nt)
Definition: netcvode.cpp:5524
void vec_event_store()
Definition: netcvode.cpp:2485
double atol()
Definition: netcvode.h:155
void spike_stat()
Definition: netcvode.cpp:3900
void localstep(bool)
Definition: netcvode.cpp:1176
int condition_order()
Definition: netcvode.h:140
int pgvts_cvode(double tt, int op)
Definition: netcvode.cpp:3661
const char * sym2name(Symbol *)
Definition: netcvode.cpp:4432
int fornetcon_change_cnt_
Definition: netcvode.h:222
int solve_when_threads(double)
Definition: netcvode.cpp:6663
bool use_partrans()
Definition: netcvode.cpp:3689
void playrec_setup()
Definition: netcvode.cpp:6317
void free_event_pools()
Definition: netcvode.cpp:2735
void error_weights()
Definition: netcvode.cpp:4283
void point_receive(int, Point_process *, double *, double)
void delete_list()
Definition: netcvode.cpp:1260
void null_event(double)
Definition: netcvode.cpp:2536
void clear_events()
Definition: netcvode.cpp:2663
std::vector< PlayRecord * > * fixed_play_
Definition: netcvode.h:129
void consist_sec_pd(const char *, Section *, neuron::container::data_handle< double > const &)
Definition: netcvode.cpp:6447
Object ** netconlist()
Definition: netcvode.cpp:907
void fill_local_ba_cnt(int, int *, NetCvodeThreadData &)
Definition: netcvode.cpp:1770
void update_ps2nt()
Definition: netcvode.cpp:4899
static double eps_
Definition: netcvode.h:198
NetCvode(bool single=true)
Definition: netcvode.cpp:1086
void alloc_list()
Definition: netcvode.cpp:1396
int use_long_double_
Definition: netcvode.h:251
void use_daspk(bool)
Definition: netcvode.cpp:1195
void maxstate_analyze_1(int, Cvode &, CvodeThreadData &)
Definition: netcvode.cpp:6555
void distribute_dinfo(int *, int)
Definition: netcvode.cpp:1342
void maxstate_analyse()
Definition: netcvode.cpp:6586
void fill_global_ba(NrnThread *, int, BAMechList **)
Definition: netcvode.cpp:1755
MUTDEC void set_enqueueing()
Definition: netcvode.cpp:6805
bool initialized_
Definition: netcvode.h:184
int pcnt_
Definition: netcvode.h:248
void remove_event(TQItem *, int threadid)
Definition: netcvode.cpp:2251
bool init_global()
Definition: netcvode.cpp:1428
void solver_prepare()
Definition: netcvode.cpp:3925
int order(int)
Definition: netcvode.cpp:4501
int matrix_change_cnt_
Definition: netcvode.h:233
void playrec_remove(PlayRecord *)
Definition: netcvode.cpp:6024
double minstep_
Definition: netcvode.h:230
int jacobian_
Definition: netcvode.h:229
void psl_append(PreSyn *)
Definition: netcvode.cpp:4624
void presyn_disconnect(PreSyn *)
Definition: netcvode.cpp:4631
int global_microstep()
Definition: netcvode.cpp:2116
int global_microstep_when_threads()
Definition: netcvode.cpp:6779
void hoc_event(double, const char *hoc_stmt, Object *ppobj=nullptr, int reinit=0, Object *pyact=nullptr)
Definition: netcvode.cpp:2545
void re_init(double t0=0.)
Definition: netcvode.cpp:3958
int condition_order_
Definition: netcvode.h:203
void fill_local_ba(int *, NetCvodeThreadData &)
Definition: netcvode.cpp:1764
int local_microstep(neuron::model_sorted_token const &, NrnThread &)
Definition: netcvode.cpp:2101
void acor()
Definition: netcvode.cpp:4312
int print_event_
Definition: netcvode.h:181
void event_queue_info()
Definition: netcvode.cpp:2902
void allthread_handle()
Definition: netcvode.cpp:2576
TQueue * event_queue(NrnThread *nt)
Definition: netcvode.cpp:3749
void vecrecord_add()
Definition: netcvode.cpp:6295
bool single_
Definition: netcvode.h:234
double rtol_
Definition: netcvode.h:158
int solve(double t)
Definition: netcvode.cpp:1949
void move_event(TQItem *, double, NrnThread *)
Definition: netcvode.cpp:2235
int enqueueing_
Definition: netcvode.h:250
bool deliver_event(double til, NrnThread *)
Definition: netcvode.cpp:2082
void deliver_events_when_threads(double)
Definition: netcvode.cpp:6767
Symbol * name2sym(const char *)
Definition: netcvode.cpp:4442
void allthread_handle(double, HocEvent *, NrnThread *)
Definition: netcvode.cpp:2586
int playrec_item(PlayRecord *)
Definition: netcvode.cpp:6032
double maxstep_
Definition: netcvode.h:230
double maxstep()
Definition: netcvode.h:173
void fixed_play_continuous(NrnThread *)
Definition: netcvode.cpp:5534
int jacobian()
Definition: netcvode.h:177
NetCon * install_deliver(neuron::container::data_handle< double > psrc, Section *ssrc, Object *osrc, Object *target, double threshold, double delay, double weight)
Definition: netcvode.cpp:4551
void print_event_queue()
Definition: netcvode.cpp:2854
bool localstep()
Definition: netcvode.cpp:1168
int pgvts(double tstop)
Definition: netcvode.cpp:3558
void record_init()
Definition: netcvode.cpp:4123
int structure_change_cnt_
Definition: netcvode.h:232
double minstep()
Definition: netcvode.h:169
PlayRecord * playrec_uses(void *)
Definition: netcvode.cpp:6045
TQItem * event(double tdeliver, DiscreteEvent *, NrnThread *)
Definition: netcvode.cpp:2522
bool use_daspk()
Definition: netcvode.cpp:1191
void structure_change()
Definition: netcvode.cpp:4540
virtual ~NetCvodeThreadData()
Definition: netcvode.cpp:1006
MUTDEC int nlcv_
Definition: netcvode.h:58
double immediate_deliver_
Definition: netcvode.h:62
void interthread_send(double, DiscreteEvent *, NrnThread *)
Definition: netcvode.cpp:1028
TQItemPool * tpool_
Definition: netcvode.h:54
SelfEventPool * sepool_
Definition: netcvode.h:53
int unreffed_event_cnt_
Definition: netcvode.h:61
TQueue * tq_
Definition: netcvode.h:49
InterThreadEvent * inter_thread_events_
Definition: netcvode.h:55
void enqueue(NetCvode *, NrnThread *)
Definition: netcvode.cpp:1064
hoc_Item * psl_thr_
Definition: netcvode.h:52
SelfQueue * selfqueue_
Definition: netcvode.h:56
TQueue * tqe_
Definition: netcvode.h:51
static void Detach(Object *, Observer *)
Definition: ocobserv.cpp:23
static void Attach(Object *, Observer *)
Definition: ocobserv.cpp:16
Definition: ivoc.h:36
void notify()
int run(int argc, const char **argv)
Definition: oclist.h:11
void remove_all()
Definition: oclist.cpp:228
long count()
Definition: oclist.cpp:175
void append(Object *)
Definition: oclist.cpp:81
Object * object(long)
Definition: oclist.cpp:219
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3303
virtual void frecord_init(TQItem *q)
Definition: netcvode.cpp:3299
virtual int type()
Definition: vrecitem.h:35
static unsigned long playrecord_send_
Definition: vrecitem.h:39
virtual ~PlayRecordEvent()
Definition: netcvode.cpp:296
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:311
PlayRecord * plr_
Definition: vrecitem.h:38
virtual NrnThread * thread()
Definition: netcvode.cpp:3312
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:303
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:3316
static unsigned long playrecord_deliver_
Definition: vrecitem.h:40
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:306
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:298
static PlayRecordSave * savestate_read(FILE *)
Definition: netcvode.cpp:325
virtual ~PlayRecord()
Definition: netcvode.cpp:6069
Cvode * cvode_
Definition: vrecitem.h:92
neuron::container::data_handle< double > pd_
Definition: vrecitem.h:90
Object * ppobj_
Definition: vrecitem.h:91
PlayRecord(neuron::container::data_handle< double > pd, Object *ppobj=nullptr)
Definition: netcvode.cpp:6054
virtual void frecord_init(TQItem *)
Definition: vrecitem.h:81
virtual void pr()
Definition: netcvode.cpp:6099
void play_add(Cvode *)
Definition: netcvode.cpp:6091
virtual void disconnect(Observable *)
Definition: netcvode.cpp:6078
virtual PlayRecordSave * savestate_save()
Definition: netcvode.cpp:321
void record_add(Cvode *)
Definition: netcvode.cpp:6083
virtual PlayRecordEvent * event()
Definition: vrecitem.h:61
int ith_
Definition: vrecitem.h:93
virtual void deliver(double t, NetCvode *)
Definition: vrecitem.h:60
virtual int type()
Definition: vrecitem.h:65
PlayRecord * pr_
Definition: vrecitem.h:105
virtual void savestate_read(FILE *)
Definition: vrecitem.h:102
PlayRecordSave(PlayRecord *)
Definition: netcvode.cpp:355
virtual ~PlayRecordSave()
Definition: netcvode.cpp:360
Definition: netcon.h:258
virtual void asf_err()
Definition: netcvode.cpp:5322
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:3151
static unsigned long presyn_send_direct_
Definition: netcon.h:326
int output_index_
Definition: netcon.h:308
long hi_index_
Definition: netcon.h:305
void init()
Definition: netcvode.cpp:5064
virtual void send(double sendtime, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3016
HocCommand * stmt_
Definition: netcon.h:302
void update(Observable *)
Definition: netcvode.cpp:5150
static unsigned long presyn_deliver_ncsend_
Definition: netcon.h:329
double mindelay()
Definition: netcvode.cpp:2825
Section * ssrc_
Definition: netcon.h:299
hoc_Item * hi_th_
Definition: netcon.h:304
IvocVect * idvec_
Definition: netcon.h:301
int rec_id_
Definition: netcon.h:307
virtual ~PreSyn()
Definition: netcvode.cpp:4975
void record_stmt(const char *)
Definition: netcvode.cpp:5074
void fanout(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3114
PreSyn(neuron::container::data_handle< double > src, Object *osrc, Section *ssrc=nullptr)
Definition: netcvode.cpp:4930
NrnThread * nt_
Definition: netcon.h:303
void record(IvocVect *, IvocVect *idvec=nullptr, int rec_id=0)
Definition: netcvode.cpp:5088
int gid_
Definition: netcon.h:309
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3066
virtual NrnThread * thread()
Definition: netcvode.cpp:3123
double delay_
Definition: netcon.h:296
static unsigned long presyn_deliver_direct_
Definition: netcon.h:328
Object * osrc_
Definition: netcon.h:298
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:3127
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:5023
IvocVect * tvec_
Definition: netcon.h:300
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:5008
double threshold_
Definition: netcon.h:295
void disconnect(Observable *)
Definition: netcvode.cpp:5137
neuron::container::data_handle< double > thvar_
Definition: netcon.h:297
static unsigned long presyn_deliver_netcon_
Definition: netcon.h:327
int use_min_delay_
Definition: netcon.h:306
static unsigned long presyn_send_mindelay_
Definition: netcon.h:325
NetConPList dil_
Definition: netcon.h:294
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:5035
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:5018
PreSyn * presyn_
Definition: netcon.h:340
static PreSynSaveIndexTable * idxtable_
Definition: netcon.h:347
static PreSyn * hindx2presyn(long)
Definition: netcvode.cpp:5046
virtual ~PreSynSave()
Definition: netcvode.cpp:5016
static void invalid()
Definition: netcvode.cpp:5042
PreSynSave(PreSyn *)
Definition: netcvode.cpp:5013
virtual void ref() const
Definition: resource.cpp:42
NrnThread * thread() override
Definition: netcvode.cpp:5419
STETransition * stet_
Definition: netcon.h:255
void deliver(double, NetCvode *, NrnThread *) override
Definition: netcvode.cpp:5387
STECondition(Point_process *, double(*)(Point_process *)=NULL)
Definition: netcvode.cpp:5289
double value() override
Definition: netcvode.cpp:5246
void pgvts_deliver(double t, NetCvode *) override
Definition: netcvode.cpp:5444
static DiscreteEvent * savestate_read(FILE *)
Definition: netcvode.cpp:3175
static void savestate_free()
Definition: netcvode.cpp:3220
void call_net_receive(NetCvode *)
Definition: netcvode.cpp:3281
virtual void savestate_restore(double deliverytime, NetCvode *)
Definition: netcvode.cpp:3170
virtual NrnThread * thread()
Definition: netcvode.cpp:3274
virtual int type()
Definition: netcon.h:159
static unsigned long selfevent_deliver_
Definition: netcon.h:175
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:3294
static unsigned long selfevent_send_
Definition: netcon.h:173
Datum * movable_
Definition: netcon.h:171
static unsigned long selfevent_move_
Definition: netcon.h:174
double * weight_
Definition: netcon.h:170
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:3224
static std::unique_ptr< SelfEventPPTable > sepp_
Definition: netcon.h:181
Point_process * target_
Definition: netcon.h:169
virtual ~SelfEvent()
Definition: netcvode.cpp:3158
static Point_process * index2pp(int type, int oindex)
Definition: netcvode.cpp:3198
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:3245
virtual DiscreteEvent * savestate_save()
Definition: netcvode.cpp:3160
double flag_
Definition: netcon.h:168
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:3278
TQItem * first()
Definition: tqueue.hpp:154
void remove_all()
Definition: tqueue.cpp:434
void * remove(TQItem *)
Definition: tqueue.cpp:419
TQItem * next(TQItem *q)
Definition: tqueue.hpp:157
void statistics()
Definition: tqueue.cpp:166
TQItem * second_least(double t)
Definition: tqueue.cpp:119
void remove(TQItem *)
Definition: tqueue.cpp:225
TQItem * insert(double t, void *data)
Definition: tqueue.cpp:190
TQItem * enqueue_bin(double t, void *data)
Definition: tqueue.cpp:209
double least_t()
Definition: tqueue.hpp:85
TQItem * least()
Definition: tqueue.hpp:68
void move(TQItem *, double tnew)
Definition: tqueue.cpp:148
void shift_bin(double t)
Definition: tqueue.hpp:99
void release(TQItem *)
Definition: tqueue.cpp:220
int nshift_
Definition: tqueue.hpp:119
TQItem * atomic_dq(double til)
Definition: tqueue.cpp:245
void forall_callback(void(*)(const TQItem *, int))
Definition: tqueue.cpp:103
void move_least(double tnew)
Definition: tqueue.cpp:128
void spike_stat(double *)
Definition: tqueue.cpp:180
virtual void record_init()
Definition: netcvode.cpp:6124
TvecRecord(Section *, IvocVect *tvec, Object *ppobj=nullptr)
Definition: netcvode.cpp:6103
IvocVect * t_
Definition: vrecitem.h:125
virtual void disconnect(Observable *)
Definition: netcvode.cpp:6115
virtual void install(Cvode *)
Definition: netcvode.cpp:6120
virtual ~TvecRecord()
Definition: netcvode.cpp:6110
virtual void continuous(double t)
Definition: netcvode.cpp:6128
IvocVect * y_
Definition: vrecitem.h:174
virtual void frecord_init(TQItem *)
Definition: netcvode.cpp:6225
virtual void install(Cvode *)
Definition: netcvode.cpp:6214
VecRecordDiscrete(neuron::container::data_handle< double >, IvocVect *y, IvocVect *t, Object *ppobj=nullptr)
Definition: netcvode.cpp:6164
virtual void deliver(double t, NetCvode *)
Definition: netcvode.cpp:6229
virtual ~VecRecordDiscrete()
Definition: netcvode.cpp:6178
virtual PlayRecordSave * savestate_save()
Definition: netcvode.cpp:6185
virtual void record_init()
Definition: netcvode.cpp:6218
virtual void disconnect(Observable *)
Definition: netcvode.cpp:6209
IvocVect * t_
Definition: vrecitem.h:175
PlayRecordEvent * e_
Definition: vrecitem.h:176
VecRecordDiscreteSave(PlayRecord *)
Definition: netcvode.cpp:6189
virtual ~VecRecordDiscreteSave()
Definition: netcvode.cpp:6193
virtual void savestate_write(FILE *)
Definition: netcvode.cpp:6200
virtual void savestate_read(FILE *)
Definition: netcvode.cpp:6203
virtual void savestate_restore()
Definition: netcvode.cpp:6194
PlayRecordEvent * e_
Definition: vrecitem.h:216
virtual void record_init()
Definition: netcvode.cpp:6276
virtual void disconnect(Observable *)
Definition: netcvode.cpp:6267
virtual ~VecRecordDt()
Definition: netcvode.cpp:6250
double dt_
Definition: vrecitem.h:215
virtual void frecord_init(TQItem *)
Definition: netcvode.cpp:6281
virtual void deliver(double t, NetCvode *)
Definition: netcvode.cpp:6285
virtual void install(Cvode *)
Definition: netcvode.cpp:6272
VecRecordDt(neuron::container::data_handle< double >, IvocVect *y, double dt, Object *ppobj=nullptr)
Definition: netcvode.cpp:6237
IvocVect * y_
Definition: vrecitem.h:214
virtual PlayRecordSave * savestate_save()
Definition: netcvode.cpp:6256
VecRecordDtSave(PlayRecord *)
Definition: netcvode.cpp:6260
virtual ~VecRecordDtSave()
Definition: netcvode.cpp:6262
virtual void savestate_restore()
Definition: netcvode.cpp:6263
virtual double value()
Definition: netcon.h:219
virtual NrnThread * thread()
Definition: netcvode.cpp:5415
static unsigned long watch_send_
Definition: netcon.h:243
static unsigned long watch_deliver_
Definition: netcon.h:244
virtual ~WatchCondition()
Definition: netcvode.cpp:5284
double(* c_)(Point_process *)
Definition: netcon.h:236
void activate(double flag)
Definition: netcvode.cpp:5292
virtual void deliver(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:5331
Point_process * pnt_
Definition: netcon.h:235
double nrflag_
Definition: netcon.h:234
virtual void asf_err()
Definition: netcvode.cpp:5318
int watch_index_
Definition: netcon.h:241
virtual void pgvts_deliver(double t, NetCvode *)
Definition: netcvode.cpp:5428
virtual void pr(const char *, double t, NetCvode *)
Definition: netcvode.cpp:5460
virtual void send(double, NetCvode *, NrnThread *)
Definition: netcvode.cpp:5326
WatchCondition(Point_process *, double(*)(Point_process *))
Definition: netcvode.cpp:5278
virtual void disconnect(Observable *)
Definition: netcvode.cpp:6144
virtual void install(Cvode *)
Definition: netcvode.cpp:6149
YvecRecord(neuron::container::data_handle< double >, IvocVect *y, Object *ppobj=nullptr)
Definition: netcvode.cpp:6132
IvocVect * y_
Definition: vrecitem.h:145
virtual ~YvecRecord()
Definition: netcvode.cpp:6139
virtual void continuous(double t)
Definition: netcvode.cpp:6160
virtual void record_init()
Definition: netcvode.cpp:6153
void class2oc(const char *, ctor_f *cons, dtor_f *destruct, Member_func *, Member_ret_obj_func *, Member_ret_str_func *)
Definition: hoc_oop.cpp:1631
Symbol * hoc_table_lookup(const char *, Symlist *)
Definition: symbol.cpp:48
char * gargstr(int narg)
Definition: code2.cpp:227
HocReturnType hoc_return_type_code
Definition: code.cpp:42
#define cnt
Definition: tqueue.hpp:44
#define v
Definition: md1redef.h:11
#define sec
Definition: md1redef.h:20
#define nodecount
Definition: md1redef.h:39
#define id
Definition: md1redef.h:41
#define i
Definition: md1redef.h:19
#define VecPlayContinuousType
Definition: vrecitem.h:17
#define PlayRecordEventType
Definition: vrecitem.h:18
std::vector< PreSyn * > PreSynList
Definition: cvodeobj.h:14
#define CTD(i)
Definition: cvodeobj.h:53
void erase_first(T &&iterable, value_type &&value)
Definition: enumerate.h:22
constexpr auto reverse(T &&iterable)
Definition: enumerate.h:62
constexpr auto enumerate(T &&iterable)
Definition: enumerate.h:90
ms
Definition: extargs.h:1
double chkarg(int, double low, double high)
Definition: code2.cpp:626
void nrn_fixed_step_group(neuron::model_sorted_token const &cache_token, int n)
Definition: fadvance.cpp:379
void nrn_fixed_step(neuron::model_sorted_token const &cache_token)
Definition: fadvance.cpp:324
int nrn_errno_check(int i)
Definition: fadvance.cpp:767
double hoc_epsilon
Definition: hoc_init.cpp:221
void hoc_execerror_fmt(const char *fmt, T &&... args)
Definition: formatting.hpp:8
std::vector< std::pair< double *, IvocVect * > > GLineRecordEData
Definition: glinerec.h:10
static RNG::key_type k
Definition: nrnran123.cpp:9
char buf[512]
Definition: init.cpp:13
int hoc_is_object_arg(int narg)
Definition: code.cpp:876
Object * hoc_name2obj(const char *name, int index)
Definition: hoc_oop.cpp:859
void nrn_hoc_unlock()
Definition: multicore.cpp:827
void hoc_execerr_ext(const char *fmt,...)
printf style specification of hoc_execerror message.
Definition: fileio.cpp:828
neuron::container::data_handle< double > hoc_val_handle(std::string_view s)
Definition: code2.cpp:715
Object ** hoc_temp_objvar(Symbol *symtemp, void *v)
Definition: hoc_oop.cpp:484
void hoc_pushpx(double *d)
Definition: code.cpp:834
int hoc_is_str_arg(int narg)
Definition: code.cpp:872
bool hoc_stack_type_is_ndim()
Definition: code.cpp:314
int hoc_is_double_arg(int narg)
Definition: code.cpp:864
void check_obj_type(Object *obj, const char *type_name)
Definition: hoc_oop.cpp:2098
IvocVect * vector_arg(int i)
Definition: ivocvect.cpp:265
void hoc_obj_ref(Object *obj)
Definition: hoc_oop.cpp:1844
char * hoc_object_name(Object *ob)
Definition: hoc_oop.cpp:73
int is_vector_arg(int i)
Definition: ivocvect.cpp:378
Symbol * hoc_spop()
Definition: code.cpp:928
double * hoc_pgetarg(int narg)
Definition: oc_ansi.h:253
Symbol * hoc_lookup(const char *)
Definition: symbol.cpp:59
void nrn_hoc_lock()
Definition: multicore.cpp:819
void hoc_obj_unref(Object *obj)
Definition: hoc_oop.cpp:1881
void hoc_push(neuron::container::generic_data_handle handle)
Definition: code.cpp:850
static int c
Definition: hoc.cpp:169
int hoc_usegui
Definition: hoc.cpp:121
#define assert(ex)
Definition: hocassrt.h:24
#define getarg
Definition: hocdec.h:17
#define OBJ(q)
Definition: hoclist.h:88
#define VOIDITM(q)
Definition: hoclist.h:89
hoc_List * hoc_l_newlist()
hoc_Item * hoc_l_insertvoid(hoc_Item *, void *)
void hoc_l_freelist(hoc_List **)
void hoc_l_delete(hoc_Item *)
Point_process * ob2pntproc(Object *ob)
Definition: hocmech.cpp:99
Point_process * ob2pntproc_0(Object *ob)
Definition: hocmech.cpp:89
Object ** hoc_objgetarg(int)
Definition: code.cpp:1614
Symlist * hoc_top_level_symlist
Definition: code2.cpp:677
int hoc_araypt(Symbol *, int)
Definition: code.cpp:2340
void nrn_notify_when_void_freed(void *p, Observer *ob)
Definition: ivoc.cpp:52
void nrn_notify_pointer_disconnect(Observer *ob)
Definition: ivoc.cpp:70
#define AFTER_SOLVE
Definition: membfunc.hpp:70
#define CAP
Definition: membfunc.hpp:60
#define BEFORE_STEP
Definition: membfunc.hpp:71
#define BEFORE_BREAKPOINT
Definition: membfunc.hpp:69
#define ITERATE(itm, lst)
Definition: model.h:18
#define SYMBOL
Definition: model.h:91
threshold
Definition: extdef.h:5
printf
Definition: extdef.h:5
void init()
Definition: init.cpp:141
const char * name
Definition: init.cpp:16
void move(Item *q1, Item *q2, Item *q3)
Definition: list.cpp:200
auto for_threads(NrnThread *threads, int num_threads)
Definition: multicore.h:133
phase
Reading phase number.
Definition: nrn_setup.hpp:53
NrnThread * nrn_threads
Definition: multicore.cpp:56
fixed_vector< double > IvocVect
Definition: ivocvect.hpp:72
double * vector_vec(IvocVect *v)
Definition: ivocvect.cpp:19
int nrn_nthread
Definition: multicore.cpp:55
void nrn2ncs_outputevent(int netcon_output_index, double firetime)
bool use_multisend_
Definition: multisend.cpp:53
bool stoprun
Definition: nrnoc_aux.cpp:19
bool cvode_active_
Definition: netcvode.cpp:36
int v_structure_change
Definition: nrnoc_aux.cpp:20
void hoc_execerror(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:39
void nrn_outputevent(unsigned char, double)
int structure_change_cnt
void nrn_multisend_advance()
void nrn_multithread_job(F &&job, Args &&... args)
Definition: multicore.hpp:161
bool nrn_use_localgid_
void hoc_warning(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:44
void nrn_ba(NrnThread *nt, int bat)
int diam_changed
Definition: nrnoc_aux.cpp:21
handle_interface< non_owning_identifier< storage > > handle
Non-owning handle to a Mechanism instance.
void notify_when_handle_dies(data_handle< double > dh, Observer *obs)
Register that obs should be notified when dh dies.
Definition: ivoc.cpp:91
constexpr std::size_t invalid_row
constexpr do_not_search_t do_not_search
Definition: data_handle.hpp:11
icycle< ncycle;++icycle) { int istride=stride[icycle];nrn_pragma_acc(loop vector) nrn_pragma_omp(loop bind(parallel)) for(int icore=0;icore< warpsize;++icore) { int i=ii+icore;if(icore< istride) { int ip=GPU_PARENT(i);GPU_RHS(i) -=GPU_B(i) *GPU_RHS(ip);GPU_RHS(i)/=GPU_D(i);} i+=istride;} ii+=istride;} }}void solve_interleaved2(int ith) { NrnThread *nt=nrn_threads+ith;InterleaveInfo &ii=interleave_info[ith];int nwarp=ii.nwarp;if(nwarp==0) return;int ncore=nwarp *warpsize;int *ncycles=ii.cellsize;int *stridedispl=ii.stridedispl;int *strides=ii.stride;int *rootbegin=ii.firstnode;int *nodebegin=ii.lastnode;if(0) { nrn_pragma_acc(parallel loop gang present(nt[0:1], strides[0:nstride], ncycles[0:nwarp], stridedispl[0:nwarp+1], rootbegin[0:nwarp+1], nodebegin[0:nwarp+1]) async(nt->stream_id)) nrn_pragma_omp(target teams loop map(present, alloc:nt[:1], strides[:nstride], ncycles[:nwarp], stridedispl[:nwarp+1], rootbegin[:nwarp+1], nodebegin[:nwarp+1])) for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} nrn_pragma_acc(wait(nt->stream_id)) } else { for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} }}void solve_interleaved1(int ith) { NrnThread *nt=nrn_threads+ith;int ncell=nt-> ncell
Definition: cellorder.cpp:784
int Sprintf(char(&buf)[N], const char *fmt, Args &&... args)
Redirect sprintf to snprintf if the buffer size can be deduced.
Definition: wrap_sprintf.h:14
int ii
Definition: cellorder.cpp:631
if(ncell==0)
Definition: cellorder.cpp:785
#define STATISTICS(arg)
Definition: netcon.h:21
std::unordered_map< void *, NetCon * > NetConSaveWeightTable
Definition: netcon.h:126
#define HocEventType
Definition: netcon.h:48
std::unordered_map< long, Point_process * > SelfEventPPTable
Definition: netcon.h:41
std::vector< NetCon * > NetConPList
Definition: netcon.h:184
std::unordered_map< long, PreSyn * > PreSynSaveIndexTable
Definition: netcon.h:332
std::unordered_map< long, NetCon * > NetConSaveIndexTable
Definition: netcon.h:127
#define PreSynType
Definition: netcon.hpp:26
#define DiscreteEventType
Definition: netcon.hpp:22
#define SelfEventType
Definition: netcon.hpp:25
#define NetConType
Definition: netcon.hpp:24
std::unordered_map< void *, MaxStateItem * > MaxStateTable
Definition: netcvode.h:34
std::vector< HocEvent * > HocEventList
Definition: netcvode.h:37
std::unordered_map< neuron::container::data_handle< double >, PreSyn * > PreSynTable
Definition: netcvode.h:18
#define PRINT_EVENT
Definition: netcvode.hpp:14
void v_setup_vectors()
Definition: treeset.cpp:1596
int is_point_process(Object *)
Definition: point.cpp:370
void recalc_diam(void)
Definition: treeset.cpp:923
neuron::model_sorted_token nrn_ensure_model_data_are_sorted()
Ensure neuron::container::* data are sorted.
Definition: treeset.cpp:2182
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
Definition: nrn_assert.h:33
#define tstopset
Definition: nrnconf.h:44
int nrn_dblpntr2nrncore(neuron::container::data_handle< double > dh, NrnThread &nt, int &type, int &index)
static Node * node(Object *)
Definition: netcvode.cpp:291
#define POINT_RECEIVE(type, tar, w, f)
Definition: netcvode.cpp:61
double dt
Definition: netcvode.cpp:70
std::vector< PreSyn * > * net_cvode_instance_psl()
Definition: netcvode.cpp:274
void nrn2core_transfer_WATCH(void(*cb)(int, int, int, int, int))
Definition: netcvode.cpp:5945
static double nc_setpost(void *v)
Definition: netcvode.cpp:616
static TQList * record_init_items_
Definition: netcvode.cpp:411
static int trajec_buffered(NrnThread &nt, int bsize, IvocVect *v, neuron::container::data_handle< double > pd, int i_pr, PlayRecord *pr, void **vpr, int i_trajec, int *types, int *indices, double **pvars, double **varrays)
Definition: netcvode.cpp:5551
short * nrn_is_artificial_
Definition: init.cpp:214
short * nrn_artcell_qindex_
Definition: init.cpp:215
static Object ** nc_precelllist(void *v)
Definition: netcvode.cpp:579
static double nc_event(void *v)
Definition: netcvode.cpp:674
void nrn_netcon_event(NetCon *nc, double td)
Definition: netcvode.cpp:147
static void * pending_selfqueue(NrnThread *)
Definition: netcvode.cpp:3754
#define lvardtloop(i, j)
Definition: netcvode.cpp:52
#define nt_dt
Definition: netcvode.cpp:73
double nrn_hoc2scatter_y(void *v)
Definition: netcvode.cpp:4253
void nrn2core_transfer_WatchCondition(WatchCondition *wc, void(*cb)(int, int, int, int, int))
In nrncore_callbacks.cpp.
#define ITE_SIZE
Definition: netcvode.cpp:987
void(* nrnthread_v_transfer_)(NrnThread *)
Definition: fadvance.cpp:139
int nrn_presyn_count(PreSyn *ps)
Definition: netcvode.cpp:165
static Object ** nc_syn(void *v)
Definition: netcvode.cpp:498
static double nc_active(void *v)
Definition: netcvode.cpp:661
Point_process * nrn_netcon_target(NetCon *nc)
Definition: netcvode.cpp:152
static double lvardt_tout_
Definition: netcvode.cpp:6629
static PreSyn * unused_presyn
Definition: netcvode.cpp:415
int * nrn_fornetcon_type_
Definition: init.cpp:202
std::vector< WatchCondition * > WatchList
Definition: netcvode.cpp:405
void artcell_net_move(Datum *v, Point_process *pnt, double tt)
Definition: netcvode.cpp:2206
void _nrn_watch_allocate(Datum *d, double(*c)(Point_process *), int i, Point_process *pnt, double flag)
Introduced so corenrn->nrn can request the mod file to make sure all WatchCondition are allocated.
Definition: netcvode.cpp:2430
void * nrn_presyn_netcon(PreSyn *ps, int i)
Definition: netcvode.cpp:168
void nrnthread_trajectory_values(int tid, int n_pr, void **vpr, double t)
Definition: netcvode.cpp:5839
static void allthread_handle_callback()
Definition: netcvode.cpp:174
void nrn_use_busywait(int)
Definition: multicore.cpp:1009
double nrn_hoc2fixed_step(void *)
Definition: netcvode.cpp:4229
static std::regex get_regex(int id)
Definition: netcvode.cpp:894
void ncs2nrn_integrate(double tstop)
Definition: netcvode.cpp:3700
static Object ** nc_prelist(void *v)
Definition: netcvode.cpp:531
void nrn_cleanup_presyn(PreSyn *)
Definition: netpar.cpp:970
int * nrn_fornetcon_index_
Definition: init.cpp:203
#define PP2t(pp)
Definition: netcvode.cpp:58
static double nc_preloc(void *v)
Definition: netcvode.cpp:417
static Object ** nc_preseg(void *v)
Definition: netcvode.cpp:445
double nrn_hoc2gather_y(void *v)
Definition: netcvode.cpp:4269
static Member_func members[]
Definition: netcvode.cpp:745
static IvocVect * event_info_tvec_
Definition: netcvode.cpp:2866
int nrn_fornetcon_cnt_
Definition: init.cpp:201
void single_event_run()
static void event_info_callback(const TQItem *, int)
Definition: netcvode.cpp:2871
static Object ** nc_get_recordvec(void *v)
Definition: netcvode.cpp:731
void _nrn_free_fornetcon(void **v)
Definition: netcvode.cpp:4110
void nrn_cvfun(double t, double *y, double *ydot)
Definition: netcvode.cpp:4224
bool nrn_use_bin_queue_
Definition: netcvode.cpp:225
void record_init_clear(const TQItem *q, int)
Definition: netcvode.cpp:4118
int nrn_netcon_info(NetCon *nc, double **pw, Point_process **target, double **th, double **del)
Definition: netcvode.cpp:157
static double nc_wcnt(void *v)
Definition: netcvode.cpp:740
double nrn_netcon_get_thresh(NetCon *nc)
Definition: netcvode.cpp:134
static void destruct(void *v)
Definition: netcvode.cpp:838
void net_event(Point_process *pnt, double time)
Definition: netcvode.cpp:2326
cTemplate ** nrn_pnt_template_
Definition: init.cpp:153
static Object ** nc_postcell(void *v)
Definition: netcvode.cpp:607
void nrn_net_send(Datum *v, double *weight, Point_process *pnt, double td, double flag)
Definition: netcvode.cpp:2257
bool nrn_use_fifo_queue_
Definition: netcvode.cpp:223
void nrn_netcon_set_thresh(NetCon *nc, double th)
Definition: netcvode.cpp:141
int nrn_netcon_weight(NetCon *nc, double **pw)
Definition: netcvode.cpp:122
void * nrn_interthread_enqueue(NrnThread *)
Definition: netcvode.cpp:6800
void(* ReceiveFunc)(Point_process *, double *, double)
Definition: netcvode.cpp:50
double nrn_hoc2fun(void *v)
Definition: netcvode.cpp:4234
void _nrn_free_watch(Datum *d, int offset, int n)
Called by Point_process destructor in translated mod file.
Definition: netcvode.cpp:2469
void(* nrn_allthread_handle)()
Definition: fadvance.cpp:68
int linmod_extra_eqn_count()
Object *(* nrnpy_seg_from_sec_x)(Section *, double)
Definition: netcvode.cpp:89
static double nc_record(void *v)
Definition: netcvode.cpp:699
void nrn_net_move(Datum *v, Point_process *pnt, double tt)
Definition: netcvode.cpp:2189
void nrnthread_trajectory_return(int tid, int n_pr, int bsize, int vecsz, void **vpr, double t)
Definition: netcvode.cpp:5881
static Member_ret_obj_func omembers[]
Definition: netcvode.cpp:760
static char * escape_bracket(const char *s)
Definition: netcvode.cpp:865
double t
Definition: cvodeobj.cpp:57
static Object ** nc_postseg(void *v)
Definition: netcvode.cpp:487
static Object ** nc_postcelllist(void *v)
Definition: netcvode.cpp:560
static DiscreteEvent * null_event_
Definition: netcvode.cpp:413
void artcell_net_send(Datum *v, double *weight, Point_process *pnt, double td, double flag)
Definition: netcvode.cpp:2283
double nrn_event_queue_stats(double *stats)
Definition: netcvode.cpp:126
static void all_pending_selfqueue(double tt)
Definition: netcvode.cpp:3785
static Object ** newoclist(int i, OcList *&o)
Definition: netcvode.cpp:516
TQueue * net_cvode_instance_event_queue(NrnThread *)
Definition: netcvode.cpp:270
double nrn_netcon_get_delay(NetCon *nc)
Definition: netcvode.cpp:116
#define PP2NT(pp)
Definition: netcvode.cpp:57
static void steer_val(void *v)
Definition: netcvode.cpp:773
std::vector< TQItem * > TQList
Definition: netcvode.cpp:407
void nrnthread_get_trajectory_requests(int tid, int &bsize, int &n_pr, void **&vpr, int &n_trajec, int *&types, int *&indices, double **&pvars, double **&varrays)
Definition: netcvode.cpp:5607
static void * cons(Object *o)
Definition: netcvode.cpp:802
MutexPool< SelfEvent > SelfEventPool
Definition: netcvode.cpp:406
static unsigned long net_event_cnt_
Definition: netcvode.cpp:238
static Object ** nc_precell(void *v)
Definition: netcvode.cpp:598
int nrn_use_daspk_
Definition: treeset.cpp:59
static Object ** nc_synlist(void *v)
Definition: netcvode.cpp:545
static Cvode * eval_cv
Definition: netcvode.cpp:5465
static double nc_valid(void *v)
Definition: netcvode.cpp:652
static void * chk_deliv(NrnThread *nt)
Definition: netcvode.cpp:5495
static int event_info_type_
Definition: netcvode.cpp:2865
void nrn_pending_selfqueue(double tt, NrnThread *)
Definition: netcvode.cpp:3759
int nrn_modeltype()
Definition: treeset.cpp:1785
MutexPool< HocEvent > HocEventPool
Definition: netcvode.cpp:3321
static double nc_srcgid(void *v)
Definition: netcvode.cpp:722
static void lvardt_integrate(neuron::model_sorted_token const &token, NrnThread &ntr)
Definition: netcvode.cpp:6631
int _nrn_netcon_args(void *v, double ***argslist)
Definition: netcvode.cpp:4103
static IvocVect * peqvec
Definition: netcvode.cpp:2843
NetCvode * net_cvode_instance
Definition: cvodestb.cpp:26
static double pending_selfqueue_deliver_
Definition: netcvode.cpp:3753
static double nc_postloc(void *v)
Definition: netcvode.cpp:477
void nrn_netcon_set_delay(NetCon *nc, double d)
Definition: netcvode.cpp:119
void NetCon_reg()
Definition: netcvode.cpp:842
#define NVI_SUCCESS
Definition: netcvode.cpp:56
bool nrn_trajectory_request_per_time_step_
Definition: netcvode.cpp:106
static IvocVect * event_info_flagvec_
Definition: netcvode.cpp:2867
static void peq(const TQItem *, int)
Definition: netcvode.cpp:2845
#define nt_t
Definition: netcvode.cpp:74
void nrn_use_daspk(int b)
Definition: netcvode.cpp:282
void nrn_watch_clear()
Watch info corenrn->nrn transfer requires all activated WatchCondition be deactivated prior to mirror...
Definition: netcvode.cpp:2456
static void * deliver_for_thread(NrnThread *nt)
Definition: netcvode.cpp:6751
void _nrn_watch_activate(Datum *d, double(*c)(Point_process *), int i, Point_process *pnt, int r, double flag)
Definition: netcvode.cpp:2340
static OcList * event_info_list_
Definition: netcvode.cpp:2868
int nrn_use_selfqueue_
Definition: netcvode.cpp:77
static Object ** nc_pre(void *v)
Definition: netcvode.cpp:507
static unsigned long deliver_cnt_
Definition: netcvode.cpp:238
static void * eval_cond(NrnThread *nt)
Definition: netcvode.cpp:5466
std::vector< PlayRecord * > * net_cvode_instance_prl()
Definition: netcvode.cpp:278
void nrn_update_ps2nt()
Definition: netcvode.cpp:4871
MutexPool< TQItem > TQItemPool
Definition: tqueue.hpp:12
#define GVectorRecordType
Definition: vrecitem.h:24
#define YvecRecordType
Definition: vrecitem.h:22
#define VecRecordDiscreteType
Definition: vrecitem.h:17
#define GLineRecordType
Definition: vrecitem.h:23
#define TvecRecordType
Definition: vrecitem.h:21
#define VecPlayStepType
Definition: vrecitem.h:19
#define VecRecordDtType
Definition: vrecitem.h:18
int const size_t const size_t n
Definition: nrngsl.h:10
size_t q
size_t p
size_t j
TBUF void nrn_multisend_send(PreSyn *ps, double t)
Definition: multisend.cpp:541
s
Definition: multisend.cpp:521
static void del(int *a)
Object * nrn_sec2cell(Section *)
Definition: cabcode.cpp:252
int nrn_matrix_cnt_
Definition: treeset.cpp:57
int ifarg(int)
Definition: code.cpp:1607
void nrnmusic_injectlist(void *vp, double tt)
Definition: nrnmusic.cpp:100
#define MUTCONSTRUCTED
Definition: nrnmutdec.h:32
#define MUTCONSTRUCT(mkmut)
Definition: nrnmutdec.h:33
#define MUTDESTRUCT
Definition: nrnmutdec.h:34
#define MUTLOCK
Definition: nrnmutdec.h:35
#define MUTUNLOCK
Definition: nrnmutdec.h:36
short index
Definition: cabvars.h:11
std::vector< Memb_func > memb_func
Definition: init.cpp:145
short type
Definition: cabvars.h:10
short * pnt_receive_size
Definition: init.cpp:157
std::vector< Memb_list > memb_list
Definition: init.cpp:146
pnt_receive_init_t * pnt_receive_init
Definition: init.cpp:156
BAMech ** bamech_
Definition: init.cpp:151
int n_memb_func
Definition: init.cpp:448
void nrn_onethread_job(int i, void *(*job)(NrnThread *))
Definition: multicore.cpp:875
void nrn_wait_for_threads()
Definition: multicore.cpp:891
static void pr(N_Vector x)
static double cell(void *v)
Definition: ocbbs.cpp:540
static double dummy
Definition: ocptrvector.cpp:23
static void pnode(Prop *)
Definition: psection.cpp:47
int find(const int, const int, const int, const int, const int)
int use_sparse13
Definition: treeset.cpp:58
#define nlayer
Definition: section_fwd.hpp:31
#define NULL
Definition: spdefs.h:105
Object ** hoc_temp_objptr(Object *)
Definition: code.cpp:151
Symlist * hoc_built_in_symlist
Definition: symbol.cpp:28
struct BAMech * next
Definition: membfunc.h:178
int type
Definition: membfunc.h:177
BAMechList * next
Definition: cvodeobj.h:47
BAMech * bam
Definition: cvodeobj.h:48
std::vector< Memb_list * > ml
Definition: cvodeobj.h:49
static void destruct(BAMechList **first)
Definition: netcvode.cpp:1224
BAMechList(BAMechList **first)
Definition: netcvode.cpp:1212
Wrapper for Memb_list in CVode related code.
Definition: cvodeobj.h:35
std::vector< Memb_list > ml
Definition: cvodeobj.h:41
int index
Definition: cvodeobj.h:42
CvMembList * next
Definition: cvodeobj.h:40
double * v
Definition: section_fwd.hpp:40
double ** argslist
Definition: netcvode.cpp:234
DiscreteEvent * de_
Definition: netcvode.cpp:401
void * hoc_mech
Definition: membfunc.h:87
nrn_ode_spec_t ode_spec
Definition: membfunc.h:77
int is_point
Definition: membfunc.h:86
nrn_ode_count_t ode_count
Definition: membfunc.h:75
nrn_cur_t current
Definition: membfunc.h:60
nrn_ode_matsol_t ode_matsol
Definition: membfunc.h:78
nrn_state_t state
Definition: membfunc.h:62
A view into a set of mechanism instances.
Definition: nrnoc_ml.h:34
int nodecount
Definition: nrnoc_ml.h:78
int * nodeindices
Definition: nrnoc_ml.h:74
Node ** nodelist
Definition: nrnoc_ml.h:68
Datum ** pdata
Definition: nrnoc_ml.h:75
Prop ** prop
Definition: nrnoc_ml.h:76
std::size_t get_storage_offset() const
Get the offset of this Memb_list into global storage for this type.
Definition: nrnoc_ml.h:195
Datum * _thread
Definition: nrnoc_ml.h:77
Definition: section.h:105
auto v_handle()
Definition: section.h:153
Section * sec
Definition: section.h:193
int v_node_index
Definition: section.h:212
Extnode * extnode
Definition: section.h:199
Prop * prop
Definition: section.h:190
BAMech * bam
Definition: multicore.h:41
struct NrnThreadBAList * next
Definition: multicore.h:42
Memb_list * ml
Definition: multicore.h:40
Represent main neuron object computed by single thread.
Definition: multicore.h:58
double _dt
Definition: multicore.h:60
NrnThreadMembList * tml
Definition: multicore.h:62
int ncell
Definition: multicore.h:64
int id
Definition: multicore.h:66
NrnThreadBAList * tbl[BEFORE_AFTER_SIZE]
Definition: multicore.h:103
int _stop_stepping
Definition: multicore.h:67
int end
Definition: multicore.h:65
Node ** _v_parent
Definition: multicore.h:91
Node ** _v_node
Definition: multicore.h:90
double _t
Definition: multicore.h:59
struct NrnThreadMembList * next
Definition: multicore.h:34
Memb_list * ml
Definition: multicore.h:35
Definition: hocdec.h:173
void * this_pointer
Definition: hocdec.h:178
int index
Definition: hocdec.h:175
int refcount
Definition: hocdec.h:174
cTemplate * ctemplate
Definition: hocdec.h:180
union Object::@47 u
A point process is computed just like regular mechanisms.
Definition: section_fwd.hpp:77
Section * sec
Definition: section_fwd.hpp:78
Definition: section.h:231
Datum * dparam
Definition: section.h:247
short _type
Definition: section.h:244
neuron::container::data_handle< double > var1_
Definition: nrnste.h:25
double value()
Definition: nrnste.h:18
neuron::container::data_handle< double > var2_
Definition: nrnste.h:25
StateTransitionEvent * ste_
Definition: nrnste.h:27
void deactivate()
Definition: netcvode.cpp:5379
std::unique_ptr< HocCommand > hc_
Definition: nrnste.h:26
void event()
Definition: nrnste.cpp:122
std::unique_ptr< STECondition > stec_
Definition: nrnste.h:28
void activate()
Definition: netcvode.cpp:5367
bool var1_is_time_
Definition: nrnste.h:30
int dest_
Definition: nrnste.h:29
Prop * prop
Definition: section.h:71
Point_process * pnt_
Definition: nrnste.h:58
std::vector< STEState > states_
Definition: nrnste.h:57
void transition(int src, int dest, neuron::container::data_handle< double > var1, neuron::container::data_handle< double > var2, std::unique_ptr< HocCommand >)
Definition: netcvode.cpp:5353
Definition: model.h:47
Symbol * next
Definition: hocdec.h:133
union Symbol::@28 u
struct Symbol::@45::@46 rng
short type
Definition: model.h:48
cTemplate * ctemplate
Definition: hocdec.h:126
char * name
Definition: model.h:61
Symbol * first
Definition: hocdec.h:76
Definition: tqitem.hpp:3
double t_
Definition: tqitem.hpp:8
void * data_
Definition: tqitem.hpp:7
Symbol * sym
Definition: hocdec.h:147
Symlist * symtable
Definition: hocdec.h:148
hoc_List * olist
Definition: hocdec.h:155
int count
Definition: hocdec.h:154
void(* steer)(void *)
Definition: hocdec.h:160
Non-template stable handle to a generic value.
T & literal_value()
Obtain a reference to the literal value held by this handle.
T get() const
Explicit conversion to any T.
int Fprintf(FILE *stream, const char *fmt, Args... args)
Definition: logger.hpp:8
int Printf(const char *fmt, Args... args)
Definition: logger.hpp:18