NEURON
netcvode.cpp
/*
# =============================================================================
# Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/

#include <float.h>
#include <map>
#include <mutex>

#include "coreneuron/nrnconf.h"

namespace coreneuron {
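// Map a Point_process to its owning NrnThread, and to that thread's time.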
#define PP2NT(pp) (nrn_threads + (pp)->_tid)
#define PP2t(pp) (PP2NT(pp)->_t)
//#define POINT_RECEIVE(type, tar, w, f) (*pnt_receive[type])(tar, w, f)

double NetCvode::eps_;
NetCvode* net_cvode_instance;
bool cvode_active_;

/// Flag to use the bin queue
bool nrn_use_bin_queue_;
void mk_netcvode() {
    if (!net_cvode_instance) {
        net_cvode_instance = new NetCvode();
    }
}

#ifdef DEBUG
// temporary
static int nrn_errno_check(int type) {
    printf("nrn_errno_check() was called on pid %d: errno=%d type=%d\n", nrnmpi_myid, errno, type);
    // assert(0);
    type = 0;
    return 1;
}
#endif

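// Replays one event recorded on the device: NET_RECEIVE code running on the
// GPU cannot touch the host-side queues directly, so each buffered
// net_send/net_move/net_event call is re-issued here on the host.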
// for _OPENACC and/or NET_RECEIVE_BUFFERING
// sendtype 0: net_send, 2: net_move, otherwise: net_event
void net_sem_from_gpu(int sendtype,
                      int i_vdata,
                      int weight_index_,
                      int ith,
                      int ipnt,
                      double td,
                      double flag) {
    NrnThread& nt = nrn_threads[ith];
    Point_process* pnt = (Point_process*) nt._vdata[ipnt];
    if (sendtype == 0) {
        net_send(nt._vdata + i_vdata, weight_index_, pnt, td, flag);
    } else if (sendtype == 2) {
        net_move(nt._vdata + i_vdata, pnt, td);
    } else {
        net_event(pnt, td);
    }
}

void net_send(void** v, int weight_index_, Point_process* pnt, double td, double flag) {
    NrnThread* nt = PP2NT(pnt);
    NetCvodeThreadData& p = net_cvode_instance->p[nt->id];
    SelfEvent* se = new SelfEvent;
    se->flag_ = flag;
    se->target_ = pnt;
    se->weight_index_ = weight_index_;
    if (v >= nt->_vdata) {
        se->movable_ = v;  // needed for SaveState
    }
    ++p.unreffed_event_cnt_;
    if (td < nt->_t) {
        char buf[100];
        std::snprintf(buf, sizeof(buf), "net_send td-t = %g", td - nt->_t);
        se->pr(buf, td, net_cvode_instance);
        abort();
        hoc_execerror("net_send delay < 0", 0);
    }
    TQItem* q = net_cvode_instance->event(td, se, nt);
    if (flag == 1.0 && v >= nt->_vdata) {
        *v = (void*) q;
    }
    // printf("net_send %g %s %g %p\n", td, pnt_name(pnt), flag, *v);
}

void artcell_net_send(void** v, int weight_index_, Point_process* pnt, double td, double flag) {
    net_send(v, weight_index_, pnt, td, flag);
}

void net_event(Point_process* pnt, double time) {
    NrnThread* nt = PP2NT(pnt);
    PreSyn* ps = nt->presyns +
                 nt->pnt2presyn_ix[corenrn.get_pnttype2presyn()[pnt->_type]][pnt->_i_instance];
    if (ps) {
        if (time < nt->_t) {
            char buf[100];
            std::snprintf(buf, sizeof(buf), "net_event time-t = %g", time - nt->_t);
            ps->pr(buf, time, net_cvode_instance);
            hoc_execerror("net_event time < t", 0);
        }
        ps->send(time, net_cvode_instance, nt);
    }
}

NetCvodeThreadData::NetCvodeThreadData()
    : tqe_{new TQueue<QTYPE>()} {
    inter_thread_events_.reserve(1000);
}

NetCvodeThreadData::~NetCvodeThreadData() {
    delete tqe_;
}

/// If the PreSyn is on a different thread than the target,
/// we have to lock the buffer
void NetCvodeThreadData::interthread_send(double td, DiscreteEvent* db, NrnThread* /* nt */) {
    std::lock_guard<OMP_Mutex> lock(mut);
    inter_thread_events_.emplace_back(InterThreadEvent{db, td});
}

void NetCvodeThreadData::interthread_enqueue(NrnThread* nt) {
    enqueue(net_cvode_instance, nt);
}

void NetCvodeThreadData::enqueue(NetCvode* nc, NrnThread* nt) {
    std::lock_guard<OMP_Mutex> lock(mut);
    for (const auto& ite: inter_thread_events_) {
        nc->bin_event(ite.t_, ite.de_, nt);
    }
    inter_thread_events_.clear();
}

NetCvode::NetCvode() {
    eps_ = 100. * DBL_EPSILON;
#if PRINT_EVENT
    print_event_ = 1;
#else
    print_event_ = 0;
#endif
    pcnt_ = 0;
    p = nullptr;
    p_construct(1);
    // eventually these should not have to be thread safe
    // for parallel network simulations hardly any presyns have
    // a threshold and it can be very inefficient to check the entire
    // presyn list for thresholds during the fixed step method.
    // So keep a threshold list.
}

NetCvode::~NetCvode() {
    if (net_cvode_instance == this) {
        net_cvode_instance = nullptr;
    }

    p_construct(0);
}

void nrn_p_construct() {
    net_cvode_instance->p_construct(nrn_nthread);
}

void NetCvode::p_construct(int n) {
    if (pcnt_ != n) {
        if (p) {
            delete[] p;
            p = nullptr;
        }

        if (n > 0)
            p = new NetCvodeThreadData[n];
        else
            p = nullptr;

        pcnt_ = n;
    }

    for (int i = 0; i < n; ++i)
        p[i].unreffed_event_cnt_ = 0;
}

TQItem* NetCvode::bin_event(double td, DiscreteEvent* db, NrnThread* nt) {
    if (nrn_use_bin_queue_) {
#if PRINT_EVENT
        if (print_event_) {
            db->pr("binq send", td, this);
        }
#endif
        return p[nt->id].tqe_->enqueue_bin(td, db);
    } else {
#if PRINT_EVENT
        if (print_event_) {
            db->pr("send", td, this);
        }
#endif
        return p[nt->id].tqe_->insert(td, db);
    }
}

TQItem* NetCvode::event(double td, DiscreteEvent* db, NrnThread* nt) {
#if PRINT_EVENT
    if (print_event_) {
        db->pr("send", td, this);
    }
#endif
    return p[nt->id].tqe_->insert(td, db);
}

void NetCvode::clear_events() {
    // DiscreteEvents may already have gone out of existence, so the tqe_
    // may contain many invalid item data pointers
    enqueueing_ = 0;
    for (int i = 0; i < nrn_nthread; ++i) {
        NetCvodeThreadData& d = p[i];
        delete d.tqe_;
        d.tqe_ = new TQueue<QTYPE>();
        d.unreffed_event_cnt_ = 0;
        d.inter_thread_events_.clear();
        d.tqe_->nshift_ = -1;
        d.tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
    }
}

void NetCvode::init_events() {
    for (int i = 0; i < nrn_nthread; ++i) {
        p[i].tqe_->nshift_ = -1;
        p[i].tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
    }

    for (int tid = 0; tid < nrn_nthread; ++tid) {  // can be done in parallel
        NrnThread* nt = nrn_threads + tid;

        for (int ipre = 0; ipre < nt->n_presyn; ++ipre) {
            PreSyn* ps = nt->presyns + ipre;
            ps->flag_ = false;
        }

        for (int inetc = 0; inetc < nt->n_netcon; ++inetc) {
            NetCon* d = nt->netcons + inetc;
            if (d->target_) {
                int type = d->target_->_type;
                if (corenrn.get_pnt_receive_init()[type]) {
                    (*corenrn.get_pnt_receive_init()[type])(d->target_, d->u.weight_index_, 0);
                } else {
                    int cnt = corenrn.get_pnt_receive_size()[type];
                    double* wt = nt->weights + d->u.weight_index_;
                    // not the first
                    for (int j = 1; j < cnt; ++j) {
                        wt[j] = 0.;
                    }
                }
            }
        }
    }
}

bool NetCvode::deliver_event(double til, NrnThread* nt) {
    TQItem* q = p[nt->id].tqe_->atomic_dq(til);
    if (q == nullptr) {
        return false;
    }

    DiscreteEvent* de = q->data_;
    double tt = q->t_;
    delete q;
#if PRINT_EVENT
    if (print_event_) {
        de->pr("deliver", tt, this);
    }
#endif
    de->deliver(tt, this, nt);

    /// In case of a self event we need to delete the self event
    if (de->type() == SelfEventType) {
        delete static_cast<SelfEvent*>(de);
    }
    return true;
}

void net_move(void** v, Point_process* pnt, double tt) {
    // assert, if possible, that *v == pnt->movable
    if (!(*v))
        hoc_execerror("No event with flag=1 for net_move in ",
                      corenrn.get_memb_func(pnt->_type).sym);

    TQItem* q = (TQItem*) (*v);
    // printf("net_move tt=%g %s *v=%p\n", tt, memb_func[pnt->_type].sym, *v);
    if (tt < PP2t(pnt))
        nrn_assert(0);

    net_cvode_instance->move_event(q, tt, PP2NT(pnt));
}

void artcell_net_move(void** v, Point_process* pnt, double tt) {
    net_move(v, pnt, tt);
}

void NetCvode::move_event(TQItem* q, double tnew, NrnThread* nt) {
    int tid = nt->id;

#if PRINT_EVENT
    if (print_event_) {
        SelfEvent* se = (SelfEvent*) q->data_;
        printf("NetCvode::move_event self event target %s t=%g, old=%g new=%g\n",
               pnt_name(se->target_),
               nt->_t,
               q->t_,
               tnew);
    }
#endif

    p[tid].tqe_->move(q, tnew);
}

void NetCvode::deliver_events(double til, NrnThread* nt) {
    // printf("deliver_events til %20.15g\n", til);
    /// Enqueue any outstanding events in the interthread event buffer
    p[nt->id].enqueue(this, nt);

    /// Deliver events. When the map is used, the loop is explicit
    while (deliver_event(til, nt))
        ;
}

void PreSyn::record(double tt) {
    spikevec_lock();
    if (gid_ > -1) {
        spikevec_gid.push_back(gid_);
        spikevec_time.push_back(tt);
    }
    spikevec_unlock();
}

bool PreSyn::check(NrnThread* nt) {
    if (value(nt) > 0.0) {
        if (flag_ == false) {
            flag_ = true;
            return true;
        }
    } else {
        flag_ = false;
    }
    return false;
}

void DiscreteEvent::send(double tt, NetCvode* ns, NrnThread* nt) {
    ns->event(tt, this, nt);
}

void DiscreteEvent::deliver(double /* tt */, NetCvode* /* ns */, NrnThread* /* nt */) {}

void DiscreteEvent::pr(const char* s, double tt, NetCvode* /* ns */) {
    printf("%s DiscreteEvent %.15g\n", s, tt);
}

void NetCon::send(double tt, NetCvode* ns, NrnThread* nt) {
    if (active_ && target_) {
        nrn_assert(PP2NT(target_) == nt);
        ns->bin_event(tt, this, PP2NT(target_));
    }
}

void NetCon::deliver(double tt, NetCvode* /* ns */, NrnThread* nt) {
    nrn_assert(target_);

    if (PP2NT(target_) != nt)
        printf("NetCon::deliver nt=%d target=%d\n", nt->id, PP2NT(target_)->id);

    nrn_assert(PP2NT(target_) == nt);
    int typ = target_->_type;
    nt->_t = tt;

    // printf("NetCon::deliver t=%g tt=%g %s\n", t, tt, pnt_name(target_));
    std::string ss("net-receive-");
    ss += nrn_get_mechname(typ);
    Instrumentor::phase p_get_pnt_receive(ss.c_str());
    (*corenrn.get_pnt_receive()[typ])(target_, u.weight_index_, 0);
#ifdef DEBUG
    if (errno && nrn_errno_check(typ))
        hoc_warning("errno set during NetCon deliver to NET_RECEIVE", (char*) 0);
#endif
}

void NetCon::pr(const char* s, double tt, NetCvode* /* ns */) {
    Point_process* pp = target_;
    printf("%s NetCon target=%s[%d] %.15g\n",
           s,
           corenrn.get_memb_func(pp->_type).sym,
           pp->_i_instance,
           tt);
}

void PreSyn::send(double tt, NetCvode* ns, NrnThread* nt) {
    record(tt);
    for (int i = nc_cnt_ - 1; i >= 0; --i) {
        NetCon* d = netcon_in_presyn_order_[nc_index_ + i];
        if (d->active_ && d->target_) {
            NrnThread* n = PP2NT(d->target_);

            if (nt == n)
                ns->bin_event(tt + d->delay_, d, n);
            else
                ns->p[n->id].interthread_send(tt + d->delay_, d, n);
        }
    }

#if NRNMPI
    if (output_index_ >= 0) {
#if NRN_MULTISEND
        if (use_multisend_) {
            nrn_multisend_send(this, tt, nt);
        } else {
#else
        {
#endif
            if (nrn_use_localgid_) {
                nrn_outputevent(localgid_, tt);
            } else {
                nrn2ncs_outputevent(output_index_, tt);
            }
        }
    }
#endif  // NRNMPI
}

void InputPreSyn::send(double tt, NetCvode* ns, NrnThread* nt) {
    for (int i = nc_cnt_ - 1; i >= 0; --i) {
        NetCon* d = netcon_in_presyn_order_[nc_index_ + i];
        if (d->active_ && d->target_) {
            NrnThread* n = PP2NT(d->target_);

            if (nt == n)
                ns->bin_event(tt + d->delay_, d, n);
            else
                ns->p[n->id].interthread_send(tt + d->delay_, d, n);
        }
    }
}

void PreSyn::deliver(double, NetCvode*, NrnThread*) {
    assert(0);  // no PreSyn delay.
}

void InputPreSyn::deliver(double, NetCvode*, NrnThread*) {
    assert(0);  // no InputPreSyn delay.
}

void SelfEvent::deliver(double tt, NetCvode* ns, NrnThread* nt) {
    nrn_assert(nt == PP2NT(target_));
    PP2t(target_) = tt;
    // printf("SelfEvent::deliver t=%g tt=%g %s\n", PP2t(target_), tt, pnt_name(target_));
    call_net_receive(ns);
}

void SelfEvent::call_net_receive(NetCvode* ns) {
    (*corenrn.get_pnt_receive()[target_->_type])(target_, weight_index_, flag_);

#ifdef DEBUG
    if (errno && nrn_errno_check(target_->_type))
        hoc_warning("errno set during SelfEvent deliver to NET_RECEIVE", (char*) 0);
#endif

    NetCvodeThreadData& nctd = ns->p[PP2NT(target_)->id];
    --nctd.unreffed_event_cnt_;
}

void SelfEvent::pr(const char* s, double tt, NetCvode*) {
    printf("%s", s);
    printf(" SelfEvent target=%s %.15g flag=%g\n", pnt_name(target_), tt, flag_);
}

void ncs2nrn_integrate(double tstop) {
    int total_sim_steps = static_cast<int>((tstop - nrn_threads->_t) / dt + 1e-9);
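    // e.g. with _t = 0, tstop = 10 and dt = 0.025 the exact quotient is 400,
    // but floating-point division can yield 399.999...; the 1e-9 epsilon keeps
    // the truncating cast from dropping the final step.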

    if (total_sim_steps > 3 && !nrn_have_gaps) {
        nrn_fixed_step_group_minimal(total_sim_steps);
    } else {
        nrn_fixed_single_steps_minimal(total_sim_steps, tstop);
    }

    // handle all the pending flag=1 self events
    for (int i = 0; i < nrn_nthread; ++i)
        nrn_assert(nrn_threads[i]._t == tstop);
}

// factored this out from deliver_net_events so we can
// stay in the cache
// net_send_buffer added so checking can be done on gpu
// while event queueing is on cpu.
// Remember: passing a reference variable triggers a cray
// compiler bug

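// Rising-edge detector: returns true only on the step where var first crosses
// above thresh; *flag stays latched until var falls back below thresh.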
static bool pscheck(double var, double thresh, int* flag) {
    if (var > thresh) {
        if (*flag == false) {
            *flag = true;
            return true;
        }
    } else {
        *flag = false;
    }
    return false;
}

double PreSyn::value(NrnThread* nt) {
    return nt->_actual_v[thvar_index_] - threshold_;
}

void NetCvode::check_thresh(NrnThread* nt) {  // for default method
    Instrumentor::phase p("check-threshold");
    double teps = 1e-10;

    nt->_net_send_buffer_cnt = 0;
    int net_send_buf_count = 0;
    PreSyn* presyns = nt->presyns;
    PreSynHelper* presyns_helper = nt->presyns_helper;
    double* actual_v = nt->_actual_v;

    if (nt->ncell == 0)
        return;

    nrn_pragma_acc(parallel loop present(
        nt [0:1], presyns_helper [0:nt->n_presyn], presyns [0:nt->n_presyn], actual_v [0:nt->end])
                       copy(net_send_buf_count) if (nt->compute_gpu) async(nt->stream_id))
    nrn_pragma_omp(target teams distribute parallel for map(tofrom: net_send_buf_count) if(nt->compute_gpu))
    for (int i = 0; i < nt->n_real_output; ++i) {
        PreSyn* ps = presyns + i;
        PreSynHelper* psh = presyns_helper + i;
        int idx = 0;
        int thidx = ps->thvar_index_;
        double v = actual_v[thidx];
        double threshold = ps->threshold_;
        int* flag = &(psh->flag_);

        if (pscheck(v, threshold, flag)) {
#ifndef CORENEURON_ENABLE_GPU
            nt->_net_send_buffer_cnt = net_send_buf_count;
            if (nt->_net_send_buffer_cnt >= nt->_net_send_buffer_size) {
                nt->_net_send_buffer_size *= 2;
                nt->_net_send_buffer = (int*) erealloc(nt->_net_send_buffer,
                                                       nt->_net_send_buffer_size * sizeof(int));
            }
#endif

            nrn_pragma_acc(atomic capture)
            nrn_pragma_omp(atomic capture)
            idx = net_send_buf_count++;

            nt->_net_send_buffer[idx] = i;
        }
    }
    nrn_pragma_acc(wait(nt->stream_id))
    nt->_net_send_buffer_cnt = net_send_buf_count;

    if (nt->compute_gpu && nt->_net_send_buffer_cnt) {
#ifdef CORENEURON_ENABLE_GPU
        int* nsbuffer = nt->_net_send_buffer;
#endif
        nrn_pragma_acc(update host(nsbuffer [0:nt->_net_send_buffer_cnt]) async(nt->stream_id))
        nrn_pragma_acc(wait(nt->stream_id))
        nrn_pragma_omp(target update from(nsbuffer [0:nt->_net_send_buffer_cnt]))
    }

    // on CPU...
    for (int i = 0; i < nt->_net_send_buffer_cnt; ++i) {
        PreSyn* ps = nt->presyns + nt->_net_send_buffer[i];
        ps->send(nt->_t + teps, net_cvode_instance, nt);
    }

    // Types that have WATCH statements. If any exist, the list is terminated by 0.
    if (nt->_watch_types) {
        for (int i = 0; nt->_watch_types[i] != 0; ++i) {
            int type = nt->_watch_types[i];
            (*corenrn.get_watch_check()[type])(nt, nt->_ml_list[type]);
            // may generate net_send events (with 0 (teps) delay)
        }
    }
}

// WATCH statements are rare. Conceptually they are very similar to
// PreSyn thresholds as above, but an optimal performance implementation
// for GPU is not obvious. Each WATCH statement threshold test could make
// use of pscheck. Note that a given POINT_PROCESS instance may have
// several active WATCH statements, or none at all.
// Also WATCH statements switch between active and inactive state.
//
// In NEURON,
// both PreSyn and WatchCondition were subclasses of ConditionEvent. When
// a WatchCondition fired in the fixed step method, it was placed on the queue
// with a delivery time of t+teps. WatchCondition::deliver called the NET_RECEIVE
// block with the proper flag (but a nullptr weight vector). WatchConditions
// were created, added/removed, and destroyed from a list as necessary.
// Perhaps the most commonly used WATCH statement is in the context of a
// ThresholdDetect Point_process which watches voltage and compares to
// an instance specific threshold parameter. A firing ThresholdDetect instance
// would call net_event(tdeliver) which then feeds into the standard
// artcell PreSyn sequence (using pntsrc_ instead of thvar_index_).
//
// So... the PreSyns have the same order as they are checked (although PreSyn
// data is AoS instead of SoA and the nested 'if' means a failure of SIMD.)
// But with multiple WATCH there is (from one kind of implementation viewpoint)
// yet another 'if' with regard to whether a WATCH is active. And if there
// are multiple WATCH, the size of the list is dynamic.
//
// An experimental implementation is to check all WATCH of all instances
// of a type, with the proviso that there is an active flag for each WATCH.
// i.e. active, below, var1, var2 are all SoA (except one of the var may
// be voltage). Can use 'if (active && pscheck(var1, var2, &below))'.
// The mod file net_send_buffering fragments can be used, which
// ultimately call net_send using a transient SelfEvent. i.e. all
// checking computation takes place in the context of the mod file without
// using explicit WatchCondition instances.
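//
// A minimal sketch of that per-type SoA check (hypothetical names: wc_active,
// wc_below, var1 and var2 are per-instance arrays, and
// net_send_buffering_fragment stands in for the generated mod-file code):
//
//     for (int i = 0; i < ml->nodecount; ++i) {
//         if (wc_active[i] && pscheck(var1[i], var2[i], &wc_below[i])) {
//             // enqueue a transient SelfEvent carrying the WATCH flag value
//             net_send_buffering_fragment(nt, ml, i);
//         }
//     }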

// events including binqueue events up to t+dt/2
void NetCvode::deliver_net_events(NrnThread* nt) {  // for default method
#if NRN_MULTISEND
    if (use_multisend_ && nt->id == 0) {
        nrn_multisend_advance();
    }
#endif
    int tid = nt->id;
    double tsav = nt->_t;
    double tm = nt->_t + 0.5 * nt->_dt;
tryagain:
    // one of the events on the main queue may be a NetParEvent
    // which due to dt round off error can result in an event
    // placed on the bin queue to be delivered now, which
    // can put 0 delay events on to the main queue. So loop until there
    // are no events. The alternative would be to deliver an idt=0 event
    // immediately, but that would very much change the sequence
    // with respect to what is being done here and it is unclear
    // how to fix the value of t there. This could be a do while loop,
    // but I do not want to affect the case of not using a bin queue.

    if (nrn_use_bin_queue_) {
        TQItem* q;
        while ((q = p[tid].tqe_->dequeue_bin()) != 0) {
            DiscreteEvent* db = q->data_;

#if PRINT_EVENT
            if (print_event_) {
                db->pr("binq deliver", nrn_threads->_t, this);
            }
#endif

            delete q;
            db->deliver(nt->_t, this, nt);
        }
        // assert(int(tm/nt->_dt)%1000 == p[tid].tqe_->nshift_);
    }

    deliver_events(tm, nt);

    if (nrn_use_bin_queue_) {
        if (p[tid].tqe_->top()) {
            goto tryagain;
        }
        p[tid].tqe_->shift_bin(tm);
    }

    nt->_t = tsav;

    /* before executing on gpu, we have to update the NetReceiveBuffer_t on GPU */
    update_net_receive_buffer(nt);

    for (auto& net_buf_receive: corenrn.get_net_buf_receive()) {
        std::string ss("net-buf-receive-");
        ss += nrn_get_mechname(net_buf_receive.second);
        Instrumentor::phase p_net_buf_receive(ss.c_str());
        (*net_buf_receive.first)(nt);
    }
}
}  // namespace coreneuron