// nrncore_callbacks.cpp
#include <vector>
#include <unordered_map>
#include "nrncore_callbacks.h"
#include "nrnconf.h"
#include "nrnmpi.h"
#include "section.h"
#include "netcon.h"
#include "nrncvode.h"
#include "nrniv_mf.h"
#include "hocdec.h"
#include "parse.hpp"
#include "nrnran123.h"  // globalindex written to globals.
#include "netcvode.h"   // for nrnbbcore_vecplay_write and PreSyn.flag_
#include "vrecitem.h"   // for nrnbbcore_vecplay_write

#include "nrnwrap_dlfcn.h"
#include "nrnsection_mapping.h"

extern bbcore_write_t* nrn_bbcore_write_;
extern bbcore_write_t* nrn_bbcore_read_;
extern short* nrn_is_artificial_;
extern bool corenrn_direct;
extern int* bbcore_dparam_size;
extern double nrn_ion_charge(Symbol*);
extern CellGroup* cellgroups_;
extern NrnMappingInfo mapinfo;
extern char* pnt_map;
extern void* nrn_interthread_enqueue(NrnThread*);

/** Populate function pointers by mapping function pointers for callback */
void map_coreneuron_callbacks(void* handle) {
    for (int i = 0; cnbs[i].name; ++i) {
        void* sym = NULL;
#if defined(HAVE_DLFCN_H)
        sym = dlsym(handle, cnbs[i].name);
#endif
        if (!sym) {
            fprintf(stderr, "Could not get symbol %s from CoreNEURON\n", cnbs[i].name);
            hoc_execerror("dlsym returned NULL", NULL);
        }
        void** c = (void**) sym;
        *c = (void*) (cnbs[i].f);
    }
}
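
// Illustrative sketch (not part of the build) of what the loop above relies
// on: each cnbs[] entry pairs a symbol name exported by the CoreNEURON
// library with the NEURON function that should service it. The symbol on the
// CoreNEURON side is a global function pointer, so dlsym() returns the address
// of that pointer and the assignment fills it in. The name and signature
// below are hypothetical.
#if 0
// In the CoreNEURON shared library:
extern "C" int (*nrn2core_example_cb_)(int tid);  // dlsym finds &nrn2core_example_cb_
// In NEURON, a cnbs[] entry {"nrn2core_example_cb_", f} makes
// *(&nrn2core_example_cb_) = f, so CoreNEURON can call back into NEURON.
#endif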

void write_memb_mech_types_direct(std::ostream& s) {
    // list of Memb_func names, types, point type info, is_ion
    // and data, pdata instance sizes. If the mechanism is an eion type,
    // the following line is the charge.
    // Not all Memb_func are necessarily used in the model.
    s << bbcore_write_version << std::endl;
    s << n_memb_func << std::endl;
    for (int type = 2; type < n_memb_func; ++type) {
        const char* w = " ";
        Memb_func& mf = memb_func[type];
        Memb_list& ml = memb_list[type];
        s << mf.sym->name << w << type << w << int(pnt_map[type])
          << w  // the pointtype, 0 means not a POINT_PROCESS
          << nrn_is_artificial_[type] << w << nrn_is_ion(type)
          << w << bbcore_dparam_size[type] << w;

        int n_vars = ml.get_num_variables();
        s << n_vars;
        for (int i = 0; i < n_vars; ++i) {
            s << w << ml.get_array_dims(i);
        }
        s << std::endl;

        if (nrn_is_ion(type)) {
            s << nrn_ion_charge(mf.sym) << std::endl;
        }
    }
}
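// For orientation, and assuming the field reconstruction above, one emitted
// mechanism line has the shape (values illustrative):
//   <name> <type> <pointtype> <is_artificial> <is_ion> <dparam_size> <n_vars> <dims...>
// with an extra line holding the ion charge when the mechanism is an eion type.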


// just for secondorder and Random123_globalindex and legacy units flag
int get_global_int_item(const char* name) {
    if (strcmp(name, "secondorder") == 0) {
        return secondorder;
    } else if (strcmp(name, "Random123_global_index") == 0) {
        return nrnran123_get_globalindex();
    }
    return 0;
}

// Successively return global double info. Begin with p == NULL;
// iteration is done when NULL is returned.
void* get_global_dbl_item(void* p, const char*& name, int& size, double*& val) {
    Symbol* sp = (Symbol*) p;
    if (sp == NULL) {
        sp = hoc_built_in_symlist->first;
    }
    for (; sp; sp = sp->next) {
        if (sp->type == VAR && sp->subtype == USERDOUBLE) {
            name = sp->name;
            if (is_array(*sp)) {
                Arrayinfo* a = sp->arayinfo;
                if (a->nsub == 1) {
                    size = a->sub[0];
                    val = new double[size];
                    for (int i = 0; i < a->sub[0]; ++i) {
                        char n[256];
                        Sprintf(n, "%s[%d]", sp->name, i);
                        val[i] = *hoc_val_pointer(n);
                    }
                }
            } else {
                size = 0;
                val = new double[1];
                val[0] = *sp->u.pval;
            }
            return sp->next;
        }
    }
    return NULL;
}
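
// A minimal sketch of the intended enumeration protocol for the function
// above, assuming the caller owns the new[]-allocated val. Note that when the
// matching Symbol is the last one in the list, the returned cursor is NULL
// even though an item was produced, so a robust caller may also need to check
// whether name was set.
#if 0
const char* name = nullptr;
int size = 0;
double* val = nullptr;
for (void* p = get_global_dbl_item(nullptr, name, size, val); p;
     p = get_global_dbl_item(p, name, size, val)) {
    // consume name, size (0 means scalar), and val
    delete[] val;
}
#endif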


/**
  Copy weights from all coreneuron::NrnThread to NetCon instances.
  This depends on the CoreNEURON weight order for each thread to be
  the same as was originally sent from NEURON. See how that order
  was constructed in CellGroup::mk_cgs_netcon_info.
**/

void nrnthreads_all_weights_return(std::vector<double*>& weights) {
    std::vector<int> iw(nrn_nthread);  // index for each thread
    Symbol* ncsym = hoc_lookup("NetCon");
    hoc_List* ncl = ncsym->u.ctemplate->olist;
    hoc_Item* q;
    ITERATE(q, ncl) {
        Object* ho = (Object*) VOIDITM(q);
        NetCon* nc = (NetCon*) ho->u.this_pointer;
        std::size_t ith = 0;  // if no _vnt, put in thread 0
        if (nc->target_ && nc->target_->_vnt) {
            ith = std::size_t(((NrnThread*) (nc->target_->_vnt))->id);
        }
        for (int i = 0; i < nc->cnt_; ++i) {
            nc->weight_[i] = weights[ith][iw[ith]++];
        }
    }
}
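// Note on ordering: weights[ith] is the flat weight block CoreNEURON used for
// thread ith, and iw[ith] is a cursor into it, so the NetCons targeting thread
// ith consume consecutive slices of weights[ith] in the same order that
// CellGroup::mk_cgs_netcon_info originally emitted them.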

/** @brief Return location for CoreNEURON to copy data into.
 * The type is mechanism type or special negative type for voltage,
 * i_membrane_, or time. See coreneuron/io/nrn_setup.cpp:legacy_index2pointer.
 * We allow coreneuron to copy to NEURON's AoS data as CoreNEURON knows
 * how its data is arranged (SoA and possibly permuted).
 * This function figures out the size (just for sanity check)
 * and data pointer to be returned based on type and thread id.
 * The ARTIFICIAL_CELL type case is special as there is no thread specific
 * Memb_list for those.
 */
size_t nrnthreads_type_return(int type, int tid, double*& data, std::vector<double*>& mdata) {
    size_t n = 0;
    data = NULL;
    mdata.clear();
    if (tid >= nrn_nthread) {
        return n;
    }
    NrnThread& nt = nrn_threads[tid];
    if (type == voltage) {
        auto const cache_token = nrn_ensure_model_data_are_sorted();
        data = nt.node_voltage_storage();
        n = size_t(nt.end);
    } else if (type == i_membrane_) {  // i_membrane_
        auto const cache_token = nrn_ensure_model_data_are_sorted();
        data = nt.node_sav_rhs_storage();
        n = size_t(nt.end);
    } else if (type == 0) {  // time
        data = &nt._t;
        n = 1;
    } else if (type > 0 && type < n_memb_func) {
        auto set_mdata = [&mdata](Memb_list* ml) -> size_t {
            mdata = ml->data();
            return ml->nodecount;
        };

        Memb_list* ml = nt._ml_list[type];
        if (ml) {
            n = set_mdata(ml);
        } else {
            // The single thread case is easy
            if (nrn_nthread == 1) {
                ml = &memb_list[type];
                n = set_mdata(ml);
            } else {
                // mk_tml_with_art() created a cgs[id].mlwithart which appended
                // artificial cells to the end. Turns out that
                // cellgroups_[tid].type2ml[type]
                // is the Memb_list we need. Sadly, by the time we get here, cellgroups_
                // has already been deleted. So we defer deletion of the necessary
                // cellgroups_ portion (deleting it on return from nrncore_run).
                ml = CellGroup::deferred_type2artml_[tid][type];
                n = set_mdata(ml);
            }
        }
    }
    return n;
}
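
// Minimal sketch of a call into the function above (types as used here; the
// tid value is illustrative):
#if 0
double* data = nullptr;
std::vector<double*> mdata;
size_t n = nrnthreads_type_return(voltage, /*tid=*/0, data, mdata);
// For type == voltage, n == nrn_threads[0].end and data points at the
// (sorted) SoA voltage storage that CoreNEURON may copy into.
#endif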


void nrnthread_group_ids(int* grp) {
    for (int i = 0; i < nrn_nthread; ++i) {
        grp[i] = cellgroups_[i].group_id;
    }
}


int nrnthread_dat1(int tid,
                   int& n_presyn,
                   int& n_netcon,
                   std::vector<int>& output_gid,
                   int*& netcon_srcgid,
                   std::vector<int>& netcon_negsrcgid_tid) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    CellGroup& cg = cellgroups_[tid];
    n_presyn = cg.n_presyn;
    n_netcon = cg.n_netcon;
    output_gid = std::move(cg.output_gid);
    netcon_srcgid = cg.netcon_srcgid;
    cg.netcon_srcgid = NULL;
    netcon_negsrcgid_tid = cg.netcon_negsrcgid_tid;
    return 1;
}

void nrnthread_dat3_cell_count(int& cell_count) {
    cell_count = mapinfo.size();
}

void nrnthread_dat3_cellmapping(int i, int& gid, int& nsec, int& nseg, int& n_seclist) {
    CellMapping* c = mapinfo.mapping[i];
    gid = c->gid;
    nsec = c->num_sections();
    nseg = c->num_segments();
    n_seclist = c->size();
}

void nrnthread_dat3_secmapping(int i_c,
                               int i_sec,
                               std::string& sclname,
                               int& nsec,
                               int& nseg,
                               size_t& total_lfp_factors,
                               int& n_electrodes,
                               std::vector<int>& data_sec,
                               std::vector<int>& data_seg,
                               std::vector<double>& data_lfp) {
    CellMapping* c = mapinfo.mapping[i_c];
    SecMapping* s = c->secmapping[i_sec];
    sclname = s->name;
    nsec = s->nsec;
    nseg = s->size();
    total_lfp_factors = s->seglfp_factors.size();
    n_electrodes = s->num_electrodes;
    data_sec = s->sections;
    data_seg = s->segments;
    data_lfp = s->seglfp_factors;
}

// sizes and total data count
int nrnthread_dat2_1(int tid,
                     int& ncell,
                     int& ngid,
                     int& n_real_gid,
                     int& nnode,
                     int& ndiam,
                     int& nmech,
                     int*& tml_index,
                     int*& ml_nodecount,
                     int& nidata,
                     int& nvdata,
                     int& nweight) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    CellGroup& cg = cellgroups_[tid];
    NrnThread& nt = nrn_threads[tid];

    ncell = cg.n_real_cell;
    ngid = cg.n_output;
    n_real_gid = cg.n_real_output;
    nnode = nt.end;
    ndiam = cg.ndiam;
    nmech = cg.n_mech;

    cg.ml_vdata_offset = new int[nmech];
    int vdata_offset = 0;
    tml_index = new int[nmech];
    ml_nodecount = new int[nmech];
    MlWithArt& mla = cg.mlwithart;
    for (size_t j = 0; j < mla.size(); ++j) {
        int type = mla[j].first;
        Memb_list* ml = mla[j].second;
        tml_index[j] = type;
        ml_nodecount[j] = ml->nodecount;
        cg.ml_vdata_offset[j] = vdata_offset;
        int* ds = memb_func[type].dparam_semantics.get();
        for (int psz = 0; psz < bbcore_dparam_size[type]; ++psz) {
            if (ds[psz] == -4 || ds[psz] == -6 || ds[psz] == -7 || ds[psz] == -11 || ds[psz] == 0) {
                // printf("%s ds[%d]=%d vdata_offset=%d\n", memb_func[type].sym->name, psz, ds[psz],
                // vdata_offset);
                vdata_offset += ml->nodecount;
            }
        }
    }
    nvdata = vdata_offset;
    nidata = 0;
    // printf("nidata=%d nvdata=%d nnetcon=%d\n", nidata, nvdata, cg.n_netcon);
    nweight = 0;
    for (int i = 0; i < cg.n_netcon; ++i) {
        nweight += cg.netcons[i]->cnt_;
    }

    return 1;
}
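// The vdata bookkeeping above: a dparam slot whose semantics code is -4
// (netsend), -6 (pntproc), -7 (bbcorepointer), -11 (random) or 0 consumes one
// void* of CoreNEURON vdata per instance, so mechanism j's block starts at
// cg.ml_vdata_offset[j] and nvdata is the per-thread total.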

int nrnthread_dat2_2(int tid,
                     int*& v_parent_index,
                     double*& a,
                     double*& b,
                     double*& area,
                     double*& v,
                     double*& diamvec) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    CellGroup& cg = cellgroups_[tid];
    NrnThread& nt = nrn_threads[tid];

    assert(cg.n_real_cell == nt.ncell);

    // If direct transfer, copy, because target space already allocated
    bool copy = corenrn_direct;
    if (copy) {
        std::copy_n(nt.node_a_storage(), nt.end, a);
        std::copy_n(nt.node_b_storage(), nt.end, b);
        std::copy_n(nt.node_area_storage(), nt.end, area);
        std::copy_n(nt.node_voltage_storage(), nt.end, v);
        std::copy_n(nt._v_parent_index, nt.end, v_parent_index);
    } else {
        v_parent_index = nt._v_parent_index;
        auto const cache_token = nrn_ensure_model_data_are_sorted();
        a = nt.node_a_storage();
        area = nt.node_area_storage();
        b = nt.node_b_storage();
        v = nt.node_voltage_storage();
    }
    if (cg.ndiam) {
        if (!copy) {
            diamvec = new double[nt.end];
        }
        for (int i = 0; i < nt.end; ++i) {
            Node* nd = nt._v_node[i];
            double diam = 0.0;
            for (Prop* p = nd->prop; p; p = p->next) {
                if (p->_type == MORPHOLOGY) {
                    diam = p->param(0);
                    break;
                }
            }
            diamvec[i] = diam;
        }
    }
    return 1;
}

int nrnthread_dat2_mech(int tid,
                        size_t i,
                        int dsz_inst,
                        int*& nodeindices,
                        double*& data,
                        int*& pdata,
                        std::vector<uint32_t>& nmodlrandom,  // 5 uint32_t per var per instance
                        std::vector<int>& pointer2type) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    CellGroup& cg = cellgroups_[tid];
    NrnThread& nt = nrn_threads[tid];
    MlWithArtItem& mlai = cg.mlwithart[i];
    int type = mlai.first;
    Memb_list* ml = mlai.second;
    // for direct transfer, data=NULL means copy into passed space for nodeindices, data, and pdata
    bool copy = data ? true : false;

    int vdata_offset = cg.ml_vdata_offset[i];
    int isart = nrn_is_artificial_[type];
    int n = ml->nodecount;
    int n_vars = ml->get_num_variables();
    int sz = nrn_prop_param_size_[type];

    // As the NEURON data is now transposed then for now always create a new
    // copy in the format expected by CoreNEURON.
    // TODO remove the need for this entirely
    if (!copy) {
        data = new double[n * sz];
    }
    for (auto instance = 0, k = 0; instance < n; ++instance) {
        for (int variable = 0; variable < n_vars; ++variable) {
            auto array_dim = ml->get_array_dims(variable);
            for (int array_index = 0; array_index < array_dim; ++array_index) {
                data[k++] = ml->data(instance, variable, array_index);
            }
        }
    }

    if (isart) {  // data may not be contiguous
        nodeindices = NULL;
    } else {
        nodeindices = ml->nodeindices;  // allocated below if copy
    }
    if (copy && !isart) {
        nodeindices = (int*) emalloc(n * sizeof(int));
        for (int i = 0; i < n; ++i) {
            nodeindices[i] = ml->nodeindices[i];
        }
    }

    sz = bbcore_dparam_size[type];  // nrn_prop_dparam_size off by 1 if cvode_ieq.
    if (sz) {
        int* pdata1;
        pdata1 = datum2int(type, ml, nt, cg, cg.datumindices[dsz_inst], vdata_offset, pointer2type);
        if (copy) {
            int nn = n * sz;
            for (int i = 0; i < nn; ++i) {
                pdata[i] = pdata1[i];
            }
            delete[] pdata1;
        } else {
            pdata = pdata1;
        }
    } else {
        pdata = NULL;
    }

    // nmodlrandom: reserve 5 uint32 for each var of each instance
    // id1, id2, id3, seq, uint32_t(which)
    // Header is number of random variables followed by dparam indices.
    // If the mechanism has no instance destructor, it has no RANDOM
    // variables; skip.
    if (nrn_mech_inst_destruct.count(type)) {
        auto& indices = nrn_mech_random_indices(type);
        nmodlrandom.reserve(1 + indices.size() + 5 * n * indices.size());
        nmodlrandom.push_back(indices.size());
        for (int ix: indices) {
            nmodlrandom.push_back((uint32_t) ix);
        }
        for (int ix: indices) {
            uint32_t data[5];
            char which;
            for (int i = 0; i < n; ++i) {
                auto& datum = ml->pdata[i][ix];
                nrnran123_State* r = (nrnran123_State*) datum.get<void*>();
                nrnran123_getids3(r, &data[0], &data[1], &data[2]);
                nrnran123_getseq(r, &data[3], &which);
                data[4] = uint32_t(which);
                for (auto j: data) {
                    nmodlrandom.push_back(j);
                }
            }
        }
    }
    return 1;
}
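// Layout of the nmodlrandom stream filled above, as a single uint32_t vector:
//   [0]            nvar, the number of RANDOM variables (stream empty if skipped)
//   [1 .. nvar]    dparam index of each RANDOM variable
//   then, per variable and per instance, 5 values: id1, id2, id3, seq, which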

int nrnthread_dat2_3(int tid,
                     int nweight,
                     int*& output_vindex,
                     double*& output_threshold,
                     int*& netcon_pnttype,
                     int*& netcon_pntindex,
                     double*& weights,
                     double*& delays) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    CellGroup& cg = cellgroups_[tid];

    output_vindex = new int[cg.n_presyn];
    output_threshold = new double[cg.n_real_output];
    for (int i = 0; i < cg.n_presyn; ++i) {
        output_vindex[i] = cg.output_vindex[i];
    }
    for (int i = 0; i < cg.n_real_output; ++i) {
        output_threshold[i] = cg.output_ps[i] ? cg.output_ps[i]->threshold_ : 0.0;
    }

    // connections
    int n = cg.n_netcon;
    // printf("n_netcon=%d nweight=%d\n", n, nweight);
    netcon_pnttype = cg.netcon_pnttype;
    cg.netcon_pnttype = NULL;
    netcon_pntindex = cg.netcon_pntindex;
    cg.netcon_pntindex = NULL;
    // alloc a weight array and write netcon weights
    weights = new double[nweight];
    int iw = 0;
    for (int i = 0; i < n; ++i) {
        NetCon* nc = cg.netcons[i];
        for (int j = 0; j < nc->cnt_; ++j) {
            weights[iw++] = nc->weight_[j];
        }
    }
    // alloc a delay array and write netcon delays
    delays = new double[n];
    for (int i = 0; i < n; ++i) {
        NetCon* nc = cg.netcons[i];
        delays[i] = nc->delay_;
    }

    return 1;
}

int nrnthread_dat2_corepointer(int tid, int& n) {
    if (tid >= nrn_nthread) {
        return 0;
    }

    n = 0;
    MlWithArt& mla = cellgroups_[tid].mlwithart;
    for (size_t i = 0; i < mla.size(); ++i) {
        if (nrn_bbcore_write_[mla[i].first]) {
            ++n;
        }
    }

    return 1;
}

int nrnthread_dat2_corepointer_mech(int tid,
                                    int type,
                                    int& icnt,
                                    int& dcnt,
                                    int*& iArray,
                                    double*& dArray) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    NrnThread& nt = nrn_threads[tid];
    CellGroup& cg = cellgroups_[tid];
    Memb_list* ml = cg.type2ml[type];

    dcnt = 0;
    icnt = 0;
    // data size and allocate
    for (int i = 0; i < ml->nodecount; ++i) {
        (*nrn_bbcore_write_[type])(
            nullptr, nullptr, &dcnt, &icnt, ml, i, ml->pdata[i], ml->_thread, nullptr, &nt);
    }
    dArray = nullptr;
    iArray = nullptr;
    if (icnt) {
        iArray = new int[icnt];
    }
    if (dcnt) {
        dArray = new double[dcnt];
    }
    icnt = dcnt = 0;
    // data values
    for (int i = 0; i < ml->nodecount; ++i) {
        (*nrn_bbcore_write_[type])(
            dArray, iArray, &dcnt, &icnt, ml, i, ml->pdata[i], ml->_thread, nullptr, &nt);
    }

    return 1;
}
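// The two loops above follow the usual bbcore_write two-pass protocol: the
// first pass, with null dArray/iArray, only advances the counters so the
// arrays can be sized; the second pass writes the actual values. A mechanism's
// bbcore_write implementation must therefore tolerate null data pointers.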


// primarily to return nrnran123 sequence info when psolve on the coreneuron
// side is finished, so that either another coreneuron psolve can be done or
// the simulation can continue on the neuron side.
int core2nrn_corepointer_mech(int tid, int type, int icnt, int dcnt, int* iArray, double* dArray) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    NrnThread& nt = nrn_threads[tid];
    Memb_list* ml = nt._ml_list[type];
    // ARTIFICIAL_CELL are not in nt.
    if (!ml) {
        ml = CellGroup::deferred_type2artml_[tid][type];
        assert(ml);
    }

    int ik = 0;
    int dk = 0;
    // data values
    for (int i = 0; i < ml->nodecount; ++i) {
        (*nrn_bbcore_read_[type])(
            dArray, iArray, &dk, &ik, ml, i, ml->pdata[i], ml->_thread, nullptr, &nt);
    }
    assert(dk == dcnt);
    assert(ik == icnt);
    return 1;
}

// NMODL RANDOM seq34 data return from coreneuron
int core2nrn_nmodlrandom(int tid,
                         int type,
                         const std::vector<int>& indices,
                         const std::vector<double>& nmodlrandom) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    NrnThread& nt = nrn_threads[tid];
    Memb_list* ml = nt._ml_list[type];
    // ARTIFICIAL_CELL are not in nt.
    if (!ml) {
        ml = CellGroup::deferred_type2artml_[tid][type];
        assert(ml);
    }

    auto& nrnindices = nrn_mech_random_indices(type);  // for sanity checking
    assert(nrnindices == indices);
    assert(nmodlrandom.size() == indices.size() * ml->nodecount);

    int ir = 0;  // into nmodlrandom
    for (const auto ix: nrnindices) {
        for (int i = 0; i < ml->nodecount; ++i) {
            auto& datum = ml->pdata[i][ix];
            nrnran123_State* state = (nrnran123_State*) datum.get<void*>();
            nrnran123_setseq(state, nmodlrandom[ir++]);
        }
    }
    return 1;
}

int* datum2int(int type,
               Memb_list* ml,
               NrnThread& nt,
               CellGroup& cg,
               DatumIndices& di,
               int ml_vdata_offset,
               std::vector<int>& pointer2type) {
    int isart = nrn_is_artificial_[di.type];
    int sz = bbcore_dparam_size[type];
    int* pdata = new int[ml->nodecount * sz];
    int* semantics = memb_func[type].dparam_semantics.get();
    for (int i = 0; i < ml->nodecount; ++i) {
        int ioff = i * sz;
        for (int j = 0; j < sz; ++j) {
            int jj = ioff + j;
            int etype = di.datum_type[jj];
            int eindex = di.datum_index[jj];
            const int seman = semantics[j];
            // Would probably be clearer to use seman for as many of the cases
            // below as possible, and within each case deal with etype
            // appropriately. datum_type and datum_index refer to the mechanism
            // type where the range variable lives (and otherwise datum_type is
            // generally the same as seman). datum_index refers to the index of
            // the range variable within the mechanism (or voltage, area, etc.)
            if (seman == -5) {  // POINTER to range variable (e.g. voltage)
                pdata[jj] = eindex;
                pointer2type.push_back(etype);
            } else if (etype == -1) {
                if (isart) {
                    pdata[jj] = -1;  // maybe save this space eventually. but not many of these in
                                     // bb models
                } else {
                    pdata[jj] = eindex;
                }
            } else if (etype == -9) {
                pdata[jj] = eindex;
            } else if (nrn_semantics_is_ion(etype)) {  // ion pointer
                pdata[jj] = eindex;
            } else if (nrn_semantics_is_ionstyle(etype)) {
                // ionstyle can be explicit instead of pointer to int*
                pdata[jj] = eindex;
            } else if (etype == -2) {  // an ion and this is the iontype
                pdata[jj] = eindex;
            } else if (etype == -4) {  // netsend (_tqitem)
                pdata[jj] = ml_vdata_offset + eindex;
                // printf("etype %d jj=%d eindex=%d pdata=%d\n", etype, jj, eindex, pdata[jj]);
            } else if (etype == -6) {  // pntproc
                pdata[jj] = ml_vdata_offset + eindex;
                // printf("etype %d jj=%d eindex=%d pdata=%d\n", etype, jj, eindex, pdata[jj]);
            } else if (etype == -7) {  // bbcorepointer
                pdata[jj] = ml_vdata_offset + eindex;
                // printf("etype %d jj=%d eindex=%d pdata=%d\n", etype, jj, eindex, pdata[jj]);
            } else if (etype == -11) {  // random
                pdata[jj] = ml_vdata_offset + eindex;
            } else {  // uninterpreted
                assert(eindex != -3);  // avoided if last
                pdata[jj] = 0;
            }
        }
    }
    return pdata;
}
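// Summary of the translation above: for POINTER (-5), area (-1 on
// non-artificial), -9, ion, iontype (-2) and ionstyle semantics, eindex passes
// through unchanged (POINTER additionally records its source mechanism type in
// pointer2type); for netsend (-4), pntproc (-6), bbcorepointer (-7) and random
// (-11) the index is rebased by ml_vdata_offset into the thread's vdata block;
// anything else is uninterpreted and written as 0.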

void part2_clean() {
    CellGroup::clean_art(cellgroups_);

    if (corenrn_direct) {
        CellGroup::defer_clean_netcons(cellgroups_);
    }

    delete[] cellgroups_;
    cellgroups_ = NULL;
}

std::vector<std::vector<NetCon*>> CellGroup::deferred_netcons;

void CellGroup::defer_clean_netcons(CellGroup* cgs) {
    for (int tid = 0; tid < nrn_nthread; ++tid) {
        deferred_netcons.push_back(std::move(cgs[tid].netcons));
    }
}

void CellGroup::clean_deferred_netcons() {
    deferred_netcons.clear();
}

// Vector.play information.
// Must play into a data element in this thread.
// File format is # of play instances in this thread (generally VecPlayContinuous).
// For each Play instance:
// VecPlayContinuousType (4), pd (index), y.size, yvec, tvec
// Other VecPlay instance types are possible, such as VecPlayContinuous with
// a discon vector or VecPlayStep with a DT or tvec, but are not implemented
// at present. Assertion errors are generated if not type 0 or if we
// cannot determine the index into the NrnThread._data.

int nrnthread_dat2_vecplay(int tid, std::vector<int>& indices) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    NrnThread& nt = nrn_threads[tid];

    // add the index of each instance in fixed_play_ for thread tid.
    // error if not a VecPlayContinuous with no discon vector
    int i = 0;
    for (auto& item: *net_cvode_instance->fixed_play_) {
        if (item->type() == VecPlayContinuousType) {
            auto* vp = static_cast<VecPlayContinuous*>(item);
            if (vp->discon_indices_ == NULL) {
                if (vp->ith_ == nt.id) {
                    assert(vp->y_ && vp->t_);
                    indices.push_back(i);
                }
            } else {
                assert(0);
            }
        } else {
            assert(0);
        }
        ++i;
    }

    return 1;
}

int nrnthread_dat2_vecplay_inst(int tid,
                                int i,
                                int& vptype,
                                int& mtype,
                                int& ix,
                                int& sz,
                                double*& yvec,
                                double*& tvec,
                                int& last_index,
                                int& discon_index,
                                int& ubound_index) {
    if (tid >= nrn_nthread) {
        return 0;
    }
    NrnThread& nt = nrn_threads[tid];

    auto* fp = net_cvode_instance->fixed_play_;
    if (fp->at(i)->type() == VecPlayContinuousType) {
        auto* const vp = static_cast<VecPlayContinuous*>(fp->at(i));
        if (!vp->discon_indices_) {
            if (vp->ith_ == nt.id) {
                auto* pd = static_cast<double*>(vp->pd_);
                int found = 0;
                vptype = vp->type();
                for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
                    if (nrn_is_artificial_[tml->index]) {
                        continue;
                    }
                    Memb_list* ml = tml->ml;
                    auto const legacy_index = ml->legacy_index(pd);
                    if (legacy_index >= 0) {
                        mtype = tml->index;
                        ix = legacy_index;
                        sz = vector_capacity(vp->y_);
                        yvec = vector_vec(vp->y_);
                        tvec = vector_vec(vp->t_);
                        found = 1;
                        break;
                    }
                }
                assert(found);
                // following 3 used for direct-mode.
                last_index = vp->last_index_;
                discon_index = vp->discon_index_;
                ubound_index = vp->ubound_index_;
                return 1;
            }
        }
    }

    return 0;
}

/** getting one item at a time from CoreNEURON **/
void core2nrn_vecplay(int tid, int i, int last_index, int discon_index, int ubound_index) {
    if (tid >= nrn_nthread) {
        return;
    }
    auto* fp = net_cvode_instance->fixed_play_;
    assert(fp->at(i)->type() == VecPlayContinuousType);
    VecPlayContinuous* vp = (VecPlayContinuous*) fp->at(i);
    vp->last_index_ = last_index;
    vp->discon_index_ = discon_index;
    vp->ubound_index_ = ubound_index;
}

/** start the vecplay events **/
void core2nrn_vecplay_events() {
    for (auto& item: *net_cvode_instance->fixed_play_) {
        if (item->type() == VecPlayContinuousType) {
            auto* vp = static_cast<VecPlayContinuous*>(item);
            NrnThread* nt = nrn_threads + vp->ith_;
            vp->e_->send(vp->t_->elem(vp->ubound_index_), net_cvode_instance, nt);
        }
    }
}

/** getting one item at a time from nrn2core_transfer_WATCH **/
void nrn2core_transfer_WatchCondition(WatchCondition* wc, void (*cb)(int, int, int, int, int)) {
    Point_process* pnt = wc->pnt_;
    assert(pnt);
    int tid = ((NrnThread*) (pnt->_vnt))->id;
    int pnttype = pnt->prop->_type;
    int watch_index = wc->watch_index_;
    int triggered = wc->flag_ ? 1 : 0;
    int pntindex = CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, pnttype);
    (*cb)(tid, pnttype, pntindex, watch_index, triggered);

    // This transfers CvodeThreadData activated WatchCondition
    // information. All WatchCondition stuff is implemented in netcvode.cpp.
    // cvodeobj.h: HTList* CvodeThreadData.watch_list_
    // netcon.h: WatchCondition
    // On the NEURON side, WatchCondition is activated within a
    // NET_RECEIVE block with the NMODL WATCH statement translated into a
    // call to _nrn_watch_activate implemented as a function in netcvode.cpp.
    // Note that on the CoreNEURON side, all the WATCH functionality is
    // implemented within the mod file translation, and the info from this side
    // is used to assign a value in the location specified by the
    // _watch_array(flag) macro.
    // The return from psolve must transfer back the correct conditions
    // so that NEURON can continue with a classical psolve, or, if CoreNEURON
    // continues, receive the correct transfer of conditions back from NEURON
    // again.
    // Note: the reason CoreNEURON does not already have the correct watch
    // condition from phase2 setup is because, on the NEURON side,
    // _nrn_watch_activate fills in _watch_array[0] with a pointer to a
    // WatchList and _watch_array[i] with a pointer to a WatchCondition.
    // Activation consists of removing all conditions from an HTList
    // (HeadTailList) and from _watch_array[0] (only on the first
    // _nrn_watch_activate call from a NET_RECEIVE delivery event), then
    // appending to _watch_array[0] and to the HTList, which is the
    // CvodeThreadData.watch_list_.
    // But on the CoreNEURON side, _watch_array[0] is unused and _watch_array[i]
    // is a two bit integer. Bit 2 on means the WATCH is activated; bit 1
    // is used to determine the transition from false to true for doing a
    // net_send (immediate deliver).
}

// for faster determination of the movable index given the type
static std::map<int, int> type2movable;
static void setup_type2semantics() {
    if (type2movable.empty()) {
        for (int type = 0; type < n_memb_func; ++type) {
            int* ds = memb_func[type].dparam_semantics.get();
            if (ds) {
                for (int psz = 0; psz < bbcore_dparam_size[type]; ++psz) {
                    if (ds[psz] == -4) {  // netsend semantics
                        type2movable[type] = psz;
                    }
                }
            }
        }
    }
}

// Copying TQItem information for transfer to CoreNEURON has been factored
// out of nrn2core_transfer_tqueue since, if BinQ is being used, it involves
// iterating over the BinQ as well as the normal TQueue.
static void set_info(TQItem* tqi,
                     int tid,
                     NrnCoreTransferEvents* core_te,
                     std::unordered_map<NetCon*, std::vector<size_t>>& netcon2intdata,
                     std::unordered_map<PreSyn*, std::vector<size_t>>& presyn2intdata,
                     std::unordered_map<double*, std::vector<size_t>>& weight2intdata) {
    DiscreteEvent* de = (DiscreteEvent*) (tqi->data_);
    int type = de->type();
    double tdeliver = tqi->t_;
    core_te->type.push_back(type);
    core_te->td.push_back(tdeliver);

    switch (type) {
    case DiscreteEventType: {  // 0
    } break;
    case NetConType: {  // 2
        NetCon* nc = (NetCon*) de;
        // To find the i for cg.netcons[i] == nc,
        // and since there are generally very many fewer nc on the queue
        // than netcons, begin a nc2i map that we can fill in for i
        // later in one sweep through the cg.netcons.
        // Here is where it goes.
        size_t iloc = core_te->intdata.size();
        core_te->intdata.push_back(-1);
        // But must take into account the rare situation where the same
        // NetCon is on the queue more than once. Hence the std::vector
        netcon2intdata[nc].push_back(iloc);
    } break;
    case SelfEventType: {  // 3
        SelfEvent* se = (SelfEvent*) de;
        Point_process* pnt = se->target_;
        int type = pnt->prop->_type;
        int movable_index = type2movable[type];
        double* wt = se->weight_;

        core_te->intdata.push_back(type);
        core_te->dbldata.push_back(se->flag_);

        // All SelfEvent have a target. A SelfEvent only has a weight if
        // it was issued in response to a NetCon event to the target
        // NET_RECEIVE block. Determination of Point_process* target_ on the
        // CoreNEURON side uses mechanism type and instance index from here
        // on the NEURON side. And the latter can be determined now from pnt.
        // On the other hand, if there is a non-null weight pointer, its index
        // can only be determined by sweeping over all NetCon.

        // Introduced the public static method below because ARTIFICIAL_CELL
        // are not located in NrnThread and are not cache efficient.
        int index = CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, type);
        core_te->intdata.push_back(index);

        size_t iloc_wt = core_te->intdata.size();
        if (wt) {  // don't bother with NULL weights
            weight2intdata[wt].push_back(iloc_wt);
        }
        core_te->intdata.push_back(-1);  // If NULL weight this is the indicator
        // Each of these holds a TQItem*
        Datum* const movable = se->movable_;
        Datum* const pnt_movable = pnt->prop->dparam + movable_index;
        // Only one SelfEvent on the queue for a given point process can be
        // movable
        bool const condition = movable && (*movable).get<TQItem*>() == tqi;
        core_te->intdata.push_back(condition);
        if (condition) {
            assert(pnt_movable && (*pnt_movable).get<TQItem*>() == tqi);
        }

    } break;
    case PreSynType: {  // 4
        PreSyn* ps = (PreSyn*) de;

        // NEURON puts PreSyn on every thread queue.
        // Skip if PreSyn not associated with this thread.
        bool skip = (ps->nt_ && (ps->nt_->id != tid)) ? true : false;
        // Skip if effectively an InputPresyn (ps->nt_ == NULL)
        // and this is not thread 0.
        skip = (!ps->nt_ && tid != 0) ? true : skip;
        if (skip) {
            // erase what was already added
            core_te->type.pop_back();
            core_te->td.pop_back();
            break;
        }
        // Output PreSyn similar to NetCon but more data.
        // An input PreSyn (ps->output_index_ == -1 and ps->gid_ >= 0)
        // is distinguished from an output PreSyn (ps->output_index_ == ps->gid_
        // or both negative) by a first item of 1 or 0 respectively, followed
        // by gid or presyn index respectively.
        // That is:
        // Output PreSyn format is 0, presyn index
        // (initialized to -1 and figured out from presyn2intdata), and
        // ps->delay_.
        // Input PreSyn format is 1, gid, and ps->delay_.
        if (ps->output_index_ < 0 && ps->gid_ >= 0) {
            // InputPreSyn on the CoreNEURON side
            core_te->intdata.push_back(1);
            core_te->intdata.push_back(ps->gid_);
        } else {
            // PreSyn on the NEURON side
            core_te->intdata.push_back(0);
            size_t iloc = core_te->intdata.size();
            core_te->intdata.push_back(-1);
            presyn2intdata[ps].push_back(iloc);
        }
        // CoreNEURON PreSyn has no notion of use_min_delay_ so if that
        // is in effect, then the send time is actually tt - nc->delay_
        // (Note there is no core2nrn inverse as PreSyn does not appear on
        // the CoreNEURON event queue).
        if (ps->use_min_delay_) {
            core_te->td.back() -= ps->delay_;
        }
    } break;
    case HocEventType: {  // 5
        // Not supported in CoreNEURON, discard and print a warning.
        core_te->td.pop_back();
        core_te->type.pop_back();
        // Delivery time was often reduced by a quarter step to avoid
        // fixed step roundoff problems.
        Fprintf(stderr,
                "WARNING: CVode.event(...) for delivery at time step nearest %g discarded. "
                "CoreNEURON cannot presently handle interpreter events (rank %d, thread %d).\n",
                tdeliver,
                nrnmpi_myid,
                tid);
    } break;
    case PlayRecordEventType: {  // 6
    } break;
    case NetParEventType: {  // 7
    } break;
    default: {
    } break;
    }
}
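
// Shape of one transferred queue item as encoded by set_info (per-type
// intdata records; indices marked "filled later" start as -1 and are patched
// by the single sweeps in nrn2core_transfer_tqueue below):
//   NetConType:    [netcon index (filled later)]
//   SelfEventType: [target type, target instance index,
//                   weight index (filled later; stays -1 if no weight),
//                   is_movable]  plus flag_ appended to dbldata
//   PreSynType:    [1, gid] for an input PreSyn;
//                  [0, presyn index (filled later)] otherwise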

NrnCoreTransferEvents* nrn2core_transfer_tqueue(int tid) {
    if (tid >= nrn_nthread) {
        return NULL;
    }

    NrnCoreTransferEvents* core_te = new NrnCoreTransferEvents;

    setup_type2semantics();  // SelfEvent encoding needs type2movable

    // see comments below about same object multiple times on queue
    // and single sweep fill
    std::unordered_map<NetCon*, std::vector<size_t>> netcon2intdata;
    std::unordered_map<PreSyn*, std::vector<size_t>> presyn2intdata;
    std::unordered_map<double*, std::vector<size_t>> weight2intdata;

    NrnThread& nt = nrn_threads[tid];
    TQueue* tq = net_cvode_instance_event_queue(&nt);
    TQItem* tqi;
    auto& cg = cellgroups_[tid];
    // make sure all buffered interthread events are on the queue
    nrn_interthread_enqueue(&nt);

    // Iterate over all tqueue items to record info needed for transfer to
    // coreneuron. The atomic_dq removes items from the queue but misses
    // BinQ items if present. So need separate iteration for that (hence the
    // factoring out of the loop bodies into set_info.)
    while ((tqi = tq->atomic_dq(1e15)) != NULL) {
        set_info(tqi, tid, core_te, netcon2intdata, presyn2intdata, weight2intdata);
    }
    if (nrn_use_bin_queue_) {
        // does not remove items but the entire queue will be cleared
        // before using again.
        for (tqi = tq->binq()->first(); tqi; tqi = tq->binq()->next(tqi)) {
            set_info(tqi, tid, core_te, netcon2intdata, presyn2intdata, weight2intdata);
        }
    }

    // fill in the integers for the pointer translation

    // NEURON NetCon* to CoreNEURON index into nt.netcons
    for (int i = 0; i < cg.n_netcon; ++i) {
        NetCon* nc = cg.netcons[i];
        auto iter = netcon2intdata.find(nc);
        if (iter != netcon2intdata.end()) {
            for (auto iloc: iter->second) {
                core_te->intdata[iloc] = i;
            }
        }
    }

    // NEURON PreSyn* to CoreNEURON index into nt.presyns
#define NRN_SENTINAL 100000000000
    for (int i = 0; i < cg.n_presyn; ++i) {
        PreSyn* ps = cg.output_ps[i];
        auto iter = presyn2intdata.find(ps);
        if (iter != presyn2intdata.end()) {
            // not visited twice
            assert(iter->second[0] < NRN_SENTINAL);
            for (auto iloc: iter->second) {
                core_te->intdata[iloc] = i;
            }
            presyn2intdata[ps][0] = i + NRN_SENTINAL;
        }
    }
    // all presyn2intdata should have been visited so all
    // presyn2intdata[ps][0] must be >= NRN_SENTINAL
    for (auto& iter: presyn2intdata) {
        assert(iter.second[0] >= NRN_SENTINAL);
    }

    // NEURON SelfEvent weight* into CoreNEURON index into nt.netcons.
    // On the CoreNEURON side we find the NetCon and then the
    // nc.u.weight_index_
    for (int i = 0; i < cg.n_netcon; ++i) {
        NetCon* nc = cg.netcons[i];
        double* wt = nc->weight_;
        auto iter = weight2intdata.find(wt);
        if (iter != weight2intdata.end()) {
            for (auto iloc: iter->second) {
                core_te->intdata[iloc] = i;
            }
        }
    }

    return core_te;
}

/** @brief Initialize queues before transfer.
 *  Probably already clear, but if binq then must be initialized to time.
 */
void core2nrn_clear_queues(double time) {
    nrn_threads[0]._t = time;  // used by clear_event_queue
    clear_event_queue();
}

/** @brief Called from CoreNEURON core2nrn_tqueue_item.
 */
void core2nrn_NetCon_event(int tid, double td, size_t nc_index) {
    assert(tid < nrn_nthread);
    NrnThread& nt = nrn_threads[tid];
    // cellgroups_ has been deleted but deletion of cg.netcons was deferred
    // (and will be deleted on return from nrncore_run).
    // This is tragic for memory usage. There are more NetCon's than anything.
    // Would be better to save the memory at a cost of single iteration over
    // NetCon.
    NetCon* nc = CellGroup::deferred_netcons[tid][nc_index];
    nc->send(td, net_cvode_instance, &nt);
}

static void core2nrn_SelfEvent_helper(int tid,
                                      double td,
                                      int tar_type,
                                      int tar_index,
                                      double flag,
                                      double* weight,
                                      int is_movable) {
    if (type2movable.empty()) {
        setup_type2semantics();
    }
    Memb_list* ml = nrn_threads[tid]._ml_list[tar_type];
    Point_process* pnt;
    if (ml) {
        pnt = ml->pdata[tar_index][1].get<Point_process*>();
    } else {
        // In NEURON world, ARTIFICIAL_CELLs do not live in NrnThread.
        // And the old deferred_type2artdata_ only gave us data, not pdata.
        // So this is where I decided to replace it with the more
        // expensive deferred_type2artml_.
        ml = CellGroup::deferred_type2artml_[tid][tar_type];
        pnt = ml->pdata[tar_index][1].get<Point_process*>();
    }

    // Needs to be tested when permuted on CoreNEURON side.
    assert(tar_type == pnt->prop->_type);
    assert(tar_index == CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, tar_type));

    int const movable_index = type2movable[tar_type];
    auto* const movable_arg = pnt->prop->dparam + movable_index;
    auto* const old_movable_arg = (*movable_arg).get<TQItem*>();
    nrn_net_send(movable_arg, weight, pnt, td, flag);
    if (!is_movable) {
        *movable_arg = old_movable_arg;
    }
}

void core2nrn_SelfEvent_event(int tid,
                              double td,
                              int tar_type,
                              int tar_index,
                              double flag,
                              size_t nc_index,
                              int is_movable) {
    assert(tid < nrn_nthread);
    NetCon* nc = CellGroup::deferred_netcons[tid][nc_index];

#if 1
    // verify nc->target_ consistent with tar_type, tar_index.
    Memb_list* ml = nrn_threads[tid]._ml_list[tar_type];
    auto* pnt = ml->pdata[tar_index][1].get<Point_process*>();
    assert(nc->target_ == pnt);
#endif

    double* weight = nc->weight_;
    core2nrn_SelfEvent_helper(tid, td, tar_type, tar_index, flag, weight, is_movable);
}

void core2nrn_SelfEvent_event_noweight(int tid,
                                       double td,
                                       int tar_type,
                                       int tar_index,
                                       double flag,
                                       int is_movable) {
    assert(tid < nrn_nthread);
    double* weight = NULL;
    core2nrn_SelfEvent_helper(tid, td, tar_type, tar_index, flag, weight, is_movable);
}

// Set of the voltage indices in which PreSyn.flag_ == true
void core2nrn_PreSyn_flag(int tid, std::set<int> presyns_flag_true) {
    if (tid >= nrn_nthread) {
        return;
    }
    NetCvodeThreadData& nctd = net_cvode_instance->p[tid];
    hoc_Item* pth = nctd.psl_thr_;
    if (pth) {
        hoc_Item* q;
        // turn off all the PreSyn.flag_ as they might have changed
        // during the psolve on the coreneuron side.
        ITERATE(q, pth) {
            PreSyn* ps = (PreSyn*) VOIDITM(q);
            ps->flag_ = false;
        }
        if (presyns_flag_true.empty()) {
            return;
        }
        ITERATE(q, pth) {
            PreSyn* ps = (PreSyn*) VOIDITM(q);
            assert(ps->nt_ == (nrn_threads + tid));
            if (ps->thvar_) {
                int type = 0;
                int index_v = -1;
                nrn_dblpntr2nrncore(ps->thvar_, *ps->nt_, type, index_v);
                assert(type == voltage);
                if (presyns_flag_true.erase(index_v)) {
                    ps->flag_ = true;
                    if (presyns_flag_true.empty()) {
                        break;
                    }
                }
            }
        }
    }
}

// Add the voltage indices in which PreSyn.flag_ == true to the set.
void nrn2core_PreSyn_flag(int tid, std::set<int>& presyns_flag_true) {
    if (tid >= nrn_nthread) {
        return;
    }
    NetCvodeThreadData& nctd = net_cvode_instance->p[tid];
    hoc_Item* pth = nctd.psl_thr_;
    if (pth) {
        hoc_Item* q;
        ITERATE(q, pth) {
            auto* ps = static_cast<PreSyn*>(VOIDITM(q));
            assert(ps->nt_ == (nrn_threads + tid));
            if (ps->flag_ && ps->thvar_) {
                int type = 0;
                int index_v = -1;
                nrn_dblpntr2nrncore(ps->thvar_, *ps->nt_, type, index_v);
                assert(type == voltage);
                presyns_flag_true.insert(index_v);
            }
        }
    }
}

// For each watch index, activate the WatchCondition
void core2nrn_watch_activate(int tid, int type, int watch_begin, Core2NrnWatchInfo& wi) {
    if (tid >= nrn_nthread) {
        return;
    }
    NrnThread& nt = nrn_threads[tid];
    Memb_list* ml = nt._ml_list[type];
    for (size_t i = 0; i < wi.size(); ++i) {
        Core2NrnWatchInfoItem& active_watch_items = wi[i];
        Datum* pd = ml->pdata[i];
        int r = 0;  // first activate removes formerly active from pd.
        for (auto watch_item: active_watch_items) {
            int watch_index = watch_item.first;
            bool above_thresh = watch_item.second;
            auto* wc = pd[watch_index].get<WatchCondition*>();
            if (!wc) {  // if any do not exist in this instance, create them all
                        // with proper callback and flag.
                (*(nrn_watch_allocate_[type]))(pd);
                wc = pd[watch_index].get<WatchCondition*>();
            }
            _nrn_watch_activate(
                pd + watch_begin, wc->c_, watch_index - watch_begin, wc->pnt_, r++, wc->nrflag_);
            wc->flag_ = above_thresh ? 1 : 0;
            // If flag_ is 1 there will not be an (immediate) transition event
            // until the value() becomes negative again and then goes positive.
        }
    }
}

// nrn<->corenrn PatternStim

extern void* nrn_patternstim_info_ref(Datum*);
static int patternstim_type;

// Info from NEURON PatternStim at beginning of psolve.
void nrn2core_patternstim(void** info) {
    if (!patternstim_type) {
        for (int i = 3; i < n_memb_func; ++i) {
            if (strcmp(memb_func[i].sym->name, "PatternStim") == 0) {
                patternstim_type = i;
                break;
            }
        }
    }

    Memb_list& ml = memb_list[patternstim_type];
    assert(ml.nodecount == 1);
    *info = nrn_patternstim_info_ref(ml.pdata[0]);
}


// Info from NEURON subworlds at beginning of psolve.
void nrn2core_subworld_info(int& cnt,
                            int& subworld_index,
                            int& subworld_rank,
                            int& numprocs_subworld,
                            int& numprocs_world) {
#ifdef NRNMPI
    nrnmpi_get_subworld_info(
        &cnt, &subworld_index, &subworld_rank, &numprocs_subworld, &numprocs_world);
#else
    cnt = 0;
    subworld_index = -1;
    subworld_rank = 0;
    numprocs_subworld = 1;
    numprocs_world = 1;
#endif
}