cell_group.cpp
#include "cell_group.h"

#include "nrnran123.h"  // globalindex written to globals.dat
#include "section.h"
#include "parse.hpp"
#include "nrnmpi.h"
#include "netcon.h"
#include "netcvode.h"

#include <limits>
#include <sstream>

extern short* nrn_is_artificial_;
extern bool corenrn_direct;
extern int* bbcore_dparam_size;
extern int nrn_has_net_event_cnt_;
extern int* nrn_has_net_event_;
// definitions of the static members declared in cell_group.h
Deferred_Type2ArtMl CellGroup::deferred_type2artml_;
int* CellGroup::has_net_event_ = NULL;

CellGroup::CellGroup() {
    n_output = n_real_output = n_presyn = n_netcon = n_mech = ntype = 0;
    group_id = -1;
    ndiam = 0;
    netcon_srcgid = netcon_pnttype = netcon_pntindex = NULL;
    datumindices = 0;
    type2ml = new Memb_list*[n_memb_func];
    for (int i = 0; i < n_memb_func; ++i) {
        type2ml[i] = 0;
    }
    ml_vdata_offset = NULL;
}

CellGroup::~CellGroup() {
    if (netcon_srcgid)
        delete[] netcon_srcgid;
    if (netcon_pnttype)
        delete[] netcon_pnttype;
    if (netcon_pntindex)
        delete[] netcon_pntindex;
    if (datumindices)
        delete[] datumindices;
    if (ml_vdata_offset)
        delete[] ml_vdata_offset;
    delete[] type2ml;
}


void CellGroup::mk_cellgroups(neuron::model_sorted_token const& cache_token, CellGroup* cgs) {
    for (int i = 0; i < nrn_nthread; ++i) {
        auto& nt = nrn_threads[i];
        cgs[i].n_real_cell = nt.ncell;  // real cell count

        // Count PreSyn watching voltage (raise error if watching something else)
        // Allows possibility of multiple outputs for a cell.
        int npre = 0;
        NetCvodeThreadData& nctd = net_cvode_instance->p[i];
        hoc_Item* pth = nctd.psl_thr_;
        if (pth) {
            hoc_Item* q;
            ITERATE(q, pth) {
                auto* ps = static_cast<PreSyn*>(VOIDITM(q));
                // The PreSyn should refer to a valid Node
                assert(ps->thvar_);
                // The old code says this should always be a voltage, and
                // voltage is the thing we are moving to a new data structure,
                // so we should not be hitting the backwards-compatibility layer
                if (!ps->thvar_.refers_to<neuron::container::Node::field::Voltage>(
                        neuron::model().node_data())) {
                    hoc_execerr_ext("NetCon range variable reference source not a voltage");
                }
                if (ps->gid_ < 0) {
                    bool b1 = !ps->dil_.empty();
                    bool b2 = b1 && bool(ps->dil_[0]->target_);
                    std::string ncob(b1 ? hoc_object_name(ps->dil_[0]->obj_) : "");
                    hoc_execerr_ext(
                        "%s with voltage source has no gid."
                        " (The source is %s(x)._ref_v"
                        " and is the source for %zd NetCons. %s%s)",
                        (b1 ? ncob.c_str() : "NetCon"),
                        secname(ps->ssrc_),
                        ps->dil_.size(),
                        (b1 ? (ncob + " has target ").c_str() : ""),
                        (b1 ? (b2 ? std::string(hoc_object_name(ps->dil_[0]->target_->ob)).c_str()
                                  : "None")
                            : ""));
                }
                ++npre;
            }
        }
        cgs[i].n_real_output = npre;

        // Final count of n_presyn of thread
        MlWithArt& mla = cgs[i].mlwithart;
        for (size_t j = 0; j < mla.size(); ++j) {
            int type = mla[j].first;
            Memb_list* ml = mla[j].second;
            cgs[i].type2ml[type] = ml;
            if (nrn_has_net_event(type)) {
                npre += ml->nodecount;
            }
        }
        cgs[i].n_presyn = npre;

        // in case some cells do not have voltage presyns (eg threshold detection
        // computed from a POINT_PROCESS NET_RECEIVE with WATCH and net_event)
        // initialize as unused.
        cgs[i].output_ps.resize(npre);
        cgs[i].output_gid.resize(npre, -1);
        cgs[i].output_vindex.resize(npre, -1);

        // fill in the output_ps, output_gid, and output_vindex for the real cells.
        npre = 0;
        if (pth) {
            hoc_Item* q;
            ITERATE(q, pth) {
                auto* ps = static_cast<PreSyn*>(VOIDITM(q));
                assert(ps->thvar_);
                assert(ps->thvar_.refers_to_a_modern_data_structure());
                assert(ps->thvar_.refers_to<neuron::container::Node::field::Voltage>(
                    neuron::model().node_data()));
                cgs[i].output_ps.at(npre) = ps;
                cgs[i].output_gid.at(npre) = ps->output_index_;
                // Convert back to an old-style index, i.e. the index of the
                // voltage within this NrnThread after sorting
                cgs[i].output_vindex.at(npre) = ps->thvar_.current_row() -
                                                cache_token.thread_cache(i).node_data_offset;
                ++npre;
            }
        }
        assert(npre == cgs[i].n_real_output);

        // fill in the artcell info
        npre = cgs[i].n_real_output;
        cgs[i].n_output = npre;  // add artcell (and PP with net_event) with gid in following loop
        for (size_t j = 0; j < mla.size(); ++j) {
            int type = mla[j].first;
            Memb_list* ml = mla[j].second;
            if (nrn_has_net_event(type)) {
                for (int instance = 0; instance < ml->nodecount; ++instance) {
                    auto* const pnt = ml->pdata[instance][1].get<Point_process*>();
                    auto* const ps = static_cast<PreSyn*>(pnt->presyn_);
                    auto const other_thread = static_cast<NrnThread*>(pnt->_vnt)->id;
                    assert(other_thread == i);
                    cgs[i].output_ps.at(npre) = ps;
                    auto const offset = cache_token.thread_cache(i).mechanism_offset.at(type);
                    auto const global_row = pnt->prop->id().current_row();
                    assert(global_row >= offset);
                    long const agid = -(type + 1000 * static_cast<long>(global_row - offset));
                    if (ps) {
                        if (ps->output_index_ >= 0) {  // has gid
                            cgs[i].output_gid[npre] = ps->output_index_;
                            if (cgs[i].group_id < 0) {
                                cgs[i].group_id = ps->output_index_;
                            }
                            ++cgs[i].n_output;
                        } else {
                            cgs[i].output_gid[npre] = agid;
                        }
                    } else {  // if an acell is never a source, it will not have a presyn
                        cgs[i].output_gid[npre] = -1;
                    }
                    // The (negative) agid is the way we associate an acell
                    // PreSyn with the Point_process.
                    if (agid < std::numeric_limits<int>::min() || agid >= -1) {
                        std::ostringstream oss;
                        oss << "maximum of ~" << std::numeric_limits<int>::max() / 1000
                            << " artificial cells of a given type can be created per NrnThread, "
                               "this model has "
                            << ml->nodecount << " instances of " << memb_func[type].sym->name
                            << " (cannot store cgs[" << i << "].output_vindex[" << npre
                            << "]=" << agid << ')';
                        hoc_execerror("integer overflow", oss.str().c_str());
                    }
                    cgs[i].output_vindex[npre] = agid;
                    ++npre;
                }
            }
        }
    }

    // use first real cell gid, if it exists, as the group_id
    if (corenrn_direct == false)
        for (int i = 0; i < nrn_nthread; ++i) {
            if (cgs[i].n_real_output && cgs[i].output_gid[0] >= 0) {
                cgs[i].group_id = cgs[i].output_gid[0];
            } else if (cgs[i].group_id >= 0) {
                // set above to first artificial cell with a ps->output_index >= 0
            } else {
                // Don't die yet as the thread may be empty. That just means no files
                // output for this thread and no mention in files.dat.
                // Can check for empty near end of datatransform(CellGroup* cgs)
            }
        }

    // use the Hoc NetCon object list to segregate according to threads
    // and fill the CellGroup netcons, netcon_srcgid, netcon_pnttype, and
    // netcon_pntindex (and, if nrn_nthread > 1, netcon_negsrcgid_tid).
    CellGroup::mk_cgs_netcon_info(cache_token, cgs);
}
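
// The function above packs an artificial cell's type and thread-local
// instance index into a single negative "gid" as -(type + 1000 * index);
// the overflow check limits each type to roughly INT_MAX / 1000 instances
// per NrnThread. A minimal sketch of the encode/decode pair (illustrative
// only, not part of the original file; assumes 0 < type < 1000, as the
// 1000-multiplier in the encoding implies):
static long encode_acell_gid(int type, long index) {
    return -(type + 1000 * index);
}
static void decode_acell_gid(long agid, int& type, long& index) {
    type = static_cast<int>((-agid) % 1000);  // low three decimal digits
    index = (-agid) / 1000;                   // remaining digits
}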

void CellGroup::datumtransform(CellGroup* cgs) {
    // ions, area, and POINTER to v or mechanism data.
    for (int ith = 0; ith < nrn_nthread; ++ith) {
        NrnThread& nt = nrn_threads[ith];
        CellGroup& cg = cgs[ith];
        // how many mechanisms in use and how many DatumIndices do we need.
        MlWithArt& mla = cgs[ith].mlwithart;
        for (size_t j = 0; j < mla.size(); ++j) {
            Memb_list* ml = mla[j].second;
            ++cg.n_mech;
            if (ml->pdata[0]) {
                ++cg.ntype;
            }
        }
        cg.datumindices = new DatumIndices[cg.ntype];
        // specify type, allocate the space, and fill the indices
        int i = 0;
        for (size_t j = 0; j < mla.size(); ++j) {
            int type = mla[j].first;
            Memb_list* ml = mla[j].second;
            int sz = bbcore_dparam_size[type];
            if (sz) {
                DatumIndices& di = cg.datumindices[i++];
                di.type = type;
                int n = ml->nodecount * sz;
                di.datum_type = new int[n];
                di.datum_index = new int[n];
                // fill the indices.
                // had to introduce a memb_func[i].dparam_semantics registered by each mod file.
                datumindex_fill(ith, cg, di, ml);
            }
        }
        // if model is being transferred via files, and
        // if there are no gids in the thread (group_id < 0), and
        // if the thread is not empty (mechanisms exist, n_mech > 0)
        if (corenrn_direct == false && cg.group_id < 0 && cg.n_mech > 0) {
            hoc_execerror("A nonempty thread has no real cell or ARTIFICIAL_CELL with a gid", NULL);
        }
    }
}


void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_list* ml) {
    NrnThread& nt = nrn_threads[ith];
    int nnode = nt.end;
    int mcnt = ml->nodecount;
    int dsize = bbcore_dparam_size[di.type];
    if (dsize == 0) {
        return;
    }
    int* dmap = memb_func[di.type].dparam_semantics.get();
    assert(dmap);
    // what is the size of the nt._vdata portion needed for a single ml->dparam[i]
    int vdata_size = 0;
    for (int i = 0; i < dsize; ++i) {
        if (dmap[i] == -4 || dmap[i] == -6 || dmap[i] == -7 || dmap[i] == -11 || dmap[i] == 0) {
            ++vdata_size;
        }
    }

    int isart = nrn_is_artificial_[di.type];
    for (int i = 0; i < mcnt; ++i) {
        // Prop* datum instance arrays are not in cache efficient order
        // ie. ml->pdata[i] are not laid out end to end in memory.
        // Also, ml->_data for artificial cells is not in cache efficient order
        // but in the artcell case there are no pointers to doubles
        Datum* dparam = ml->pdata[i];
        int offset = i * dsize;
        int vdata_offset = i * vdata_size;
        for (int j = 0; j < dsize; ++j) {
            int etype = -100;  // uninterpreted
            int eindex = -1;
            if (dmap[j] == -1) {  // used to be a double* into _actual_area, now handled by soa<...>
                if (isart) {
                    etype = -1;
                    eindex = -1;  // the signal to ignore in bbcore.
                } else {
                    auto area = static_cast<neuron::container::data_handle<double>>(dparam[j]);
                    assert(area.refers_to_a_modern_data_structure());
                    auto const cache_token = nrn_ensure_model_data_are_sorted();
                    etype = -1;
                    // current_row() refers to the global Node data, but we need
                    // to set eindex to something local to the NrnThread
                    eindex = area.current_row() - cache_token.thread_cache(ith).node_data_offset;
                }
            } else if (dmap[j] == -2) {  // this is an ion and dparam[j][0].i is the iontype
                etype = -2;
                eindex = dparam[j].get<int>();
            } else if (dmap[j] == -3) {  // cvodeieq is always last and never seen
                assert(dmap[j] != -3);
            } else if (dmap[j] == -4) {  // netsend (_tqitem pointer)
                // eventually index into nt->_vdata
                etype = -4;
                eindex = vdata_offset++;
            } else if (dmap[j] == -6) {  // pntproc
                // eventually index into nt->_vdata
                etype = -6;
                eindex = vdata_offset++;
            } else if (dmap[j] == -7) {  // bbcorepointer
                // eventually index into nt->_vdata
                etype = -7;
                eindex = vdata_offset++;
            } else if (dmap[j] == -8) {  // watch
                etype = -8;
                eindex = 0;
            } else if (dmap[j] == -10) {  // fornetcon
                etype = -10;
                eindex = 0;
            } else if (dmap[j] == -11) {  // random
                etype = -11;
                eindex = vdata_offset++;
            } else if (dmap[j] == -9) {  // diam
                cg.ndiam = nt.end;
                etype = -9;
                // Rare for a mechanism to use dparam pointing to diam.
                // MORPHOLOGY was never made cache efficient. And
                // is not in the tml_with_art.
                // Need to determine this node and then simple to search its
                // mechanism list for MORPHOLOGY and then know the diam.
                Node* nd = ml->nodelist[i];
                neuron::container::data_handle<double> pdiam{};
                for (Prop* p = nd->prop; p; p = p->next) {
                    if (p->_type == MORPHOLOGY) {
                        pdiam = p->param_handle(0);
                        break;
                    }
                }
                assert(static_cast<neuron::container::data_handle<double>>(dparam[j]) == pdiam);
                eindex = ml->nodeindices[i];
            } else if (dmap[j] == -5) {  // POINTER
                // must be a pointer into nt->_data. Handling is similar to eion so
                // give proper index into the type.
                auto const pd = static_cast<neuron::container::data_handle<double>>(dparam[j]);
                nrn_dblpntr2nrncore(pd, nt, etype, eindex);
                if (etype == 0) {
                    fprintf(stderr,
                            "POINTER is not pointing to voltage or mechanism data. Perhaps it "
                            "should be a BBCOREPOINTER\n");
                }
                assert(etype != 0);
                // pointer into one of the tml types?
            } else if (nrn_semantics_is_ion(dmap[j])) {  // double* into eion type data
                etype = nrn_semantics_ion_type(dmap[j]);
                Memb_list* eml = cg.type2ml[etype];
                assert(eml);
                auto* const pval = dparam[j].get<double*>();
                auto const legacy_index = eml->legacy_index(pval);
                assert(legacy_index >= 0);
                eindex = legacy_index;
            } else if (nrn_semantics_is_ionstyle(dmap[j])) {  // int* into ion dparam[xxx][0]
                // store the actual ionstyle
                etype = dmap[j];
                eindex = *dparam[j].get<int*>();
            } else {
                char errmes[100];
                Sprintf(errmes, "Unknown semantics type %d for dparam item %d of", dmap[j], j);
                hoc_execerror(errmes, memb_func[di.type].sym->name);
            }
            di.datum_type[offset + j] = etype;
            di.datum_index[offset + j] = eindex;
        }
    }
}
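
// Layout note for the arrays filled above: datum_type and datum_index are
// flat, instance-major arrays of ml->nodecount * dsize entries. A sketch of
// the corresponding read access (illustrative only, not part of this file):
static void read_datum_pair(const DatumIndices& di, int instance, int j, int dsize,
                            int& etype, int& eindex) {
    etype = di.datum_type[instance * dsize + j];    // semantics tag
    eindex = di.datum_index[instance * dsize + j];  // index interpreted per tag
}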


// use the Hoc NetCon object list to segregate according to threads
// and fill the CellGroup netcons, netcon_srcgid, netcon_pnttype, and
// netcon_pntindex (called at end of mk_cellgroups);
void CellGroup::mk_cgs_netcon_info(neuron::model_sorted_token const& cache_token, CellGroup* cgs) {
    // count the netcons for each thread
    int* nccnt = new int[nrn_nthread];
    for (int i = 0; i < nrn_nthread; ++i) {
        nccnt[i] = 0;
    }
    Symbol* ncsym = hoc_lookup("NetCon");
    hoc_List* ncl = ncsym->u.ctemplate->olist;
    hoc_Item* q;
    ITERATE(q, ncl) {
        Object* ho = (Object*) VOIDITM(q);
        NetCon* nc = (NetCon*) ho->u.this_pointer;
        int ith = 0;  // if no _vnt, put in thread 0
        if (nc->target_ && nc->target_->_vnt) {
            ith = ((NrnThread*) (nc->target_->_vnt))->id;
        }
        ++nccnt[ith];
    }

    // allocate
    for (int i = 0; i < nrn_nthread; ++i) {
        cgs[i].n_netcon = nccnt[i];
        cgs[i].netcons.resize(nccnt[i] + 1);
        cgs[i].netcon_srcgid = new int[nccnt[i] + 1];
        cgs[i].netcon_pnttype = new int[nccnt[i] + 1];
        cgs[i].netcon_pntindex = new int[nccnt[i] + 1];
    }

    // reset counts and fill
    for (int i = 0; i < nrn_nthread; ++i) {
        nccnt[i] = 0;
    }
    ITERATE(q, ncl) {
        Object* ho = (Object*) VOIDITM(q);
        NetCon* nc = (NetCon*) ho->u.this_pointer;
        int ith = 0;  // if no _vnt, put in thread 0
        if (nc->target_ && nc->target_->_vnt) {
            ith = ((NrnThread*) (nc->target_->_vnt))->id;
        }
        int i = nccnt[ith];
        cgs[ith].netcons[i] = nc;

        if (nc->target_) {
            int type = nc->target_->prop->_type;
            auto const target_thread = static_cast<NrnThread*>(nc->target_->_vnt)->id;
            assert(target_thread == ith);
            cgs[ith].netcon_pnttype[i] = type;
            cgs[ith].netcon_pntindex[i] = nc->target_->prop->id().current_row() -
                                          cache_token.thread_cache(ith).mechanism_offset.at(type);
        } else {
            cgs[ith].netcon_pnttype[i] = 0;
            cgs[ith].netcon_pntindex[i] = -1;
        }

        if (nc->src_) {
            PreSyn* ps = nc->src_;
            if (ps->gid_ >= 0) {
                cgs[ith].netcon_srcgid[i] = ps->gid_;
            } else {
                if (ps->osrc_) {
                    assert(!ps->thvar_);
                    if (nrn_nthread > 1) {  // negative gid and multiple threads.
                        cgs[ith].netcon_negsrcgid_tid.push_back(ps->nt_->id);
                        // Raise error if file mode transfer and nc and ps not
                        // in same thread. In that case we cannot guarantee that
                        // the PreSyn will end up in the same coreneuron process.
                        if (!corenrn_direct && ith != ps->nt_->id) {
                            hoc_execerror(
                                "NetCon and NetCon source with no gid are not in the same thread",
                                NULL);
                        }
                    }
                    auto* const pnt = static_cast<Point_process*>(ps->osrc_->u.this_pointer);
                    int type = pnt->prop->_type;
                    auto const src_thread = static_cast<NrnThread*>(pnt->_vnt)->id;
                    auto const current = pnt->prop->id().current_row();
                    auto const offset =
                        cache_token.thread_cache(src_thread).mechanism_offset.at(type);
                    // the resulting GID is different for "the same" pnt/source
                    // if the number of threads changes, because it encodes the
                    // offset of the source process into the thread that it
                    // lives in
                    cgs[ith].netcon_srcgid[i] = -(type +
                                                  1000 * static_cast<long>(current - offset));
                } else {
                    cgs[ith].netcon_srcgid[i] = -1;
                }
            }
        } else {
            cgs[ith].netcon_srcgid[i] = -1;
        }
        ++nccnt[ith];
    }
    delete[] nccnt;
}
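
// The function above uses the classic two-pass bucketing pattern: count per
// thread, allocate exactly, then refill with the same counters. A generic
// sketch of that pattern (illustrative only, not part of the original file;
// assumes <vector> is available):
static std::vector<std::vector<NetCon*>> bucket_by_thread(NetCon** ncs,
                                                          int n,
                                                          const int* thread_of,
                                                          int nthread) {
    std::vector<int> cnt(nthread, 0);
    for (int k = 0; k < n; ++k) {
        ++cnt[thread_of[k]];  // pass 1: count per bucket
    }
    std::vector<std::vector<NetCon*>> out(nthread);
    for (int t = 0; t < nthread; ++t) {
        out[t].reserve(cnt[t]);  // allocate exactly once
    }
    for (int k = 0; k < n; ++k) {
        out[thread_of[k]].push_back(ncs[k]);  // pass 2: fill, preserving order
    }
    return out;
}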


// Up to now all the artificial cells have been left out of the processing.
// Since most processing is in the context of iteration over nt.tml it
// might be easiest to transform the loops using a
// copy of nt.tml with artificial cell types belonging to nt at the end.
// Treat these artificial cell memb_list as much as possible like the others.
// The only issue is that data for artificial cells is not in cache order
// (after all there is no BREAKPOINT or SOLVE block for ARTIFICIAL_CELLs)
// so we assume there will be no POINTER usage into that data.
// Also, note that ml.nodecount for artificial cell does not refer to
// a list of voltage nodes but just to the count of instances.
void CellGroup::mk_tml_with_art(neuron::model_sorted_token const& cache_token, CellGroup* cgs) {
    // copy NrnThread tml list and append ARTIFICIAL cell types
    // but do not include PatternStim if file mode.
    // For direct mode PatternStim is not treated specially except that
    // the Info struct is shared.
    // For file mode transfer PatternStim has always been treated
    // specially by CoreNEURON as it is not conceptually a part of
    // the model but is invoked via an argument when launching
    // CoreNEURON from the shell.
    // Now using cgs[tid].mlwithart instead of
    // tml_with_art = new NrnThreadMembList*[nrn_nthread];
    // to allow fast retrieval of type and Memb_list* given index into the vector.
    // copy from NrnThread
    for (int id = 0; id < nrn_nthread; ++id) {
        MlWithArt& mla = cgs[id].mlwithart;
        for (NrnThreadMembList* tml = nrn_threads[id].tml; tml; tml = tml->next) {
            mla.push_back(MlWithArtItem(tml->index, tml->ml));
        }
    }
    int* acnt = new int[nrn_nthread];
    for (int i = 0; i < n_memb_func; ++i) {
        if (nrn_is_artificial_[i] && memb_list[i].nodecount) {
            // skip PatternStim if file mode transfer.
            if (!corenrn_direct && strcmp(memb_func[i].sym->name, "PatternStim") == 0) {
                continue;
            }
            if (strcmp(memb_func[i].sym->name, "HDF5Reader") == 0) {
                continue;
            }
            Memb_list* ml = &memb_list[i];
            // how many artificial in each thread
            for (int id = 0; id < nrn_nthread; ++id) {
                acnt[id] = 0;
            }
            for (int j = 0; j < memb_list[i].nodecount; ++j) {
                auto* pnt = memb_list[i].pdata[j][1].get<Point_process*>();
                int id = ((NrnThread*) pnt->_vnt)->id;
                ++acnt[id];
            }

            // allocate
            for (int id = 0; id < nrn_nthread; ++id) {
                if (acnt[id]) {
                    MlWithArt& mla = cgs[id].mlwithart;
                    ml = new Memb_list{i};
                    mla.push_back(MlWithArtItem(i, ml));  // need to delete ml when mla destroyed.
                    ml->nodecount = acnt[id];
                    ml->nodelist = NULL;
                    ml->nodeindices = NULL;
                    ml->prop = NULL;
                    ml->_thread = NULL;
                    // ml->_data = new double*[acnt[id]];
                    ml->pdata = new Datum*[acnt[id]];
                }
            }
            // fill data and pdata pointers
            for (int id = 0; id < nrn_nthread; ++id) {
                acnt[id] = 0;
            }
            for (int j = 0; j < memb_list[i].nodecount; ++j) {
                auto* pnt = memb_list[i].pdata[j][1].get<Point_process*>();
                int id = ((NrnThread*) pnt->_vnt)->id;
                Memb_list* ml = cgs[id].mlwithart.back().second;
                ml->set_storage_offset(cache_token.thread_cache(id).mechanism_offset.at(i));
                ml->pdata[acnt[id]] = memb_list[i].pdata[j];
                ++acnt[id];
            }
        }
    }
    delete[] acnt;
}
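
// After mk_tml_with_art, real and artificial mechanism lists can be walked
// uniformly through cgs[tid].mlwithart. Usage sketch (illustrative only,
// not part of the original file):
static size_t count_mech_instances(CellGroup& cg) {
    size_t n = 0;
    for (MlWithArtItem& item: cg.mlwithart) {
        n += item.second->nodecount;  // instance count, real or artificial
    }
    return n;
}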

size_t CellGroup::get_mla_rankbytes(CellGroup* cellgroups_) {
    size_t mla_rankbytes = 0;
    size_t nbytes;
    NrnThreadMembList* tml;
    for (const NrnThread* nt: for_threads(nrn_threads, nrn_nthread)) {
        size_t threadbytes = 0;
        size_t npnt = 0;
        size_t nart = 0;
        int ith = nt->id;
        nbytes = nt->end * (1 * sizeof(int) + 3 * sizeof(double));
        threadbytes += nbytes;

        int mechcnt = 0;
        size_t mechcnt_instances = 0;
        MlWithArt& mla = cellgroups_[ith].mlwithart;
        for (size_t i = 0; i < mla.size(); ++i) {
            int type = mla[i].first;
            Memb_list* ml = mla[i].second;
            ++mechcnt;
            mechcnt_instances += ml->nodecount;
            npnt += (memb_func[type].is_point ? ml->nodecount : 0);
            int psize = nrn_prop_param_size_[type];
            int dpsize = nrn_prop_dparam_size_[type];  // includes cvodeieq if present
            // printf("%d %s ispnt %d cnt %d psize %d dpsize %d\n", tml->index,
            //        memb_func[type].sym->name, memb_func[type].is_point, ml->nodecount, psize,
            //        dpsize);
            // nodeindices, data, pdata + pnt with prop
            int notart = nrn_is_artificial_[type] ? 0 : 1;
            if (nrn_is_artificial_[type]) {
                nart += ml->nodecount;
            }
            nbytes = ml->nodecount *
                     (notart * sizeof(int) + 1 * sizeof(double*) + 1 * sizeof(Datum*) +
                      psize * sizeof(double) + dpsize * sizeof(Datum));
            threadbytes += nbytes;
        }
        nbytes += npnt * (sizeof(Point_process) + sizeof(Prop));
        // printf(" mech in use %d Point instances %ld artcells %ld total instances %ld\n",
        //        mechcnt, npnt, nart, mechcnt_instances);
        // printf(" thread bytes %ld\n", threadbytes);
        mla_rankbytes += threadbytes;
    }
    return mla_rankbytes;
}
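
// Worked example for the estimate above (illustrative, assuming LP64 sizes:
// sizeof(int) == 4, sizeof(double) == sizeof(void*) == 8): a thread with
// 10000 nodes contributes 10000 * (1*4 + 3*8) = 280000 bytes before
// mechanism storage; each mechanism then adds
// nodecount * (notart*4 + 8 + 8 + psize*8 + dpsize*sizeof(Datum)) bytes.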

void CellGroup::clean_art(CellGroup* cgs) {
    // clean up the art Memb_list of CellGroup[].mlwithart
    // But if multithread and direct transfer mode, defer deletion of
    // data for artificial cells, so that the artificial cell ml->_data
    // can be used when nrnthreads_type_return is called.
    if (corenrn_direct && nrn_nthread > 0) {
        deferred_type2artml_.resize(nrn_nthread);
    }
    for (int ith = 0; ith < nrn_nthread; ++ith) {
        MlWithArt& mla = cgs[ith].mlwithart;
        for (size_t i = 0; i < mla.size(); ++i) {
            int type = mla[i].first;
            Memb_list* ml = mla[i].second;
            if (nrn_is_artificial_[type]) {
                if (!deferred_type2artml_.empty()) {
                    deferred_type2artml_[ith][type] = ml;
                } else {
                    // delete[] ml->_data;
                    delete[] ml->pdata;
                    delete ml;
                }
            }
        }
    }
}

void CellGroup::setup_nrn_has_net_event() {
    if (has_net_event_) {
        return;
    }

    has_net_event_ = new int[n_memb_func];
    for (int i = 0; i < n_memb_func; ++i) {
        has_net_event_[i] = 0;
    }
    for (int i = 0; i < nrn_has_net_event_cnt_; ++i) {
        has_net_event_[nrn_has_net_event_[i]] = 1;
    }
}
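
// The function above converts the sparse registration list nrn_has_net_event_
// into a dense table so nrn_has_net_event(type) is an O(1) lookup. The same
// pattern in isolation (illustrative only, not part of the original file;
// assumes <vector> is available):
static std::vector<char> make_flag_table(const int* sparse_types, int count, int ntype) {
    std::vector<char> flags(ntype, 0);
    for (int i = 0; i < count; ++i) {
        flags[sparse_types[i]] = 1;  // mark each registered type
    }
    return flags;
}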