NEURON
nrn2core_data_init.cpp
Go to the documentation of this file.
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================.
7 */
8 #include <sstream>
9 
10 #include "coreneuron/nrnconf.h"
18 #include "coreneuron/io/mem_layout_util.hpp" // for WATCH use of nrn_i_layout
21 
22 namespace coreneuron {
23 
24 // helper functions defined below.
25 static void nrn2core_tqueue();
26 static void watch_activate_clear();
27 static void nrn2core_transfer_watch_condition(int, int, int, int, int);
28 static void vec_play_activate();
29 static void nrn2core_patstim_share_info();
30 
31 extern "C" {
32 /** Pointer to function in NEURON that iterates over activated
33  WATCH statements, sending each item to ...
34 **/
35 void (*nrn2core_transfer_watch_)(void (*cb)(int, int, int, int, int));
36 }
37 
38 /**
39  All state from NEURON necessary to continue a run.
40 
41  In NEURON direct mode, we desire the exact behavior of
42  ParallelContext.psolve(tstop). I.e. a sequence of such calls with and
43  without intervening calls to h.finitialize(). Most state (structure
44  and data of the substantive model) has been copied
45  from NEURON during nrn_setup. Now we need to copy the event queue
46  and set up any other invalid internal structures. I.e basically the
47  nrn_finitialize above but without changing any simulation data. We follow
48  some of the strategy of checkpoint_initialize.
49 **/
51  dt2thread(-1.);
53 
55 
56  // Reproduce present NEURON WATCH activation
57  // Start from nothing active.
59  // nrn2core_transfer_watch_condition(...) receives the WATCH activation info
60  // on a per active WatchCondition basis from NEURON.
61  (*nrn2core_transfer_watch_)(nrn2core_transfer_watch_condition);
62 
64 
65  // the things done by checkpoint restore at the end of Phase2::read_file
66  // vec_play_continuous n_vec_play_continuous of them
67  // patstim_index
68  // preSynConditionEventFlags nt.n_presyn of them
69  // restore_events
70  // restore_events
71  // the things done for checkpoint at the end of Phase2::populate
72  // checkpoint_restore_tqueue
73  // Lastly, if PatternStim exists, needs initialization
74  // checkpoint_restore_patternstim
75  // io/nrn_checkpoint.cpp: write_tqueue contains examples for each
76  // DiscreteEvent type with regard to the information needed for each
77  // subclass from the point of view of CoreNEURON.
78  // E.g. for NetConType_, just netcon_index
79  // The trick, then, is to figure out the CoreNEURON info from the
80  // NEURON queue items and that should be available in passing from
81  // the existing processing of nrncore_write.
82 
83  // activate the vec_play_continuous events defined in phase2 setup.
85 
86  // Any PreSyn.flag_ == 1 on the NEURON side needs to be transferred
87  // or the PreSyn will spuriously fire when psolve starts.
88  extern void nrn2core_PreSyn_flag_receive(int tid);
89  for (int tid = 0; tid < nrn_nthread; ++tid) {
91  }
92 
94 
96 }
97 
99  for (int tid = 0; tid < nrn_nthread; ++tid) {
100  NrnThread* nt = nrn_threads + tid;
101  for (int i = 0; i < nt->n_vecplay; ++i) {
102  PlayRecord* pr = (PlayRecord*) nt->_vecplay[i];
103  assert(pr->type() == VecPlayContinuousType);
105  assert(vpc->e_);
106  assert(vpc->discon_indices_ == nullptr); // not implemented
107  vpc->e_->send(vpc->t_[vpc->ubound_index_], net_cvode_instance, nt);
108  }
109  }
110 }
111 
112 } // namespace coreneuron
113 
114 // For direct transfer of event queue information
115 // Must be the same as corresponding struct NrnCoreTransferEvents in NEURON
116 // Do not put this coreneuron version in the coreneuron namespace so that the
117 // function pointer/callback has the same type in both NEURON and CoreNEURON.
118 // Calling a function through a pointer to a function of different type is
119 // undefined behaviour.
121  std::vector<int> type; // DiscreteEvent type
122  std::vector<double> td; // delivery time
123  std::vector<int> intdata; // ints specific to the DiscreteEvent type
124  std::vector<double> dbldata; // doubles specific to the type.
125 };
126 
127 namespace coreneuron {
128 
129 extern "C" {
130 /** Pointer to function in NEURON that iterates over its tqeueue **/
131 NrnCoreTransferEvents* (*nrn2core_transfer_tqueue_)(int tid);
132 }
133 
134 // for faster determination of the movable index given the type
135 static std::unordered_map<int, int> type2movable;
136 static void setup_type2semantics() {
137  if (type2movable.empty()) {
138  std::size_t const n_memb_func{corenrn.get_memb_funcs().size()};
139  for (std::size_t type = 0; type < n_memb_func; ++type) {
140  int* ds{corenrn.get_memb_func(type).dparam_semantics};
141  if (ds) {
142  int dparam_size = corenrn.get_prop_dparam_size()[type];
143  for (int psz = 0; psz < dparam_size; ++psz) {
144  if (ds[psz] == -4) { // netsend semantics
145  type2movable[type] = psz;
146  }
147  }
148  }
149  }
150  }
151 }
152 
/** Copy each thread's queue from NEURON **/
static void nrn2core_tqueue() {
    setup_type2semantics();  // need type2movable for SelfEvent.
    for (int tid = 0; tid < nrn_nthread; ++tid) {  // should be parallel
        // NEURON hands back its queue contents for this thread; we own the
        // returned object and delete it below.
        NrnCoreTransferEvents* ncte = (*nrn2core_transfer_tqueue_)(tid);
        if (ncte) {
            // Cursors into the flat per-event payload arrays. Each case below
            // must consume exactly the ints/doubles its event type supplies,
            // in order, or every later event is misread.
            size_t idat = 0;
            size_t idbldat = 0;
            NrnThread& nt = nrn_threads[tid];
            for (size_t i = 0; i < ncte->type.size(); ++i) {
                switch (ncte->type[i]) {
                    case 0: {  // DiscreteEvent
                        // Ignore
                    } break;

                    case 2: {  // NetCon
                        // payload: one int, the NetCon index in this thread.
                        int ncindex = ncte->intdata[idat++];
                        NetCon* nc = nt.netcons + ncindex;
#ifndef CORENRN_DEBUG_QUEUE
#define CORENRN_DEBUG_QUEUE 0
#endif
#if CORENRN_DEBUG_QUEUE
                        printf("nrn2core_tqueue tid=%d i=%zd type=%d tdeliver=%g NetCon %d\n",
                               tid,
                               i,
                               ncte->type[i],
                               ncte->td[i],
                               ncindex);
#endif
                        // Re-enqueue at the original delivery time.
                        nc->send(ncte->td[i], net_cvode_instance, &nt);
                    } break;

                    case 3: {  // SelfEvent
                        // target_type, target_instance, weight_index, flag movable

                        // This is a nightmare and needs to be profoundly re-imagined.

                        // Determine Point_process*
                        int target_type = ncte->intdata[idat++];
                        int target_instance = ncte->intdata[idat++];
                        // From target_type and target_instance (mechanism data index)
                        // compute the nt.pntprocs index.
                        int offset = nt._pnt_offset[target_type];
                        Point_process* pnt = nt.pntprocs + offset + target_instance;
                        assert(pnt->_type == target_type);
                        Memb_list* ml = nt._ml_list[target_type];
                        if (ml->_permute) {
                            // mechanism data may be permuted; map to the
                            // permuted instance index before the asserts.
                            target_instance = ml->_permute[target_instance];
                        }
                        assert(pnt->_i_instance == target_instance);
                        assert(pnt->_tid == tid);

                        // Determine weight_index
                        int netcon_index = ncte->intdata[idat++];  // via the NetCon
                        int weight_index = -1;                     // no associated netcon
                        if (netcon_index >= 0) {
                            weight_index = nt.netcons[netcon_index].u.weight_index_;
                        }

                        double flag = ncte->dbldata[idbldat++];
                        int is_movable = ncte->intdata[idat++];
                        // If the queue item is movable, then the pointer needs to be
                        // stored in the mechanism instance movable slot by net_send.
                        // And don't overwrite if not movable. Only one SelfEvent
                        // for a given target instance is movable.
                        int movable_index = nrn_i_layout(target_instance,
                                                        ml->nodecount,
                                                        type2movable[target_type],
                                                        corenrn.get_prop_dparam_size()[target_type],
                                                        corenrn.get_mech_data_layout()[target_type]);
                        void** movable_arg = nt._vdata + ml->pdata[movable_index];
                        // Save the slot so it can be restored if this event is
                        // not the movable one.
                        TQItem* old_movable_arg = (TQItem*) (*movable_arg);
#if CORENRN_DEBUG_QUEUE
                        printf("nrn2core_tqueue tid=%d i=%zd type=%d tdeliver=%g SelfEvent\n",
                               tid,
                               i,
                               ncte->type[i],
                               ncte->td[i]);
                        printf(
                            " target_type=%d pnt data index=%d flag=%g is_movable=%d netcon index "
                            "for weight=%d\n",
                            target_type,
                            target_instance,
                            flag,
                            is_movable,
                            netcon_index);
#endif
                        net_send(movable_arg, weight_index, pnt, ncte->td[i], flag);
                        if (!is_movable) {
                            // undo net_send's write to the movable slot.
                            *movable_arg = (void*) old_movable_arg;
                        }
                    } break;

                    case 4: {  // PreSyn
                        // first payload int distinguishes PreSyn (0) from InputPreSyn.
                        int type = ncte->intdata[idat++];
                        if (type == 0) {  // CoreNEURON PreSyn
                            int ps_index = ncte->intdata[idat++];
#if CORENRN_DEBUG_QUEUE
                            printf("nrn2core_tqueue tid=%d i=%zd type=%d tdeliver=%g PreSyn %d\n",
                                   tid,
                                   i,
                                   ncte->type[i],
                                   ncte->td[i],
                                   ps_index);
#endif
                            PreSyn* ps = nt.presyns + ps_index;
                            int gid = ps->output_index_;
                            // Following assumes already sent to other machines.
                            // Temporarily hide the gid so send() delivers only
                            // locally, then restore it.
                            ps->output_index_ = -1;
                            ps->send(ncte->td[i], net_cvode_instance, &nt);
                            ps->output_index_ = gid;
                        } else {  // CoreNEURON InputPreSyn
                            int gid = ncte->intdata[idat++];
                            InputPreSyn* ps = gid2in[gid];
                            ps->send(ncte->td[i], net_cvode_instance, &nt);
                        }
                    } break;

                    case 6: {  // PlayRecordEvent
                        // Ignore as phase2 handles analogous to checkpoint restore.
                    } break;

                    case 7: {  // NetParEvent
                        // Nothing to transfer; NetParEvents are regenerated.
#if CORENRN_DEBUG_QUEUE
                        printf("nrn2core_tqueue tid=%d i=%zd type=%d tdeliver=%g NetParEvent\n",
                               tid,
                               i,
                               ncte->type[i],
                               ncte->td[i]);
#endif
                    } break;

                    default: {
                        std::stringstream qetype;
                        qetype << ncte->type[i];
                        hoc_execerror("Unimplemented transfer queue event type:",
                                      qetype.str().c_str());
                    } break;
                }
            }
            delete ncte;
        }
    }
}
296 
297 /** @brief return first and last datum indices of WATCH statements
298  */
299 void watch_datum_indices(int type, int& first, int& last) {
300  int* semantics = corenrn.get_memb_func(type).dparam_semantics;
301  int dparam_size = corenrn.get_prop_dparam_size()[type];
302  // which slots are WATCH
303  // Note that first is the WatchList item, not the WatchCondition
304  first = -1;
305  last = 0;
306  for (int i = 0; i < dparam_size; ++i) {
307  if (semantics[i] == -8) { // WATCH
308  if (first == -1) {
309  first = i;
310  }
311  last = i;
312  }
313  }
314 }
315 
317  // Can identify mechanisms with WATCH statements from non-NULL
318  // corenrn.get_watch_check()[type] and figure out pdata that are
319  // _watch_array items from corenrn.get_memb_func(type).dparam_semantics
320  // Ironically, all WATCH statements may already be inactivated in
321  // consequence of phase2 transfer. But, for direct mode psolve, we would
322  // eventually like to minimise that transfer (at least with respect to
323  // structure).
324 
325  // Loop over threads, mechanisms and pick out the ones with WATCH statements.
326  for (int tid = 0; tid < nrn_nthread; ++tid) {
327  NrnThread& nt = nrn_threads[tid];
328  for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
329  if (corenrn.get_watch_check()[tml->index]) {
330  // zero all the WATCH slots.
331  Memb_list* ml = tml->ml;
332  int type = tml->index;
333  int dparam_size = corenrn.get_prop_dparam_size()[type];
334  // which slots are WATCH
335  int first, last;
336  watch_datum_indices(type, first, last);
337  // Zero the _watch_array from first to last inclusive.
338  // Note: the first is actually unused but is there because NEURON
339  // uses it. There is probably a better way to do this.
340  int* pdata = ml->pdata;
341  int nodecount = ml->nodecount;
342  int layout = corenrn.get_mech_data_layout()[type];
343  for (int iml = 0; iml < nodecount; ++iml) {
344  for (int i = first; i <= last; ++i) {
345  int* pd = pdata + nrn_i_layout(iml, nodecount, i, dparam_size, layout);
346  *pd = 0;
347  }
348  }
349  }
350  }
351  }
352 }
353 
355  int pnttype,
356  int pntindex,
357  int watch_index,
358  int triggered) {
359  // Note: watch_index relative to AoS _ppvar for instance.
360  NrnThread& nt = nrn_threads[tid];
361  int pntoffset = nt._pnt_offset[pnttype];
362  Point_process* pnt = nt.pntprocs + (pntoffset + pntindex);
363  assert(pnt->_type == pnttype);
364  Memb_list* ml = nt._ml_list[pnttype];
365  if (ml->_permute) {
366  pntindex = ml->_permute[pntindex];
367  }
368  assert(pnt->_i_instance == pntindex);
369  assert(pnt->_tid == tid);
370 
371  // perhaps all this should be more closely associated with phase2 since
372  // we are really talking about (direct) transfer from NEURON and not able
373  // to rely on finitialize() on the CoreNEURON side which would otherwise
374  // set up all this stuff as a consequence of SelfEvents initiated
375  // and delivered at time 0.
376  // I've become shakey in regard to how this is done since the reorganization
377  // from where everything was done in nrn_setup.cpp. Here, I'm guessing
378  // nrn_i_layout is the relevant index transformation after finding the
379  // beginning of the mechanism pdata.
380  int* pdata = ml->pdata;
381  int iml = pntindex;
382  int nodecount = ml->nodecount;
383  int i = watch_index;
384  int dparam_size = corenrn.get_prop_dparam_size()[pnttype];
385  int layout = corenrn.get_mech_data_layout()[pnttype];
386  int* pd = pdata + nrn_i_layout(iml, nodecount, i, dparam_size, layout);
387 
388  // activate the WatchCondition
389  *pd = 2 + triggered;
390 }
391 
392 // PatternStim direct mode
393 // NEURON and CoreNEURON had different definitions for struct Info but
394 // the NEURON version of pattern.mod for PatternStim was changed to
395 // adopt the CoreNEURON version (along with THREADSAFE so they have the
396 // same param size). So they now both share the same
397 // instance of Info and NEURON is responsible for constructor/destructor.
398 // And in direct mode, PatternStim gets no special treatment except that
399 // on the CoreNEURON side, the Info struct points to the NEURON instance.
400 
401 // from patstim.mod
402 extern void** pattern_stim_info_ref(int icnt,
403  int cnt,
404  double* _p,
405  Datum* _ppvar,
406  ThreadDatum* _thread,
407  NrnThread* _nt,
408  Memb_list* ml,
409  double v);
410 
411 extern "C" {
412 void (*nrn2core_patternstim_)(void** info);
413 }
414 
415 // In direct mode, CoreNEURON and NEURON share the same PatternStim Info
416 // Assume singleton for PatternStim but that is not really necessary in principle.
418  int type = nrn_get_mechtype("PatternStim");
419  NrnThread* nt = nrn_threads + 0;
420  Memb_list* ml = nt->_ml_list[type];
421  if (ml) {
422  int layout = corenrn.get_mech_data_layout()[type];
423  int sz = corenrn.get_prop_param_size()[type];
424  int psz = corenrn.get_prop_dparam_size()[type];
425  int _cntml = ml->nodecount;
426  assert(ml->nodecount == 1);
427  int _iml = 0; // Assume singleton here and in (*nrn2core_patternstim_)(info) below.
428  double* _p = ml->data;
429  Datum* _ppvar = ml->pdata;
430  if (layout == Layout::AoS) {
431  _p += _iml * sz;
432  _ppvar += _iml * psz;
433  } else if (layout == Layout::SoA) {
434  ;
435  } else {
436  assert(0);
437  }
438 
439  void** info = pattern_stim_info_ref(_iml, _cntml, _p, _ppvar, nullptr, nt, ml, 0.0);
440  (*nrn2core_patternstim_)(info);
441  }
442 }
443 
444 
445 } // namespace coreneuron
auto & get_memb_func(size_t idx)
Definition: coreneuron.hpp:135
virtual void send(double sendtime, NetCvode *, NrnThread *) override
Definition: netcvode.cpp:442
virtual void send(double sendtime, NetCvode *, NrnThread *) override
Definition: netcvode.cpp:372
union coreneuron::NetCon::@0 u
virtual void send(double sendtime, NetCvode *, NrnThread *) override
Definition: netcvode.cpp:409
PlayRecordEvent * e_
Definition: vrecitem.h:86
#define cnt
Definition: tqueue.hpp:44
#define v
Definition: md1redef.h:11
#define nodecount
Definition: md1redef.h:39
#define i
Definition: md1redef.h:19
#define pdata
Definition: md1redef.h:37
#define VecPlayContinuousType
Definition: vrecitem.h:17
#define assert(ex)
Definition: hocassrt.h:24
printf
Definition: extdef.h:5
THIS FILE IS AUTO GENERATED DONT MODIFY IT.
NrnThread * nrn_threads
Definition: multicore.cpp:56
int nrn_i_layout(int icnt, int cnt, int isz, int sz, int layout)
This function return the index in a flat array of a matrix coordinate (icnt, isz).
void clear_event_queue()
Definition: cvodestb.cpp:47
static void setup_type2semantics()
void watch_datum_indices(int type, int &first, int &last)
return first and last datum indices of WATCH statements
static std::unordered_map< int, int > type2movable
static void nrn2core_transfer_watch_condition(int, int, int, int, int)
int nrn_nthread
Definition: multicore.cpp:55
std::map< int, InputPreSyn * > gid2in
Definition: nrn_setup.cpp:160
void nrn_thread_table_check()
Definition: multicore.cpp:168
int Datum
Definition: nrnconf.h:23
void(* nrn2core_transfer_watch_)(void(*cb)(int, int, int, int, int))
Pointer to function in NEURON that iterates over activated WATCH statements, sending each item to ....
void hoc_execerror(const char *s1, const char *s2)
Definition: nrnoc_aux.cpp:39
void(* nrn2core_patternstim_)(void **info)
CoreNeuron corenrn
Definition: multicore.cpp:53
int nrn_get_mechtype(const char *name)
Get mechanism type by the mechanism name.
Definition: mk_mech.cpp:145
static void watch_activate_clear()
void ** pattern_stim_info_ref(int icnt, int cnt, double *_p, Datum *_ppvar, ThreadDatum *_thread, NrnThread *_nt, Memb_list *ml, double v)
static void nrn2core_patstim_share_info()
void net_send(void **, int, Point_process *, double, double)
Definition: netcvode.cpp:77
void nrn_spike_exchange_init()
Definition: netpar.cpp:238
void dt2thread(double adt)
void nrn2core_PreSyn_flag_receive(int tid)
static void nrn2core_tqueue()
Copy each thread's queue from NEURON.
void direct_mode_initialize()
All state from NEURON necessary to continue a run.
NetCvode * net_cvode_instance
Definition: netcvode.cpp:35
static int pntindex
Definition: prcellstate.cpp:24
static void vec_play_activate()
static List * info
static int watch_index
Definition: nocpout.cpp:164
short type
Definition: cabvars.h:10
int n_memb_func
Definition: init.cpp:448
static void pr(N_Vector x)
std::vector< double > td
std::vector< double > dbldata
std::vector< int > intdata
virtual void send(double deliverytime, NetCvode *, NrnThread *)
Definition: netcvode.cpp:362
std::vector< int > _pnt_offset
Definition: multicore.hpp:154
Memb_list ** _ml_list
Definition: multicore.hpp:81
NrnThreadMembList * tml
Definition: multicore.hpp:80
Point_process * pntprocs
Definition: multicore.hpp:82
NrnThreadMembList * next
Definition: multicore.hpp:33