NEURON
eion.cpp
Go to the documentation of this file.
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================.
7 */
8 
9 #include <math.h>
10 #include <string.h>
11 
13 #include "coreneuron/mpi/nrnmpi.h"
18 
// Flattened 2-D index into the SoA mechanism data: variable row (i) within the
// padded node dimension, at instance _iml. Expands names `_cntml_padded` and
// `_iml`, which must be in scope at every expansion site.
#define CNRN_FLAT_INDEX_IML_ROW(i) ((i) * (_cntml_padded) + (_iml))

namespace coreneuron {

// for each ion it refers to internal concentration, external concentration, and charge,
const int ion_global_map_member_size = 3;


// Number of per-instance data variables of an ion mechanism:
// erev, conci, conco, cur, dcurdv (see the accessor macros below).
#define nparm 5
void nrn_init_ion(NrnThread*, Memb_list*, int);
void nrn_alloc_ion(double*, Datum*, int);

static int na_ion, k_ion, ca_ion; /* will get type for these special ions */
32 
33 int nrn_is_ion(int type) {
34  // Old: commented to remove dependency on memb_func and alloc function
35  // return (memb_func[type].alloc == ion_alloc);
36  return (type < nrn_ion_global_map_size // type smaller than largest ion's
37  && nrn_ion_global_map[type] != nullptr); // allocated ion charge variables
38 }
39 
// Per-ion global parameters, indexed by mechanism type. Each allocated row
// holds ion_global_map_member_size (= 3) doubles; non-ion types are nullptr.
// Grown and filled in ion_reg().
double** nrn_ion_global_map;
#define global_conci(type) nrn_ion_global_map[type][0]  /* default internal concentration */
#define global_conco(type) nrn_ion_global_map[type][1]  /* default external concentration */
#define global_charge(type) nrn_ion_global_map[type][2] /* ion valence (charge) */
45 
46 double nrn_ion_charge(int type) {
47  return global_charge(type);
48 }
49 
/* Register an ion mechanism "<name>_ion" with valence `valence`.
 *
 * Creates the mechanism's variable names (e<name>, <name>i, <name>o, i<name>,
 * di<name>_dv_), allocates the ion's row in nrn_ion_global_map with default
 * concentrations and charge, registers the mechanism with the core, and
 * cross-checks the valence against any earlier registration of the same ion.
 * Calls nrn_exit(1) on a valence conflict or when no valence was ever given.
 *
 * NOTE(review): this extract appears to be missing lines in two places
 * (the emalloc call and some register_mech arguments) — flagged inline;
 * verify against the upstream file before building.
 */
void ion_reg(const char* name, double valence) {
    std::array<std::string, 7> buf{};
#define VAL_SENTINAL -10000. /* marks "valence not specified" */
    std::string name_str{name};
    buf[0] = name_str + "_ion";
    buf[1] = "e" + name_str;
    buf[2] = name_str + "i";
    buf[3] = name_str + "o";
    /* buf[4] intentionally left empty -> nullptr separator in `mech` below */
    buf[5] = "i" + name_str;
    buf[6] = "di" + name_str + "_dv_";
    std::array<const char*,
               buf.size() + 1 /* shifted by 1 */ + NB_MECH_VAR_CATEGORIES /* trailing nulls */>
        mech{};
    for (int i = 0; i < buf.size(); i++) {
        mech[i + 1] = buf[i].empty() ? nullptr : buf[i].c_str();
    }
    int mechtype = nrn_get_mechtype(buf[0].c_str());
    if (mechtype >= nrn_ion_global_map_size ||
        nrn_ion_global_map[mechtype] == nullptr) {  // if hasn't yet been allocated

        // allocates mem for ion in ion_map and sets null all non-ion types
        if (nrn_ion_global_map_size <= mechtype) {
            int size = mechtype + 1;
            nrn_ion_global_map = (double**) erealloc(nrn_ion_global_map, sizeof(double*) * size);

            for (int i = nrn_ion_global_map_size; i < mechtype; i++) {
                nrn_ion_global_map[i] = nullptr;
            }
            nrn_ion_global_map_size = mechtype + 1;
        }
        /* NOTE(review): the allocation of nrn_ion_global_map[mechtype]
           (presumably an emalloc of ion_global_map_member_size doubles) is
           truncated in this extract — only the trailing argument survives. */
                                       sizeof(double));

        /* NOTE(review): register_mech takes more callback arguments than
           appear here (see its declaration); some were lost in extraction. */
        register_mech(mech.data(),
                      nullptr,
                      nullptr,
                      nullptr,
                      nullptr,
                      -1,
                      1);
        mechtype = nrn_get_mechtype(mech[1]);
        _nrn_layout_reg(mechtype, SOA_LAYOUT);
        hoc_register_prop_size(mechtype, nparm, 1);
        hoc_register_dparam_semantics(mechtype, 0, "iontype");
        nrn_writes_conc(mechtype, 1);

        /* NOTE(review): buf[1] concatenates the already-mutated buf[0]
           ("<name>i0_<name>_ion"), yielding "<name>o0_<name>i0_<name>_ion".
           That looks unintended (expected "<name>o0_<name>_ion") — confirm
           against upstream; buf[1] is not read again afterwards here. */
        buf[0] = name_str + "i0_" + buf[0];
        buf[1] = name_str + "o0_" + buf[0];
        if (strcmp("na", name) == 0) {
            na_ion = mechtype;
            global_conci(mechtype) = DEF_nai;
            global_conco(mechtype) = DEF_nao;
            global_charge(mechtype) = 1.;
        } else if (strcmp("k", name) == 0) {
            k_ion = mechtype;
            global_conci(mechtype) = DEF_ki;
            global_conco(mechtype) = DEF_ko;
            global_charge(mechtype) = 1.;
        } else if (strcmp("ca", name) == 0) {
            ca_ion = mechtype;
            global_conci(mechtype) = DEF_cai;
            global_conco(mechtype) = DEF_cao;
            global_charge(mechtype) = 2.;
        } else {
            /* unknown ion: generic defaults, charge must come from USEION */
            global_conci(mechtype) = DEF_ioni;
            global_conco(mechtype) = DEF_iono;
            global_charge(mechtype) = VAL_SENTINAL;
        }
    }
    /* Valence consistency checks across multiple USEION registrations. */
    double val = global_charge(mechtype);
    if (valence != VAL_SENTINAL && val != VAL_SENTINAL && valence != val) {
        fprintf(stderr,
                "%s ion valence defined differently in\n\
two USEION statements (%g and %g)\n",
                buf[0].c_str(),
                valence,
                global_charge(mechtype));
        nrn_exit(1);
    } else if (valence == VAL_SENTINAL && val == VAL_SENTINAL) {
        fprintf(stderr,
                "%s ion valence must be defined in\n\
the USEION statement of any model using this ion\n",
                buf[0].c_str());
        nrn_exit(1);
    } else if (valence != VAL_SENTINAL) {
        global_charge(mechtype) = valence;
    }
}
141 
#if VECTORIZE
/* Accessors for the nparm (= 5) per-instance ion variables in the SoA data
 * array `pd`, for instance `_iml` (see CNRN_FLAT_INDEX_IML_ROW). */
#define erev pd[CNRN_FLAT_INDEX_IML_ROW(0)] /* From Eion */
#define conci pd[CNRN_FLAT_INDEX_IML_ROW(1)]
#define conco pd[CNRN_FLAT_INDEX_IML_ROW(2)]
#define cur pd[CNRN_FLAT_INDEX_IML_ROW(3)]
#define dcurdv pd[CNRN_FLAT_INDEX_IML_ROW(4)]

/*
 handle erev, conci, conc0 "in the right way" according to ion_style
 default. See nrn/lib/help/nrnoc.help.
ion_style("name_ion", [c_style, e_style, einit, eadvance, cinit])

 ica is assigned
 eca is parameter but if conc exists then eca is assigned
 if conc is nrnocCONST then eca calculated on finitialize
 if conc is STATE then eca calculated on fadvance and conc finitialize
 with global nai0, nao0

 nernst(ci, co, charge) and ghk(v, ci, co, charge) available to hoc
 and models.
*/

#define iontype ppd[_iml] /* how _AMBIGUOUS is to be handled */
/*the bitmap is
03 concentration unused, nrnocCONST, DEP, STATE
04 initialize concentrations
030 reversal potential unused, nrnocCONST, DEP, STATE
040 initialize reversal potential
0100 calc reversal during fadvance
0200 ci being written by a model
0400 co being written by a model
*/

/* Shorthands over the global map for the `type` in scope. */
#define charge global_charge(type)
#define conci0 global_conci(type)
#define conco0 global_conco(type)
178 
179 double nrn_nernst_coef(int type) {
180  /* for computing jacobian element dconc'/dconc */
181  return ktf(celsius) / charge;
182 }
183 
/* Must be called prior to any channels which update the currents */
/* Zeroes cur and dcurdv for every ion instance in `ml`, and when the
 * iontype bitmap requests it (bit 0100: calc reversal during fadvance)
 * recomputes erev from the current concentrations via nrn_nernst.
 * Offloaded to GPU when nt->compute_gpu is set. */
void nrn_cur_ion(NrnThread* nt, Memb_list* ml, int type) {
    int _cntml_actual = ml->nodecount;
    double* pd;
    Datum* ppd;
    (void) nt; /* unused */
    /*printf("ion_cur %s\n", memb_func[type].sym->name);*/
    int _cntml_padded = ml->_nodecount_padded;
    pd = ml->data;
    ppd = ml->pdata;
    /* NOTE(review): this ACC pragma appears truncated in this extract
       (likely a nrn_ion_global_map clause was lost); verify upstream. */
    // clang-format off
    nrn_pragma_acc(parallel loop present(pd[0:_cntml_padded * 5],
                                         ppd[0:_cntml_actual],
                   if (nt->compute_gpu)
                       async(nt->stream_id))
    // clang-format on
    nrn_pragma_omp(target teams distribute parallel for simd if(nt->compute_gpu))
    for (int _iml = 0; _iml < _cntml_actual; ++_iml) {
        dcurdv = 0.;
        cur = 0.;
        if (iontype & 0100) {
            erev = nrn_nernst(conci, conco, charge, celsius);
        }
    };
}
211 
/* Must be called prior to other models which possibly also initialize
 concentrations based on their own states
*/
/* Initializes ion instances in `ml` according to the iontype bitmap:
 * bit 04 resets concentrations to the global defaults conci0/conco0,
 * bit 040 recomputes erev from the (possibly reset) concentrations.
 * Skipped entirely when restoring from a checkpoint. */
void nrn_init_ion(NrnThread* nt, Memb_list* ml, int type) {
    int _cntml_actual = ml->nodecount;
    double* pd;
    Datum* ppd;
    (void) nt; /* unused */

    // skip initialization if restoring from checkpoint
    if (_nrn_skip_initmodel == 1) {
        return;
    }

    /*printf("ion_init %s\n", memb_func[type].sym->name);*/
    int _cntml_padded = ml->_nodecount_padded;
    pd = ml->data;
    ppd = ml->pdata;
    // There was no async(...) clause in the initial OpenACC implementation, so
    // no `nowait` clause has been added to the OpenMP implementation. TODO:
    // verify if this can be made asynchronous or if there is a strong reason it
    // needs to be like this.
    /* NOTE(review): this ACC pragma appears truncated in this extract
       (likely a nrn_ion_global_map clause was lost); verify upstream. */
    // clang-format off
    nrn_pragma_acc(parallel loop present(pd[0:_cntml_padded * 5],
                                         ppd[0:_cntml_actual],
                   if (nt->compute_gpu))
    // clang-format on
    nrn_pragma_omp(target teams distribute parallel for simd if(nt->compute_gpu))
    for (int _iml = 0; _iml < _cntml_actual; ++_iml) {
        if (iontype & 04) {
            conci = conci0;
            conco = conco0;
        }
        if (iontype & 040) {
            erev = nrn_nernst(conci, conco, charge, celsius);
        }
    }
}
252 
253 void nrn_alloc_ion(double* p, Datum* ppvar, int _type) {
254  assert(0);
255 }
256 
/* Apply the second-order current correction after the matrix solve:
 * for every ion mechanism in the thread, advance each instance's current by
 * dcurdv * rhs at its node. Only runs when secondorder == 2 (presumably the
 * staggered/second-order integration mode — confirm against NEURON docs).
 * Offloaded to GPU when _nt->compute_gpu is set. */
void second_order_cur(NrnThread* _nt, int secondorder) {
    int _cntml_padded;
    double* pd;
    (void) _nt; /* unused */
    double* _vec_rhs = _nt->_actual_rhs;

    if (secondorder == 2) {
        for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next)
            if (nrn_is_ion(tml->index)) {
                Memb_list* ml = tml->ml;
                int _cntml_actual = ml->nodecount;
                int* ni = ml->nodeindices;
                /* pd and _cntml_padded are read implicitly by the cur/dcurdv
                   macros inside the loop below. */
                _cntml_padded = ml->_nodecount_padded;
                pd = ml->data;
                nrn_pragma_acc(parallel loop present(pd [0:_cntml_padded * 5],
                                                     ni [0:_cntml_actual],
                                                     _vec_rhs [0:_nt->end]) if (_nt->compute_gpu)
                                   async(_nt->stream_id))
                nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu))
                for (int _iml = 0; _iml < _cntml_actual; ++_iml) {
                    cur += dcurdv * (_vec_rhs[ni[_iml]]);
                }
            }
    }
}
282 } // namespace coreneuron
283 #endif
#define global_conco(type)
#define VAL_SENTINAL
#define global_charge(type)
#define nparm
#define global_conci(type)
#define i
Definition: md1redef.h:19
nrn_pragma_acc(routine seq) nrn_pragma_omp(declare target) philox4x32_ctr_t coreneuron_random123_philox4x32_helper(coreneuron nrn_pragma_omp(end declare target) namespace coreneuron
Provide a helper function in global namespace that is declared target for OpenMP offloading to functi...
Definition: nrnran123.h:66
#define SOA_LAYOUT
Definition: data_layout.hpp:11
char buf[512]
Definition: init.cpp:13
#define assert(ex)
Definition: hocassrt.h:24
#define NB_MECH_VAR_CATEGORIES
#define DEF_nai
#define DEF_nao
#define DEF_ioni
#define DEF_iono
#define DEF_ko
#define DEF_cao
#define DEF_cai
#define DEF_ki
const char * name
Definition: init.cpp:16
THIS FILE IS AUTO GENERATED DONT MODIFY IT.
void nrn_exit(int err)
Definition: nrnoc_aux.cpp:30
bool _nrn_skip_initmodel
Definition: finitialize.cpp:19
void nrn_init_ion(NrnThread *, Memb_list *, int)
void _nrn_layout_reg(int, int)
constexpr double nrn_nernst(double ci, double co, double z, double celsius)
Definition: membfunc.hpp:129
void nrn_writes_conc(int, int)
double ** nrn_ion_global_map
void ion_reg(const char *, double)
void second_order_cur(NrnThread *_nt, int secondorder)
const int ion_global_map_member_size
double celsius
void hoc_register_dparam_semantics(int type, int, const char *name)
void nrn_alloc_ion(double *data, Datum *pdata, int type)
void * erealloc(void *ptr, size_t size)
Definition: nrnoc_aux.cpp:94
static void * emalloc(size_t size)
Definition: mpispike.cpp:30
int nrn_get_mechtype(const char *name)
Get mechanism type by the mechanism name.
Definition: mk_mech.cpp:145
nrn_pragma_acc(routine seq) int vector_capacity(void *v)
Definition: ivocvect.cpp:30
void hoc_register_prop_size(int, int, int)
int register_mech(const char **m, mod_alloc_t alloc, mod_f_t cur, mod_f_t jacob, mod_f_t stat, mod_f_t initialize, mod_f_t private_constructor, mod_f_t private_destructor, int nrnpointerindex, int vectorized)
constexpr double ktf(double celsius)
Definition: membfunc.hpp:124
int nrn_is_ion(int)
int nrn_ion_global_map_size
void nrn_cur_ion(NrnThread *_nt, Memb_list *ml, int type)
if(ncell==0)
Definition: cellorder.cpp:785
int iontype(char *s1, char *s2)
Definition: nocpout.cpp:2054
size_t p
short type
Definition: cabvars.h:10
double nrn_ion_charge(Symbol *sym)
Definition: eion.cpp:60
static int na_ion
Definition: eion.cpp:48
static int k_ion
Definition: eion.cpp:48
#define conci0
Definition: eion.cpp:385
static int ca_ion
Definition: eion.cpp:48
double nrn_nernst_coef(int type)
Definition: eion.cpp:388
#define charge
Definition: eion.cpp:384
#define conco0
Definition: eion.cpp:386
A view into a set of mechanism instances.
Definition: nrnoc_ml.h:34
int nodecount
Definition: nrnoc_ml.h:78
int * nodeindices
Definition: nrnoc_ml.h:74
Datum ** pdata
Definition: nrnoc_ml.h:75
std::vector< double * > data()
Get a vector of double* representing the model data.
Definition: memblist.cpp:64
Represent main neuron object computed by single thread.
Definition: multicore.h:58
NrnThreadMembList * tml
Definition: multicore.h:62
int end
Definition: multicore.h:65
NrnThreadMembList * next
Definition: multicore.hpp:33
Non-template stable handle to a generic value.