NEURON
grids.cpp
1 /******************************************************************
2 Author: Austin Lachance
3 Date: 10/28/16
4 Description: Defines the functions for implementing and manipulating
5 a linked list of Grid_nodes
6 ******************************************************************/
7 #include <stdio.h>
8 #include <assert.h>
9 #include <vector>
10 #include "nrnpython.h"
11 #include "grids.h"
12 #include "rxd.h"
13 
14 extern int NUM_THREADS;
15 double* dt_ptr;
16 double* t_ptr;
18 
19 /* globals used by ECS do_currents */
20 extern TaskQueue* AllTasks;
21 /*Current from multicompartment reactions*/
22 extern int _memb_curr_total;
23 extern int* _rxd_induced_currents_grid;
25 extern double* _rxd_induced_currents_ecs;
26 extern double* _rxd_induced_currents_scale;
27 
28 // Set dt, t pointers
29 extern "C" NRN_EXPORT void make_time_ptr(PyHocObject* my_dt_ptr, PyHocObject* my_t_ptr) {
30  dt_ptr = static_cast<double*>(my_dt_ptr->u.px_);
31  t_ptr = static_cast<double*>(my_t_ptr->u.px_);
32 }
33 
34 static double get_alpha_scalar(double* alpha, int) {
35  return alpha[0];
36 }
37 static double get_alpha_array(double* alpha, int idx) {
38  return alpha[idx];
39 }
40 
41 
42 static double get_permeability_scalar(double*, int) {
43  return 1.; /*the diffusion coefficients are already rescaled*/
44 }
45 static double get_permeability_array(double* permeability, int idx) {
46  return permeability[idx];
47 }
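/* Note: the scalar/array accessor pairs above let a grid expose either a single value or a
   per-voxel array through the same get_alpha / get_permeability function pointers, so the
   diffusion and current code does not need to branch on which form is in use. */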
48 
49 // Make a new Grid_node given required Grid_node parameters
50 ECS_Grid_node::ECS_Grid_node(PyHocObject* my_states,
52  int my_num_states_x,
53  int my_num_states_y,
54  int my_num_states_z,
55  double my_dc_x,
56  double my_dc_y,
57  double my_dc_z,
58  double my_dx,
59  double my_dy,
60  double my_dz,
61  PyHocObject* my_alpha,
62  PyHocObject* my_permeability,
63  int bc_type,
64  double bc_value,
65  double atolscale) {
66  int k;
67  states = static_cast<double*>(my_states->u.px_);
68 
69  /*TODO: When there are multiple grids, share the largest intermediate arrays to save memory*/
70  /*intermediate states for DG-ADI*/
71  states_x = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
72  my_num_states_z);
73  states_y = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
74  my_num_states_z);
75  states_cur = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
76  my_num_states_z);
77 
78  size_x = my_num_states_x;
79  size_y = my_num_states_y;
80  size_z = my_num_states_z;
81 
82  dc_x = my_dc_x;
83  dc_y = my_dc_y;
84  dc_z = my_dc_z;
85  diffusable = (dc_x > 0) || (dc_y > 0) || (dc_z > 0);
86 
87  dx = my_dx;
88  dy = my_dy;
89  dz = my_dz;
90 
94  num_currents = 0;
95 
96  next = NULL;
98 
99  /*Check to see if variable tortuosity/volume fraction is used*/
100  if (PyFloat_Check(my_permeability)) {
101  /*note permeability is the tortuosity squared*/
102  permeability = (double*) malloc(sizeof(double));
103  permeability[0] = PyFloat_AsDouble((PyObject*) my_permeability);
105 
106  /*apply the tortuosity*/
107  dc_x = my_dc_x * permeability[0];
108  dc_y = my_dc_y * permeability[0];
109  dc_z = my_dc_z * permeability[0];
110  } else {
111  permeability = static_cast<double*>(my_permeability->u.px_);
114  }
115 
116  if (PyFloat_Check(my_alpha)) {
117  alpha = (double*) malloc(sizeof(double));
118  alpha[0] = PyFloat_AsDouble((PyObject*) my_alpha);
120 
121  } else {
122  alpha = static_cast<double*>(my_alpha->u.px_);
125  }
126 #if NRNMPI
127  if (nrnmpi_use) {
128  proc_offsets = (int*) calloc(nrnmpi_numprocs, sizeof(int));
129  proc_num_currents = (int*) calloc(nrnmpi_numprocs, sizeof(int));
130  proc_flux_offsets = (int*) calloc(nrnmpi_numprocs, sizeof(int));
131  proc_num_fluxes = (int*) calloc(nrnmpi_numprocs, sizeof(int));
132  proc_num_reactions = (int*) calloc(nrnmpi_numprocs, sizeof(int));
133  proc_num_reaction_states = (int*) calloc(nrnmpi_numprocs, sizeof(int*));
134  proc_induced_current_count = (int*) calloc(nrnmpi_numprocs, sizeof(int*));
135  proc_induced_current_offset = (int*) calloc(nrnmpi_numprocs, sizeof(int*));
136  }
137 #endif
141  react_offsets = (int*) calloc(1, sizeof(int));
144  react_offset_count = 1;
145  num_all_currents = 0;
146  current_dest = NULL;
147  all_currents = NULL;
153 
154  bc = (BoundaryConditions*) malloc(sizeof(BoundaryConditions));
155  bc->type = bc_type;
156  bc->value = bc_value;
157 
158  ecs_tasks = NULL;
159  ecs_tasks = (ECSAdiGridData*) malloc(NUM_THREADS * sizeof(ECSAdiGridData));
160  for (k = 0; k < NUM_THREADS; k++) {
161  ecs_tasks[k].scratchpad = (double*) malloc(
162  sizeof(double) * MAX(my_num_states_x, MAX(my_num_states_y, my_num_states_z)));
163  ecs_tasks[k].g = this;
164  }
165 
166 
167  ecs_adi_dir_x = (ECSAdiDirection*) malloc(sizeof(ECSAdiDirection));
170  ecs_adi_dir_x->line_size = my_num_states_x;
171 
172 
173  ecs_adi_dir_y = (ECSAdiDirection*) malloc(sizeof(ECSAdiDirection));
176  ecs_adi_dir_y->line_size = my_num_states_y;
177 
178 
179  ecs_adi_dir_z = (ECSAdiDirection*) malloc(sizeof(ECSAdiDirection));
182  ecs_adi_dir_z->line_size = my_num_states_z;
183 
184  this->atolscale = atolscale;
185 
186  node_flux_count = 0;
190  hybrid = false;
191  volume_setup();
192 }
193 
194 
195 // Insert a Grid_node "new_Grid" into the list located at grid_list_index in Parallel_grids
196 /* returns the grid number
197  TODO: change this to returning the pointer */
198 extern "C" NRN_EXPORT int ECS_insert(int grid_list_index,
199  PyHocObject* my_states,
200  int my_num_states_x,
201  int my_num_states_y,
202  int my_num_states_z,
203  double my_dc_x,
204  double my_dc_y,
205  double my_dc_z,
206  double my_dx,
207  double my_dy,
208  double my_dz,
209  PyHocObject* my_alpha,
210  PyHocObject* my_permeability,
211  int bc,
212  double bc_value,
213  double atolscale) {
214  ECS_Grid_node* new_Grid = new ECS_Grid_node(my_states,
215  my_num_states_x,
216  my_num_states_y,
217  my_num_states_z,
218  my_dc_x,
219  my_dc_y,
220  my_dc_z,
221  my_dx,
222  my_dy,
223  my_dz,
224  my_alpha,
225  my_permeability,
226  bc,
227  bc_value,
228  atolscale);
229 
230  return new_Grid->insert(grid_list_index);
231 }
232 
234 ICS_Grid_node::ICS_Grid_node(PyHocObject* my_states,
235  long num_nodes,
236  long* neighbors,
237  long* x_line_defs,
238  long x_lines_length,
239  long* y_line_defs,
240  long y_lines_length,
241  long* z_line_defs,
242  long z_lines_length,
243  double* dc,
244  double* dcgrid,
245  double dx,
246  bool is_diffusable,
247  double atolscale,
248  double* ics_alphas) {
249  int k;
250  _num_nodes = num_nodes;
251  diffusable = is_diffusable;
252  this->atolscale = atolscale;
253 
254  states = static_cast<double*>(my_states->u.px_);
255  states_x = (double*) malloc(sizeof(double) * _num_nodes);
256  states_y = (double*) malloc(sizeof(double) * _num_nodes);
257  states_z = (double*) malloc(sizeof(double) * _num_nodes);
258  states_cur = (double*) malloc(sizeof(double) * _num_nodes);
259  next = NULL;
260 
261  size_x = _num_nodes;
262  size_y = 1;
263  size_z = 1;
264 
265 
267  num_concentrations = 0;
268  current_list = NULL;
269  num_currents = 0;
270  node_flux_count = 0;
271 
276 
277 #if NRNMPI
278  if (nrnmpi_use) {
279  proc_offsets = (int*) malloc(nrnmpi_numprocs * sizeof(int));
280  proc_num_currents = (int*) calloc(nrnmpi_numprocs, sizeof(int));
281  proc_num_fluxes = (int*) calloc(nrnmpi_numprocs, sizeof(int));
282  proc_flux_offsets = (int*) malloc(nrnmpi_numprocs * sizeof(int));
283  }
284 #endif
285  num_all_currents = 0;
286  current_dest = NULL;
287  all_currents = NULL;
288 
289  _ics_alphas = ics_alphas;
291 
292  // stores the positive x,y, and z neighbors for each node. [node0_x, node0_y, node0_z, node1_x
293  // ...]
294  _neighbors = neighbors;
295 
296  /*Line definitions from Python. In pattern of [line_start_node, line_length, ...]
297  Array is sorted from longest to shortest line */
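 /* Hypothetical example: x_line_defs = [12, 4, 0, 3, 7, 2] would describe three x-direction
    lines -- 4 nodes starting at node 12, 3 nodes starting at node 0, and 2 nodes starting at
    node 7 -- already ordered from longest to shortest. */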
298  _sorted_x_lines = x_line_defs;
299  _sorted_y_lines = y_line_defs;
300  _sorted_z_lines = z_line_defs;
301 
302  // Lengths of _sorted_lines arrays. Used to find thread start and stop indices
303  _x_lines_length = x_lines_length;
304  _y_lines_length = y_lines_length;
305  _z_lines_length = z_lines_length;
306 
307  // Find the maximum line length for scratchpad memory allocation
308  long x_max = x_line_defs[1];
309  long y_max = y_line_defs[1];
310  long z_max = z_line_defs[1];
311  long xy_max = (x_max > y_max) ? x_max : y_max;
312  _line_length_max = (z_max > xy_max) ? z_max : xy_max;
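 /* Because each *_line_defs array is sorted longest line first, element [1] holds the longest
    line length for that direction; the largest of the three bounds the scratchpad size. */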
313 
314  ics_tasks = (ICSAdiGridData*) malloc(NUM_THREADS * sizeof(ICSAdiGridData));
315 
316  for (k = 0; k < NUM_THREADS; k++) {
317  ics_tasks[k].RHS = (double*) malloc(sizeof(double) * (_line_length_max));
318  ics_tasks[k].scratchpad = (double*) malloc(sizeof(double) * (_line_length_max - 1));
319  ics_tasks[k].g = this;
320  ics_tasks[k].u_diag = (double*) malloc(sizeof(double) * _line_length_max - 1);
321  ics_tasks[k].diag = (double*) malloc(sizeof(double) * _line_length_max);
322  ics_tasks[k].l_diag = (double*) malloc(sizeof(double) * _line_length_max - 1);
323  }
324 
325  hybrid = false;
326  hybrid_data = (Hybrid_data*) malloc(sizeof(Hybrid_data));
327 
328  ics_adi_dir_x = (ICSAdiDirection*) malloc(sizeof(ICSAdiDirection));
331  ics_adi_dir_x->ordered_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
332  ics_adi_dir_x->line_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
333  ics_adi_dir_x->ordered_nodes = (long*) malloc(sizeof(long) * _num_nodes);
334  ics_adi_dir_x->ordered_line_defs = (long*) malloc(sizeof(long) * _x_lines_length);
335  ics_adi_dir_x->deltas = (double*) malloc(sizeof(double) * _num_nodes);
336  ics_adi_dir_x->d = dx;
337 
338  ics_adi_dir_y = (ICSAdiDirection*) malloc(sizeof(ICSAdiDirection));
341  ics_adi_dir_y->ordered_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
342  ics_adi_dir_y->line_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
343  ics_adi_dir_y->ordered_nodes = (long*) malloc(sizeof(long) * _num_nodes);
344  ics_adi_dir_y->ordered_line_defs = (long*) malloc(sizeof(long) * _y_lines_length);
345  ics_adi_dir_y->deltas = (double*) malloc(sizeof(double) * _num_nodes);
346  ics_adi_dir_y->d = dx;
347 
348  ics_adi_dir_z = (ICSAdiDirection*) malloc(sizeof(ICSAdiDirection));
351  ics_adi_dir_z->ordered_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
352  ics_adi_dir_z->line_start_stop_indices = (long*) malloc(sizeof(long) * NUM_THREADS * 2);
353  ics_adi_dir_z->ordered_nodes = (long*) malloc(sizeof(long) * _num_nodes);
354  ics_adi_dir_z->ordered_line_defs = (long*) malloc(sizeof(long) * _z_lines_length);
355  ics_adi_dir_z->deltas = (double*) malloc(sizeof(double) * _num_nodes);
356  ics_adi_dir_z->d = dx;
357 
358  if (dcgrid == NULL) {
359  ics_adi_dir_x->dc = dc[0];
360  ics_adi_dir_y->dc = dc[1];
361  ics_adi_dir_z->dc = dc[2];
365  } else {
366  ics_adi_dir_x->dcgrid = dcgrid;
367  ics_adi_dir_y->dcgrid = &dcgrid[_num_nodes];
368  ics_adi_dir_z->dcgrid = &dcgrid[2 * _num_nodes];
369  }
370  volume_setup();
374 
375  node_flux_count = 0;
379 }
380 
381 
382 // Insert a Grid_node "new_Grid" into the list located at grid_list_index in Parallel_grids
383 /* returns the grid number
384  TODO: change this to returning the pointer */
385 extern "C" NRN_EXPORT int ICS_insert(int grid_list_index,
386  PyHocObject* my_states,
387  long num_nodes,
388  long* neighbors,
389  long* x_line_defs,
390  long x_lines_length,
391  long* y_line_defs,
392  long y_lines_length,
393  long* z_line_defs,
394  long z_lines_length,
395  double* dcs,
396  double dx,
397  bool is_diffusable,
398  double atolscale,
399  double* ics_alphas) {
400  ICS_Grid_node* new_Grid = new ICS_Grid_node(my_states,
401  num_nodes,
402  neighbors,
403  x_line_defs,
404  x_lines_length,
405  y_line_defs,
406  y_lines_length,
407  z_line_defs,
408  z_lines_length,
409  dcs,
410  NULL,
411  dx,
412  is_diffusable,
413  atolscale,
414  ics_alphas);
415 
416  return new_Grid->insert(grid_list_index);
417 }
418 
419 extern "C" NRN_EXPORT int ICS_insert_inhom(int grid_list_index,
420  PyHocObject* my_states,
421  long num_nodes,
422  long* neighbors,
423  long* x_line_defs,
424  long x_lines_length,
425  long* y_line_defs,
426  long y_lines_length,
427  long* z_line_defs,
428  long z_lines_length,
429  double* dcs,
430  double dx,
431  bool is_diffusable,
432  double atolscale,
433  double* ics_alphas) {
434  ICS_Grid_node* new_Grid = new ICS_Grid_node(my_states,
435  num_nodes,
436  neighbors,
437  x_line_defs,
438  x_lines_length,
439  y_line_defs,
440  y_lines_length,
441  z_line_defs,
442  z_lines_length,
443  NULL,
444  dcs,
445  dx,
446  is_diffusable,
447  atolscale,
448  ics_alphas);
449  return new_Grid->insert(grid_list_index);
450 }
451 
452 
453 extern "C" NRN_EXPORT int set_diffusion(int grid_list_index, int grid_id, double* dc, int length) {
454  int id = 0;
455  Grid_node* node = Parallel_grids[grid_list_index];
456  while (id < grid_id) {
457  node = node->next;
458  id++;
459  if (node == NULL)
460  return -1;
461  }
462  node->set_diffusion(dc, length);
463  return 0;
464 }
465 
466 extern "C" NRN_EXPORT int set_tortuosity(int grid_list_index,
467  int grid_id,
468  PyHocObject* my_permeability) {
469  int id = 0;
470  Grid_node* node = Parallel_grids[grid_list_index];
471  while (id < grid_id) {
472  node = node->next;
473  id++;
474  if (node == NULL)
475  return -1;
476  }
477  static_cast<ECS_Grid_node*>(node)->set_tortuosity(my_permeability);
478  return 0;
479 }
480 
481 
482 void ECS_Grid_node::set_tortuosity(PyHocObject* my_permeability) {
483  /*Check to see if variable tortuosity/volume fraction is used*/
484  /*note permeability is the 1/tortuosity^2*/
485  if (PyFloat_Check(my_permeability)) {
487  double new_permeability = PyFloat_AsDouble((PyObject*) my_permeability);
489  dc_x *= new_permeability / permeability[0];
490  dc_y *= new_permeability / permeability[0];
491  dc_z *= new_permeability / permeability[0];
492  permeability[0] = new_permeability;
493  } else {
494  permeability = (double*) malloc(sizeof(double));
495  permeability[0] = PyFloat_AsDouble((PyObject*) my_permeability);
496  /* don't free the old permeability -- let python do it */
497  dc_x *= permeability[0];
498  dc_y *= permeability[0];
499  dc_z *= permeability[0];
502  }
503  } else {
505  /* rescale to remove old permeability*/
506  dc_x /= permeability[0];
507  dc_y /= permeability[0];
508  dc_z /= permeability[0];
509  free(permeability);
510  permeability = static_cast<double*>(my_permeability->u.px_);
513  } else {
514  permeability = static_cast<double*>(my_permeability->u.px_);
515  }
516  }
517 }
518 
519 extern "C" NRN_EXPORT int set_volume_fraction(int grid_list_index,
520  int grid_id,
521  PyHocObject* my_alpha) {
522  int id = 0;
523  Grid_node* node = Parallel_grids[grid_list_index];
524  while (id < grid_id) {
525  node = node->next;
526  id++;
527  if (node == NULL)
528  return -1;
529  }
530  static_cast<ECS_Grid_node*>(node)->set_volume_fraction(my_alpha);
531  return 0;
532 }
533 
534 void ECS_Grid_node::set_volume_fraction(PyHocObject* my_alpha) {
535  if (PyFloat_Check(my_alpha)) {
536  if (get_alpha == &get_alpha_scalar) {
537  alpha[0] = PyFloat_AsDouble((PyObject*) my_alpha);
538  } else {
539  alpha = (double*) malloc(sizeof(double));
540  alpha[0] = PyFloat_AsDouble((PyObject*) my_alpha);
543  : FALSE;
544  }
545  } else {
546  if (get_alpha == &get_alpha_scalar)
547  free(alpha);
548  alpha = static_cast<double*>(my_alpha->u.px_);
551  }
552 }
553 
554 
555 /*Set the diffusion coefficients*/
556 void ICS_Grid_node::set_diffusion(double* dc, int length) {
557  if (length == 1) {
558  ics_adi_dir_x->dc = dc[0];
559  ics_adi_dir_y->dc = dc[1];
560  ics_adi_dir_z->dc = dc[2];
561  // NOTE: dcgrid is owned by _IntracellularSpecies in python
562  if (ics_adi_dir_x->dcgrid != NULL) {
566  }
567  } else {
568  assert(length == _num_nodes);
569  ics_adi_dir_x->dcgrid = dc;
571  ics_adi_dir_z->dcgrid = &dc[2 * _num_nodes];
572  }
573  volume_setup();
574 }
575 
576 
577 /*Set the diffusion coefficients*/
578 void ECS_Grid_node::set_diffusion(double* dc, int) {
580  /* note permeability[0] is the tortuosity squared*/
581  dc_x = dc[0] * permeability[0];
582  dc_y = dc[1] * permeability[0];
583  dc_z = dc[2] * permeability[0];
584  } else {
585  dc_x = dc[0];
586  dc_y = dc[1];
587  dc_z = dc[2];
588  }
589  diffusable = (dc_x > 0) || (dc_y > 0) || (dc_z > 0);
590 }
591 
592 
593 extern "C" NRN_EXPORT void ics_set_grid_concentrations(int grid_list_index,
594  int index_in_list,
595  int64_t* nodes_per_seg,
596  int64_t* nodes_per_seg_start_indices,
597  PyObject* neuron_pointers) {
598  Grid_node* g;
599  ssize_t i;
600  ssize_t n = (ssize_t) PyList_Size(neuron_pointers); // number of segments.
601 
602  /* Find the Grid Object */
603  g = Parallel_grids[grid_list_index];
604  for (i = 0; i < index_in_list; i++) {
605  g = g->next;
606  }
607 
608  g->ics_surface_nodes_per_seg = nodes_per_seg;
609 
610  g->ics_surface_nodes_per_seg_start_indices = nodes_per_seg_start_indices;
611  g->ics_concentration_seg_handles.reserve(n);
612  for (i = 0; i < n; i++) {
613  g->ics_concentration_seg_handles.push_back(
614  reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_);
615  }
616 }
617 
618 extern "C" NRN_EXPORT void ics_set_grid_currents(int grid_list_index,
619  int index_in_list,
620  PyObject* neuron_pointers,
621  double* scale_factors) {
622  Grid_node* g;
623  ssize_t i;
624  ssize_t n = (ssize_t) PyList_Size(neuron_pointers);
625  /* Find the Grid Object */
626  g = Parallel_grids[grid_list_index];
627  for (i = 0; i < index_in_list; i++) {
628  g = g->next;
629  }
630  g->ics_scale_factors = scale_factors;
631 
632  g->ics_current_seg_ptrs = (double**) malloc(n * sizeof(double*));
633 
634  for (i = 0; i < n; i++) {
635  g->ics_current_seg_ptrs[i] = static_cast<double*>(
636  ((PyHocObject*) PyList_GET_ITEM(neuron_pointers, i))->u.px_);
637  }
638 }
639 
640 
641 /* TODO: make this work with Grid_node ptrs instead of pairs of list indices */
642 extern "C" NRN_EXPORT void set_grid_concentrations(int grid_list_index,
643  int index_in_list,
644  PyObject* grid_indices,
645  PyObject* neuron_pointers) {
646  /*
647  Preconditions:
648 
649  Assumes the specified grid has been created.
650  Assumes len(grid_indices) = len(neuron_pointers) and that both are proper python lists
651  */
652  /* TODO: note that these will need updating anytime the structure of the model changes... look
653  * at the structure change count at each advance and trigger a callback to regenerate if
654  * necessary */
655  Grid_node* g;
656  ssize_t i;
657  ssize_t n = (ssize_t) PyList_Size(grid_indices);
658 
659  /* Find the Grid Object */
660  g = Parallel_grids[grid_list_index];
661  for (i = 0; i < index_in_list; i++) {
662  g = g->next;
663  }
664 
665  /* free the old concentration list */
666  delete[] g->concentration_list;
667 
668  /* allocate space for the new list */
669  g->concentration_list = new Concentration_Pair[n];
670  g->num_concentrations = n;
671 
672  /* populate the list */
673  /* TODO: it would be more general to iterate instead of assuming lists and using list lookups */
674  for (i = 0; i < n; i++) {
675  /* printf("set_grid_concentrations %ld\n", i); */
676  g->concentration_list[i].source = PyInt_AS_LONG(PyList_GET_ITEM(grid_indices, i));
677  g->concentration_list[i].destination =
678  reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_;
679  }
680 }
681 
682 /* TODO: make this work with Grid_node ptrs instead of pairs of list indices */
683 extern "C" NRN_EXPORT void set_grid_currents(int grid_list_index,
684  int index_in_list,
685  PyObject* grid_indices,
686  PyObject* neuron_pointers,
687  PyObject* scale_factors) {
688  /*
689  Preconditions:
690 
691  Assumes the specified grid has been created.
692  Assumes len(grid_indices) = len(neuron_pointers) = len(scale_factors) and that all are proper
693  python lists
694  */
695  /* TODO: note that these will need updating anytime the structure of the model changes... look
696  * at the structure change count at each advance and trigger a callback to regenerate if
697  * necessary */
698  Grid_node* g;
699  ssize_t i;
700  ssize_t n = (ssize_t) PyList_Size(grid_indices);
701  long* dests;
702 
703  /* Find the Grid Object */
704  g = Parallel_grids[grid_list_index];
705  for (i = 0; i < index_in_list; i++) {
706  g = g->next;
707  }
708 
709  /* free the old current list */
710  delete[] g->current_list;
711 
712  /* allocate space for the new list */
713  g->current_list = new Current_Triple[n];
714  g->num_currents = n;
715 
716  /* populate the list */
717  /* TODO: it would be more general to iterate instead of assuming lists and using list lookups */
718  for (i = 0; i < n; i++) {
719  g->current_list[i].destination = PyInt_AS_LONG(PyList_GET_ITEM(grid_indices, i));
720  g->current_list[i].scale_factor = PyFloat_AS_DOUBLE(PyList_GET_ITEM(scale_factors, i));
721  g->current_list[i].source =
722  reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_;
723  /* printf("set_grid_currents %ld out of %ld, %ld, %ld\n", i, n,
724  * PyList_Size(neuron_pointers), PyList_Size(scale_factors)); */
725  } /*
726  if (PyErr_Occurred()) {
727  printf("uh oh\n");
728  PyErr_PrintEx(0);
729  } else {
730  printf("no problems\n");
731  }
732  PyErr_Clear(); */
733 
734 #if NRNMPI
735  if (nrnmpi_use) {
736  /*Gather an array of the number of currents for each process*/
737  g->proc_num_currents[nrnmpi_myid] = n;
738  nrnmpi_int_allgather_inplace(g->proc_num_currents, 1);
739 
740  g->proc_offsets[0] = 0;
741  for (i = 1; i < nrnmpi_numprocs; i++)
742  g->proc_offsets[i] = g->proc_offsets[i - 1] + g->proc_num_currents[i - 1];
743  g->num_all_currents = g->proc_offsets[i - 1] + g->proc_num_currents[i - 1];
744 
745  /*Copy array of current destinations (index to states) across all processes*/
746  free(g->current_dest);
747  free(g->all_currents);
748  g->current_dest = (long*) malloc(g->num_all_currents * sizeof(long));
749  g->all_currents = (double*) malloc(g->num_all_currents * sizeof(double));
750  dests = g->current_dest + g->proc_offsets[nrnmpi_myid];
751  /*TODO: Get rid of duplication with current_list*/
752  for (i = 0; i < n; i++) {
753  dests[i] = g->current_list[i].destination;
754  }
755  nrnmpi_long_allgatherv_inplace(g->current_dest, g->proc_num_currents, g->proc_offsets);
756  } else {
757  free(g->all_currents);
758  g->all_currents = (double*) malloc(sizeof(double) * g->num_currents);
760  }
761 #else
762  free(g->all_currents);
763  g->all_currents = (double*) malloc(sizeof(double) * g->num_currents);
765 #endif
766 }
767 
768 // Delete a specific Grid_node from the list
769 int remove(Grid_node** head, Grid_node* find) {
770  if (*head == find) {
771  Grid_node* temp = *head;
772  *head = (*head)->next;
773  delete temp;
774  return 1;
775  }
776  Grid_node* temp = *head;
777  while (temp->next != NULL && temp->next != find) {
778  temp = temp->next;
779  }
780  if (!temp->next)
781  return 0;
782  Grid_node* delete_me = temp->next;
783  temp->next = delete_me->next;
784  delete delete_me;
785  return 1;
786 }
787 
788 extern "C" NRN_EXPORT void delete_by_id(int id) {
789  Grid_node* grid;
790  int k;
791  for (k = 0, grid = Parallel_grids[0]; grid != NULL; grid = grid->next, k++) {
792  if (k == id) {
793  remove(Parallel_grids, grid);
794  break;
795  }
796  }
797 }
798 
799 
800 // Destroy the list located at list_index and free all memory
801 void empty_list(int list_index) {
802  Grid_node** head = &(Parallel_grids[list_index]);
803  while (*head != NULL) {
804  Grid_node* old_head = *head;
805  *head = (*head)->next;
806  delete old_head;
807  }
808 }
809 
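/* Append this grid to the end of the list at grid_list_index and return its zero-based
   position in that list (the head counts as grid 0). */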
810 int Grid_node::insert(int grid_list_index) {
811  int i = 0;
812  Grid_node** head = &(Parallel_grids[grid_list_index]);
813 
814  if (!(*head)) {
815  *head = this;
816  } else {
817  i++; /*count the head as the first grid*/
818  Grid_node* end = *head;
819  while (end->next != NULL) {
820  i++;
821  end = end->next;
822  }
823  end->next = this;
824  }
825  return i;
826 }
827 
828 /*****************************************************************************
829  *
830  * Begin ECS_Grid_node Functions
831  *
832  *****************************************************************************/
833 
834 void ECS_Grid_node::set_num_threads(const int n) {
835  int i;
836  if (ecs_tasks != NULL) {
837  for (i = 0; i < NUM_THREADS; i++) {
838  free(ecs_tasks[i].scratchpad);
839  }
840  }
841  free(ecs_tasks);
842  ecs_tasks = (ECSAdiGridData*) malloc(n * sizeof(ECSAdiGridData));
843  for (i = 0; i < n; i++) {
844  ecs_tasks[i].scratchpad = (double*) malloc(sizeof(double) *
845  MAX(size_x, MAX(size_y, size_z)));
846  ecs_tasks[i].g = this;
847  }
848 }
849 
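/* Worker task for do_grid_currents: converts the membrane currents in entries
   [onset, offset) of the grid's current_list into per-voxel rates, dividing each scaled
   current by the appropriate volume fraction (per-voxel alpha, ICS alpha, or scalar alpha). */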
850 static void* gather_currents(void* dataptr) {
851  CurrentData* d = (CurrentData*) dataptr;
852  Grid_node* grid = d->g;
853  double* val = d->val;
854  int i, start = d->onset, stop = d->offset;
855  Current_Triple* c = grid->current_list;
856  if (grid->VARIABLE_ECS_VOLUME == VOLUME_FRACTION) {
857  for (i = start; i < stop; i++)
858  val[i] = c[i].scale_factor * (*c[i].source) / grid->alpha[c[i].destination];
859  } else if (grid->VARIABLE_ECS_VOLUME == ICS_ALPHA) {
860  // TODO: Check if this is used.
861  for (i = start; i < stop; i++)
862  val[i] = c[i].scale_factor * (*c[i].source) /
863  ((ICS_Grid_node*) grid)->_ics_alphas[c[i].destination];
864  } else {
865  for (i = start; i < stop; i++)
866  val[i] = c[i].scale_factor * (*c[i].source) / grid->alpha[0];
867  }
868  return NULL;
869 }
870 
871 /*
872  * do_current - process the current for a given grid
873  * Grid_node* grid - the grid used
874  * double* output - for fixed step this is the states for variable ydot
875  * double dt - for fixed step the step size, for variable step 1
876  */
877 void ECS_Grid_node::do_grid_currents(double* output, double dt, int grid_id) {
878  ssize_t m, n, i;
879  /*Currents to broadcast via MPI*/
880  /*TODO: Handle multiple grids with one pass*/
881  /*Maybe TODO: Should check #currents << #voxels and not the other way round*/
882  double* val;
883  // memset(output, 0, sizeof(double)*grid->size_x*grid->size_y*grid->size_z);
884  /* currents, via explicit Euler */
886  m = num_currents;
887  CurrentData* tasks = (CurrentData*) malloc(NUM_THREADS * sizeof(CurrentData));
888 #if NRNMPI
890 #else
891  val = all_currents;
892 #endif
893  int tasks_per_thread = (m + NUM_THREADS - 1) / NUM_THREADS;
894 
895  for (i = 0; i < NUM_THREADS; i++) {
896  tasks[i].g = this;
897  tasks[i].onset = i * tasks_per_thread;
898  tasks[i].offset = MIN((i + 1) * tasks_per_thread, m);
899  tasks[i].val = val;
900  }
901  for (i = 0; i < NUM_THREADS - 1; i++) {
903  }
904  /* run one task in the main thread */
905  gather_currents(&tasks[NUM_THREADS - 1]);
906 
907  /* wait for them to finish */
909  free(tasks);
910 #if NRNMPI
911  if (nrnmpi_use) {
912  nrnmpi_dbl_allgatherv_inplace(all_currents, proc_num_currents, proc_offsets);
913  nrnmpi_dbl_allgatherv_inplace(induced_currents,
916  for (i = 0; i < n; i++)
917  output[current_dest[i]] += dt * all_currents[i];
918  } else {
919  for (i = 0; i < n; i++) {
920  output[current_list[i].destination] += dt * all_currents[i];
921  }
922  }
923 
924 #else
925  for (i = 0; i < n; i++)
926  output[current_list[i].destination] += dt * all_currents[i];
927 #endif
928 
929  /*Remove the contribution from membrane currents*/
930  for (i = 0; i < induced_current_count; i++)
932  memset(induced_currents, 0, induced_current_count * sizeof(double));
933 }
934 
935 double* ECS_Grid_node::set_rxd_currents(int current_count,
936  int* current_indices,
937  PyHocObject** ptrs) {
938  int i, j;
939  double volume_fraction;
940 
943  induced_currents_scale = (double*) calloc(current_count, sizeof(double));
945  induced_current_count = current_count;
946  induced_currents_index = current_indices;
947  for (i = 0; i < current_count; i++) {
948  for (j = 0; j < num_all_currents; j++) {
949  if (ptrs[i]->u.px_ == current_list[j].source) {
950  volume_fraction = (VARIABLE_ECS_VOLUME == VOLUME_FRACTION
952  : alpha[0]);
953  induced_currents_scale[i] = current_list[j].scale_factor / volume_fraction;
954  assert(current_list[j].destination == current_indices[i]);
955  break;
956  }
957  }
958  }
959  return induced_currents_scale;
960 }
961 
962 void ECS_Grid_node::apply_node_flux3D(double dt, double* ydot) {
963  double* dest;
964  if (ydot == NULL)
965  dest = states_cur;
966  else
967  dest = ydot;
968 #if NRNMPI
969  double* sources;
970  int i;
971  int offset;
972 
973  if (nrnmpi_use) {
974  sources = (double*) calloc(node_flux_count, sizeof(double));
975  offset = proc_flux_offsets[nrnmpi_myid];
977  NULL,
978  &node_flux_scale[offset],
980  dt,
981  &sources[offset]);
982 
983  nrnmpi_dbl_allgatherv_inplace(sources, proc_num_fluxes, proc_flux_offsets);
984 
985  for (i = 0; i < node_flux_count; i++)
986  dest[node_flux_idx[i]] += sources[i];
987  free(sources);
988  } else {
990  }
991 #else
993 #endif
994 }
995 
996 
997 void ECS_Grid_node::volume_setup() {
998  switch (VARIABLE_ECS_VOLUME) {
999  case VOLUME_FRACTION:
1000  ecs_set_adi_vol(this);
1001  break;
1002  case TORTUOSITY:
1003  ecs_set_adi_tort(this);
1004  break;
1005  default:
1007  }
1008 }
1009 
1010 int ECS_Grid_node::dg_adi() {
1011  unsigned long i;
1012  if (diffusable) {
1013  /* first step: advance the x direction */
1015 
1016  /* second step: advance the y direction */
1018 
1019  /* third step: advance the z direction */
1021 
1022  /* transfer data */
1023  /*TODO: Avoid copy by switching pointers and updating Python copy
1024  tmp = g->states;
1025  g->states = g->adi_dir_z->states_out;
1026  g->adi_dir_z->states_out = tmp;
1027  */
1028  memcpy(states, ecs_adi_dir_z->states_out, sizeof(double) * size_x * size_y * size_z);
1029  } else {
1030  for (i = 0; i < size_x * size_y * size_z; i++)
1031  states[i] += states_cur[i];
1032  }
1033  // TODO: Should this return 0?
1034  return 0;
1035 }
1036 
1037 void ECS_Grid_node::variable_step_diffusion(const double* states, double* ydot) {
1038  switch (VARIABLE_ECS_VOLUME) {
1039  case VOLUME_FRACTION:
1041  break;
1042  case TORTUOSITY:
1044  break;
1045  default:
1046  _rhs_variable_step_helper(this, states, ydot);
1047  }
1048 }
1049 
1050 void ECS_Grid_node::scatter_grid_concentrations() {
1051  ssize_t i, n;
1052  Concentration_Pair* cp;
1053 
1054  n = num_concentrations;
1055  cp = concentration_list;
1056  for (i = 0; i < n; i++) {
1057  (*cp[i].destination) = states[cp[i].source];
1058  }
1059 }
1060 
1061 void ECS_Grid_node::hybrid_connections() {}
1062 
1063 void ECS_Grid_node::variable_step_hybrid_connections(const double*,
1064  double* const,
1065  const double*,
1066  double* const) {}
1067 
1068 int ECS_Grid_node::add_multicompartment_reaction(int nstates, int* indices, int step) {
1069  // Set the current reaction offsets and increment the local offset count
1070  // This is stored as an array/pointer so it can be updated if the grid is
1071  // on multiple MPI nodes.
1072  int i = 0, j = 0;
1073  int offset = react_offsets[react_offset_count - 1];
1074  // Increase the size of the reaction indices to accommodate the maximum
1075  // number of potential new indices
1076  reaction_indices = (int*) realloc(reaction_indices, (offset + nstates) * sizeof(int));
1077  // TODO: Aggregate the common indices on the local node, so the state
1078  // changes can be added together before they are broadcast.
1079  for (i = 0, j = 0; i < nstates; i++, j += step) {
1080  if (indices[j] != SPECIES_ABSENT)
1081  reaction_indices[offset++] = indices[j];
1082  }
1083  // Resize to remove unused indices
1084  if (offset < react_offsets[react_offset_count - 1] + nstates)
1085  reaction_indices = (int*) realloc((void*) reaction_indices, offset * sizeof(int));
1086 
1087  // Store the new offset and return the start offset to the Reaction
1088  react_offsets = (int*) realloc((void*) react_offsets, (++react_offset_count) * sizeof(int));
1089  react_offsets[react_offset_count - 1] = offset;
1091  return react_offset_count - 2;
1092 }
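/* The returned index identifies this reaction's slot: react_offsets[index] and
   react_offsets[index + 1] bracket its entries in reaction_indices (and, once the
   multicompartment machinery is initialized, in all_reaction_indices / all_reaction_states). */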
1093 
1094 void ECS_Grid_node::clear_multicompartment_reaction() {
1095  free(all_reaction_states);
1096  free(react_offsets);
1097  if (multicompartment_inititalized)
1098  free(all_reaction_indices);
1099  else
1100  free(reaction_indices);
1104  react_offsets = (int*) calloc(1, sizeof(int));
1105  react_offset_count = 1;
1108 }
1109 
1110 
1111 void ECS_Grid_node::initialize_multicompartment_reaction() {
1112 #if NRNMPI
1113  int i, j;
1114  int start_state;
1115  int* proc_num_init;
1116  int* all_indices;
1117  double* all_scales;
1118  // Copy the reaction indices across all nodes and update the local
1119  // react_offsets used by Reaction to access the indices.
1120  if (nrnmpi_use) {
1121  proc_num_init = (int*) calloc(nrnmpi_numprocs, sizeof(int));
1122  proc_num_init[nrnmpi_myid] = (int) multicompartment_inititalized;
1123  nrnmpi_int_allgather_inplace(proc_num_init, 1);
1124  for (i = 0; i < nrnmpi_numprocs; i++)
1125  if (!proc_num_init[i])
1126  break;
1127 
1128  if (i != nrnmpi_numprocs) {
1129  // number of offsets (Reaction) stored in each process
1130  proc_num_reactions = (int*) calloc(nrnmpi_numprocs, sizeof(int));
1132 
1133  // number of states/indices stored in each process
1134  proc_num_reaction_states = (int*) calloc(nrnmpi_numprocs, sizeof(int));
1136  nrnmpi_int_allgather_inplace(proc_num_reactions, 1);
1137  nrnmpi_int_allgather_inplace(proc_num_reaction_states, 1);
1138 
1139  for (i = 0; i < nrnmpi_numprocs; i++) {
1140  if (i == nrnmpi_myid)
1141  start_state = total_reaction_states;
1144  }
1145 
1146  // Move the offsets for each reaction so they reference the
1147  // corresponding indices in the all_reaction_indices array
1148  for (j = 0; j < react_offset_count; j++)
1149  react_offsets[j] += start_state;
1150 
1151  all_reaction_indices = (int*) malloc(total_reaction_states * sizeof(int));
1152  all_reaction_states = (double*) calloc(total_reaction_states, sizeof(double));
1153 
1154  memcpy(&all_reaction_indices[start_state],
1156  proc_num_reaction_states[nrnmpi_myid] * sizeof(int));
1157  nrnmpi_int_allgatherv_inplace(all_reaction_indices,
1160  free(reaction_indices);
1163 
1164  // Handle currents induced by multicompartment reactions.
1166  nrnmpi_int_allgather_inplace(proc_induced_current_count, 1);
1168  for (i = 1; i < nrnmpi_numprocs; i++)
1173 
1174  all_scales = (double*) malloc(induced_current_count * sizeof(double));
1175  all_indices = (int*) malloc(induced_current_count * sizeof(int));
1176  memcpy(&all_scales[proc_induced_current_offset[nrnmpi_myid]],
1178  sizeof(double) * proc_induced_current_count[nrnmpi_myid]);
1179 
1180  memcpy(&all_indices[proc_induced_current_offset[nrnmpi_myid]],
1182  sizeof(int) * proc_induced_current_count[nrnmpi_myid]);
1183 
1184  nrnmpi_dbl_allgatherv_inplace(all_scales,
1187 
1188  nrnmpi_int_allgatherv_inplace(all_indices,
1191  free(induced_currents_scale);
1192  free(induced_currents_index);
1193  free(induced_currents);
1194  induced_currents_scale = all_scales;
1195  induced_currents_index = all_indices;
1196  induced_currents = (double*) malloc(induced_current_count * sizeof(double));
1198  }
1199  } else {
1203  all_reaction_states = (double*) calloc(total_reaction_states, sizeof(double));
1205  induced_currents = (double*) malloc(induced_current_count * sizeof(double));
1207  }
1208  }
1209 #else
1213  all_reaction_states = (double*) calloc(total_reaction_states, sizeof(double));
1215  induced_currents = (double*) malloc(induced_current_count * sizeof(double));
1217  }
1218 #endif
1219 }
1220 
1221 void ECS_Grid_node::do_multicompartment_reactions(double* result) {
1222  int i;
1223 #if NRNMPI
1224  if (nrnmpi_use) {
1225  // Copy the states between all of the nodes
1226  nrnmpi_dbl_allgatherv_inplace(all_reaction_states,
1229  }
1230 #endif
1231  if (result == NULL) // fixed step
1232  {
1233  for (i = 0; i < total_reaction_states; i++)
1235  } else // variable step
1236  {
1237  for (i = 0; i < total_reaction_states; i++)
1239  }
1240  memset(all_reaction_states, 0, total_reaction_states * sizeof(double));
1241 }
1242 
1243 // TODO: Implement this
1244 void ECS_Grid_node::variable_step_ode_solve(double*, double) {}
1245 
1246 // Free a single Grid_node
1247 ECS_Grid_node::~ECS_Grid_node() {
1248  int i;
1249  free(states_x);
1250  free(states_y);
1251  free(states_cur);
1252  delete[] concentration_list;
1253  delete[] current_list;
1254  free(bc);
1255  free(current_dest);
1256 #if NRNMPI
1257  if (nrnmpi_use) {
1258  free(proc_offsets);
1259  free(proc_num_currents);
1260  free(proc_flux_offsets);
1261  free(proc_num_fluxes);
1263  free(proc_num_reactions);
1264  }
1265 #endif
1266  free(all_currents);
1267  free(ecs_adi_dir_x);
1268  free(ecs_adi_dir_y);
1269  free(ecs_adi_dir_z);
1270  if (node_flux_count > 0) {
1271  free(node_flux_idx);
1272  free(node_flux_scale);
1273  free(node_flux_src);
1274  }
1275 
1276 
1277  if (ecs_tasks != NULL) {
1278  for (i = 0; i < NUM_THREADS; i++) {
1279  free(ecs_tasks[i].scratchpad);
1280  }
1281  }
1282  free(ecs_tasks);
1283 }
1284 
1285 /*****************************************************************************
1286  *
1287  * Begin ICS_Grid_node Functions
1288  *
1289  *****************************************************************************/
1290 static int find_min_element_index(const int thread_sizes[], const int nthreads) {
1291  int new_min_index = 0;
1292  int min_element = thread_sizes[0];
1293  for (int i = 0; i < nthreads; i++) {
1294  if (min_element > thread_sizes[i]) {
1295  min_element = thread_sizes[i];
1296  new_min_index = i;
1297  }
1298  }
1299  return new_min_index;
1300 }
1301 
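/* divide_x_work / divide_y_work / divide_z_work balance the threaded DG-ADI sweeps: each
   line is assigned greedily to the thread that currently holds the fewest nodes, then the
   per-thread line definitions, ordered node lists and start/stop indices are built from
   that assignment. */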
1302 void ICS_Grid_node::divide_x_work(const int nthreads) {
1303  int i, j, k;
1304  // nodes in each thread. (work each thread has to do)
1305  int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1306  // number of lines in each thread
1307  int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1308  // To determine which index to put the start node and line length in thread_line_defs
1309  int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1310  // To determine which thread array to put the start node and line length in thread_line_defs
1311  // warning: variable length arrays in C++ are a Clang extension [-Wvla-cxx-extension]
1312  // int line_thread_id[_x_lines_length / 2];
1313  std::vector<int> line_thread_id(_x_lines_length / 2);
1314  // Array of nthreads arrays that hold the line defs for each thread
1315  int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1316 
1317  int min_index = 0;
1318 
1319  // Find the total line length for each thread
1320  for (i = 0; i < _x_lines_length; i += 2) {
1321  min_index = find_min_element_index(nodes_per_thread, nthreads);
1322  nodes_per_thread[min_index] += _sorted_x_lines[i + 1];
1323  line_thread_id[i / 2] = min_index;
1324  lines_per_thread[min_index] += 1;
1325  }
1326 
1327  // Allocate memory for each array in thread_line_defs
1328  for (i = 0; i < nthreads; i++) {
1329  thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1330  }
1331 
1332  int thread_idx;
1333  int line_idx;
1334 
1335  // Populate each array of thread_line_defs
1336  for (i = 0; i < _x_lines_length; i += 2) {
1337  thread_idx = line_thread_id[i / 2];
1338  line_idx = thread_idx_counter[thread_idx];
1339  thread_line_defs[thread_idx][line_idx] = _sorted_x_lines[i];
1340  thread_line_defs[thread_idx][line_idx + 1] = _sorted_x_lines[i + 1];
1341  thread_idx_counter[thread_idx] += 2;
1342  }
1343 
1344  // Populate ordered_line_def
1345  int ordered_line_def_counter = 0;
1346  for (i = 0; i < nthreads; i++) {
1347  for (j = 0; j < lines_per_thread[i] * 2; j++) {
1348  ics_adi_dir_x->ordered_line_defs[ordered_line_def_counter] = thread_line_defs[i][j];
1349  ordered_line_def_counter++;
1350  }
1351  }
1352  // Set direction node and line start/stop indices
1353  // thread 0 start and stop
1355  ics_adi_dir_x->ordered_start_stop_indices[1] = nodes_per_thread[0];
1357  ics_adi_dir_x->line_start_stop_indices[1] = lines_per_thread[0] * 2;
1358  long start_node;
1359  long line_start_node;
1360  for (i = 2; i < nthreads * 2; i += 2) {
1361  start_node = ics_adi_dir_x->ordered_start_stop_indices[i - 1];
1363  ics_adi_dir_x->ordered_start_stop_indices[i + 1] = start_node + nodes_per_thread[i / 2];
1364 
1365  line_start_node = ics_adi_dir_x->line_start_stop_indices[i - 1];
1366  ics_adi_dir_x->line_start_stop_indices[i] = line_start_node;
1367  ics_adi_dir_x->line_start_stop_indices[i + 1] = line_start_node +
1368  lines_per_thread[i / 2] * 2;
1369  }
1370 
1371  // Put the Nodes in order in the ordered_nodes array
1372  int ordered_node_idx_counter = 0;
1373  int current_node;
1374  for (i = 0; i < nthreads; i++) {
1375  for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1376  current_node = thread_line_defs[i][j];
1377  ics_adi_dir_x->ordered_nodes[ordered_node_idx_counter] = current_node;
1378  ics_adi_dir_x->states_in[ordered_node_idx_counter] = states[current_node];
1379  ordered_node_idx_counter++;
1380  for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1381  current_node = _neighbors[current_node * 3];
1382  ics_adi_dir_x->ordered_nodes[ordered_node_idx_counter] = current_node;
1383  ics_adi_dir_x->states_in[ordered_node_idx_counter] = states[current_node];
1384  ordered_node_idx_counter++;
1385  }
1386  }
1387  }
1388 
1389  // Delete thread_line_defs array
1390  for (i = 0; i < nthreads; i++) {
1391  free(thread_line_defs[i]);
1392  }
1393  free(thread_line_defs);
1394  free(nodes_per_thread);
1395  free(lines_per_thread);
1396  free(thread_idx_counter);
1397 }
1398 
1399 void ICS_Grid_node::divide_y_work(const int nthreads) {
1400  int i, j, k;
1401  // nodes in each thread. (work each thread has to do)
1402  int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1403  // number of lines in each thread
1404  int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1405  // To determine which index to put the start node and line length in thread_line_defs
1406  int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1407  // To determine which thread array to put the start node and line length in thread_line_defs
1408  std::vector<int> line_thread_id(_y_lines_length / 2);
1409  // Array of nthreads arrays that hold the line defs for each thread
1410  int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1411 
1412  int min_index = 0;
1413 
1414  // Find the total line length for each thread
1415  for (i = 0; i < _y_lines_length; i += 2) {
1416  min_index = find_min_element_index(nodes_per_thread, nthreads);
1417  nodes_per_thread[min_index] += _sorted_y_lines[i + 1];
1418  line_thread_id[i / 2] = min_index;
1419  lines_per_thread[min_index] += 1;
1420  }
1421 
1422  // Allocate memory for each array in thread_line_defs
1423  for (i = 0; i < nthreads; i++) {
1424  thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1425  }
1426 
1427  int thread_idx;
1428  int line_idx;
1429 
1430  // Populate each array of thread_line_defs
1431  for (i = 0; i < _y_lines_length; i += 2) {
1432  thread_idx = line_thread_id[i / 2];
1433  line_idx = thread_idx_counter[thread_idx];
1434  thread_line_defs[thread_idx][line_idx] = _sorted_y_lines[i];
1435  thread_line_defs[thread_idx][line_idx + 1] = _sorted_y_lines[i + 1];
1436  thread_idx_counter[thread_idx] += 2;
1437  }
1438 
1439  // Populate ordered_line_def
1440  int ordered_line_def_counter = 0;
1441  for (i = 0; i < nthreads; i++) {
1442  for (j = 0; j < lines_per_thread[i] * 2; j++) {
1443  ics_adi_dir_y->ordered_line_defs[ordered_line_def_counter] = thread_line_defs[i][j];
1444  ordered_line_def_counter++;
1445  }
1446  }
1447 
1448  // Set direction start/stop indices
1449  // thread 0 start and stop
1451  ics_adi_dir_y->ordered_start_stop_indices[1] = nodes_per_thread[0];
1453  ics_adi_dir_y->line_start_stop_indices[1] = lines_per_thread[0] * 2;
1454 
1455  long start_node;
1456  long line_start_node;
1457  for (i = 2; i < nthreads * 2; i += 2) {
1458  start_node = ics_adi_dir_y->ordered_start_stop_indices[i - 1];
1460  ics_adi_dir_y->ordered_start_stop_indices[i + 1] = start_node + nodes_per_thread[i / 2];
1461 
1462  line_start_node = ics_adi_dir_y->line_start_stop_indices[i - 1];
1463  ics_adi_dir_y->line_start_stop_indices[i] = line_start_node;
1464  ics_adi_dir_y->line_start_stop_indices[i + 1] = line_start_node +
1465  (lines_per_thread[i / 2] * 2);
1466  }
1467 
1468  // Put the Nodes in order in the ordered_nodes array
1469  int ordered_node_idx_counter = 0;
1470  int current_node;
1471  for (i = 0; i < nthreads; i++) {
1472  for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1473  current_node = thread_line_defs[i][j];
1474  ics_adi_dir_y->ordered_nodes[ordered_node_idx_counter] = current_node;
1475  ics_adi_dir_y->states_in[ordered_node_idx_counter] = states[current_node];
1476  ordered_node_idx_counter++;
1477  for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1478  current_node = _neighbors[(current_node * 3) + 1];
1479  ics_adi_dir_y->ordered_nodes[ordered_node_idx_counter] = current_node;
1480  ics_adi_dir_y->states_in[ordered_node_idx_counter] = states[current_node];
1481  ordered_node_idx_counter++;
1482  }
1483  }
1484  }
1485 
1486  // Delete thread_line_defs array
1487  for (i = 0; i < nthreads; i++) {
1488  free(thread_line_defs[i]);
1489  }
1490  free(thread_line_defs);
1491  free(nodes_per_thread);
1492  free(lines_per_thread);
1493  free(thread_idx_counter);
1494 }
1495 
1496 void ICS_Grid_node::divide_z_work(const int nthreads) {
1497  int i, j, k;
1498  // nodes in each thread. (work each thread has to do)
1499  int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1500  // number of lines in each thread
1501  int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1502  // To determine which index to put the start node and line length in thread_line_defs
1503  int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1504  // To determine which thread array to put the start node and line length in thread_line_defs
1505  std::vector<int> line_thread_id(_z_lines_length / 2);
1506  // Array of nthreads arrays that hold the line defs for each thread
1507  int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1508 
1509  int min_index = 0;
1510 
1511  // Find the total line length for each thread
1512  for (i = 0; i < _z_lines_length; i += 2) {
1513  min_index = find_min_element_index(nodes_per_thread, nthreads);
1514  nodes_per_thread[min_index] += _sorted_z_lines[i + 1];
1515  line_thread_id[i / 2] = min_index;
1516  lines_per_thread[min_index] += 1;
1517  }
1518 
1519  // Allocate memory for each array in thread_line_defs
1520  // Add indices to Grid_data
1521  for (i = 0; i < nthreads; i++) {
1522  thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1523  }
1524 
1525  int thread_idx;
1526  int line_idx;
1527 
1528  // Populate each array of thread_line_defs
1529  for (i = 0; i < _z_lines_length; i += 2) {
1530  thread_idx = line_thread_id[i / 2];
1531  line_idx = thread_idx_counter[thread_idx];
1532  thread_line_defs[thread_idx][line_idx] = _sorted_z_lines[i];
1533  thread_line_defs[thread_idx][line_idx + 1] = _sorted_z_lines[i + 1];
1534  thread_idx_counter[thread_idx] += 2;
1535  }
1536 
1537  // Populate ordered_line_def
1538  int ordered_line_def_counter = 0;
1539  for (i = 0; i < nthreads; i++) {
1540  for (j = 0; j < lines_per_thread[i] * 2; j++) {
1541  ics_adi_dir_z->ordered_line_defs[ordered_line_def_counter] = thread_line_defs[i][j];
1542  ordered_line_def_counter++;
1543  }
1544  }
1545 
1546  // Set direction start/stop indices
1547  // thread 0 start and stop
1549  ics_adi_dir_z->ordered_start_stop_indices[1] = nodes_per_thread[0];
1551  ics_adi_dir_z->line_start_stop_indices[1] = lines_per_thread[0] * 2;
1552 
1553  long start_node;
1554  long line_start_node;
1555  for (i = 2; i < nthreads * 2; i += 2) {
1556  start_node = ics_adi_dir_z->ordered_start_stop_indices[i - 1];
1558  ics_adi_dir_z->ordered_start_stop_indices[i + 1] = start_node + nodes_per_thread[i / 2];
1559 
1560  line_start_node = ics_adi_dir_z->line_start_stop_indices[i - 1];
1561  ics_adi_dir_z->line_start_stop_indices[i] = line_start_node;
1562  ics_adi_dir_z->line_start_stop_indices[i + 1] = line_start_node +
1563  lines_per_thread[i / 2] * 2;
1564  }
1565 
1566  // Put the Nodes in order in the ordered_nodes array
1567  int ordered_node_idx_counter = 0;
1568  int current_node;
1569  for (i = 0; i < nthreads; i++) {
1570  for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1571  current_node = thread_line_defs[i][j];
1572  ics_adi_dir_z->ordered_nodes[ordered_node_idx_counter] = current_node;
1573  ics_adi_dir_z->states_in[ordered_node_idx_counter] = states[current_node];
1574  ordered_node_idx_counter++;
1575  for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1576  current_node = _neighbors[(current_node * 3) + 2];
1577  ics_adi_dir_z->ordered_nodes[ordered_node_idx_counter] = current_node;
1578  ics_adi_dir_z->states_in[ordered_node_idx_counter] = states[current_node];
1579  ordered_node_idx_counter++;
1580  }
1581  }
1582  }
1583 
1584  // Delete thread_line_defs array
1585  for (i = 0; i < nthreads; i++) {
1586  free(thread_line_defs[i]);
1587  }
1588  free(thread_line_defs);
1589  free(nodes_per_thread);
1590  free(lines_per_thread);
1591  free(thread_idx_counter);
1592 }
1593 
1594 
1595 void ICS_Grid_node::set_num_threads(const int n) {
1596  int i;
1597  if (ics_tasks != NULL) {
1598  for (i = 0; i < NUM_THREADS; i++) {
1599  free(ics_tasks[i].scratchpad);
1600  free(ics_tasks[i].RHS);
1601  }
1602  }
1603  free(ics_tasks);
1604  ics_tasks = (ICSAdiGridData*) malloc(n * sizeof(ICSAdiGridData));
1605  for (i = 0; i < n; i++) {
1606  ics_tasks[i].RHS = (double*) malloc(sizeof(double) * _line_length_max);
1607  ics_tasks[i].scratchpad = (double*) malloc(sizeof(double) * _line_length_max - 1);
1608  ics_tasks[i].g = this;
1609  ics_tasks[i].u_diag = (double*) malloc(sizeof(double) * _line_length_max - 1);
1610  ics_tasks[i].diag = (double*) malloc(sizeof(double) * _line_length_max);
1611  ics_tasks[i].l_diag = (double*) malloc(sizeof(double) * _line_length_max - 1);
1612  }
1613 
1616 
1619 
1622 
1623  ics_adi_dir_x->ordered_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1624  ics_adi_dir_x->line_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1625 
1626  ics_adi_dir_y->ordered_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1627  ics_adi_dir_y->line_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1628 
1629  ics_adi_dir_z->ordered_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1630  ics_adi_dir_z->line_start_stop_indices = (long*) malloc(sizeof(long) * n * 2);
1631 
1632  divide_x_work(n);
1633  divide_y_work(n);
1634  divide_z_work(n);
1635 }
1636 
1637 
1638 void ICS_Grid_node::apply_node_flux3D(double dt, double* ydot) {
1639  double* dest;
1640  if (ydot == NULL)
1641  dest = states_cur;
1642  else
1643  dest = ydot;
1645 }
1646 
1647 void ICS_Grid_node::do_grid_currents(double* output, double dt, int) {
1648  memset(states_cur, 0, sizeof(double) * _num_nodes);
1649  if (ics_current_seg_ptrs != NULL) {
1650  ssize_t i, j;
1651  int seg_start_index, seg_stop_index;
1652  int state_index;
1653  double seg_cur;
1654  auto const n = ics_concentration_seg_handles.size();
1655  for (i = 0; i < n; i++) {
1656  seg_start_index = ics_surface_nodes_per_seg_start_indices[i];
1657  seg_stop_index = ics_surface_nodes_per_seg_start_indices[i + 1];
1658  seg_cur = *ics_current_seg_ptrs[i];
1659  for (j = seg_start_index; j < seg_stop_index; j++) {
1660  state_index = ics_surface_nodes_per_seg[j];
1661  output[state_index] += seg_cur * ics_scale_factors[state_index] * dt;
1662  }
1663  }
1664  }
1665 }
1666 
1667 void ICS_Grid_node::volume_setup() {
1668  if (ics_adi_dir_x->dcgrid == NULL) {
1672  } else {
1676  }
1677 }
1678 
1679 int ICS_Grid_node::dg_adi() {
1680  if (diffusable) {
1687  }
1688  return 0;
1689 }
1690 
1691 
1692 void ICS_Grid_node::variable_step_diffusion(const double* states, double* ydot) {
1693  if (diffusable) {
1694  _ics_rhs_variable_step_helper(this, states, ydot);
1695  }
1696 
1697  // TODO: Get volume fraction/tortuosity working as well as take care of this in this file
1698  /*switch(VARIABLE_ECS_VOLUME)
1699  {
1700  case VOLUME_FRACTION:
1701  _ics_rhs_variable_step_helper_vol(this, states, ydot);
1702  break;
1703  case TORTUOSITY:
1704  _ics_rhs_variable_step_helper_tort(this, states, ydot);
1705  break;
1706  default:
1707  _ics_rhs_variable_step_helper(this, states, ydot);
1708  }*/
1709 }
1710 
1711 void ICS_Grid_node::variable_step_ode_solve(double* RHS, double dt) {
1712  if (diffusable) {
1713  ics_ode_solve_helper(this, dt, RHS);
1714  }
1715 }
1716 
1717 void ICS_Grid_node::hybrid_connections() {
1718  _ics_hybrid_helper(this);
1719 }
1720 
1721 void ICS_Grid_node::variable_step_hybrid_connections(const double* cvode_states_3d,
1722  double* const ydot_3d,
1723  const double* cvode_states_1d,
1724  double* const ydot_1d) {
1725  _ics_variable_hybrid_helper(this, cvode_states_3d, ydot_3d, cvode_states_1d, ydot_1d);
1726 }
1727 
1728 void ICS_Grid_node::scatter_grid_concentrations() {
1729  auto const n = ics_concentration_seg_handles.size();
1730  for (auto i = 0ul; i < n; ++i) {
1731  double total_seg_concentration{};
1732  auto const seg_start_index = ics_surface_nodes_per_seg_start_indices[i];
1733  auto const seg_stop_index = ics_surface_nodes_per_seg_start_indices[i + 1];
1734  for (auto j = seg_start_index; j < seg_stop_index; j++) {
1735  total_seg_concentration += states[ics_surface_nodes_per_seg[j]];
1736  }
1737  auto const average_seg_concentration = total_seg_concentration /
1738  (seg_stop_index - seg_start_index);
1739  *ics_concentration_seg_handles[i] = average_seg_concentration;
1740  }
1741 }
1742 
1743 // Free a single Grid_node
1744 ICS_Grid_node::~ICS_Grid_node() {
1745  int i;
1746  free(states_x);
1747  free(states_y);
1748  free(states_z);
1749  free(states_cur);
1750  delete[] concentration_list;
1751  delete[] current_list;
1752  free(current_dest);
1753 #if NRNMPI
1754  if (nrnmpi_use) {
1755  free(proc_offsets);
1756  free(proc_num_currents);
1757  free(proc_num_fluxes);
1758  }
1759 #endif
1763  free(ics_adi_dir_x->deltas);
1764  free(ics_adi_dir_x);
1765 
1769  free(ics_adi_dir_y->deltas);
1770  free(ics_adi_dir_y);
1771 
1775  free(ics_adi_dir_z->deltas);
1776  free(ics_adi_dir_z);
1777 
1778  free(hybrid_data);
1779  if (node_flux_count > 0) {
1780  free(node_flux_idx);
1781  free(node_flux_scale);
1782  free(node_flux_src);
1783  }
1784 
1785  if (ics_tasks != NULL) {
1786  for (i = 0; i < NUM_THREADS; i++) {
1787  free(ics_tasks[i].scratchpad);
1788  free(ics_tasks[i].RHS);
1789  free(ics_tasks[i].u_diag);
1790  free(ics_tasks[i].l_diag);
1791  }
1792  }
1793  free(ics_tasks);
1794 }
Definition: grids.cpp:45
NRN_EXPORT void ics_set_grid_concentrations(int grid_list_index, int index_in_list, int64_t *nodes_per_seg, int64_t *nodes_per_seg_start_indices, PyObject *neuron_pointers)
Definition: grids.cpp:593
double * _rxd_induced_currents_scale
Definition: rxd.cpp:92
NRN_EXPORT int set_tortuosity(int grid_list_index, int grid_id, PyHocObject *my_permeability)
Definition: grids.cpp:466
static int find_min_element_index(const int thread_sizes[], const int nthreads)
Definition: grids.cpp:1290
NRN_EXPORT int set_volume_fraction(int grid_list_index, int grid_id, PyHocObject *my_alpha)
Definition: grids.cpp:519
static double get_alpha_array(double *alpha, int idx)
Definition: grids.cpp:37
double * t_ptr
Definition: grids.cpp:16
int * _rxd_induced_currents_ecs_idx
NRN_EXPORT int ICS_insert(int grid_list_index, PyHocObject *my_states, long num_nodes, long *neighbors, long *x_line_defs, long x_lines_length, long *y_line_defs, long y_lines_length, long *z_line_defs, long z_lines_length, double *dcs, double dx, bool is_diffusable, double atolscale, double *ics_alphas)
Definition: grids.cpp:385
NRN_EXPORT void set_grid_concentrations(int grid_list_index, int index_in_list, PyObject *grid_indices, PyObject *neuron_pointers)
Definition: grids.cpp:642
NRN_EXPORT void make_time_ptr(PyHocObject *my_dt_ptr, PyHocObject *my_t_ptr)
Definition: grids.cpp:29
TaskQueue * AllTasks
Definition: rxd.cpp:41
NRN_EXPORT void delete_by_id(int id)
Definition: grids.cpp:788
NRN_EXPORT void set_grid_currents(int grid_list_index, int index_in_list, PyObject *grid_indices, PyObject *neuron_pointers, PyObject *scale_factors)
Definition: grids.cpp:683
void empty_list(int list_index)
Definition: grids.cpp:801
int * _rxd_induced_currents_grid
Definition: rxd.cpp:107
NRN_EXPORT int set_diffusion(int grid_list_index, int grid_id, double *dc, int length)
Definition: grids.cpp:453
NRN_EXPORT void ics_set_grid_currents(int grid_list_index, int index_in_list, PyObject *neuron_pointers, double *scale_factors)
Definition: grids.cpp:618
static void * gather_currents(void *dataptr)
Definition: grids.cpp:850
int NUM_THREADS
Definition: rxd.cpp:34
static double get_alpha_scalar(double *alpha, int)
Definition: grids.cpp:34
NRN_EXPORT int ECS_insert(int grid_list_index, PyHocObject *my_states, int my_num_states_x, int my_num_states_y, int my_num_states_z, double my_dc_x, double my_dc_y, double my_dc_z, double my_dx, double my_dy, double my_dz, PyHocObject *my_alpha, PyHocObject *my_permeability, int bc, double bc_value, double atolscale)
Definition: grids.cpp:198
double * _rxd_induced_currents_ecs
static double get_permeability_scalar(double *, int)
Definition: grids.cpp:42
Grid_node * Parallel_grids[100]
Definition: grids.cpp:17
int remove(Grid_node **head, Grid_node *find)
Definition: grids.cpp:769
NRN_EXPORT int ICS_insert_inhom(int grid_list_index, PyHocObject *my_states, long num_nodes, long *neighbors, long *x_line_defs, long x_lines_length, long *y_line_defs, long y_lines_length, long *z_line_defs, long z_lines_length, double *dcs, double dx, bool is_diffusable, double atolscale, double *ics_alphas)
Definition: grids.cpp:419
#define TORTUOSITY
Definition: grids.h:24
#define MIN(a, b)
Definition: grids.h:32
#define VOLUME_FRACTION
Definition: grids.h:25
void apply_node_flux(int, long *, double *, PyObject **, double, double *)
Definition: rxd.cpp:314
#define TRUE
Definition: grids.h:22
#define FALSE
Definition: grids.h:23
#define MAX(a, b)
Definition: grids.h:31
#define ICS_ALPHA
Definition: grids.h:26
static int c
Definition: hoc.cpp:169
#define assert(ex)
Definition: hocassrt.h:24
step
Definition: extdef.h:7
#define RHS(i)
Definition: multisplit.cpp:57
static int nrnmpi_use
Definition: multisplit.cpp:41
#define NRN_EXPORT
Definition: nrn_export.hpp:6
static Node * node(Object *)
Definition: netcvode.cpp:291
int const size_t const size_t n
Definition: nrngsl.h:10
size_t j
_object PyObject
Definition: nrnpy.h:12
#define PyInt_AS_LONG
Definition: nrnpython.h:26
int nrnmpi_myid
double * states
Definition: rxd.cpp:75
void TaskQueue_add_task(TaskQueue *q, void *(*task)(void *), void *args, void *result)
Definition: rxd.cpp:1142
void TaskQueue_sync(TaskQueue *q)
Definition: rxd.cpp:1257
void _ics_variable_hybrid_helper(ICS_Grid_node *, const double *, double *const, const double *, double *const)
void _ics_rhs_variable_step_helper(ICS_Grid_node *, double const *const, double *)
void ecs_run_threaded_dg_adi(const int, const int, ECS_Grid_node *, ECSAdiDirection *, const int)
void ecs_set_adi_tort(ECS_Grid_node *)
Definition: rxd_vol.cpp:772
void ics_dg_adi_z(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void ecs_set_adi_vol(ECS_Grid_node *)
Definition: rxd_vol.cpp:410
void run_threaded_deltas(ICS_Grid_node *g, ICSAdiDirection *ics_adi_dir)
void ics_ode_solve_helper(ICS_Grid_node *, double, double *)
void _rhs_variable_step_helper(Grid_node *, double const *const, double *)
void ics_dg_adi_x_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
int find(const int, const int, const int, const int, const int)
void ics_dg_adi_z_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
#define SPECIES_ABSENT
Definition: rxd.h:7
void ecs_set_adi_homogeneous(ECS_Grid_node *)
void _rhs_variable_step_helper_tort(Grid_node *, double const *const, double *)
Definition: rxd_vol.cpp:785
void ics_dg_adi_y(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void _ics_hybrid_helper(ICS_Grid_node *)
void _rhs_variable_step_helper_vol(Grid_node *, double const *const, double *)
Definition: rxd_vol.cpp:894
void ics_dg_adi_x(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void ics_dg_adi_y_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
#define NULL
Definition: spdefs.h:105
double value
Definition: grids.h:79
unsigned char type
Definition: grids.h:78
neuron::container::data_handle< double > destination
Definition: grids.h:45
long destination
Definition: grids.h:51
neuron::container::data_handle< double > source
Definition: grids.h:52
double scale_factor
Definition: grids.h:53
double * val
Definition: rxd.h:29
int onset
Definition: rxd.h:28
Grid_node * g
Definition: rxd.h:27
int offset
Definition: rxd.h:28
double * states_in
Definition: grids.h:236
int line_size
Definition: grids.h:238
double * states_out
Definition: grids.h:237
ECS_Grid_node * g
Definition: grids.h:244
double * scratchpad
Definition: grids.h:247
double * states_in
Definition: grids.h:338
double * dcgrid
Definition: grids.h:346
long * ordered_start_stop_indices
Definition: grids.h:343
double dc
Definition: grids.h:345
double d
Definition: grids.h:347
long * ordered_nodes
Definition: grids.h:342
void(* ics_dg_adi_dir)(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
Definition: grids.h:327
long * line_start_stop_indices
Definition: grids.h:344
double * states_out
Definition: grids.h:339
long * ordered_line_defs
Definition: grids.h:341
double * deltas
Definition: grids.h:340
double * RHS
Definition: grids.h:360
double * l_diag
Definition: grids.h:361
ICS_Grid_node * g
Definition: grids.h:357
double * diag
Definition: grids.h:362
double * u_diag
Definition: grids.h:363
double * scratchpad
Definition: grids.h:359
union PyHocObject::@37 u
neuron::container::data_handle< double > px_
Definition: rxd.h:89
static double alpha(double x)
Definition: synapse.cpp:85