30 dt_ptr = static_cast<double*>(my_dt_ptr->u.px_);
31 t_ptr = static_cast<double*>(my_t_ptr->u.px_);
46 return permeability[idx];
67 states = static_cast<double*>(my_states->u.px_);
71 states_x = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
73 states_y = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
75 states_cur = (double*) malloc(sizeof(double) * my_num_states_x * my_num_states_y *
100 if (PyFloat_Check(my_permeability)) {
116 if (PyFloat_Check(my_alpha)) {
117 alpha = (double*) malloc(sizeof(double));
122 alpha = static_cast<double*>(my_alpha->u.px_);
162 sizeof(double) * MAX(my_num_states_x, MAX(my_num_states_y, my_num_states_z)));
230 return new_Grid->insert(grid_list_index);
248 double* ics_alphas) {
254 states = static_cast<double*>(my_states->u.px_);
308 long x_max = x_line_defs[1];
309 long y_max = y_line_defs[1];
310 long z_max = z_line_defs[1];
311 long xy_max = (x_max > y_max) ? x_max : y_max;
358 if (dcgrid == NULL) {
399 double* ics_alphas) {
416 return new_Grid->insert(grid_list_index);
433 double* ics_alphas) {
449 return new_Grid->insert(grid_list_index);
456 while (id < grid_id) {
462 node->set_diffusion(dc, length);
471 while (id < grid_id) {
485 if (PyFloat_Check(my_permeability)) {
487 double new_permeability = PyFloat_AsDouble((PyObject*) my_permeability);
524 while (id < grid_id) {
535 if (PyFloat_Check(my_alpha)) {
539 alpha = (double*) malloc(sizeof(double));
548 alpha = static_cast<double*>(my_alpha->u.px_);
595 int64_t* nodes_per_seg,
596 int64_t* nodes_per_seg_start_indices,
600 ssize_t n = (ssize_t) PyList_Size(neuron_pointers);
604 for (i = 0; i < index_in_list; i++) {
612 for (i = 0; i < n; i++) {
614 reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_);
621 double* scale_factors) {
624 ssize_t n = (ssize_t) PyList_Size(neuron_pointers);
627 for (i = 0; i < index_in_list; i++) {
634 for (i = 0; i < n; i++) {
636 ((PyHocObject*) PyList_GET_ITEM(neuron_pointers, i))->u.px_);
657 ssize_t n = (ssize_t) PyList_Size(grid_indices);
661 for (i = 0; i < index_in_list; i++) {
674 for (i = 0; i < n; i++) {
678 reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_;
700 ssize_t n = (ssize_t) PyList_Size(grid_indices);
705 for (i = 0; i < index_in_list; i++) {
718 for (i = 0; i < n; i++) {
722 reinterpret_cast<PyHocObject*>(PyList_GET_ITEM(neuron_pointers, i))->u.px_;
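
The set_grid_concentrations / set_grid_currents fragments above (lines 600-722) all follow one pattern: walk a Python list, cast each element to NEURON's PyHocObject, and cache the double pointer behind u.px_. Below is a minimal sketch of that pattern; PyHocObjectStub and gather_pointers are illustrative stand-ins, since the real PyHocObject stores a neuron::container::data_handle<double> rather than a raw pointer.

    // Sketch only: PyHocObjectStub stands in for NEURON's PyHocObject; u.px_ is
    // simplified here to a raw double*.
    #include <Python.h>

    #include <cstddef>
    #include <vector>

    struct PyHocObjectStub {
        PyObject_HEAD
        union {
            double* px_;  // simplified stand-in for the hoc pointer member
        } u;
    };

    // Collect the double* behind each element of a Python list of hoc references.
    static std::vector<double*> gather_pointers(PyObject* neuron_pointers) {
        std::vector<double*> out;
        Py_ssize_t n = PyList_Size(neuron_pointers);
        out.reserve(static_cast<std::size_t>(n));
        for (Py_ssize_t i = 0; i < n; i++) {
            auto* hoc_obj = reinterpret_cast<PyHocObjectStub*>(PyList_GET_ITEM(neuron_pointers, i));
            out.push_back(hoc_obj->u.px_);  // cache the target of the hoc reference
        }
        return out;
    }
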
752 for (i = 0; i < n; i++) {
772 *head = (*head)->next;
803 while (*head != NULL) {
805 *head = (*head)->next;
843 for (i = 0; i < n; i++) {
853 double* val = d->val;
857 for (i = start; i < stop; i++)
858 val[i] = c[i].scale_factor * (*c[i].source) / grid->alpha[c[i].destination];
861 for (i = start; i < stop; i++)
862 val[i] = c[i].scale_factor * (*c[i].source) /
865 for (i = start; i < stop; i++)
866 val[i] = c[i].scale_factor * (*c[i].source) / grid->alpha[0];
897 tasks[i].onset = i * tasks_per_thread;
898 tasks[i].offset = MIN((i + 1) * tasks_per_thread, m);
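
Lines 897-898 split the grid's m work items into contiguous per-thread slices, clamping the last slice with MIN. Below is a self-contained sketch of that partitioning; the TaskSlice struct and the ceiling division used for tasks_per_thread are assumptions, not the NEURON definitions.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct TaskSlice {
        int onset;   // first index owned by this thread
        int offset;  // one past the last index owned by this thread
    };

    // Split m items into nthreads contiguous slices, clamping the tail with min().
    static std::vector<TaskSlice> partition_tasks(int m, int nthreads) {
        int tasks_per_thread = (m + nthreads - 1) / nthreads;  // ceil(m / nthreads)
        std::vector<TaskSlice> tasks(nthreads);
        for (int i = 0; i < nthreads; i++) {
            tasks[i].onset = std::min(i * tasks_per_thread, m);
            tasks[i].offset = std::min((i + 1) * tasks_per_thread, m);
        }
        return tasks;
    }

    int main() {
        for (auto const& t : partition_tasks(10, 4))
            std::printf("[%d, %d)\n", t.onset, t.offset);
    }

For m = 10 and 4 threads this yields the slices [0, 3), [3, 6), [6, 9), [9, 10).
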
916 for (i = 0; i < n; i++)
919 for (i = 0; i < n; i++) {
925 for (i = 0; i < n; i++)
936 int* current_indices,
939 double volume_fraction;
947 for (i = 0; i < current_count; i++) {
1056 for (i = 0; i < n; i++) {
1079 for (i = 0, j = 0; i < nstates; i++, j += step) {
1123 nrnmpi_int_allgather_inplace(proc_num_init, 1);
1125 if (!proc_num_init[i])
1184 nrnmpi_dbl_allgatherv_inplace(all_scales,
1188 nrnmpi_int_allgatherv_inplace(all_indices,
1291 int new_min_index = 0;
1292 int min_element = thread_sizes[0];
1293 for (int i = 0; i < nthreads; i++) {
1294 if (min_element > thread_sizes[i]) {
1295 min_element = thread_sizes[i];
1299 return new_min_index;
1305 int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1307 int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1309 int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1315 int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1323 line_thread_id[i / 2] = min_index;
1324 lines_per_thread[min_index] += 1;
1328 for (i = 0; i < nthreads; i++) {
1329 thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1337 thread_idx = line_thread_id[i / 2];
1338 line_idx = thread_idx_counter[thread_idx];
1341 thread_idx_counter[thread_idx] += 2;
1345 int ordered_line_def_counter = 0;
1346 for (i = 0; i < nthreads; i++) {
1347 for (j = 0; j < lines_per_thread[i] * 2; j++) {
1349 ordered_line_def_counter++;
1359 long line_start_node;
1360 for (i = 2; i < nthreads * 2; i += 2) {
1368 lines_per_thread[i / 2] * 2;
1372 int ordered_node_idx_counter = 0;
1374 for (i = 0; i < nthreads; i++) {
1375 for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1376 current_node = thread_line_defs[i][j];
1379 ordered_node_idx_counter++;
1380 for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1384 ordered_node_idx_counter++;
1390 for (i = 0; i < nthreads; i++) {
1391 free(thread_line_defs[i]);
1393 free(thread_line_defs);
1394 free(nodes_per_thread);
1395 free(lines_per_thread);
1396 free(thread_idx_counter);
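
The divide_x_work fragments above (lines 1305-1396), like their y and z counterparts that follow, assign each line of grid nodes to whichever thread currently holds the fewest nodes, using find_min_element_index (lines 1291-1299). Below is a compact sketch of that greedy balancing step, with simplified containers and without the per-thread line-definition buffers the real code builds afterwards.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Index of the smallest entry: the least-loaded thread so far.
    static int find_min_element_index(const std::vector<int>& thread_sizes) {
        int new_min_index = 0;
        int min_element = thread_sizes[0];
        for (int i = 1; i < (int) thread_sizes.size(); i++) {
            if (min_element > thread_sizes[i]) {
                min_element = thread_sizes[i];
                new_min_index = i;
            }
        }
        return new_min_index;
    }

    // Greedily assign each line (given by its node count) to the least-loaded thread.
    // Returns, per line, the thread id it was assigned to.
    static std::vector<int> assign_lines(const std::vector<int>& line_node_counts, int nthreads) {
        std::vector<int> nodes_per_thread(nthreads, 0);
        std::vector<int> lines_per_thread(nthreads, 0);
        std::vector<int> line_thread_id(line_node_counts.size());
        for (std::size_t i = 0; i < line_node_counts.size(); i++) {
            int min_index = find_min_element_index(nodes_per_thread);
            line_thread_id[i] = min_index;
            nodes_per_thread[min_index] += line_node_counts[i];
            lines_per_thread[min_index] += 1;
        }
        return line_thread_id;
    }

    int main() {
        std::vector<int> counts{5, 3, 8, 2, 4};
        for (int tid : assign_lines(counts, 2))
            std::printf("%d ", tid);   // prints: 0 1 1 0 0
        std::printf("\n");
    }

Greedy assignment to the least-loaded thread keeps the per-thread node counts roughly even without knowing the line lengths in advance.
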
1402 int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1404 int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1406 int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1410 int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1418 line_thread_id[i / 2] = min_index;
1419 lines_per_thread[min_index] += 1;
1423 for (i = 0; i < nthreads; i++) {
1424 thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1432 thread_idx = line_thread_id[i / 2];
1433 line_idx = thread_idx_counter[thread_idx];
1436 thread_idx_counter[thread_idx] += 2;
1440 int ordered_line_def_counter = 0;
1441 for (i = 0; i < nthreads; i++) {
1442 for (j = 0; j < lines_per_thread[i] * 2; j++) {
1444 ordered_line_def_counter++;
1456 long line_start_node;
1457 for (i = 2; i < nthreads * 2; i += 2) {
1465 (lines_per_thread[i / 2] * 2);
1469 int ordered_node_idx_counter = 0;
1471 for (i = 0; i < nthreads; i++) {
1472 for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1473 current_node = thread_line_defs[i][j];
1476 ordered_node_idx_counter++;
1477 for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1478 current_node = _neighbors[(current_node * 3) + 1];
1481 ordered_node_idx_counter++;
1487 for (i = 0; i < nthreads; i++) {
1488 free(thread_line_defs[i]);
1490 free(thread_line_defs);
1491 free(nodes_per_thread);
1492 free(lines_per_thread);
1493 free(thread_idx_counter);
1499 int* nodes_per_thread = (int*) calloc(nthreads, sizeof(int));
1501 int* lines_per_thread = (int*) calloc(nthreads, sizeof(int));
1503 int* thread_idx_counter = (int*) calloc(nthreads, sizeof(int));
1507 int** thread_line_defs = (int**) malloc(nthreads * sizeof(int*));
1515 line_thread_id[i / 2] = min_index;
1516 lines_per_thread[min_index] += 1;
1521 for (i = 0; i < nthreads; i++) {
1522 thread_line_defs[i] = (int*) malloc(lines_per_thread[i] * 2 * sizeof(int));
1530 thread_idx = line_thread_id[i / 2];
1531 line_idx = thread_idx_counter[thread_idx];
1534 thread_idx_counter[thread_idx] += 2;
1538 int ordered_line_def_counter = 0;
1539 for (i = 0; i < nthreads; i++) {
1540 for (j = 0; j < lines_per_thread[i] * 2; j++) {
1542 ordered_line_def_counter++;
1554 long line_start_node;
1555 for (i = 2; i < nthreads * 2; i += 2) {
1563 lines_per_thread[i / 2] * 2;
1567 int ordered_node_idx_counter = 0;
1569 for (i = 0; i < nthreads; i++) {
1570 for (j = 0; j < lines_per_thread[i] * 2; j += 2) {
1571 current_node = thread_line_defs[i][j];
1574 ordered_node_idx_counter++;
1575 for (k = 1; k < thread_line_defs[i][j + 1]; k++) {
1576 current_node = _neighbors[(current_node * 3) + 2];
1579 ordered_node_idx_counter++;
1585 for (i = 0; i < nthreads; i++) {
1586 free(thread_line_defs[i]);
1588 free(thread_line_defs);
1589 free(nodes_per_thread);
1590 free(lines_per_thread);
1591 free(thread_idx_counter);
1605 for (i = 0; i < n; i++) {
1651 int seg_start_index, seg_stop_index;
1655 for (i = 0; i < n; i++) {
1659 for (j = seg_start_index; j < seg_stop_index; j++) {
1722 double* const ydot_3d,
1723 const double* cvode_states_1d,
1724 double* const ydot_1d) {
1730 for (auto i = 0ul; i < n; ++i) {
1731 double total_seg_concentration{};
1734 for (auto j = seg_start_index; j < seg_stop_index; j++) {
1737 auto const average_seg_concentration = total_seg_concentration /
1738 (seg_stop_index - seg_start_index);
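
The variable_step_hybrid_connections excerpt (lines 1722-1738) couples the 3D intracellular grid to 1D segments by averaging the concentrations of all 3D nodes mapped onto a segment. Below is a minimal sketch of that averaging step; the array and parameter names are illustrative, and non-empty index ranges are assumed.

    #include <cstddef>
    #include <vector>

    // Average, per segment, the 3D-node concentrations mapped to it.
    // seg_start[k] .. seg_stop[k] give the index range of 3D nodes for segment k.
    static std::vector<double> average_segment_concentrations(const double* states_3d,
                                                              const std::vector<std::size_t>& seg_start,
                                                              const std::vector<std::size_t>& seg_stop) {
        std::vector<double> averages(seg_start.size());
        for (std::size_t k = 0; k < seg_start.size(); ++k) {
            double total_seg_concentration{};
            for (std::size_t j = seg_start[k]; j < seg_stop[k]; ++j)
                total_seg_concentration += states_3d[j];   // sum the segment's 3D nodes
            averages[k] = total_seg_concentration / (seg_stop[k] - seg_start[k]);
        }
        return averages;
    }
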
double * induced_currents
unsigned char multicompartment_inititalized
double * induced_currents_scale
void apply_node_flux3D(double dt, double *states)
void set_tortuosity(PyHocObject *)
int induced_current_count
int * all_reaction_indices
int * proc_induced_current_count
void do_grid_currents(double *, double, int)
void variable_step_diffusion(const double *states, double *ydot)
struct ECSAdiDirection * ecs_adi_dir_z
double * set_rxd_currents(int, int *, PyHocObject **)
void initialize_multicompartment_reaction()
void set_volume_fraction(PyHocObject *)
struct ECSAdiDirection * ecs_adi_dir_y
int add_multicompartment_reaction(int, int *, int)
struct ECSAdiGridData * ecs_tasks
void clear_multicompartment_reaction()
void variable_step_ode_solve(double *RHS, double dt)
int total_reaction_states
void variable_step_hybrid_connections(const double *cvode_states_3d, double *const ydot_3d, const double *cvode_states_1d, double *const ydot_1d)
int * proc_num_reaction_states
int * proc_induced_current_offset
struct ECSAdiDirection * ecs_adi_dir_x
void set_num_threads(const int n)
void hybrid_connections()
void scatter_grid_concentrations()
double * all_reaction_states
double * local_induced_currents
void set_diffusion(double *, int)
void do_multicompartment_reactions(double *)
int * induced_currents_index
double(* get_alpha)(double *, int)
int64_t * ics_surface_nodes_per_seg
Concentration_Pair * concentration_list
double(* get_permeability)(double *, int)
Hybrid_data * hybrid_data
int64_t * ics_surface_nodes_per_seg_start_indices
PyObject ** node_flux_src
unsigned char VARIABLE_ECS_VOLUME
int insert(int grid_list_index)
double ** ics_current_seg_ptrs
double * ics_scale_factors
Current_Triple * current_list
std::vector< neuron::container::data_handle< double > > ics_concentration_seg_handles
ssize_t num_concentrations
struct ICSAdiDirection * ics_adi_dir_x
void run_threaded_ics_dg_adi(struct ICSAdiDirection *)
void scatter_grid_concentrations()
void hybrid_connections()
void apply_node_flux3D(double dt, double *states)
struct ICSAdiDirection * ics_adi_dir_y
void divide_y_work(const int nthreads)
struct ICSAdiGridData * ics_tasks
struct ICSAdiDirection * ics_adi_dir_z
void variable_step_ode_solve(double *RHS, double dt)
void variable_step_diffusion(const double *states, double *ydot)
void divide_x_work(const int nthreads)
void set_diffusion(double *, int)
void do_grid_currents(double *, double, int)
void set_num_threads(const int n)
void divide_z_work(const int nthreads)
void variable_step_hybrid_connections(const double *cvode_states_3d, double *const ydot_3d, const double *cvode_states_1d, double *const ydot_1d)
static double get_permeability_array(double *permeability, int idx)
NRN_EXPORT void ics_set_grid_concentrations(int grid_list_index, int index_in_list, int64_t *nodes_per_seg, int64_t *nodes_per_seg_start_indices, PyObject *neuron_pointers)
double * _rxd_induced_currents_scale
NRN_EXPORT int set_tortuosity(int grid_list_index, int grid_id, PyHocObject *my_permeability)
static int find_min_element_index(const int thread_sizes[], const int nthreads)
NRN_EXPORT int set_volume_fraction(int grid_list_index, int grid_id, PyHocObject *my_alpha)
static double get_alpha_array(double *alpha, int idx)
int * _rxd_induced_currents_ecs_idx
NRN_EXPORT int ICS_insert(int grid_list_index, PyHocObject *my_states, long num_nodes, long *neighbors, long *x_line_defs, long x_lines_length, long *y_line_defs, long y_lines_length, long *z_line_defs, long z_lines_length, double *dcs, double dx, bool is_diffusable, double atolscale, double *ics_alphas)
NRN_EXPORT void set_grid_concentrations(int grid_list_index, int index_in_list, PyObject *grid_indices, PyObject *neuron_pointers)
NRN_EXPORT void make_time_ptr(PyHocObject *my_dt_ptr, PyHocObject *my_t_ptr)
NRN_EXPORT void delete_by_id(int id)
NRN_EXPORT void set_grid_currents(int grid_list_index, int index_in_list, PyObject *grid_indices, PyObject *neuron_pointers, PyObject *scale_factors)
void empty_list(int list_index)
int * _rxd_induced_currents_grid
NRN_EXPORT int set_diffusion(int grid_list_index, int grid_id, double *dc, int length)
NRN_EXPORT void ics_set_grid_currents(int grid_list_index, int index_in_list, PyObject *neuron_pointers, double *scale_factors)
static void * gather_currents(void *dataptr)
static double get_alpha_scalar(double *alpha, int)
NRN_EXPORT int ECS_insert(int grid_list_index, PyHocObject *my_states, int my_num_states_x, int my_num_states_y, int my_num_states_z, double my_dc_x, double my_dc_y, double my_dc_z, double my_dx, double my_dy, double my_dz, PyHocObject *my_alpha, PyHocObject *my_permeability, int bc, double bc_value, double atolscale)
double * _rxd_induced_currents_ecs
static double get_permeability_scalar(double *, int)
Grid_node * Parallel_grids[100]
int remove(Grid_node **head, Grid_node *find)
NRN_EXPORT int ICS_insert_inhom(int grid_list_index, PyHocObject *my_states, long num_nodes, long *neighbors, long *x_line_defs, long x_lines_length, long *y_line_defs, long y_lines_length, long *z_line_defs, long z_lines_length, double *dcs, double dx, bool is_diffusable, double atolscale, double *ics_alphas)
void apply_node_flux(int, long *, double *, PyObject **, double, double *)
static Node * node(Object *)
int const size_t const size_t n
void TaskQueue_add_task(TaskQueue *q, void *(*task)(void *), void *args, void *result)
void TaskQueue_sync(TaskQueue *q)
void _ics_variable_hybrid_helper(ICS_Grid_node *, const double *, double *const, const double *, double *const)
void _ics_rhs_variable_step_helper(ICS_Grid_node *, double const *const, double *)
void ecs_run_threaded_dg_adi(const int, const int, ECS_Grid_node *, ECSAdiDirection *, const int)
void ecs_set_adi_tort(ECS_Grid_node *)
void ics_dg_adi_z(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void ecs_set_adi_vol(ECS_Grid_node *)
void run_threaded_deltas(ICS_Grid_node *g, ICSAdiDirection *ics_adi_dir)
void ics_ode_solve_helper(ICS_Grid_node *, double, double *)
void _rhs_variable_step_helper(Grid_node *, double const *const, double *)
void ics_dg_adi_x_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
int find(const int, const int, const int, const int, const int)
void ics_dg_adi_z_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void ecs_set_adi_homogeneous(ECS_Grid_node *)
void _rhs_variable_step_helper_tort(Grid_node *, double const *const, double *)
void ics_dg_adi_y(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void _ics_hybrid_helper(ICS_Grid_node *)
void _rhs_variable_step_helper_vol(Grid_node *, double const *const, double *)
void ics_dg_adi_x(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
void ics_dg_adi_y_inhom(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
neuron::container::data_handle< double > destination
neuron::container::data_handle< double > source
long * ordered_start_stop_indices
void(* ics_dg_adi_dir)(ICS_Grid_node *g, int, int, int, double, double *, double *, double *, double *, double *, double *)
long * line_start_stop_indices
neuron::container::data_handle< double > px_
static double alpha(double x)
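
Several declarations above (get_alpha, get_alpha_scalar, get_alpha_array, and the matching get_permeability_* pair, cf. line 46) describe a small function-pointer dispatch: a grid stores either a single volume fraction for the whole grid or one per voxel, and callers go through the stored accessor either way. Below is a sketch of that idiom; GridSketch is illustrative only, not the NEURON Grid_node.

    #include <cstdio>

    // Accessors that hide whether alpha is a scalar or a per-voxel array.
    static double get_alpha_scalar(double* alpha, int) {
        return alpha[0];      // one volume fraction for the whole grid
    }
    static double get_alpha_array(double* alpha, int idx) {
        return alpha[idx];    // inhomogeneous volume fraction
    }

    struct GridSketch {
        double* alpha;
        double (*get_alpha)(double*, int);
    };

    int main() {
        double uniform = 0.2;
        double per_voxel[3] = {0.2, 0.15, 0.3};

        GridSketch homogeneous{&uniform, get_alpha_scalar};
        GridSketch inhomogeneous{per_voxel, get_alpha_array};

        // Callers use the same expression in both cases.
        std::printf("%g %g\n",
                    homogeneous.get_alpha(homogeneous.alpha, 2),
                    inhomogeneous.get_alpha(inhomogeneous.alpha, 2));  // prints: 0.2 0.3
    }

The scalar accessor ignores the node index, so homogeneous grids pay no storage or branching cost at the call sites.
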