NrnThreadMembList* create_tml(NrnThread& nt, int mech_id, Memb_func& memb_func,
                              int& shadow_rhs_cnt, const std::vector<int>& mech_types,
                              const std::vector<int>& nodecounts);
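A hypothetical call site for create_tml, sketched under assumptions: mech_types and nodecounts come from the thread's input dataset, and the caller links each returned entry into the thread's mechanism list via an assumed nt.tml head field and next pointers; the real setup lives in the dataset reader.

// Hypothetical sketch: append one NrnThreadMembList per mechanism type
// present in this thread's dataset. tail and nt.tml are assumed names.
int shadow_rhs_cnt = 0;
NrnThreadMembList* tail = nullptr;
for (int i = 0; i < static_cast<int>(mech_types.size()); ++i) {
    NrnThreadMembList* tml = create_tml(nt, i, memb_func[mech_types[i]],
                                        shadow_rhs_cnt, mech_types, nodecounts);
    if (tail == nullptr) {
        nt.tml = tml;      // first entry becomes the list head
    } else {
        tail->next = tml;  // otherwise extend the tail
    }
    tail = tml;
}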
template <typename F, typename... Args>
void nrn_multithread_job(F&& job, Args&&... args) {
    int i;
#pragma omp parallel for private(i) shared(nrn_threads, job, nrn_nthread, \
                                           nrnmpi_myid) schedule(static, 1)
    for (i = 0; i < nrn_nthread; ++i) {
        job(nrn_threads + i, args...);  // one NrnThread per OpenMP thread
    }
}
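For illustration, a hypothetical dispatch through this template; any callable taking a NrnThread* first argument works, for example the matrix setup job declared in the list below:

// Run setup_tree_matrix_minimal once per NrnThread, one OpenMP thread each.
nrn_multithread_job(setup_tree_matrix_minimal);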
constexpr int at_time(NrnThread* nt, double te) {
    double x = te - 1e-11;
    // true once per crossing: te (minus a roundoff guard) lies in (t - dt, t]
    if (x <= nt->_t && x > (nt->_t - nt->_dt)) {
        return 1;
    }
    return 0;
}
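A usage sketch for at_time (hypothetical mechanism code; t_event and on_event are assumed names): the function returns 1 exactly once, on the fixed step whose interval contains te.

// Hypothetical one-shot trigger inside a per-step mechanism routine.
if (at_time(nt, t_event)) {
    on_event(nt);  // assumed callback; fires once when t crosses t_event
}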
// for gpu builds with unified memory support

#define BEFORE_AFTER_SIZE 5
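BEFORE_AFTER_SIZE sizes the per-thread tbl[] array listed further below. A minimal sketch of how nrn_ba(NrnThread*, int) could walk one slot of that table, assuming NrnThreadBAList carries {ml, bam, next} and BAMech carries the compiled BEFORE/AFTER function f plus its mechanism type:

// Sketch, not the implementation: run every BEFORE/AFTER block registered
// for phase bat (0 <= bat < BEFORE_AFTER_SIZE) on thread nt.
void nrn_ba_sketch(NrnThread* nt, int bat) {
    for (NrnThreadBAList* tbl = nt->tbl[bat]; tbl; tbl = tbl->next) {
        Memb_list* ml = tbl->ml;                 // instances of the owning mechanism
        (*tbl->bam->f)(nt, ml, tbl->bam->type);  // compiled BEFORE/AFTER body
    }
}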
void nrn_threads_create(int n)
void nrn_finitialize(int setv, double v)
void nonvint(NrnThread *_nt)
void nrncore2nrn_send_values(NrnThread *nth)
void update(NrnThread *_nt)
constexpr int at_time(NrnThread *nt, double te)
void nrn_fixed_step_minimal()
void nrn_mk_table_check()
void nrn_thread_table_check()
void nrn_fixed_single_steps_minimal(int total_sim_steps, double tstop)
bool use_solve_interleave
void nrn_fixed_step_group_minimal(int total_sim_steps)
void * setup_tree_matrix_minimal(NrnThread *)
void * nrn_fixed_step_lastpart(NrnThread *nth)
void nrn_solve_minimal(NrnThread *)
void dt2thread(double adt)
void nrn_multithread_job(F &&job, Args &&... args)
void nrncore2nrn_send_init()
void direct_mode_initialize()
All state from NEURON necessary to continue a run.
void nrn_ba(NrnThread *nt, int bat)
NrnThreadMembList * create_tml(NrnThread &nt, int mech_id, Memb_func &memb_func, int &shadow_rhs_cnt, const std::vector< int > &mech_types, const std::vector< int > &nodecounts)
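Taken together, the stepping entries above compose the fixed-step loop. A hedged sketch of one time step built only from functions in this list; the real nrn_fixed_step_minimal() additionally delivers queued events and exchanges spikes, so treat this as an illustration, not the implementation (dt is assumed to be the global step size):

void fixed_step_sketch() {
    dt2thread(dt);                                   // propagate the global dt to every NrnThread
    nrn_multithread_job(setup_tree_matrix_minimal);  // assemble RHS and diagonal of the tree matrix
    nrn_multithread_job(nrn_solve_minimal);          // Hines solve on each thread's cable tree
    nrn_multithread_job(update);                     // advance the membrane potentials
    nrn_multithread_job(nrn_fixed_step_lastpart);    // nonvint(): channel states and BEFORE/AFTER blocks
}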
std::vector< Memb_func > memb_func
Represents the main neuron object computed by a single thread.
int _net_send_buffer_size
NrnThreadBAList * tbl[BEFORE_AFTER_SIZE]
PreSynHelper * presyns_helper
Memb_list * _ecell_memb_list
std::vector< int > _pnt_offset
NrnFastImem * nrn_fast_imem
size_t * _fornetcon_weight_perm
size_t * _fornetcon_perm_indices
std::size_t _fornetcon_perm_indices_size
std::unique_ptr< SummationReportMapping > summation_report_handler_
std::size_t _fornetcon_weight_perm_size
TrajectoryRequests * trajec_requests
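Putting the two halves of this page together, a traversal sketch: nrn_threads is the array of nrn_nthread NrnThread objects built by nrn_threads_create(), and each thread's mechanism list (a tml field with index/ml/next members, assumed here from create_tml's return type) links one Memb_list per mechanism type.

for (int i = 0; i < nrn_nthread; ++i) {
    NrnThread* nt = nrn_threads + i;
    for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) {
        int type = tml->index;    // index into the memb_func table above
        Memb_list* ml = tml->ml;  // instance data for that mechanism on this thread
        // a stepping routine would call, e.g., memb_func[type].current(nt, ml, type)
    }
}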