#include <../../nrnconf.h>
#include <unordered_map>
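// Excerpt from NEURON's parallel variable-transfer machinery (ParallelContext
// source_var / target_var / setup_transfer, used e.g. for gap junctions); only
// selected lines of the original file are shown here.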
nrnmpi_long_alltoallv_sparse(s.data.data(),
nrnmpi_long_alltoallv(s.data.data(),
nrnmpi_int_alltoallv_sparse(s.data.data(),
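// These wrappers exchange signed global ids (sgids) between MPI ranks using the
// standard alltoallv triple of payload, per-rank counts, and displacements. A
// minimal sketch of the buffer bundle they appear to take (the member names
// data/cnt/displ match the uses further down; the struct name here is made up):
//
//   template <typename T>
//   struct AlltoallvBuffers {
//       std::vector<T> data;    // payload, concatenated per rank
//       std::vector<int> cnt;   // cnt[i]: number of elements to/from rank i
//       std::vector<int> displ; // prefix offsets; displ[nhost] is the total
//   };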
extern double nrnmpi_transfer_wait_;
std::vector<neuron::container::data_handle<double>> tv;
std::vector<neuron::container::data_handle<double>> sv;
std::vector<Node*> nd;
#define PPList partrans_PPList
typedef std::vector<Point_process*> PPList;
static std::vector<neuron::container::data_handle<double>> poutsrc_;
static std::vector<neuron::container::data_handle<double>> targets_;
static std::unordered_map<sgid_t, std::pair<int, neuron::container::field_index>> non_vsrc_update_info_;
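// Bookkeeping containers: tv/sv are the paired target/source data handles copied
// each step by thread_transfer(); nd/val appear to belong to the per-thread
// source-voltage buffer filled by thread_vi_compute(). poutsrc_ holds handles to
// the values gathered into the outgoing source buffer, targets_ the registered
// target variables, and non_vsrc_update_info_ maps an sgid to the
// (mechanism type, field index) of a source that is not a membrane voltage.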
for (auto i = 0; i < p->param_num_vars(); ++i) {
for (auto j = 0; j < p->param_array_dimension(i); ++j) {
if (h == p->param_handle(i, j)) {
if (type == p->_type) {
    return p->param_handle(ix);
hoc_execerror_fmt("partrans update: could not find parameter index ({}, {}) of {}",
if (auto* const nd = sec->parentnode; nd) {
for (int i = 0; i < sec->nnode; ++i) {
    auto* const nd = sec->pnode[i];
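// pv2node(): walk the section's parent node and then its internal nodes
// (sec->pnode[i]) to find which Node a registered voltage handle belongs to.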
auto const psv = hoc_hgetarg<double>(1);
auto const sgid = []() -> sgid_t {
hoc_execerr_ext("source var sgid %lld already in use.", (long long) sgid);
auto ptv = hoc_hgetarg<double>(iarg++);
hoc_execerr_ext("target_var sgid must be >= 0: arg %d is %g\n", iarg - 1, x);
if (pp && !pp->prop->owns(ptv)) {
auto const sgid = static_cast<sgid_t>(x);
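// Argument parsing for ParallelContext.source_var / target_var: a data handle
// plus a non-negative sgid, where a source sgid may be registered only once and
// a target handle should belong to the POINT_PROCESS given as the first arg.
// Roughly, from Python:
//   pc.source_var(seg._ref_v, sgid, sec=seg.sec)   # rank owning the source
//   pc.target_var(gap, gap._ref_vgap, sgid)        # rank owning the target
//   pc.setup_transfer()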
svib.nd.resize(svib.cnt);
svib.val.resize(svib.cnt);
int tid = nd->_nt->id;
svib.nd[svib.cnt++] = nd;
std::unordered_map<Node*, double*> ndvi2pd{1000};
for (int i = 0; i < svib.cnt; ++i) {
    ndvi2pd[nd] = &svib.val[i];
auto const search = ndvi2pd.find(nd);
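// mk_svibuf(): size the per-thread source buffers (svib.nd / svib.val) and build
// a map from each source Node* to its slot in svib.val, so later setup can
// locate the slot with ndvi2pd.find(nd).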
for (i = 0; i < n; ++i) {
"Do not know the POINT_PROCESS target for source id %lld\n"
"For multiple threads, the target pointer must reference a range variable\n"
"of a POINT_PROCESS. Note that even for a single thread, it is\n"
"fastest to supply a reference to the POINT_PROCESS as the first arg.",
for (i = 0; i < n; ++i) {
ttd.tv.resize(ttd.cnt);
ttd.sv.resize(ttd.cnt);
for (i = 0; i < n; ++i) {
auto search = ndvi2pd.find(nd);
hoc_execerr_ext("No source_var for target_var sid = %lld\n", (long long) sid);
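// Per-thread target setup: ttd.tv / ttd.sv are sized to the number of targets
// the thread owns. With multiple threads a target must be a range variable of a
// POINT_PROCESS (hence the error text above), and every target sid must have a
// matching source_var registration somewhere, or hoc_execerr_ext is raised.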
for (int i = 0; i < svb.cnt; ++i) {
for (i = 0; i < n; ++i) {
hoc_execerror("ParallelContext.setup_transfer()", "needs to be called.");
for (int i = 0; i < ttd.cnt; ++i) {
    *(ttd.tv[i]) = *(ttd.sv[i]);
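// The per-time-step copy performed by thread_transfer(): each target handle
// tv[i] is overwritten with the value its paired source handle sv[i] points at
// (which, for off-rank sources, is presumably a slot in insrc_buf_).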
624 "To use ParallelContext.setup_transfer when nhost > 1, NEURON must be configured with "
688 szalloc = szalloc ? szalloc : 1;
std::vector<sgid_t> needsrc{};
needsrc.push_back(sid);
std::vector<sgid_t> ownsrc = sgids_;
auto [send_to_want, recv_from_have] = have_to_want<sgid_t>(ownsrc, needsrc, sgid_alltoallv);
int n = send_to_want.displ[nhost];
n = recv_from_have.displ[nhost];
for (int i = 0; i < n; ++i) {
    sgid_t sgid = recv_from_have.data[i];
sgid_t sid = send_to_want.data[i];
std::swap(insrccnt_, recv_from_have.cnt);
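// setup_transfer rendezvous: ownsrc lists the sgids this rank sources, needsrc
// the sgids its targets require. have_to_want(), driven by sgid_alltoallv above,
// returns which of my sgids each rank wants (send_to_want) and which sgids I
// will receive (recv_from_have); displ[nhost] is the total on each side, and the
// counts/displacements are then swapped into insrccnt_ and friends for the
// runtime exchange. For example, if rank 0 owns sgid 5 and rank 1 needs it,
// rank 0's send_to_want lists sgid 5 under destination rank 1 and rank 1's
// recv_from_have lists sgid 5 under source rank 0.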
812 "For impedance, pc.target_var requires that its first arg be a reference "
813 "to the POINT_PROCESS",
851 "number of gap junctions, %zd, not equal to number of pc.transfer_var, %zd",
if (ttd && ttd->cnt) {
for (int i = 0; i < ttd->cnt; ++i) {
for (int i = 0; i < ttd->cnt; ++i) {
delete[] std::exchange(vgap1, nullptr);
delete[] std::exchange(vgap2, nullptr);
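// pargap_jacobi_setup() iterates each thread's TransferThreadData (ttd->cnt
// entries) and, on teardown, frees the temporary vgap1/vgap2 arrays that appear
// to stand in for the transferred voltages during the impedance tools' Jacobi
// iteration.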
void pargap_jacobi_rhs(std::vector<std::complex<double>>& b,
                       const std::vector<std::complex<double>>& x) {
for (int real_imag = 0; real_imag < 2; ++real_imag) {
if (real_imag == 0) {
for (int i = 0; i < _nt->end; ++i) {
for (int i = 0; i < _nt->end; ++i) {
    if (real_imag == 0) {
b[i] += std::complex<double>(0, vec_rhs[i]);
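// pargap_jacobi_rhs() makes two passes (real_imag = 0, then 1) over the thread's
// _nt->end nodes, handling the real and imaginary components of the complex
// vectors separately; the second pass accumulates the freshly computed vec_rhs
// into the imaginary part of b via b[i] += complex(0, vec_rhs[i]).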
if (g.src_sid.empty() && g.tar_sid.empty()) {
Sprintf(fname, "%s/%d_gap.dat", path, group_ids[tid]);
FILE* f = fopen(fname, "wb");
fprintf(f, "%d sizeof_sid_t\n", int(sizeof(sgid_t)));
int ntar = int(g.tar_sid.size());
int nsrc = int(g.src_sid.size());
fprintf(f, "%d ntar\n", ntar);
fprintf(f, "%d nsrc\n", nsrc);
#define CHKPNT fprintf(f, "chkpnt %d\n", chkpnt++);
if (!g.src_sid.empty()) {
    CHKPNT fwrite(g.src_sid.data(), nsrc, sizeof(sgid_t), f);
    CHKPNT fwrite(g.src_type.data(), nsrc, sizeof(int), f);
    CHKPNT fwrite(g.src_index.data(), nsrc, sizeof(int), f);
if (!g.tar_sid.empty()) {
    CHKPNT fwrite(g.tar_sid.data(), ntar, sizeof(sgid_t), f);
    CHKPNT fwrite(g.tar_type.data(), ntar, sizeof(int), f);
    CHKPNT fwrite(g.tar_index.data(), ntar, sizeof(int), f);
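// Layout of the <group_id>_gap.dat file written for CoreNEURON: a short text
// header (sizeof_sid_t, ntar, nsrc) followed, with "chkpnt %d" markers emitted
// by the CHKPNT macro, by the raw arrays of source sids/types/indices and then
// target sids/types/indices.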
int tid = nt ? nt->id : 0;
g.tar_sid.push_back(sid);
g.tar_type.push_back(type);
g.tar_index.push_back(ix);
for (size_t i = 0; i < sgids_.size(); ++i) {
int tid = nd->_nt ? nd->_nt->id : 0;
type = it->second.first;
cache_token.thread_cache(tid).node_data_offset;
g.src_sid.push_back(sid);
g.src_type.push_back(type);
g.src_index.push_back(ix);
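// nrn_get_partrans_setup_info() / nrncore_transfer_info(): for every registered
// target and source, record its sid together with a mechanism type and an index
// (for voltage sources, an offset relative to the thread's node_data_offset from
// the sorted-model cache token), grouped by thread id for CoreNEURON setup.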