30 namespace fs = std::filesystem;
43 fs::create_directories(
save);
56 std::string filename =
restore_ +
"/time.dat";
57 f.
open(filename, std::ios::in);
80 for (
int i = 0;
i < nb_threads;
i++) {
109 assert(
p >= 0 && p < eml->_nodecount_padded * esz);
114 if (!ml_pinv[etype]) {
117 ei_instance = ml_pinv[etype][ei_instance];
120 return ei_instance * esz + ei;
129 fh.
open(filename, std::ios::out);
139 fh << nt.
ncell <<
" ncell\n";
140 fh << n_outputgid <<
" ngid\n";
142 assert(ntc.n_outputgids == n_outputgid);
146 fh << nt.
end <<
" nnode\n";
155 fh << nmech <<
" nmech\n";
157 assert(nmech == ntc.nmech);
164 fh << current_tml->index <<
"\n";
165 fh << current_tml->ml->nodecount <<
"\n";
168 fh << nt.
_nidata <<
" nidata\n";
169 fh << nt.
_nvdata <<
" nvdata\n";
173 int* pinv_nt =
nullptr;
175 int* d =
new int[nt.
end];
177 for (
int i = 0;
i < nt.
end; ++
i) {
186 for (
int i = 0;
i < nt.
end; ++
i) {
194 for (
int i = 0;
i < nt.
end; ++
i) {
199 pinv_nt =
new int[nt.
end];
200 for (
int i = 0;
i < nt.
end; ++
i) {
209 for (
int i = 0;
i < nt.
end; ++
i) {
227 int type = current_tml->index;
246 int* nd_ix =
new int[
cnt];
247 for (
int i = 0;
i <
cnt; ++
i) {
250 nd_ix[
i] = pinv_nt[ipval];
262 std::vector<int> pointer2type;
264 for (
int i_instance = 0; i_instance <
cnt; ++i_instance) {
265 for (
int i = 0;
i < sz; ++
i) {
266 int ix = i_instance * sz +
i;
267 int s = semantics[
i];
271 }
else if (
s == -9) {
275 }
else if (
s == -5) {
288 pointer2type.push_back(ptype);
307 ntc.mlmap[
type]->pdata_not_permuted[i_instance * sz +
i]);
314 size_t s = pointer2type.size();
315 fh <<
s <<
" npointer\n";
322 s = indices.size() ? (1 + indices.size() + 5 *
cnt * indices.size()) : 0;
323 fh <<
s <<
" nmodlrandom\n";
325 std::vector<uint32_t> nmodlrandom{};
326 nmodlrandom.reserve(
s);
327 nmodlrandom.push_back((uint32_t) indices.size());
328 for (
auto ix: indices) {
329 nmodlrandom.push_back((uint32_t) ix);
331 for (
auto ix: indices) {
334 for (
int i = 0;
i <
cnt; ++
i) {
335 void*
v = nt.
_vdata[d[
i * sz + ix]];
339 data[4] = uint32_t(which);
341 nmodlrandom.push_back(
j);
345 fh.
write_array<uint32_t>(nmodlrandom.data(), nmodlrandom.size());
354 int* output_vindex =
new int[nt.
n_presyn];
367 output_threshold[
i] = 0.0;
368 output_vindex[
i] = -1;
376 if (!ml_pinv[
type]) {
380 ix = ml_pinv[
type][ix];
382 output_vindex[
i] = -(ix * 1000 +
type);
389 assert(ntc.output_vindex[
i] == output_vindex[
i]);
392 assert(ntc.output_threshold[
i] == output_threshold[
i]);
395 delete[] output_vindex;
396 delete[] output_threshold;
400 std::vector<int> pnt_offset(
memb_func.size(), -1);
402 int type = tml->index;
404 pnt_offset[
type] = synoffset;
405 synoffset += tml->ml->nodecount;
409 int* pnttype =
new int[nnetcon];
411 double* delay =
new double[nnetcon];
412 for (
int i = 0;
i < nnetcon; ++
i) {
415 if (pnt ==
nullptr) {
440 for (
int i = 0;
i < nnetcon; ++
i) {
441 assert(ntc.pnttype[
i] == pnttype[
i]);
458 fh << nbcp <<
" bbcorepointer\n";
466 int type = tml->index;
487 nullptr,
nullptr, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->
_thread, &nt, ml, 0.0);
493 assert(ntc.bcpicnt[
i] == icnt);
494 assert(ntc.bcpdcnt[
i] == dcnt);
496 int* iArray =
nullptr;
497 double* dArray =
nullptr;
499 iArray =
new int[icnt];
502 dArray =
new double[dcnt];
516 dArray, iArray, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->
_thread, &nt, ml, 0.0);
532 fh << nt.
n_vecplay <<
" VecPlay instances\n";
535 int vtype =
pr->type();
544 if (nn &&
pr->pd_ >= ml->
data &&
pr->pd_ < (ml->
data + nn)) {
546 ix = (
pr->pd_ - ml->
data);
558 if (ml_pinv[mtype]) {
559 icnt = ml_pinv[mtype][icnt];
570 assert(ntc.vecplay_ix[
i] == ix);
574 int sz = vpc->
y_.size();
579 std::cerr <<
"Error checkpointing vecplay type" << std::endl;
598 f.
open(filename, std::ios::out);
638 auto type = tml->index;
642 "Checkpoint is requested involving BBCOREPOINTER but there is no bbcore_write"
643 " function for %s\n",
653 template <
typename T>
658 T* d =
new T[
cnt * sz];
660 for (
int i = 0;
i <
cnt * sz; ++
i) {
665 for (
int i = 0;
i <
cnt; ++
i) {
670 for (
int j = 0;
j < sz; ++
j) {
671 d[
i * sz +
j] =
data[ip +
j * align_cnt];
678 template <
typename T>
700 fh << d->
type() <<
"\n";
707 fh << (nc - nt.
netcons) <<
"\n";
725 fh << (ps - nt.
presyns) <<
"\n";
734 fh <<
pr->type() <<
"\n";
762 std::shared_ptr<Phase2::EventTypeBase> event,
787 net_send(nt.
_vdata + e->movable, e->weight_index, pnt, e->time, e->flag);
820 fh << nt.
n_vecplay <<
" VecPlayContinuous state\n";
849 fh << -1 <<
" Presyn ConditionEvent flags\n";
860 fh << -1 <<
" TQItems from atomic_dq\n";
861 while ((
q = tqe->
atomic_dq(1e20)) !=
nullptr) {
865 fh << -1 <<
" TQItemsfrom binq_\n";
913 for (
const auto& event: p2.
events) {
static void nrnmpi_barrier()
static double restore(void *v)
std::string get_save_path() const
void restore_tqitem(int type, std::shared_ptr< Phase2::EventTypeBase > event, NrnThread &nt)
void write_tqueue(TQItem *q, NrnThread &nt, FileHandler &fh) const
void write_phase2(NrnThread &nt) const
T * soa2aos(T *data, int cnt, int sz, int layout, int *permute) const
void write_checkpoint(NrnThread *nt, int nb_threads) const
const std::string restore_
double restore_time() const
TODO: this value should be broadcast rather than having every rank read a double.
void data_write(FileHandler &F, T *data, int cnt, int sz, int layout, int *permute) const
void restore_tqueue(NrnThread &, const Phase2 &p2)
bool should_restore() const
CheckPoints(const std::string &save, const std::string &restore)
auto & get_memb_func(size_t idx)
auto & get_prop_dparam_size()
auto & get_mech_data_layout()
auto & get_bbcore_write()
auto & get_is_artificial()
auto & get_prop_param_size()
void write_array(T *p, size_t nb_elements)
Write a 1D array.
T * read_array(T *p, size_t count)
Read an array of fixed length.
void open(const std::string &filename, std::ios::openmode mode=std::ios::in)
Preserving chkpnt state, move to a new file.
int checkpoint() const
Query chkpnt state.
void close()
Close currently open file.
virtual void send(double sendtime, NetCvode *, NrnThread *) override
std::vector< int > preSynConditionEventFlags
std::vector< VecPlayContinuous_ > vec_play_continuous
std::vector< std::pair< int, std::shared_ptr< EventTypeBase > > > events
virtual void send(double sendtime, NetCvode *, NrnThread *) override
TQItem * atomic_dq(double til)
std::size_t discon_index_
std::size_t ubound_index_
#define VecPlayContinuousType
#define PlayRecordEventType
short * nrn_is_artificial_
void nrnran123_getids3(nrnran123_State *s, std::uint32_t *id1, std::uint32_t *id2, std::uint32_t *id3)
void nrnran123_getseq(nrnran123_State *s, std::uint32_t *seq, char *which)
Get sequence number and selector from an nrnran123_State object.
bool nrn_semantics_is_ion(int i)
int nrn_semantics_ion_type(int i)
#define _threadargsproto_
THIS FILE IS AUTO GENERATED DONT MODIFY IT.
void nrn_inverse_i_layout(int i, int &icnt, int cnt, int &isz, int sz, int layout)
int nrn_i_layout(int icnt, int cnt, int isz, int sz, int layout)
This function returns the index in a flat array of a matrix coordinate (icnt, isz).
int checkpoint_save_patternstim(_threadargsproto_)
std::vector< int > & nrn_mech_random_indices(int type)
void nrn_thread_table_check()
int * inverse_permute(int *p, int n)
void * ecalloc(size_t n, size_t size)
void checkpoint_restore_patternstim(int, double, _threadargsproto_)
NrnThreadChkpnt * nrnthread_chkpnt
static int nrn_original_aos_index(int etype, int ix, NrnThread &nt, int **ml_pinv)
void net_send(void **, int, Point_process *, double, double)
void nrn_spike_exchange_init()
void dt2thread(double adt)
void allocate_data_in_mechanism_nrn_init()
int nrn_soa_padded_size(int cnt, int layout)
Calculate the size after padding for a specific memory layout.
NetCvode * net_cvode_instance
corenrn_parameters corenrn_param
Printing method.
static int permute(int i, NrnThread &nt)
icycle< ncycle;++icycle) { int istride=stride[icycle];nrn_pragma_acc(loop vector) nrn_pragma_omp(loop bind(parallel)) for(int icore=0;icore< warpsize;++icore) { int i=ii+icore;if(icore< istride) { int ip=GPU_PARENT(i);GPU_RHS(i) -=GPU_B(i) *GPU_RHS(ip);GPU_RHS(i)/=GPU_D(i);} i+=istride;} ii+=istride;} }}void solve_interleaved2(int ith) { NrnThread *nt=nrn_threads+ith;InterleaveInfo &ii=interleave_info[ith];int nwarp=ii.nwarp;if(nwarp==0) return;int ncore=nwarp *warpsize;int *ncycles=ii.cellsize;int *stridedispl=ii.stridedispl;int *strides=ii.stride;int *rootbegin=ii.firstnode;int *nodebegin=ii.lastnode;if(0) { nrn_pragma_acc(parallel loop gang present(nt[0:1], strides[0:nstride], ncycles[0:nwarp], stridedispl[0:nwarp+1], rootbegin[0:nwarp+1], nodebegin[0:nwarp+1]) async(nt->stream_id)) nrn_pragma_omp(target teams loop map(present, alloc:nt[:1], strides[:nstride], ncycles[:nwarp], stridedispl[:nwarp+1], rootbegin[:nwarp+1], nodebegin[:nwarp+1])) for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} nrn_pragma_acc(wait(nt->stream_id)) } else { for(int icore=0;icore< ncore;icore+=warpsize) { solve_interleaved2_loop_body(nt, icore, ncycles, strides, stridedispl, rootbegin, nodebegin);} }}void solve_interleaved1(int ith) { NrnThread *nt=nrn_threads+ith;int ncell=nt-> ncell
int type_of_ntdata(NrnThread &, int index, bool reset)
std::string to_string(const T &obj)
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
int * nrn_prop_dparam_size_
int * nrn_prop_param_size_
std::vector< Memb_func > memb_func
static void pr(N_Vector x)
static double save(void *v)
virtual bool require_checkpoint()
virtual void send(double deliverytime, NetCvode *, NrnThread *)
PreSynHelper * presyns_helper
bool mpi_enable
Initialization seed for random number generator (int)