NEURON
memory_usage.hpp
Go to the documentation of this file.
1 #pragma once
2 
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>
8 
9 namespace neuron::container {
/** @brief Size and capacity in bytes. */
struct VectorMemoryUsage {
    VectorMemoryUsage() = default;

    /// @brief Construct from explicit byte counts.
    VectorMemoryUsage(size_t size, size_t capacity)
        : size(size)
        , capacity(capacity) {}

    /** Compute the memory requirements of the `std::vector`.
     *
     * Note, this returns the size and capacity of the memory allocated by
     * the `std::vector`. If the elements allocate memory, that memory
     * isn't included.
     *
     * Essentially,
     *
     *     size = vec.size() * sizeof(T);
     *     capacity = vec.capacity() * sizeof(T);
     */
    template <class T, class A>
    VectorMemoryUsage(const std::vector<T, A>& vec)
        : size(vec.size() * sizeof(T))
        , capacity(vec.capacity() * sizeof(T)) {}

    /// @brief Number of bytes used.
    size_t size{};

    /// @brief Number of bytes allocated.
    size_t capacity{};

    /// @brief Accumulate the size and capacity of `other` into this object.
    const VectorMemoryUsage& operator+=(const VectorMemoryUsage& other) {
        size += other.size;
        capacity += other.capacity;

        return *this;
    }
};
46 
47 
48 /** @brief Memory usage of a storage/soa container. */
50  /// @brief The memory usage of the heavy data in a soa.
52 
53  /// @brief The memory usage for the stable identifiers in a soa.
55 
57  heavy_data += other.heavy_data;
59 
60  return *this;
61  }
62 
64  auto total = heavy_data;
65  total += stable_identifiers;
66 
67  return total;
68  }
69 };
70 
71 /** @brief Memory usage of a `neuron::Model`. */
73  /// @brief The memory usage of the nodes related data.
75 
76  /// @brief The memory usage of all mechanisms.
78 
80  nodes += other.nodes;
81  mechanisms += other.mechanisms;
82 
83  return *this;
84  }
85 
87  auto total = nodes.compute_total();
88  total += mechanisms.compute_total();
89 
90  return total;
91  }
92 };
93 
94 namespace cache {
95 /** @brief Memory usage of a `neuron::cache::Model`. */
97  /** @brief Memory usage required for NRN threads. */
99 
100  /** @brief Memory usage related to caching mechanisms. */
102 
104  threads += other.threads;
105  mechanisms += other.mechanisms;
106 
107  return *this;
108  }
109 
111  auto total = threads;
112  total += mechanisms;
113 
114  return total;
115  }
116 };
117 } // namespace cache
118 
119 /** @brief Overall SoA datastructures related memory usage. */
120 struct MemoryUsage {
124 
125  const MemoryUsage& operator+=(const MemoryUsage& other) {
126  model += other.model;
127  cache_model += other.cache_model;
129 
130  return *this;
131  }
132 
134  auto total = model.compute_total();
135  total += cache_model.compute_total();
136  total += stable_pointers;
137 
138  return total;
139  }
140 };
141 
143  /** @brief Data that are part of the algorithm. */
144  size_t required{};
145 
146  /** @brief Any memory that's (currently) required to run NEURON.
147  *
148  * This includes things like the live stable identifiers in each `soa`, the
149  * `cache::Model` and similar things that are needed to implement NEURON
150  * correctly, but are not required by the simulation.
151  *
152  * This category covers memory that needed to solve a computer science
153  * problem rather than a neuroscience problem. Hence, this category
154  * could potentially be optimized. It's not obvious how much this category
155  * can be optimized.
156  */
157  size_t convenient{};
158 
159  /** @brief Wasted memory due to the difference of `size` and `capacity`. */
160  size_t oversized{};
161 
162  /** @brief Essentially leaked memory.
163  *
164  * The current implementation doesn't know when it's safe to delete stable
165  * identifiers. Hence, when the owning identifier is deallocated the stable
166  * identifier is kept alive and leaked into a global collector.
167  */
168  size_t leaked{};
169 
171  add(memory_usage.model);
172  add(memory_usage.cache_model);
173  add(leaked, memory_usage.stable_pointers);
174  }
175 
176  private:
177  void add(size_t& accumulator, const VectorMemoryUsage& increment) {
178  oversized += increment.capacity - increment.size;
179  accumulator += increment.size;
180  }
181 
182  void add(const StorageMemoryUsage& increment) {
183  add(required, increment.heavy_data);
184  add(convenient, increment.stable_identifiers);
185  }
186 
187  void add(const ModelMemoryUsage& model) {
188  add(model.nodes);
189  add(model.mechanisms);
190  }
191 
193  add(convenient, model.mechanisms);
194  add(convenient, model.threads);
195  }
196 };
197 
198 /** @brief */
199 struct MemoryStats {
201 };
202 
203 /** @brief Gather memory usage of this process. */
205 
206 /** @brief Utility to format memory as a human readable string.
207  *
208  * Note, this is currently tailored to it's use in `format_memory_usage`
209  * and is therefore very rigid in it's padding. Generalize when needed.
210  *
211  * @internal
212  */
213 std::string format_memory(size_t bytes);
214 
215 /** @brief Aligned, human readable representation of `memory_usage`.
216  *
217  * @internal
218  */
220 
221 /** @brief Create a string representation of `usage`. */
222 std::string format_memory_usage(const MemoryUsage& usage);
223 
224 void print_memory_usage(const MemoryUsage& usage);
225 
226 } // namespace neuron::container
void print_memory_usage(const MemoryUsage &usage)
MemoryUsage local_memory_usage()
Gather memory usage of this process.
std::string format_memory_usage(const VectorMemoryUsage &memory_usage)
Aligned, human readable representation of memory_usage.
std::string format_memory(size_t bytes)
Utility to format memory as a human readable string.
cache::ModelMemoryUsage memory_usage(const std::optional< neuron::cache::Model > &model)
Model & model()
Access the global Model instance.
Definition: model_data.hpp:206
Overall SoA datastructures related memory usage.
VectorMemoryUsage compute_total() const
const MemoryUsage & operator+=(const MemoryUsage &other)
cache::ModelMemoryUsage cache_model
VectorMemoryUsage stable_pointers
void add(const ModelMemoryUsage &model)
size_t convenient
Any memory that's (currently) required to run NEURON.
void add(size_t &accumulator, const VectorMemoryUsage &increment)
size_t leaked
Essentially leaked memory.
size_t oversized
Wasted memory due to the difference of size and capacity.
void add(const cache::ModelMemoryUsage &model)
size_t required
Data that are part of the algorithm.
void add(const StorageMemoryUsage &increment)
MemoryUsageSummary(const MemoryUsage &memory_usage)
Memory usage of a neuron::Model.
const ModelMemoryUsage & operator+=(const ModelMemoryUsage &other)
StorageMemoryUsage nodes
The memory usage of the nodes related data.
VectorMemoryUsage compute_total() const
StorageMemoryUsage mechanisms
The memory usage of all mechanisms.
Memory usage of a storage/soa container.
VectorMemoryUsage stable_identifiers
The memory usage for the stable identifiers in a soa.
VectorMemoryUsage compute_total() const
const StorageMemoryUsage & operator+=(const StorageMemoryUsage &other)
VectorMemoryUsage heavy_data
The memory usage of the heavy data in a soa.
Size and capacity in bytes.
VectorMemoryUsage(const std::vector< T, A > &vec)
Compute the memory requirements of the std::vector.
size_t size
Number of bytes used.
size_t capacity
Number of bytes allocated.
const VectorMemoryUsage & operator+=(const VectorMemoryUsage &other)
VectorMemoryUsage(size_t size, size_t capacity)
Memory usage of a neuron::cache::Model.
const ModelMemoryUsage & operator+=(const ModelMemoryUsage &other)
VectorMemoryUsage threads
Memory usage required for NRN threads.
VectorMemoryUsage mechanisms
Memory usage related to caching mechanisms.