NEURON
container.cpp
Go to the documentation of this file.
#include "neuron/model_data.hpp"
#include "nrn_ansi.h"

#include <catch2/catch_test_macros.hpp>

#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>
10 using namespace neuron::container;
11 
12 // Mock up a neuron::container::soa<...>-based data structure that includes features that are not
13 // currently tested in the real NEURON data structure code.
14 
15 namespace {
16 namespace field {
17 /**
18  * @brief Tag type that has zero-parameter array_dimension() and no num_variables().
19  */
/**
 * @brief Tag type that has zero-parameter array_dimension() and no num_variables().
 *
 * A row holding an A field therefore stores a fixed-length array of 42 floats.
 */
struct A {
    using type = float;
    /// Number of array elements per row for this field (fixed at 42).
    [[nodiscard]] int array_dimension() const {
        return 42;
    }
};
26 
27 /** @brief Tag type for just one double per row.
28  */
/** @brief Tag type for just one double per row (no array_dimension, no num_variables). */
struct B {
    using type = double;
};
32 
33 /** @brief Tag type with multiple fields of differing array_dimension.
34  */
/** @brief Tag type with multiple fields of differing array_dimension.
 *
 *  Three variables are stored per row; variable i has array dimension i + 1,
 *  i.e. dimensions 1, 2 and 3.
 */
struct C {
    using type = double;

    /// Number of distinct variables stored for this tag.
    size_t num_variables() const {
        return 3;
    }

    /// Per-variable array dimension: variable `field_index` holds `field_index + 1` values.
    int array_dimension(int field_index) const {
        return field_index + 1;
    }
};
46 
47 /** @brief Tag type for an optional field, intended to be Off.
48  */
/** @brief Tag type for an optional field, intended to be Off (disabled in the tests). */
struct DOff {
    static constexpr bool optional = true;
    using type = double;
};
53 
54 /** @brief Tag type for an optional field, intended to be On.
55  */
/** @brief Tag type for an optional field, intended to be On (enabled in the tests). */
struct DOn {
    static constexpr bool optional = true;
    using type = double;
};
60 
61 } // namespace field
62 template <typename Identifier>
63 struct handle_interface: handle_base<Identifier> {
64  using base_type = handle_base<Identifier>;
65  using base_type::base_type;
66  /**
67  * @brief Return the above-diagonal element.
68  */
69  [[nodiscard]] field::A::type& a() {
70  return this->template get<field::A>();
71  }
72 };
74 using owning_handle = handle_interface<owning_identifier<storage>>;
75 } // namespace
76 
77 TEST_CASE("Tag type with array_dimension and without num_variables", "[Neuron][data_structures]") {
78  GIVEN("A standalone soa container") {
79  storage data; // Debian's GCC 10.2 doesn't like a {} before the ;
80  THEN("Can we create a handle to a row in it") {
81  REQUIRE_NOTHROW([&data]() { owning_handle instance{data}; }());
82  }
83  }
84 }
85 
86 TEST_CASE("Multi-threaded calls to nrn_ensure_model_data_are_sorted()",
87  "[Neuron][data_structures]") {
88  GIVEN("An initialised model (albeit empty for the moment)") {
89  REQUIRE(hoc_oc("create s\nfinitialize(-65)\n") == 0);
90  REQUIRE(neuron::model().node_data().size() == 3);
91  THEN("Call nrn_ensure_model_data_are_sorted multiple times concurrently") {
92  // Calling nrn_ensure_model_data_are_sorted() in multiple threads should
93  // succeed in all of them, but the underlying sort operations should be
94  // serialised.
95  constexpr auto num_threads = 7;
96  std::vector<std::thread> threads;
97  // Accumulate the tokens returned by nrn_ensure_model_data_are_sorted.
98  std::mutex token_mutex{};
99  std::vector<neuron::model_sorted_token> tokens;
100  // Make sure the data are not already sorted, otherwise we won't follow the complicated
101  // codepath
103  for (auto i = 0; i < num_threads; ++i) {
104  threads.emplace_back([&tokens, &token_mutex]() {
105  auto token = nrn_ensure_model_data_are_sorted();
106  std::unique_lock _{token_mutex};
107  tokens.push_back(token);
108  });
109  }
110  // Wait for all the threads to end
111  for (auto& thread: threads) {
112  thread.join();
113  }
114  REQUIRE(tokens.size() == num_threads);
115  }
116  REQUIRE(hoc_oc("delete_section()") == 0);
117  REQUIRE(neuron::model().node_data().size() == 0);
118  }
119 }
120 
121 TEST_CASE("soa::get_array_dims", "[Neuron][data_structures]") {
122  storage data;
123 
124  data.set_field_status<field::DOn>(true);
125  data.set_field_status<field::DOff>(false);
126 
127  auto c = field::C{};
128 
129  for (size_t field_index = 0; field_index < c.num_variables(); ++field_index) {
130  CHECK(data.template get_array_dims<field::C>()[field_index] ==
131  c.array_dimension(field_index));
132  CHECK(data.template get_array_dims<field::C>(field_index) ==
133  c.array_dimension(field_index));
134  }
135 
136  CHECK(data.template get_array_dims<field::DOff>(0) == 1ul);
137  CHECK(data.template get_array_dims<field::DOn>(0) == 1ul);
138 }
139 
140 TEST_CASE("soa::get_num_variables", "[Neuron][data_structures]") {
141  storage data;
142 
143  data.set_field_status<field::DOn>(true);
144  data.set_field_status<field::DOff>(false);
145 
146  auto c = field::C{};
147 
148  CHECK(data.get_num_variables<field::C>() == c.num_variables());
149  CHECK(data.get_num_variables<field::DOff>() == 1ul);
150  CHECK(data.get_num_variables<field::DOn>() == 1ul);
151 }
152 
153 TEST_CASE("defer delete storage pointer", "[Neuron][internal][data_structures]") {
154  REQUIRE(detail::defer_delete_storage != nullptr);
155 
156  auto usage_before = detail::compute_defer_delete_storage_size();
157  { storage data; }
158  auto usage_after = detail::compute_defer_delete_storage_size();
159 
160  CHECK(usage_after.size - usage_before.size > 0);
161  CHECK(usage_after.capacity > 0);
162  CHECK(usage_before.size <= usage_before.capacity);
163  CHECK(usage_after.size <= usage_after.capacity);
164 }
165 
/**
 * @brief Compute the number of heavy-data bytes one row contributes for `Tag`.
 *
 * Sums array_dimension * sizeof(Tag::type) over all of the tag's variables.
 * NOTE(review): the loop header below was elided from this listing and has
 * been reconstructed (it must iterate `field_index` over the tag's variables,
 * cf. the free function get_num_variables) -- confirm against upstream.
 */
template <class Tag, class Storage>
std::size_t compute_row_size(const Storage& data) {
    std::size_t local_size = 0ul;
    auto tag = data.template get_tag<Tag>();
    for (auto field_index = 0ul; field_index < get_num_variables(tag); ++field_index) {
        local_size += data.template get_array_dims<Tag>()[field_index] * sizeof(typename Tag::type);
    }

    return local_size;
}
176 
177 TEST_CASE("container memory usage", "[Neuron][internal][data_structures]") {
178  storage data;
179  data.set_field_status<field::DOn>(true);
180  data.set_field_status<field::DOff>(false);
181 
182  std::size_t row_size = compute_row_size<field::A>(data) + compute_row_size<field::B>(data) +
183  compute_row_size<field::C>(data) + compute_row_size<field::DOn>(data);
184 
185  auto r1 = owning_handle{data};
186  auto r2 = owning_handle{data};
187  auto r3 = owning_handle{data};
188 
189  auto n_rows = data.size();
190 
191  auto usage = memory_usage(data);
192 
193  CHECK(usage.heavy_data.size == row_size * n_rows);
194  CHECK(usage.heavy_data.size <= usage.heavy_data.capacity);
195 
196  CHECK(usage.stable_identifiers.size % n_rows == 0);
197  CHECK(usage.stable_identifiers.size >= n_rows * sizeof(std::size_t*));
198  CHECK(usage.stable_identifiers.size < n_rows * 4 * sizeof(std::size_t*));
199  CHECK(usage.stable_identifiers.size <= usage.stable_identifiers.capacity);
200 }
201 
202 TEST_CASE("model memory usage", "[Neuron][internal][data_structures]") {
203  auto& model = neuron::model();
204 
205  auto& nodes = model.node_data();
206  auto node1 = neuron::container::Node::owning_handle{nodes};
207 
208  auto& foo = model.add_mechanism(0,
209  "foo",
210  std::vector<neuron::container::Mechanism::Variable>{{"a", 1},
211  {"b", 2},
212  {"c", 1}});
215 
216  auto& bar = model.add_mechanism(1,
217  "bar",
218  std::vector<neuron::container::Mechanism::Variable>{{"a", 1}});
221 
223  CHECK(usage.nodes.heavy_data.size > 0);
224  CHECK(usage.nodes.heavy_data.size <= usage.nodes.heavy_data.capacity);
225  CHECK(usage.nodes.stable_identifiers.size > 0);
226  CHECK(usage.nodes.stable_identifiers.size <= usage.nodes.stable_identifiers.capacity);
227 
228  CHECK(usage.mechanisms.heavy_data.size > 0);
229  CHECK(usage.mechanisms.heavy_data.size <= usage.mechanisms.heavy_data.capacity);
230  CHECK(usage.mechanisms.stable_identifiers.size > 0);
231  CHECK(usage.mechanisms.stable_identifiers.size <= usage.mechanisms.stable_identifiers.capacity);
232 }
233 
234 TEST_CASE("cache::model memory_usage", "[Neuron][internal][data_structures]") {
235  auto& model = neuron::cache::model;
236 
237  // We can't manipulate `cache::Model`, hence there nothing to check other
238  // than the fact that it compiles and runs without throwing.
240 }
241 
242 TEST_CASE("format_memory", "[Neuron][internal]") {
243  size_t kb = 1e3;
244  size_t mb = 1e6;
245  size_t gb = 1e9;
246  size_t tb = 1e12;
247 
250  CHECK(neuron::container::format_memory(999) == " 999 ");
251  CHECK(neuron::container::format_memory(kb) == " 1.00 kB");
252  CHECK(neuron::container::format_memory(999 * kb) == "999.00 kB");
253  CHECK(neuron::container::format_memory(mb) == " 1.00 MB");
254  CHECK(neuron::container::format_memory(gb) == " 1.00 GB");
255  CHECK(neuron::container::format_memory(tb) == " 1.00 TB");
256 }
257 
259  auto model =
262  {4, 14}}};
263  auto cache_model = neuron::container::cache::ModelMemoryUsage{{5, 15}, {6, 16}};
264 
265  auto stable_pointers = neuron::container::VectorMemoryUsage(7, 17);
266  auto stable_identifiers = neuron::container::VectorMemoryUsage(8, 18);
267 
268  auto memory_usage = MemoryUsage{model, cache_model, stable_pointers};
269 
270  return memory_usage;
271 }
272 
273 
274 TEST_CASE("total memory usage", "[Neuron][internal][data_structures]") {
276  auto total = memory_usage.compute_total();
277  CHECK(total.size == (7 * 8) / 2);
278  CHECK(total.capacity == total.size + 7 * 10);
279 }
280 
281 TEST_CASE("memory usage summary", "[Neuron][data_structures]") {
282  auto usage = dummy_memory_usage();
283  auto summary = neuron::container::MemoryUsageSummary(usage);
284  auto total = usage.compute_total();
285 
286  size_t summary_total = summary.required + summary.convenient + summary.oversized +
287  summary.leaked;
288  CHECK(summary.required <= total.size);
289  CHECK(summary.convenient <= total.size);
290  CHECK(summary.leaked <= total.size);
291  CHECK(summary.oversized == total.capacity - total.size);
292  CHECK(summary_total == total.capacity);
293 }
#define data
Definition: md1redef.h:36
#define i
Definition: md1redef.h:19
int hoc_oc(const char *buf)
Definition: hoc.cpp:1314
static int c
Definition: hoc.cpp:169
std::optional< Model > model
Definition: container.cpp:59
handle_interface< owning_identifier< storage > > owning_handle
Owning handle to a Mechanism instance.
std::vector< void * > * defer_delete_storage
Defer deleting pointers to deallocated memory.
Definition: container.cpp:95
VectorMemoryUsage compute_defer_delete_storage_size()
size_t get_num_variables(T const &t)
std::string format_memory(size_t bytes)
Utility to format memory as a human readable string.
cache::ModelMemoryUsage memory_usage(const std::optional< neuron::cache::Model > &model)
Model & model()
Access the global Model instance.
Definition: model_data.hpp:206
neuron::model_sorted_token nrn_ensure_model_data_are_sorted()
Ensure neuron::container::* data are sorted.
Definition: treeset.cpp:2182
short type
Definition: cabvars.h:10
#define CHECK(name)
Definition: init.cpp:97
container::Mechanism::storage & add_mechanism(int type, Args &&... args)
Create a structure to hold the data of a new Mechanism.
Definition: model_data.hpp:60
container::Node::storage & node_data()
Access the structure containing the data of all Nodes.
Definition: model_data.hpp:24
Base class defining the public API of Mechanism handles.
Definition: mechanism.hpp:71
Overall SoA datastructures related memory usage.
Memory usage of a neuron::Model.
Owning handle to a Node.
Definition: node_data.hpp:31
Memory usage of a storage/soa container.
Size and capacity in bytes.
Memory usage of a neuron::cache::Model.
Struct used to index SoAoS data, such as array range variables.
Base class for neuron::container::soa<...> handles.
Definition: view_utils.hpp:34
Utility for generating SOA data structures.
void mark_as_unsorted()
Tell the container it is no longer sorted.
neuron::container::MemoryUsage dummy_memory_usage()
Definition: container.cpp:258
TEST_CASE("Tag type with array_dimension and without num_variables", "[Neuron][data_structures]")
Definition: container.cpp:77
std::size_t compute_row_size(const Storage &data)
Definition: container.cpp:167