xenium: hazard_pointer.hpp
//
// Copyright (c) 2018-2020 Manuel Pöter.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//

#ifndef HAZARD_POINTER_IMPL
#error "This is an impl file and must not be included directly!"
#endif

#include <xenium/aligned_object.hpp>
#include <xenium/detail/port.hpp>

#include <algorithm>
#include <cassert>
#include <new>
#include <vector>

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4324) // structure was padded due to alignment specifier
#endif

namespace xenium { namespace reclamation {

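  // guard_ptr lifecycle: constructing from (or acquiring) a non-null pointer allocates a
  // hazard pointer slot from the thread-local free list and publishes the protected object
  // in it; reset() returns the slot to the free list; reclaim() retires the guarded node
  // for deferred deletion. A rough usage sketch (the head variable is an illustrative
  // assumption, not part of this file):
  //
  //   guard_ptr guard;
  //   guard.acquire(head);   // publish a hazard pointer for the current value of head
  //   // ... the guarded node can now be dereferenced safely ...
  //   guard.reset();         // withdraw protection; the node may be reclaimed afterwards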
  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const MarkedPtr& p) :
    base(p),
    hp()
  {
    if (this->ptr.get() != nullptr)
    {
      hp = local_thread_data.alloc_hazard_pointer();
      hp->set_object(this->ptr.get());
    }
  }

  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(const guard_ptr& p) :
    guard_ptr(p.ptr)
  {}

  template <class Traits>
  template <class T, class MarkedPtr>
  hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::guard_ptr(guard_ptr&& p) noexcept :
    base(p.ptr),
    hp(p.hp)
  {
    p.ptr.reset();
    p.hp = nullptr;
  }

  template <class Traits>
  template <class T, class MarkedPtr>
  auto hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::operator=(const guard_ptr& p) noexcept
    -> guard_ptr&
  {
    if (&p == this)
      return *this;

    if (hp == nullptr)
      hp = local_thread_data.alloc_hazard_pointer();
    this->ptr = p.ptr;
    hp->set_object(this->ptr.get());
    return *this;
  }

  template <class Traits>
  template <class T, class MarkedPtr>
  auto hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::operator=(guard_ptr&& p) noexcept
    -> guard_ptr&
  {
    if (&p == this)
      return *this;

    reset();
    this->ptr = std::move(p.ptr);
    hp = p.hp;
    p.ptr.reset();
    p.hp = nullptr;
    return *this;
  }

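  // acquire() implements the classic hazard pointer protocol: load the pointer, publish it
  // in the hazard pointer slot, then re-load and compare; the loop repeats until the value
  // read after publishing matches the published one, so the object cannot have been
  // reclaimed in between.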
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::acquire(const concurrent_ptr<T>& p,
    std::memory_order order)
  {
    auto p1 = p.load(std::memory_order_relaxed);
    if (p1 == this->ptr)
      return;
    if (p1 != nullptr && hp == nullptr)
      hp = local_thread_data.alloc_hazard_pointer();
    auto p2 = p1;
    do
    {
      if (p2 == nullptr)
      {
        reset();
        return;
      }

      p1 = p2;
      hp->set_object(p1.get());
      // (1) - this load operation potentially synchronizes-with any release operation on p.
      p2 = p.load(order);
    } while (p1.get() != p2.get());

    this->ptr = p2;
  }

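  // acquire_if_equal() only publishes a hazard pointer if the current value matches
  // `expected`; otherwise it drops any existing protection and reports whether the
  // (unprotected) value happened to equal `expected`.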
  template <class Traits>
  template <class T, class MarkedPtr>
  bool hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::acquire_if_equal(
    const concurrent_ptr<T>& p,
    const MarkedPtr& expected,
    std::memory_order order)
  {
    auto p1 = p.load(std::memory_order_relaxed);
    if (p1 == nullptr || p1 != expected)
    {
      reset();
      return p1 == expected;
    }

    if (hp == nullptr)
      hp = local_thread_data.alloc_hazard_pointer();
    hp->set_object(p1.get());
    // (2) - this load operation potentially synchronizes-with any release operation on p.
    this->ptr = p.load(order);
    if (this->ptr != p1)
    {
      reset();
      return false;
    }
    return true;
  }

  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::reset() noexcept
  {
    local_thread_data.release_hazard_pointer(hp);
    this->ptr.reset();
  }

  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::do_swap(guard_ptr& g) noexcept
  {
    std::swap(hp, g.hp);
  }

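  // reclaim() hands the guarded node over to the thread-local retire list; once the number
  // of retired nodes reaches the threshold configured by the allocation strategy, a scan is
  // triggered to delete the nodes that are no longer protected.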
  template <class Traits>
  template <class T, class MarkedPtr>
  void hazard_pointer<Traits>::guard_ptr<T, MarkedPtr>::reclaim(Deleter d) noexcept
  {
    auto p = this->ptr.get();
    reset();
    p->set_deleter(std::move(d));
    if (local_thread_data.add_retired_node(p) >= allocation_strategy::retired_nodes_threshold())
      local_thread_data.scan();
  }

  namespace detail {
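    // Each thread owns a control block holding its hazard pointer slots. The basic block
    // stores a fixed array of Strategy::K slots; derived classes decide what happens when
    // that array is exhausted (fail in the static variant, allocate additional blocks in
    // the dynamic one). Free slots are handed out through an intrusive free list, see below.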
    template <class Strategy, class Derived>
    struct alignas(64) basic_hp_thread_control_block :
      detail::thread_block_list<Derived>::entry,
      aligned_object<basic_hp_thread_control_block<Strategy, Derived>>
    {
      struct hazard_pointer
      {
        void set_object(detail::deletable_object* obj)
        {
          // (3) - this release-store synchronizes-with the acquire-fence (9)
          value.store(reinterpret_cast<void**>(obj), std::memory_order_release);
          // This release is required because when acquire/acquire_if_equal is called on a
          // guard_ptr with an active hazard pointer, set_object is called without an
          // intermediate call to set_link, i.e., the protected pointer is updated. This
          // ensures the required happens-before relation between releasing a guard_ptr
          // to a node and reclamation of that node.

          // (4) - this seq_cst-fence enforces a total order with the seq_cst-fence (8)
          std::atomic_thread_fence(std::memory_order_seq_cst);
        }

        bool try_get_object(detail::deletable_object*& result) const
        {
          // TSan does not support explicit fences, so we cannot rely on the acquire-fence (9)
          // but have to perform an acquire-load here to avoid false positives.
          constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
          auto v = value.load(memory_order);
          if (v.mark() == 0)
          {
            result = reinterpret_cast<detail::deletable_object*>(v.get());
            return true;
          }
          return false; // value contains a link
        }

        void set_link(hazard_pointer* link)
        {
          // (5) - this release-store synchronizes-with the acquire-fence (9)
          value.store(marked_ptr<void*, 1>(reinterpret_cast<void**>(link), 1), std::memory_order_release);
        }
        hazard_pointer* get_link() const
        {
          assert(is_link());
          return reinterpret_cast<hazard_pointer*>(value.load(std::memory_order_relaxed).get());
        }

        bool is_link() const
        {
          return value.load(std::memory_order_relaxed).mark() != 0;
        }
      private:
        // Since we use the hazard pointer array to build our internal linked list of hazard pointers,
        // we set the LSB to signal that this is an internal pointer and not a pointer to a protected object.
        std::atomic<marked_ptr<void*, 1>> value;
      };

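      // The per-thread hazard pointer slots double as nodes of an intrusive free list: a free
      // slot stores a marked link to the next free slot, while an occupied slot stores the
      // protected object pointer with a clear mark bit. The hint handed out below always
      // points to the head of that free list.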
      using hint = hazard_pointer*;

      void initialize(hint& hint)
      {
        Strategy::number_of_active_hps.fetch_add(self().number_of_hps(), std::memory_order_relaxed);
        hint = initialize_block(self());
      }

      void abandon()
      {
        Strategy::number_of_active_hps.fetch_sub(self().number_of_hps(), std::memory_order_relaxed);
        detail::thread_block_list<Derived>::entry::abandon();
      }

      hazard_pointer* alloc_hazard_pointer(hint& hint)
      {
        auto result = hint;
        if (result == nullptr)
          result = self().need_more_hps();

        hint = result->get_link();
        return result;
      }

      void release_hazard_pointer(hazard_pointer*& hp, hint& hint)
      {
        if (hp != nullptr)
        {
          hp->set_link(hint);
          hint = hp;
          hp = nullptr;
        }
      }

    protected:
      Derived& self() { return static_cast<Derived&>(*this); }

      hazard_pointer* begin() { return &pointers[0]; }
      hazard_pointer* end() { return &pointers[Strategy::K]; }
      const hazard_pointer* begin() const { return &pointers[0]; }
      const hazard_pointer* end() const { return &pointers[Strategy::K]; }

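      // Links all slots of `block` into a singly linked free list; the last slot is chained
      // to whatever the derived class returns from initialize_next_block() (nullptr for the
      // static variant, the next dynamically allocated block otherwise).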
      template <typename T>
      static hazard_pointer* initialize_block(T& block)
      {
        auto begin = block.begin();
        auto end = block.end() - 1; // the last element is handled specially, so loop only over n-1 entries
        for (auto it = begin; it != end;)
        {
          auto next = it + 1;
          it->set_link(next);
          it = next;
        }
        end->set_link(block.initialize_next_block());
        return begin;
      }

      static void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs,
        const hazard_pointer* begin, const hazard_pointer* end)
      {
        for (auto it = begin; it != end; ++it)
        {
          detail::deletable_object* obj;
          if (it->try_get_object(obj))
            protected_ptrs.push_back(obj);
        }
      }

      hazard_pointer pointers[Strategy::K];
    };

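    // The static control block works with a fixed pool of Strategy::K hazard pointers per
    // thread and throws bad_hazard_pointer_alloc once that pool is exhausted; the dynamic
    // variant below grows the pool on demand by appending additional blocks.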
    template <class Strategy>
    struct static_hp_thread_control_block :
      basic_hp_thread_control_block<Strategy, static_hp_thread_control_block<Strategy>>
    {
      using base = basic_hp_thread_control_block<Strategy, static_hp_thread_control_block>;
      using hazard_pointer = typename base::hazard_pointer;
      friend base;

      void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs) const
      {
        base::gather_protected_pointers(protected_ptrs, this->begin(), this->end());
      }
    private:
      hazard_pointer* need_more_hps() { throw bad_hazard_pointer_alloc("hazard pointer pool exceeded"); }
      constexpr size_t number_of_hps() const { return Strategy::K; }
      constexpr hazard_pointer* initialize_next_block() const { return nullptr; }
    };

    template <class Strategy>
    struct dynamic_hp_thread_control_block :
      basic_hp_thread_control_block<Strategy, dynamic_hp_thread_control_block<Strategy>>
    {
      using base = basic_hp_thread_control_block<Strategy, dynamic_hp_thread_control_block>;
      using hazard_pointer = typename base::hazard_pointer;
      friend base;

      void gather_protected_pointers(std::vector<const detail::deletable_object*>& protected_ptrs) const
      {
        gather_protected_pointers(*this, protected_ptrs);
      }

    private:
      struct alignas(64) hazard_pointer_block : aligned_object<hazard_pointer_block>
      {
        hazard_pointer_block(size_t size) : size(size) {}

        hazard_pointer* begin() { return reinterpret_cast<hazard_pointer*>(this + 1); }
        hazard_pointer* end() { return begin() + size; }

        const hazard_pointer* begin() const { return reinterpret_cast<const hazard_pointer*>(this + 1); }
        const hazard_pointer* end() const { return begin() + size; }

        const hazard_pointer_block* next_block() const { return next; }
        hazard_pointer* initialize_next_block() { return next ? base::initialize_block(*next) : nullptr; }

        hazard_pointer_block* next = nullptr;
        const size_t size;
      };

      const hazard_pointer_block* next_block() const
      {
        // (6) - this acquire-load synchronizes-with the release-store (7)
        return hp_block.load(std::memory_order_acquire);
      }
      size_t number_of_hps() const { return total_number_of_hps; }
      hazard_pointer* need_more_hps() { return allocate_new_hazard_pointer_block(); }

      hazard_pointer* initialize_next_block()
      {
        auto block = hp_block.load(std::memory_order_relaxed);
        return block ? base::initialize_block(*block) : nullptr;
      }

      template <typename T>
      static void gather_protected_pointers(const T& block, std::vector<const detail::deletable_object*>& protected_ptrs)
      {
        base::gather_protected_pointers(protected_ptrs, block.begin(), block.end());

        auto next = block.next_block();
        if (next)
          gather_protected_pointers(*next, protected_ptrs);
      }

      static detail::deletable_object* as_internal_pointer(hazard_pointer* p)
      {
        // Since we use the hazard pointer array to build our internal linked list of hazard pointers,
        // we set the LSB to signal that this is an internal pointer and not a pointer to a protected object.
        auto marked = reinterpret_cast<size_t>(p) | 1;
        return reinterpret_cast<detail::deletable_object*>(marked);
      }

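      // Grows the pool by at least Strategy::K slots (half of the current total once the pool
      // is larger), placing the new hazard_pointer array directly behind the block header in a
      // single allocation, and publishes the block via a release-store so that scanning threads
      // can safely traverse the block list.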
      hazard_pointer* allocate_new_hazard_pointer_block()
      {
        size_t hps = std::max(static_cast<size_t>(Strategy::K), total_number_of_hps / 2);
        total_number_of_hps += hps;
        Strategy::number_of_active_hps.fetch_add(hps, std::memory_order_relaxed);

        size_t buffer_size = sizeof(hazard_pointer_block) + hps * sizeof(hazard_pointer);
        void* buffer = hazard_pointer_block::operator new(buffer_size);
        auto block = ::new(buffer) hazard_pointer_block(hps);
        auto result = this->initialize_block(*block);
        block->next = hp_block.load(std::memory_order_relaxed);
        // (7) - this release-store synchronizes-with the acquire-load (6)
        hp_block.store(block, std::memory_order_release);
        return result;
      }

      size_t total_number_of_hps = Strategy::K;
      std::atomic<hazard_pointer_block*> hp_block{nullptr};
    };
  }

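  // Per-thread bookkeeping: owns this thread's control block (the published hazard pointer
  // slots), the free-list hint, and the local list of retired nodes. On thread exit any
  // nodes that are still protected by other threads are abandoned to the global list so
  // another thread can eventually reclaim them.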
  template <class Traits>
  struct alignas(64) hazard_pointer<Traits>::thread_data : aligned_object<thread_data>
  {
    using HP = typename thread_control_block::hazard_pointer*;

    ~thread_data()
    {
      if (retire_list != nullptr)
      {
        scan();
        if (retire_list != nullptr)
          global_thread_block_list.abandon_retired_nodes(retire_list);
        retire_list = nullptr;
      }

      if (control_block != nullptr)
      {
        global_thread_block_list.release_entry(control_block);
        control_block = nullptr;
      }
    }

    HP alloc_hazard_pointer()
    {
      ensure_has_control_block();
      return control_block->alloc_hazard_pointer(hint);
    }

    void release_hazard_pointer(HP& hp)
    {
      control_block->release_hazard_pointer(hp, hint);
    }

    std::size_t add_retired_node(detail::deletable_object* p)
    {
      p->next = retire_list;
      retire_list = p;
      return ++number_of_retired_nodes;
    }

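    // The scan phase of the hazard pointer scheme: collect every pointer currently announced
    // by any thread, sort them, and then reclaim all locally retired nodes that are not
    // contained in that set; still-protected nodes simply go back on the retire list.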
    void scan()
    {
      std::vector<const detail::deletable_object*> protected_pointers;
      protected_pointers.reserve(allocation_strategy::number_of_active_hazard_pointers());

      // (8) - this seq_cst-fence enforces a total order with the seq_cst-fence (4)
      std::atomic_thread_fence(std::memory_order_seq_cst);

      auto adopted_nodes = global_thread_block_list.adopt_abandoned_retired_nodes();

      std::for_each(global_thread_block_list.begin(), global_thread_block_list.end(),
        [&protected_pointers](const auto& entry)
        {
          // TSan does not support explicit fences, so we cannot rely on the acquire-fence (9)
          // but have to perform an acquire-load here to avoid false positives.
          constexpr auto memory_order = TSAN_MEMORY_ORDER(std::memory_order_acquire, std::memory_order_relaxed);
          if (entry.is_active(memory_order))
            entry.gather_protected_pointers(protected_pointers);
        });

      // (9) - this acquire-fence synchronizes-with the release-store (3, 5)
      std::atomic_thread_fence(std::memory_order_acquire);

      std::sort(protected_pointers.begin(), protected_pointers.end());

      auto list = retire_list;
      retire_list = nullptr;
      number_of_retired_nodes = 0;
      reclaim_nodes(list, protected_pointers);
      reclaim_nodes(adopted_nodes, protected_pointers);
    }

  private:
    void ensure_has_control_block()
    {
      if (control_block == nullptr)
      {
        control_block = global_thread_block_list.acquire_entry();
        control_block->initialize(hint);
      }
    }

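    // Walks a retire list and deletes every node whose address is not found (via binary
    // search) in the sorted set of protected pointers; protected nodes are re-queued for a
    // later scan.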
    void reclaim_nodes(detail::deletable_object* list,
      const std::vector<const detail::deletable_object*>& protected_pointers)
    {
      while (list != nullptr)
      {
        auto cur = list;
        list = list->next;

        if (std::binary_search(protected_pointers.begin(), protected_pointers.end(), cur))
          add_retired_node(cur);
        else
          cur->delete_self();
      }
    }

    detail::deletable_object* retire_list = nullptr;
    std::size_t number_of_retired_nodes = 0;
    typename thread_control_block::hint hint{};

    thread_control_block* control_block = nullptr;

    friend class hazard_pointer;
    ALLOCATION_COUNTER(hazard_pointer);
  };

#ifdef TRACK_ALLOCATIONS
  template <class Traits>
  inline void hazard_pointer<Traits>::count_allocation()
  { local_thread_data.allocation_counter.count_allocation(); }

  template <class Traits>
  inline void hazard_pointer<Traits>::count_reclamation()
  { local_thread_data.allocation_counter.count_reclamation(); }
#endif
}}

#ifdef _MSC_VER
#pragma warning(pop)
#endif