libstdc++
shared_ptr_atomic.h
// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>

// Annotations for the custom locking in atomic<shared_ptr<T>>.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @{
   */
  /// @relates shared_ptr @{

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   *  @{
   */
  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>* __p)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}
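
  // Illustrative usage sketch, not part of the original header: these
  // overloads take the address of the shared_ptr object itself. With the
  // lock-based implementation above they report lock-free only when the
  // program is effectively single-threaded.
  //
  //   std::shared_ptr<int> sp = std::make_shared<int>(1);
  //   bool lf = std::atomic_is_lock_free(&sp); // typically false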

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be @c memory_order_release or
   *  @c memory_order_acq_rel.
   *  @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}
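
  // Illustrative usage sketch, not part of the original header: a reader
  // thread takes a consistent snapshot of a shared_ptr that a writer may
  // replace concurrently, without a data race. `Config` and `global_cfg`
  // are hypothetical names.
  //
  //   std::shared_ptr<Config> global_cfg;
  //   std::shared_ptr<Config> snapshot = std::atomic_load(&global_cfg);
  //   // snapshot keeps its Config alive even if global_cfg is replaced
  //   // (via the atomic_store overloads below) by another thread.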

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be @c memory_order_acquire or
   *  @c memory_order_acq_rel.
   *  @{
   */
  template<typename _Tp>
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}
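
  // Illustrative usage sketch, not part of the original header: the writer
  // side of the snapshot pattern shown after atomic_load above. Because the
  // implementation swaps rather than assigns, the old object is destroyed
  // only after the internal lock has been released. `global_cfg` and
  // `Config` are the same hypothetical names as before.
  //
  //   auto fresh = std::make_shared<Config>();
  //   std::atomic_store(&global_cfg, std::move(fresh));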

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in @c *__p.
   *  @return The original value of @c *__p
   *  @{
   */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }
  /// @}
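
  // Illustrative usage sketch, not part of the original header:
  // atomic_exchange installs a new value and hands back the old one, e.g.
  // to drain a shared slot exactly once. `global_cfg` is hypothetical.
  //
  //   std::shared_ptr<Config> old_cfg
  //     = std::atomic_exchange(&global_cfg, std::shared_ptr<Config>{});
  //   // old_cfg now owns whatever global_cfg held; global_cfg is empty.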

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w A non-null pointer to a shared_ptr object.
   *  @return True if @c *__p was equivalent to @c *__v, false otherwise.
   *
   *  The memory order for failure shall not be @c memory_order_release or
   *  @c memory_order_acq_rel, or stronger than the memory order for success.
   *  @{
   */
  template<typename _Tp>
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}
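
  // Illustrative usage sketch, not part of the original header: a retry
  // loop that prepends to a shared list head. The CAS compares both the
  // stored pointer and its ownership; on failure it writes the observed
  // value back into *expected, so the loop re-reads automatically. `Node`
  // and `head` are hypothetical names.
  //
  //   std::shared_ptr<Node> expected = std::atomic_load(&head);
  //   auto node = std::make_shared<Node>();
  //   do
  //     node->next = expected;
  //   while (!std::atomic_compare_exchange_weak(&head, &expected, node));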

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_shared_ptr 201711L
  template<typename _Tp>
    class atomic;

  template<typename _Up>
    static constexpr bool __is_shared_ptr = false;
  template<typename _Up>
    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend class atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<> or __weak_count<>
        using __count_type = decltype(_Tp::_M_refcount);

        // _Sp_counted_base<>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        constexpr _Atomic_count() noexcept = default;

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: Caller does not hold lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = _M_val.load(memory_order_relaxed);
            }

          _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = __current & ~_S_lock_bit;
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
            }
          _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          _M_val.fetch_sub(1, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          __x = _M_val.exchange(__x, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __cpp_lib_atomic_wait
        // Precondition: caller holds lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_one();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }

        void
        notify_all() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_all();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                           && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
              && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
        _M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_refcount.notify_all();
      }
#endif
    };
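
  // Illustrative sketch, not part of the original header: _Atomic_count
  // above packs a spinlock into the least significant bit of a
  // pointer-sized atomic word, which is safe because the static_assert
  // guarantees alignof(_Sp_counted_base) > 1. A minimal standalone version
  // of the same technique (all names here are hypothetical):
  //
  //   #include <atomic>
  //   #include <cstdint>
  //
  //   struct Node { long payload; };        // alignof(Node) > 1
  //
  //   std::atomic<std::uintptr_t> word{0};  // pointer value | lock bit
  //   constexpr std::uintptr_t lock_bit = 1;
  //
  //   Node* lock_and_load()                 // spin until LSB flips 0 -> 1
  //   {
  //     auto cur = word.load(std::memory_order_relaxed);
  //     for (;;)
  //       {
  //         cur &= ~lock_bit;               // expect the unlocked value
  //         if (word.compare_exchange_weak(cur, cur | lock_bit,
  //                                        std::memory_order_acquire,
  //                                        std::memory_order_relaxed))
  //           return reinterpret_cast<Node*>(cur);
  //       }
  //   }
  //
  //   void unlock()                         // clear the lock bit
  //   { word.fetch_sub(lock_bit, std::memory_order_release); }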

  template<typename _Tp>
    class atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
      void
      operator=(nullptr_t) noexcept
      { store(nullptr); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
          {
          case memory_order_acq_rel:
            __o2 = memory_order_acquire;
            break;
          case memory_order_release:
            __o2 = memory_order_relaxed;
            break;
          default:
            __o2 = __o;
          }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };
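
  // Illustrative usage sketch, not part of the original header: the C++20
  // class template replaces the free functions above and cannot be
  // accidentally mixed with non-atomic access to the same object. `slot`
  // is a hypothetical variable.
  //
  //   std::atomic<std::shared_ptr<int>> slot;
  //   slot.store(std::make_shared<int>(42));
  //   std::shared_ptr<int> seen = slot.load(); // ref-counted snapshot
  //   auto prev = slot.exchange(nullptr);      // drain the slot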

  template<typename _Tp>
    class atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
          {
          case memory_order_acq_rel:
            __o2 = memory_order_acquire;
            break;
          case memory_order_release:
            __o2 = memory_order_relaxed;
            break;
          default:
            __o2 = __o;
          }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
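
  // Illustrative usage sketch, not part of the original header:
  // atomic<weak_ptr<T>> suits a shared cache slot that must not keep its
  // object alive; readers lock() the loaded snapshot to obtain a
  // shared_ptr. `cache` and `use` are hypothetical names.
  //
  //   std::atomic<std::weak_ptr<int>> cache;
  //   if (std::shared_ptr<int> sp = cache.load().lock())
  //     use(*sp);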
#endif // C++20

  /// @} relates shared_ptr
  /// @} group pointer_abstractions

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H