<cxx-clause id="atomic">
<h1>Atomic Smart Pointers</h1>
<cxx-section id="atomic.smartptr.general">
<h1>General</h1>
<p>
This section provides alternatives to raw pointers for thread-safe atomic
pointer operations, and defines the <code>atomic_shared_ptr</code> and
<code>atomic_weak_ptr</code> class templates.
</p>
<p>
The class templates <code>atomic_shared_ptr<T></code> and
<code>atomic_weak_ptr<T></code> have the
corresponding non-atomic types <code>shared_ptr<T></code> and
<code>weak_ptr<T></code>. The template parameter <code>T</code> of
these class templates may be an incomplete type.
</p>
<p>
The behavior of all operations is as specified in
<cxx-ref in="cxx" to="atomics.types.operations.req"></cxx-ref>,
unless stated otherwise.
</p>
<p> All atomic operations in this section perform the indicated
<code>shared_ptr</code> operations. All changes to the atomic smart pointer
itself, and all associated <code>use_count</code> increments, are guaranteed to
be performed atomically. Associated <code>use_count</code> decrements shall be
sequenced after the atomic operation, but are not required to be part of it. Any
associated deletion and deallocation are sequenced after the atomic update step
and shall not be part of the atomic operation.
<cxx-note>If the atomic operation uses locks, locks acquired by the
implementation shall be held when any <code>use_count</code> increments are
performed, and shall not be held when any destruction or deallocation resulting
from this is performed. </cxx-note> </p>
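<p> <cxx-note> The following sketch is illustrative only and not part of this
specification; it assumes the <experimental/atomic> header from the synopsis
below and shows one thread publishing a value via <code>store</code> while
another observes it via <code>load</code>. </cxx-note> </p>
<pre><code>
#include <experimental/atomic>
#include <memory>
#include <thread>

struct Config { int verbosity = 0; };            // illustrative type

std::experimental::atomic_shared_ptr<Config> current_config;

void writer() {
  auto c = std::make_shared<Config>();
  c->verbosity = 2;
  // The store, including the use_count bookkeeping for the new value, is
  // atomic; destruction of any replaced Config is sequenced after the
  // atomic update step.
  current_config.store(std::move(c));
}

void reader() {
  // load returns a shared_ptr that shares ownership with the stored value,
  // so the pointee cannot be destroyed while this copy is alive.
  if (std::shared_ptr<Config> cfg = current_config.load())
    (void)cfg->verbosity;
}

int main() {
  std::thread t1(writer), t2(reader);
  t1.join();
  t2.join();
}
</code></pre>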
<p> Compare_exchange operations shall atomically perform a comparison and a
<code>shared_ptr</code> move or copy operation using the appropriate
<code>shared_ptr</code> operations, as in <cxx-ref in="cxx"
to="util.smartptr.shared.atomic"></cxx-ref>. If the <code>desired</code> value
is passed by rvalue reference then it shall be moved from only if the
compare_exchange operation returns <code>true</code>. </p>
<p> If the compare_exchange operation returns <code>true</code>,
<code>expected</code> is not accessed after the atomic update. If it returns
<code>false</code>, <code>expected</code> is updated with the existing value
read from the <code>atomic_shared_ptr</code> object in the attempted atomic
update. The count update corresponding to the write to <code>expected</code> is
part of the atomic operation. The write to <code>expected</code> itself is not
required to be part of the atomic operation. </p>
<p> If the <code>desired</code> value is passed by lvalue reference then it
is not accessed after the atomic update step. </p>
<p> If the <code>desired</code> value is passed by rvalue reference and the
compare_exchange operation returns <code>true</code>, then <code>desired</code>
is moved-from after the atomic update step. </p>
</cxx-section>
<cxx-section id="atomic.smartptr.synop">
<h1>Header <experimental/atomic> synopsis</h1>
<pre><code>
#include <atomic>
namespace std {
namespace experimental {
inline namespace concurrency_v1 {
template <class T> struct atomic_shared_ptr;
template <class T> struct atomic_weak_ptr;
} // namespace concurrency_v1
} // namespace experimental
} // namespace std
</code></pre>
</cxx-section>
<cxx-section id="atomic.shared_ptr">
<h1>Class template <code>atomic_shared_ptr</code></h1>
<pre><code>
namespace std {
namespace experimental {
inline namespace concurrency_v1 {
template <class T> struct atomic_shared_ptr {
bool is_lock_free() const noexcept;
void store(shared_ptr<T>, memory_order = memory_order_seq_cst) noexcept;
shared_ptr<T> load(memory_order = memory_order_seq_cst) const noexcept;
operator shared_ptr<T>() const noexcept;
shared_ptr<T> exchange(shared_ptr<T>,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_weak(shared_ptr<T>&, const shared_ptr<T>&,
memory_order, memory_order) noexcept;
bool compare_exchange_weak(shared_ptr<T>&, shared_ptr<T>&&,
memory_order, memory_order) noexcept;
bool compare_exchange_weak(shared_ptr<T>&, const shared_ptr<T>&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_weak(shared_ptr<T>&, shared_ptr<T>&&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_strong(shared_ptr<T>&, const shared_ptr<T>&,
memory_order, memory_order) noexcept;
bool compare_exchange_strong(shared_ptr<T>&, shared_ptr<T>&&,
memory_order, memory_order) noexcept;
bool compare_exchange_strong(shared_ptr<T>&, const shared_ptr<T>&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_strong(shared_ptr<T>&, shared_ptr<T>&&,
memory_order = memory_order_seq_cst) noexcept;
constexpr atomic_shared_ptr() noexcept = default;
atomic_shared_ptr(shared_ptr<T>) noexcept;
atomic_shared_ptr(const atomic_shared_ptr&) = delete;
void operator=(const atomic_shared_ptr&) = delete;
void operator=(shared_ptr<T>) noexcept;
};
} // namespace concurrency_v1
} // namespace experimental
} // namespace std</code></pre>
<br>
<cxx-function>
<cxx-signature><code>constexpr</code> atomic_shared_ptr() noexcept = default;</cxx-signature>
<cxx-effects>Initializes the atomic object to an empty value.</cxx-effects>
</cxx-function>
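<p> <cxx-note> A brief illustrative sketch of the default constructor and the
other members of the synopsis above; the assertions reflect the intended
observable behavior and are not normative wording. </cxx-note> </p>
<pre><code>
#include <experimental/atomic>
#include <cassert>
#include <memory>

int main() {
  std::experimental::atomic_shared_ptr<int> p;   // default-constructed: empty
  assert(p.load() == nullptr);

  p.store(std::make_shared<int>(42));            // atomic replacement
  std::shared_ptr<int> seen = p;                 // conversion operator, as if by load()
  assert(seen && *seen == 42);

  std::shared_ptr<int> prev = p.exchange(std::make_shared<int>(7));
  assert(prev && *prev == 42 && *p.load() == 7);
}
</code></pre>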
</cxx-section>
<cxx-section id="atomic.weak_ptr">
<h1>Class template <code>atomic_weak_ptr</code></h1>
<pre><code>
namespace std {
namespace experimental {
inline namespace concurrency_v1 {
template <class T> struct atomic_weak_ptr {
bool is_lock_free() const noexcept;
void store(weak_ptr<T>, memory_order = memory_order_seq_cst) noexcept;
weak_ptr<T> load(memory_order = memory_order_seq_cst) const noexcept;
operator weak_ptr<T>() const noexcept;
weak_ptr<T> exchange(weak_ptr<T>,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_weak(weak_ptr<T>&, const weak_ptr<T>&,
memory_order, memory_order) noexcept;
bool compare_exchange_weak(weak_ptr<T>&, weak_ptr<T>&&,
memory_order, memory_order) noexcept;
bool compare_exchange_weak(weak_ptr<T>&, const weak_ptr<T>&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_weak(weak_ptr<T>&, weak_ptr<T>&&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_strong(weak_ptr<T>&, const weak_ptr<T>&,
memory_order, memory_order) noexcept;
bool compare_exchange_strong(weak_ptr<T>&, weak_ptr<T>&&,
memory_order, memory_order) noexcept;
bool compare_exchange_strong(weak_ptr<T>&, const weak_ptr<T>&,
memory_order = memory_order_seq_cst) noexcept;
bool compare_exchange_strong(weak_ptr<T>&, weak_ptr<T>&&,
memory_order = memory_order_seq_cst) noexcept;
constexpr atomic_weak_ptr() noexcept = default;
atomic_weak_ptr(weak_ptr<T>) noexcept;
atomic_weak_ptr(const atomic_weak_ptr&) = delete;
void operator=(const atomic_weak_ptr&) = delete;
void operator=(weak_ptr<T>) noexcept;
};
} // namespace concurrency_v1
} // namespace experimental
} // namespace std
</code></pre>
<br>
<cxx-function>
<cxx-signature><code>constexpr</code> atomic_weak_ptr() noexcept = default;</cxx-signature>
<cxx-effects>Initializes the atomic object to an empty value.</cxx-effects>
</cxx-function>
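<p> <cxx-note> An illustrative sketch only; the <code>Session</code> type and
function names are assumptions made for exposition. An
<code>atomic_weak_ptr</code> lets threads share a non-owning reference that is
promoted to a <code>shared_ptr</code> with <code>lock()</code> only while the
object is still alive. </cxx-note> </p>
<pre><code>
#include <experimental/atomic>
#include <memory>

struct Session {};                               // illustrative type

std::experimental::atomic_weak_ptr<Session> last_session;

void publish(const std::shared_ptr<Session>& s) {
  // store accepts a weak_ptr<Session>; it does not extend the lifetime of *s.
  last_session.store(std::weak_ptr<Session>(s));
}

std::shared_ptr<Session> try_get() {
  // load returns a copy of the stored weak_ptr; lock() yields a shared_ptr
  // only if the Session has not yet been destroyed.
  return last_session.load().lock();
}
</code></pre>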
<cxx-note>
These types replace all known uses of the functions
in <cxx-ref in="cxx" to="util.smartptr.shared.atomic"></cxx-ref>.
</cxx-note>
</cxx-section>