@@ -102,15 +102,15 @@ __MCF_C_DECLARATIONS_BEGIN
 #define __MCF_atomic_load_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   INTEGER  \
-  _MCF_atomic_load_##WIDTH##_##ORDER(__MCF_zp_r __mem) __MCF_NOEXCEPT  \
+  _MCF_atomic_load_##WIDTH##_##ORDER(__MCF_zp_r __mem) __MCF_noexcept  \
     {  \
       return __MCF_atomic_load((const __MCF_atomic(INTEGER)*) __mem,  \
                  __MCF_memory_order_##ORDER);  \
     }  \
   \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_atomic_load_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp_r __mem) __MCF_NOEXCEPT  \
+  _MCF_atomic_load_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp_r __mem) __MCF_noexcept  \
     {  \
       *(INTEGER*) __res = __MCF_atomic_load((const __MCF_atomic(INTEGER)*) __mem,  \
                  __MCF_memory_order_##ORDER);  \
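Each expansion of __MCF_atomic_load_ emits two always-inline wrappers: a value form that returns the loaded INTEGER, and a _p form that writes it through __res instead. A minimal usage sketch, assuming the __MCF_atomic_load_(z, cst, size_t) instantiation named in the next hunk header, and assuming the header installs as <mcfgthread/atomic.h>:

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stddef.h>

static size_t s_counter;

size_t
read_counter(void)
  {
    /* seq_cst load of a size_t, via the wrapper generated by
     * __MCF_atomic_load_(z, cst, size_t)  */
    return _MCF_atomic_load_z_cst(&s_counter);
  }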
@@ -153,15 +153,15 @@ __MCF_atomic_load_(z, cst, size_t)
 #define __MCF_atomic_store_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_atomic_store_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_store_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_noexcept  \
     {  \
       __MCF_atomic_store((__MCF_atomic(INTEGER)*) __mem, __val,  \
                  __MCF_memory_order_##ORDER);  \
     }  \
   \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_atomic_store_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp_r __src) __MCF_NOEXCEPT  \
+  _MCF_atomic_store_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp_r __src) __MCF_noexcept  \
     {  \
       __MCF_atomic_store((__MCF_atomic(INTEGER)*) __mem, *(const INTEGER*) __src,  \
                  __MCF_memory_order_##ORDER);  \
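The store wrappers mirror the load wrappers: the value form takes the new value directly, while the _p form reads it through __src. A sketch using the __MCF_atomic_store_(z, cst, size_t) instantiation named in the next hunk header (same assumed include path):

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stddef.h>

static size_t s_counter;

void
reset_counter(void)
  {
    /* seq_cst store, via the wrapper generated by
     * __MCF_atomic_store_(z, cst, size_t)  */
    _MCF_atomic_store_z_cst(&s_counter, 0);
  }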
@@ -205,15 +205,15 @@ __MCF_atomic_store_(z, cst, size_t)
 #define __MCF_atomic_xchg_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   INTEGER  \
-  _MCF_atomic_xchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_xchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_xchg((__MCF_atomic(INTEGER)*) __mem, __val,  \
                  __MCF_memory_order_##ORDER);  \
     }  \
   \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_atomic_xchg_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp __mem, __MCF_zp_r __src) __MCF_NOEXCEPT  \
+  _MCF_atomic_xchg_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp __mem, __MCF_zp_r __src) __MCF_noexcept  \
     {  \
       *(INTEGER*) __res = __MCF_atomic_xchg((__MCF_atomic(INTEGER)*) __mem,  \
                  *(const INTEGER*) __src, __MCF_memory_order_##ORDER);  \
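__MCF_atomic_xchg_ wraps an atomic swap; the value form returns the previous contents, which makes a simple test-and-set possible. A sketch built on the __MCF_atomic_xchg_(z, cst, size_t) instantiation from the next hunk header:

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stdbool.h>
#include <stddef.h>

static size_t s_flag;

bool
try_acquire(void)
  {
    /* swap in 1; acquisition succeeded iff the old value was 0 */
    return _MCF_atomic_xchg_z_cst(&s_flag, 1) == 0;
  }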
@@ -271,7 +271,7 @@ __MCF_atomic_xchg_(z, cst, size_t)
 #define __MCF_atomic_cmpxchg_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   bool  \
-  _MCF_atomic_cmpxchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_cmpxchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_cmpxchg((__MCF_atomic(INTEGER)*) __mem, __cmp,  \
                  __val, __MCF_memory_order_##ORDER,  \
@@ -280,7 +280,7 @@ __MCF_atomic_xchg_(z, cst, size_t)
   \
   __MCF_ALWAYS_INLINE  \
   bool  \
-  _MCF_atomic_cmpxchg_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_cmpxchg_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_cmpxchg((__MCF_atomic(INTEGER)*) __mem, (INTEGER*) __cmp,  \
                  *(const INTEGER*) __val, __MCF_memory_order_##ORDER,  \
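These wrappers return bool and are assumed here to follow the usual compare-and-swap convention of GCC's __atomic_compare_exchange: on failure, the value actually observed is written back through the compare pointer. A sketch using the __MCF_atomic_cmpxchg_(z, cst, size_t) instantiation named in the following hunk header:

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stdbool.h>
#include <stddef.h>

static size_t s_owner;

bool
claim(size_t self)
  {
    size_t expected = 0;
    /* succeeds only if s_owner was 0; on failure `expected` is
     * updated to the current owner (assumed CAS semantics)  */
    return _MCF_atomic_cmpxchg_z_cst(&s_owner, &expected, self);
  }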
@@ -339,7 +339,7 @@ __MCF_atomic_cmpxchg_(z, cst, size_t)
 #define __MCF_atomic_cmpxchg_weak_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   bool  \
-  _MCF_atomic_cmpxchg_weak_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_cmpxchg_weak_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_cmpxchgw((__MCF_atomic(INTEGER)*) __mem, __cmp,  \
                  __val, __MCF_memory_order_##ORDER,  \
@@ -348,7 +348,7 @@ __MCF_atomic_cmpxchg_(z, cst, size_t)
   \
   __MCF_ALWAYS_INLINE  \
   bool  \
-  _MCF_atomic_cmpxchg_weak_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_cmpxchg_weak_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_cmpxchgw((__MCF_atomic(INTEGER)*) __mem, (INTEGER*) __cmp,  \
                  *(const INTEGER*) __val, __MCF_memory_order_##ORDER,  \
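The _weak_ variants forward to __MCF_atomic_cmpxchgw which, like C11 atomic_compare_exchange_weak, may fail spuriously and is therefore intended for retry loops. A sketch of such a loop, assuming the __MCF_atomic_cmpxchg_weak_(z, cst, size_t) instantiation from the following hunk header:

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stddef.h>

static size_t s_value;

size_t
bounded_increment(size_t limit)
  {
    size_t old = _MCF_atomic_load_z_cst(&s_value);
    /* a weak CAS may fail spuriously, so retry; on each failure
     * `old` is reloaded with the observed value  */
    while((old < limit) && !_MCF_atomic_cmpxchg_weak_z_cst(&s_value, &old, old + 1))
      { }
    return old;
  }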
@@ -400,7 +400,7 @@ __MCF_atomic_cmpxchg_weak_(z, cst, size_t)
 #define __MCF_atomic_xadd_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   INTEGER  \
-  _MCF_atomic_xadd_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_xadd_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_xadd((__MCF_atomic(INTEGER)*) __mem, __val,  \
                  __MCF_memory_order_##ORDER);  \
@@ -451,7 +451,7 @@ __MCF_atomic_xadd_(z, cst, size_t)
 #define __MCF_atomic_xsub_(WIDTH, ORDER, INTEGER)  \
   __MCF_ALWAYS_INLINE  \
   INTEGER  \
-  _MCF_atomic_xsub_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_NOEXCEPT  \
+  _MCF_atomic_xsub_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) __MCF_noexcept  \
     {  \
       return __MCF_atomic_xsub((__MCF_atomic(INTEGER)*) __mem, __val,  \
                  __MCF_memory_order_##ORDER);  \
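__MCF_atomic_xadd_ and __MCF_atomic_xsub_ wrap fetch-and-add and fetch-and-subtract; both return the value held before the operation. A reference-count sketch using the (z, cst, size_t) instantiations named in the surrounding hunk headers:

#include <mcfgthread/atomic.h>  /* assumed install path */
#include <stdbool.h>
#include <stddef.h>

static size_t s_refs = 1;

void
ref_up(void)
  {
    _MCF_atomic_xadd_z_cst(&s_refs, 1);  /* old count discarded */
  }

bool
ref_down(void)
  {
    /* the old count was 1 iff this release was the last one */
    return _MCF_atomic_xsub_z_cst(&s_refs, 1) == 1;
  }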
@@ -501,7 +501,7 @@ __MCF_atomic_xsub_(z, cst, size_t)
 #define __MCF_atomic_thread_fence_(ORDER)  \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_thread_fence_##ORDER(void) __MCF_NOEXCEPT  \
+  _MCF_thread_fence_##ORDER(void) __MCF_noexcept  \
     {  \
       __MCF_atomic_thread_fence(__MCF_memory_order_##ORDER);  \
     }
@@ -520,7 +520,7 @@ __MCF_atomic_thread_fence_(cst)
 #define __MCF_atomic_signal_fence_(ORDER)  \
   __MCF_ALWAYS_INLINE  \
   void  \
-  _MCF_signal_fence_##ORDER(void) __MCF_NOEXCEPT  \
+  _MCF_signal_fence_##ORDER(void) __MCF_noexcept  \
     {  \
       __MCF_atomic_signal_fence(__MCF_memory_order_##ORDER);  \
     }
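The fence wrappers take no object. _MCF_thread_fence_* emits a real memory barrier, while _MCF_signal_fence_* only restricts compiler reordering, the standard distinction between atomic_thread_fence and atomic_signal_fence in C11. A sketch using the cst order (the thread fence is instantiated as __MCF_atomic_thread_fence_(cst) per the hunk header above; a matching cst instantiation of the signal fence is assumed):

#include <mcfgthread/atomic.h>  /* assumed install path */

static volatile int s_pending;

void
on_signal(void)  /* runs in a signal handler on the same thread */
  {
    s_pending = 1;
    _MCF_signal_fence_cst();  /* compiler barrier only; no hardware fence */
  }

void
poll_pending(void)
  {
    _MCF_thread_fence_cst();  /* full barrier against other threads */
    if(s_pending)
      s_pending = 0;
  }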