// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Generic `Atomic<T>` wrapper type
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This library defines a generic atomic wrapper type `Atomic<T>` for all
//! `T: NoUninit` types.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! The `NoUninit` bound is from the [bytemuck] crate, and indicates that a
//! type has no internal padding bytes. You will need to derive or implement
//! this trait for all types used with `Atomic<T>`.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the `const fn` constructors. Atomic statics are often used for lazy global
//! initialization.
//!
//! [bytemuck]: https://docs.rs/bytemuck
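//!
//! # Example
//!
//! A minimal usage sketch (illustrative; it assumes bytemuck's `derive`
//! feature is enabled so that `NoUninit` can be derived):
//!
//! ```rust
//! use atomic::{Atomic, Ordering};
//! use bytemuck::NoUninit;
//!
//! // A `repr(C)` struct of two `u32` fields has no padding bytes, so
//! // `NoUninit` can be derived and the type can be used with `Atomic<T>`.
//! #[derive(Copy, Clone, NoUninit)]
//! #[repr(C)]
//! struct Point {
//!     x: u32,
//!     y: u32,
//! }
//!
//! let p = Atomic::new(Point { x: 1, y: 2 });
//! p.store(Point { x: 3, y: 4 }, Ordering::SeqCst);
//! assert_eq!(p.load(Ordering::SeqCst).x, 3);
//! ```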

#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![no_std]
#![cfg_attr(feature = "nightly", feature(integer_atomics))]

#[cfg(any(test, feature = "std"))]
#[macro_use]
extern crate std;

use core::mem::MaybeUninit;
// Re-export some useful definitions from libcore
pub use core::sync::atomic::{fence, Ordering};

use core::cell::UnsafeCell;
use core::fmt;

#[cfg(feature = "std")]
use std::panic::RefUnwindSafe;

use bytemuck::NoUninit;

#[cfg(feature = "fallback")]
mod fallback;
mod ops;

/// A generic atomic wrapper type which allows an object to be safely shared
/// between threads.
#[repr(transparent)]
pub struct Atomic<T> {
    // The MaybeUninit is here to work around rust-lang/rust#87341.
    v: UnsafeCell<MaybeUninit<T>>,
}

// Atomic<T> is only Sync if T is Send
unsafe impl<T: Copy + Send> Sync for Atomic<T> {}

// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
// RefUnwindSafe.
//
// This is trivially correct for native lock-free atomic types. For those whose
// atomicity is emulated using a spinlock, it is still correct because the
// `Atomic` API does not allow doing any panic-inducing operation after writing
// to the target object.
#[cfg(feature = "std")]
impl<T: RefUnwindSafe> RefUnwindSafe for Atomic<T> {}

impl<T: Default> Default for Atomic<T> {
    #[inline]
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T: NoUninit + fmt::Debug> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Atomic")
            .field(&self.load(Ordering::Relaxed))
            .finish()
    }
}

impl<T> Atomic<T> {
    /// Creates a new `Atomic`.
    #[inline]
    pub const fn new(v: T) -> Atomic<T> {
        Atomic {
            v: UnsafeCell::new(MaybeUninit::new(v)),
        }
    }

    /// Checks if `Atomic` objects of this type are lock-free.
    ///
    /// If an `Atomic` is not lock-free then it may be implemented using locks
    /// internally, which makes it unsuitable for some situations (such as
    /// communicating with a signal handler).
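    ///
    /// # Examples
    ///
    /// Whether a given `T` is lock-free depends on the target platform and on
    /// the size and alignment of `T`:
    ///
    /// ```rust
    /// use atomic::Atomic;
    ///
    /// if Atomic::<u32>::is_lock_free() {
    ///     println!("Atomic<u32> is lock-free on this target");
    /// }
    /// ```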
    #[inline]
    pub const fn is_lock_free() -> bool {
        ops::atomic_is_lock_free::<T>()
    }
}

impl<T: NoUninit> Atomic<T> {
    #[inline]
    fn inner_ptr(&self) -> *mut T {
        self.v.get() as *mut T
    }

    /// Returns a mutable reference to the underlying type.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
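    ///
    /// # Examples
    ///
    /// A single-threaded sketch:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let mut some_var = Atomic::new(10);
    /// *some_var.get_mut() = 5;
    /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
    /// ```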
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.inner_ptr() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
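    ///
    /// # Examples
    ///
    /// For example, unwrapping the value once no other references exist:
    ///
    /// ```rust
    /// use atomic::Atomic;
    ///
    /// let some_var = Atomic::new(5);
    /// assert_eq!(some_var.into_inner(), 5);
    /// ```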
    #[inline]
    pub fn into_inner(self) -> T {
        unsafe { self.v.into_inner().assume_init() }
    }

    /// Loads a value from the `Atomic`.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
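    ///
    /// # Examples
    ///
    /// A basic load:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let some_var = Atomic::new(5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
    /// ```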
    #[inline]
    pub fn load(&self, order: Ordering) -> T {
        unsafe { ops::atomic_load(self.inner_ptr(), order) }
    }

    /// Stores a value into the `Atomic`.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
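    ///
    /// # Examples
    ///
    /// A basic store followed by a load:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let some_var = Atomic::new(5);
    /// some_var.store(10, Ordering::Relaxed);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```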
    #[inline]
    pub fn store(&self, val: T, order: Ordering) {
        unsafe {
            ops::atomic_store(self.inner_ptr(), val, order);
        }
    }

    /// Stores a value into the `Atomic`, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
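    ///
    /// # Examples
    ///
    /// Swapping returns the previously stored value:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let some_var = Atomic::new(5);
    /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```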
    #[inline]
    pub fn swap(&self, val: T, order: Ordering) -> T {
        unsafe { ops::atomic_swap(self.inner_ptr(), val, order) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering
    /// when the operation fails. The failure ordering can't be `Release` or
    /// `AcqRel` and must be equivalent to or weaker than the success ordering.
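    ///
    /// # Examples
    ///
    /// A single-threaded sketch of both the success and failure cases:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let some_var = Atomic::new(5);
    ///
    /// assert_eq!(
    ///     some_var.compare_exchange(5, 10, Ordering::Acquire, Ordering::Relaxed),
    ///     Ok(5)
    /// );
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(
    ///     some_var.compare_exchange(6, 12, Ordering::SeqCst, Ordering::Acquire),
    ///     Err(10)
    /// );
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```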
    #[inline]
    pub fn compare_exchange(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) }
    }

    /// Stores a value into the `Atomic` if the current value is the same as the
    /// `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail
    /// even when the comparison succeeds, which can result in more efficient
    /// code on some platforms. The return value is a result indicating whether
    /// the new value was written and containing the previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the
    /// success ordering.
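    ///
    /// # Examples
    ///
    /// A sketch of the typical retry loop built around `compare_exchange_weak`:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let val = Atomic::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// assert_eq!(val.load(Ordering::Relaxed), 8);
    /// ```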
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe {
            ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure)
        }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other threads in
    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
    /// only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
    /// The first describes the required ordering for when the operation finally succeeds while the second
    /// describes the required ordering for loads. These correspond to the success and failure orderings of
    /// [`compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
    /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
    ///
    /// # Examples
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let x = Atomic::new(7);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
    /// assert_eq!(x.load(Ordering::SeqCst), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

impl Atomic<bool> {
    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
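    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let foo = Atomic::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```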
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
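    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let foo = Atomic::new(false);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```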
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument
    /// `val`, and sets the new value to the result.
    ///
    /// Returns the previous value.
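    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```rust
    /// use atomic::{Atomic, Ordering};
    ///
    /// let foo = Atomic::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```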
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
    }
}

macro_rules! atomic_ops_common {
    ($($t:ty)*) => ($(
        impl Atomic<$t> {
            /// Add to the current value, returning the previous value.
            #[inline]
            pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_add(self.inner_ptr(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            #[inline]
            pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_sub(self.inner_ptr(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            #[inline]
            pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            #[inline]
            pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            #[inline]
            pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
            }
        }
    )*);
}
macro_rules! atomic_ops_signed {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value, returning the previous value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_min(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value, returning the previous value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_max(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
macro_rules! atomic_ops_unsigned {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Minimum with the current value, returning the previous value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umin(self.inner_ptr(), val, order) }
                }

                /// Maximum with the current value, returning the previous value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umax(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
atomic_ops_signed! { i8 i16 i32 i64 isize i128 }
atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 }

#[cfg(feature = "serde")]
mod serde_impl;

#[cfg(test)]
mod tests {
    use super::{Atomic, Ordering::*};
    use bytemuck::NoUninit;
    use core::mem;

    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Foo(u8, u8);

    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Bar(u64, u64);

    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Quux(u32);

    #[cfg(feature = "serde")]
    fn assert_serde<T>(atomic: &Atomic<T>, value: T)
    where
        T: NoUninit
            + PartialEq
            + std::fmt::Debug
            + for<'a> serde::Deserialize<'a>
            + serde::Serialize,
    {
        let s = serde_json::to_string(atomic).unwrap();
        assert_eq!(s, serde_json::to_string(&value).unwrap());

        let x: Atomic<T> = serde_json::from_str(&s).unwrap();
        assert_eq!(x.load(SeqCst), value);
    }

    #[test]
    fn atomic_bool() {
        let a = Atomic::new(false);
        assert_eq!(
            Atomic::<bool>::is_lock_free(),
            cfg!(target_has_atomic = "8"),
        );
        assert_eq!(format!("{:?}", a), "Atomic(false)");
        assert_eq!(a.load(SeqCst), false);
        a.store(true, SeqCst);
        assert_eq!(a.swap(false, SeqCst), true);
        assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
        assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
        assert_eq!(a.fetch_and(false, SeqCst), true);
        assert_eq!(a.fetch_or(true, SeqCst), false);
        assert_eq!(a.fetch_xor(false, SeqCst), true);
        assert_eq!(a.load(SeqCst), true);

        #[cfg(feature = "serde")]
        assert_serde(&a, true);
    }

    #[test]
    fn atomic_i8() {
        let a = Atomic::new(0i8);
        assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        // Make sure overflows are handled correctly
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), -74);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i16() {
        let a = Atomic::new(0i16);
        assert_eq!(
            Atomic::<i16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i32() {
        let a = Atomic::new(0i32);
        assert_eq!(
            Atomic::<i32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i64() {
        let a = Atomic::new(0i64);
        assert_eq!(
            Atomic::<i64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<i64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i128() {
        let a = Atomic::new(0i128);
        assert_eq!(
            Atomic::<i128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_isize() {
        let a = Atomic::new(0isize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u8() {
        let a = Atomic::new(0u8);
        assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u16() {
        let a = Atomic::new(0u16);
        assert_eq!(
            Atomic::<u16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u32() {
        let a = Atomic::new(0u32);
        assert_eq!(
            Atomic::<u32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u64() {
        let a = Atomic::new(0u64);
        assert_eq!(
            Atomic::<u64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<u64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u128() {
        let a = Atomic::new(0u128);
        assert_eq!(
            Atomic::<u128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_usize() {
        let a = Atomic::new(0usize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_foo() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Foo>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
        assert_eq!(a.load(SeqCst), Foo(0, 0));
        a.store(Foo(1, 1), SeqCst);
        assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
        assert_eq!(
            a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
            Err(Foo(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
            Ok(Foo(2, 2))
        );
        assert_eq!(a.load(SeqCst), Foo(3, 3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Foo(3, 3));
    }

    #[test]
    fn atomic_bar() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Bar>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
        assert_eq!(a.load(SeqCst), Bar(0, 0));
        a.store(Bar(1, 1), SeqCst);
        assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
        assert_eq!(
            a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
            Err(Bar(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
            Ok(Bar(2, 2))
        );
        assert_eq!(a.load(SeqCst), Bar(3, 3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Bar(3, 3));
    }

    #[test]
    fn atomic_quux() {
        let a = Atomic::default();
        assert_eq!(
            Atomic::<Quux>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
        assert_eq!(a.load(SeqCst), Quux(0));
        a.store(Quux(1), SeqCst);
        assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
        assert_eq!(
            a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
            Err(Quux(2))
        );
        assert_eq!(
            a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
            Ok(Quux(2))
        );
        assert_eq!(a.load(SeqCst), Quux(3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Quux(3));
    }
}