1#![warn(missing_docs)]
40#![warn(rust_2018_idioms)]
41#![no_std]
42#![cfg_attr(feature = "nightly", feature(integer_atomics))]
43
44#[cfg(any(test, feature = "std"))]
45#[macro_use]
46extern crate std;
47
48use core::mem::MaybeUninit;
49pub use core::sync::atomic::{fence, Ordering};
51
52use core::cell::UnsafeCell;
53use core::fmt;
54
55#[cfg(feature = "std")]
56use std::panic::RefUnwindSafe;
57
58use bytemuck::NoUninit;
59
60#[cfg(feature = "fallback")]
61mod fallback;
62mod ops;
63
/// A generic atomic wrapper type, providing atomic access to a value of type
/// `T` in the spirit of the type-specific atomics in `core::sync::atomic`.
///
/// All operations are delegated to the `ops` module (which may fall back to a
/// non-native implementation when the `fallback` feature is enabled).
#[repr(transparent)]
pub struct Atomic<T> {
    // Stored as `MaybeUninit<T>` inside an `UnsafeCell` so the value can be
    // mutated through a shared reference; `MaybeUninit` accounts for any
    // padding bytes, which may be uninitialized.
    v: UnsafeCell<MaybeUninit<T>>,
}
71
// SAFETY: `T: Send` is required because the value can be moved between
// threads (e.g. via `swap`); shared-reference mutation only happens through
// the atomic operations provided by the `ops` backend, which is what makes
// sharing the inner `UnsafeCell` sound. `T: Copy` keeps the stored value
// trivially duplicable by those byte-level operations.
unsafe impl<T: Copy + Send> Sync for Atomic<T> {}
74
// `UnsafeCell` is not `RefUnwindSafe` by default; opt back in here, mirroring
// what the standard atomics do. NOTE(review): this is sound only if no
// operation can leave the cell in a torn state across a panic — the `ops`
// backend appears to uphold this, confirm there.
#[cfg(feature = "std")]
impl<T: RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
83
84impl<T: Default> Default for Atomic<T> {
85 #[inline]
86 fn default() -> Self {
87 Self::new(Default::default())
88 }
89}
90
91impl<T: NoUninit + fmt::Debug> fmt::Debug for Atomic<T> {
92 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
93 f.debug_tuple("Atomic")
94 .field(&self.load(Ordering::Relaxed))
95 .finish()
96 }
97}
98
99impl<T> Atomic<T> {
100 #[inline]
102 pub const fn new(v: T) -> Atomic<T> {
103 Atomic {
104 v: UnsafeCell::new(MaybeUninit::new(v)),
105 }
106 }
107
108 #[inline]
114 pub const fn is_lock_free() -> bool {
115 ops::atomic_is_lock_free::<T>()
116 }
117}
118
impl<T: NoUninit> Atomic<T> {
    /// Returns a raw pointer to the stored value.
    ///
    /// The cast from `*mut MaybeUninit<T>` to `*mut T` is layout-compatible
    /// (`MaybeUninit<T>` is `repr(transparent)` over `T`), and `T: NoUninit`
    /// guarantees every byte of the value is initialized.
    #[inline]
    fn inner_ptr(&self) -> *mut T {
        self.v.get() as *mut T
    }

    /// Returns a mutable reference to the underlying value.
    ///
    /// Safe because the `&mut self` borrow statically guarantees that no
    /// other thread is concurrently accessing the atomic.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.inner_ptr() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// Safe because taking `self` by value guarantees exclusive access.
    #[inline]
    pub fn into_inner(self) -> T {
        unsafe { self.v.into_inner().assume_init() }
    }

    /// Loads a value from the atomic with the given memory ordering.
    ///
    /// NOTE(review): ordering validity (e.g. whether `Release`/`AcqRel`
    /// panic here, as they do for the `core` atomics) is delegated to the
    /// `ops` backend — confirm there.
    #[inline]
    pub fn load(&self, order: Ordering) -> T {
        unsafe { ops::atomic_load(self.inner_ptr(), order) }
    }

    /// Stores `val` into the atomic with the given memory ordering.
    #[inline]
    pub fn store(&self, val: T, order: Ordering) {
        unsafe {
            ops::atomic_store(self.inner_ptr(), val, order);
        }
    }

    /// Stores `val` into the atomic, returning the previous value.
    #[inline]
    pub fn swap(&self, val: T, order: Ordering) -> T {
        unsafe { ops::atomic_swap(self.inner_ptr(), val, order) }
    }

    /// Stores `new` if the current value equals `current`.
    ///
    /// Returns `Ok(previous)` if the exchange happened, or `Err(actual)`
    /// with the value actually observed if it did not. `success` and
    /// `failure` are the memory orderings used for the respective outcomes.
    #[inline]
    pub fn compare_exchange(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) }
    }

    /// Like `compare_exchange`, but permitted to fail spuriously even when
    /// the comparison succeeds, mirroring the weak CAS of the `core` atomics.
    /// Intended for use inside retry loops such as `fetch_update`.
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        unsafe {
            ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure)
        }
    }

    /// Fetches the value and applies `f` to it, retrying in a CAS loop until
    /// the new value is successfully stored or `f` returns `None`.
    ///
    /// Returns `Ok(previous)` once an update is stored, or `Err(last_seen)`
    /// when `f` returns `None`. Note that `f` may be invoked multiple times
    /// if other threads modify the value concurrently (or on spurious weak
    /// CAS failures).
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                // Success: propagate `Ok(previous value)` unchanged.
                x @ Ok(_) => return x,
                // Contention or spurious failure: retry with the fresh value.
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}
286
impl Atomic<bool> {
    /// Logical "and" with `val`; stores the result and returns the previous
    /// value.
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
    }

    /// Logical "or" with `val`; stores the result and returns the previous
    /// value.
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
    }

    /// Logical "xor" with `val`; stores the result and returns the previous
    /// value.
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
    }
}
321
// Generates the fetch-and-modify methods shared by all integer types.
macro_rules! atomic_ops_common {
    ($($t:ty)*) => ($(
        impl Atomic<$t> {
            /// Adds `val` to the current value, wrapping around on overflow
            /// (demonstrated by the `i8` test below). Returns the previous
            /// value.
            #[inline]
            pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_add(self.inner_ptr(), val, order) }
            }

            /// Subtracts `val` from the current value, wrapping around on
            /// overflow. Returns the previous value.
            #[inline]
            pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_sub(self.inner_ptr(), val, order) }
            }

            /// Bitwise "and" with `val`; returns the previous value.
            #[inline]
            pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_and(self.inner_ptr(), val, order) }
            }

            /// Bitwise "or" with `val`; returns the previous value.
            #[inline]
            pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_or(self.inner_ptr(), val, order) }
            }

            /// Bitwise "xor" with `val`; returns the previous value.
            #[inline]
            pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
                unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }
            }
        }
    )*);
}
// Adds the common integer ops plus signed min/max (`ops::atomic_min/max`).
macro_rules! atomic_ops_signed {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Stores the signed minimum of the current value and `val`;
                /// returns the previous value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_min(self.inner_ptr(), val, order) }
                }

                /// Stores the signed maximum of the current value and `val`;
                /// returns the previous value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_max(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
// Adds the common integer ops plus unsigned min/max (`ops::atomic_umin/umax`).
macro_rules! atomic_ops_unsigned {
    ($($t:ty)*) => (
        atomic_ops_common!{ $($t)* }
        $(
            impl Atomic<$t> {
                /// Stores the unsigned minimum of the current value and
                /// `val`; returns the previous value.
                #[inline]
                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umin(self.inner_ptr(), val, order) }
                }

                /// Stores the unsigned maximum of the current value and
                /// `val`; returns the previous value.
                #[inline]
                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
                    unsafe { ops::atomic_umax(self.inner_ptr(), val, order) }
                }
            }
        )*
    );
}
// Instantiate the integer fetch_* APIs for every primitive integer width.
atomic_ops_signed! { i8 i16 i32 i64 isize i128 }
atomic_ops_unsigned! { u8 u16 u32 u64 usize u128 }
399
400#[cfg(feature = "serde")]
401mod serde_impl;
402
#[cfg(test)]
mod tests {
    //! Smoke tests covering every public operation for each supported type.
    //! The integer tests chain operations so that each assertion checks the
    //! *previous* value returned by the preceding `fetch_*` call.

    use super::{Atomic, Ordering::*};
    use bytemuck::NoUninit;
    use core::mem;

    // 2-byte POD struct; the tests below assert it is not lock-free.
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Foo(u8, u8);

    // 16-byte POD struct; the tests below assert it is not lock-free.
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Bar(u64, u64);

    // 4-byte POD newtype; lock-free wherever 32-bit atomics are available.
    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[repr(C)]
    struct Quux(u32);

    /// Round-trips `atomic` through serde_json, checking that it serializes
    /// exactly like the bare `value` and deserializes back to it.
    #[cfg(feature = "serde")]
    fn assert_serde<T>(atomic: &Atomic<T>, value: T)
    where
        T: NoUninit
            + PartialEq
            + std::fmt::Debug
            + for<'a> serde::Deserialize<'a>
            + serde::Serialize,
    {
        let s = serde_json::to_string(atomic).unwrap();
        assert_eq!(s, serde_json::to_string(&value).unwrap());

        let x: Atomic<T> = serde_json::from_str(&s).unwrap();
        assert_eq!(x.load(SeqCst), value);
    }

    #[test]
    fn atomic_bool() {
        let a = Atomic::new(false);
        assert_eq!(
            Atomic::<bool>::is_lock_free(),
            cfg!(target_has_atomic = "8"),
        );
        assert_eq!(format!("{:?}", a), "Atomic(false)");
        assert_eq!(a.load(SeqCst), false);
        a.store(true, SeqCst);
        assert_eq!(a.swap(false, SeqCst), true);
        assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
        assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
        assert_eq!(a.fetch_and(false, SeqCst), true);
        assert_eq!(a.fetch_or(true, SeqCst), false);
        assert_eq!(a.fetch_xor(false, SeqCst), true);
        assert_eq!(a.load(SeqCst), true);

        #[cfg(feature = "serde")]
        assert_serde(&a, true);
    }

    #[test]
    fn atomic_i8() {
        let a = Atomic::new(0i8);
        assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        // 126 - (-56) = 182, which wraps to -74 in i8.
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), -74);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i16() {
        let a = Atomic::new(0i16);
        assert_eq!(
            Atomic::<i16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i32() {
        let a = Atomic::new(0i32);
        assert_eq!(
            Atomic::<i32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i64() {
        let a = Atomic::new(0i64);
        // Some 32-bit targets have 64-bit atomics but only 4-byte alignment
        // for i64, hence the extra alignment check.
        assert_eq!(
            Atomic::<i64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<i64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_i128() {
        let a = Atomic::new(0i128);
        // 128-bit atomics require the nightly `integer_atomics` support.
        assert_eq!(
            Atomic::<i128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_isize() {
        let a = Atomic::new(0isize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(-56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 182);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(-25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u8() {
        let a = Atomic::new(0u8);
        assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(target_has_atomic = "8"));
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u16() {
        let a = Atomic::new(0u16);
        assert_eq!(
            Atomic::<u16>::is_lock_free(),
            cfg!(target_has_atomic = "16")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u32() {
        let a = Atomic::new(0u32);
        assert_eq!(
            Atomic::<u32>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u64() {
        let a = Atomic::new(0u64);
        assert_eq!(
            Atomic::<u64>::is_lock_free(),
            cfg!(target_has_atomic = "64") && mem::align_of::<u64>() == 8
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_u128() {
        let a = Atomic::new(0u128);
        assert_eq!(
            Atomic::<u128>::is_lock_free(),
            cfg!(feature = "nightly") && cfg!(target_has_atomic = "128")
        );
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_usize() {
        let a = Atomic::new(0usize);
        assert_eq!(format!("{:?}", a), "Atomic(0)");
        assert_eq!(a.load(SeqCst), 0);
        a.store(1, SeqCst);
        assert_eq!(a.swap(2, SeqCst), 1);
        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
        assert_eq!(a.fetch_add(123, SeqCst), 3);
        assert_eq!(a.fetch_sub(56, SeqCst), 126);
        assert_eq!(a.fetch_and(7, SeqCst), 70);
        assert_eq!(a.fetch_or(64, SeqCst), 6);
        assert_eq!(a.fetch_xor(1, SeqCst), 70);
        assert_eq!(a.fetch_min(30, SeqCst), 71);
        assert_eq!(a.fetch_max(25, SeqCst), 30);
        assert_eq!(a.load(SeqCst), 30);

        #[cfg(feature = "serde")]
        assert_serde(&a, 30);
    }

    #[test]
    fn atomic_foo() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Foo>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
        assert_eq!(a.load(SeqCst), Foo(0, 0));
        a.store(Foo(1, 1), SeqCst);
        assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
        assert_eq!(
            a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
            Err(Foo(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
            Ok(Foo(2, 2))
        );
        assert_eq!(a.load(SeqCst), Foo(3, 3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Foo(3, 3));
    }

    #[test]
    fn atomic_bar() {
        let a = Atomic::default();
        assert_eq!(Atomic::<Bar>::is_lock_free(), false);
        assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
        assert_eq!(a.load(SeqCst), Bar(0, 0));
        a.store(Bar(1, 1), SeqCst);
        assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
        assert_eq!(
            a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
            Err(Bar(2, 2))
        );
        assert_eq!(
            a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
            Ok(Bar(2, 2))
        );
        assert_eq!(a.load(SeqCst), Bar(3, 3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Bar(3, 3));
    }

    #[test]
    fn atomic_quux() {
        let a = Atomic::default();
        assert_eq!(
            Atomic::<Quux>::is_lock_free(),
            cfg!(target_has_atomic = "32")
        );
        assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
        assert_eq!(a.load(SeqCst), Quux(0));
        a.store(Quux(1), SeqCst);
        assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
        assert_eq!(
            a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
            Err(Quux(2))
        );
        assert_eq!(
            a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
            Ok(Quux(2))
        );
        assert_eq!(a.load(SeqCst), Quux(3));

        #[cfg(feature = "serde")]
        assert_serde(&a, Quux(3));
    }
}