#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(cfg_target_has_atomic)]
+#![cfg_attr(not(stage0), feature(cfg_target_has_atomic_cas))]
#![feature(coerce_unsized)]
#![feature(collections_range)]
#![feature(const_fn)]
#[cfg(test)]
mod boxed_test;
pub mod collections;
-#[cfg(target_has_atomic = "ptr")]
+#[cfg_attr(stage0, cfg(target_has_atomic = "ptr"))]
+#[cfg_attr(not(stage0), cfg(all(target_has_atomic = "ptr", target_has_atomic_cas)))]
pub mod sync;
pub mod rc;
pub mod raw_vec;
pub use core::task::*;
-#[cfg(target_has_atomic = "ptr")]
+#[cfg_attr(stage0, cfg(target_has_atomic = "ptr"))]
+#[cfg_attr(not(stage0), cfg(all(target_has_atomic = "ptr", target_has_atomic_cas)))]
pub use self::if_arc::*;
-#[cfg(target_has_atomic = "ptr")]
+#[cfg_attr(stage0, cfg(target_has_atomic = "ptr"))]
+#[cfg_attr(not(stage0), cfg(all(target_has_atomic = "ptr", target_has_atomic_cas)))]
mod if_arc {
use super::*;
use core::marker::PhantomData;
}
}
- #[cfg(target_has_atomic = "ptr")]
+ #[cfg_attr(stage0, cfg(target_has_atomic = "ptr"))]
+ #[cfg_attr(not(stage0), cfg(all(target_has_atomic = "ptr", target_has_atomic_cas)))]
// Zero-sized marker newtype; carries only a PhantomData<T>. Per the impl
// header below, it is the type on which `UnsafeWake` is implemented for
// `T: Wake` — presumably so an Arc<T> can be reinterpreted as a waker
// object (NOTE(review): confirm against the full if_arc module, which is
// cut off in this hunk).
struct ArcWrapped<T>(PhantomData<T>);
unsafe impl<T: Wake + 'static> UnsafeWake for ArcWrapped<T> {
#![feature(associated_type_defaults)]
#![feature(attr_literals)]
#![feature(cfg_target_has_atomic)]
+#![cfg_attr(not(stage0), feature(cfg_target_has_atomic_cas))]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(const_int_ops)]
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
    // Atomically stores `val` and returns the previous value, using the
    // given memory `order`. The bool is backed by a u8 cell (`self.v`),
    // so the result is mapped back to bool via `!= 0`.
    unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
Ok(x) => x,
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_exchange(&self,
current: bool,
new: bool,
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
    // Atomic read-modify-write: logically ANDs the stored value with `val`
    // and returns the PREVIOUS value. Backed by a u8 cell, hence the
    // `as u8` / `!= 0` round-trip.
    unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
// We can't use atomic_nand here because it can result in a bool with
// an invalid value. This happens because the atomic operation is done
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
    // Atomic read-modify-write: logically ORs the stored value with `val`
    // and returns the PREVIOUS value (u8-backed, converted via `!= 0`).
    unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
    // Atomic read-modify-write: XORs the stored value with `val` and
    // returns the PREVIOUS value (u8-backed, converted via `!= 0`).
    unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
    // Atomically replaces the stored pointer with `ptr` and returns the
    // previous pointer. The pointer cell (`self.p`) is punned to usize for
    // the intrinsic and cast back to *mut T on the way out.
    unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
Ok(x) => x,
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_exchange(&self,
current: *mut T,
new: *mut T,
```"),
#[inline]
#[$stable]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
    // Macro-expanded per integer atomic type ($int_type): atomically
    // stores `val` and returns the previous value under `order`.
    unsafe { atomic_swap(self.v.get(), val, order) }
}
```"),
#[inline]
#[$stable]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
```"),
#[inline]
#[$stable_cxchg]
+ #[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
}
#[inline]
+#[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
match order {
Release => Relaxed,
}
#[inline]
+#[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xchg_acq(dst, val),
}
#[inline]
+#[cfg_attr(not(stage0), cfg(target_has_atomic_cas))]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
old: T,
new: T,
let vendor = &sess.target.target.target_vendor;
let min_atomic_width = sess.target.target.min_atomic_width();
let max_atomic_width = sess.target.target.max_atomic_width();
+ let atomic_cas = sess.target.target.options.atomic_cas;
let mut ret = HashSet::new();
// Target bindings.
}
}
}
+ if atomic_cas {
+ ret.insert((Symbol::intern("target_has_atomic_cas"), None));
+ }
if sess.opts.debug_assertions {
ret.insert((Symbol::intern("debug_assertions"), None));
}
/// Don't use this field; instead use the `.max_atomic_width()` method.
pub max_atomic_width: Option<u64>,
+ /// Whether the target supports atomic CAS operations natively
+ pub atomic_cas: bool,
+
/// Panic strategy: "unwind" or "abort"
pub panic_strategy: PanicStrategy,
no_integrated_as: false,
min_atomic_width: None,
max_atomic_width: None,
+ atomic_cas: true,
panic_strategy: PanicStrategy::Unwind,
abi_blacklist: vec![],
crt_static_allows_dylibs: false,
key!(no_integrated_as, bool);
key!(max_atomic_width, Option<u64>);
key!(min_atomic_width, Option<u64>);
+ key!(atomic_cas, bool);
try!(key!(panic_strategy, PanicStrategy));
key!(crt_static_allows_dylibs, bool);
key!(crt_static_default, bool);
target_option_val!(no_integrated_as);
target_option_val!(min_atomic_width);
target_option_val!(max_atomic_width);
+ target_option_val!(atomic_cas);
target_option_val!(panic_strategy);
target_option_val!(crt_static_allows_dylibs);
target_option_val!(crt_static_default);
linker: Some("msp430-elf-gcc".to_string()),
no_integrated_as: true,
- // There are no atomic instructions available in the MSP430
+ // There are no atomic CAS instructions available in the MSP430
// instruction set
- max_atomic_width: Some(0),
+ max_atomic_width: Some(16),
+
+ atomic_cas: false,
// Because these devices have very little resources having an
// unwinder is too onerous so we default to "abort" because the
features: "+strict-align".to_string(),
// There are no atomic instructions available in the instruction set of the ARMv6-M
// architecture
- max_atomic_width: Some(0),
+ atomic_cas: false,
.. super::thumb_base::opts()
}
})
// Allows async and await syntax
(active, async_await, "1.28.0", Some(50547), None),
+
+    // Allows the use of `#[cfg(target_has_atomic_cas)]`
+ (active, cfg_target_has_atomic_cas, "1.28.0", Some(0), None),
);
declare_features! (
("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)),
("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)),
("target_has_atomic", "cfg_target_has_atomic", cfg_fn!(cfg_target_has_atomic)),
+ ("target_has_atomic_cas", "cfg_target_has_atomic_cas", cfg_fn!(cfg_target_has_atomic_cas)),
];
#[derive(Debug, Eq, PartialEq)]