CFG_LDPATH_armv7-unknown-linux-gnueabihf :=
CFG_RUN_armv7-unknown-linux-gnueabihf=$(2)
CFG_RUN_TARG_armv7-unknown-linux-gnueabihf=$(call CFG_RUN_armv7-unknown-linux-gnueabihf,,$(2))
-RUSTC_FLAGS_armv7-unknown-linux-gnueabihf := -C target-feature=+v7,+vfp2,+neon
+RUSTC_FLAGS_armv7-unknown-linux-gnueabihf :=
RUSTC_CROSS_FLAGS_armv7-unknown-linux-gnueabihf :=
CFG_GNU_TRIPLE_armv7-unknown-linux-gnueabihf := armv7-unknown-linux-gnueabihf
#![stable(feature = "rust1", since = "1.0.0")]
-use alloc::raw_vec::RawVec;
use alloc::boxed::Box;
use alloc::heap::EMPTY;
+use alloc::raw_vec::RawVec;
+use borrow::ToOwned;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
+impl<T: Clone> Clone for IntoIter<T> {
+ fn clone(&self) -> IntoIter<T> {
+ // Clone only the elements that have not been yielded yet: `self.ptr`
+ // is the cursor into the buffer and `self.len()` (ExactSizeIterator)
+ // is the number of remaining elements. The clones are collected into
+ // a fresh Vec whose IntoIter is returned.
+ unsafe {
+ slice::from_raw_parts(self.ptr, self.len()).to_owned().into_iter()
+ }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for IntoIter<T> {
#[unsafe_destructor_blind_to_params]
assert_eq!(vec![1, 2, 3].into_iter().count(), 3);
}
+#[test]
+fn test_into_iter_clone() {
+ // Drains `it` and checks the collected items against `slice`.
+ fn iter_equal<I: Iterator<Item=i32>>(it: I, slice: &[i32]) {
+ let v: Vec<i32> = it.collect();
+ assert_eq!(&v[..], slice);
+ }
+ let mut it = vec![1, 2, 3].into_iter();
+ iter_equal(it.clone(), &[1, 2, 3]);
+ assert_eq!(it.next(), Some(1));
+ // A clone must reflect iteration progress, including through the
+ // `Rev` adaptor, down to the exhausted iterator.
+ let mut it = it.rev();
+ iter_equal(it.clone(), &[3, 2]);
+ assert_eq!(it.next(), Some(3));
+ iter_equal(it.clone(), &[2]);
+ assert_eq!(it.next(), Some(2));
+ iter_equal(it.clone(), &[]);
+ assert_eq!(it.next(), None);
+}
+
#[test]
fn test_cow_from() {
let borrowed: &[_] = &["borrowed", "(slice)"];
pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchg_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchg_failacq<T>(dst: *mut T, old: T, src: T) -> T;
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchg_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchg_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> T;
+
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_acqrel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_relaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_failacq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_acq_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ #[cfg(not(stage0))]
+ pub fn atomic_cxchgweak_acqrel_failrelaxed<T>(dst: *mut T, old: T, src: T) -> (T, bool);
pub fn atomic_load<T>(src: *const T) -> T;
pub fn atomic_load_acq<T>(src: *const T) -> T;
/// assert_eq!(6, doubled[2]);
/// ```
///
- /// Using the 'turbofish' instead of annotationg `doubled`:
+ /// Using the 'turbofish' instead of annotating `doubled`:
///
/// ```
/// let a = [1, 2, 3];
/// `true`, then so does `all()`. If any of them return `false`, it
/// returns `false`.
///
- /// `all()` is short-circuting; in other words, it will stop processing
+ /// `all()` is short-circuiting; in other words, it will stop processing
/// as soon as it finds a `false`, given that no matter what else happens,
/// the result will also be `false`.
///
/// `true`, then so does `any()`. If they all return `false`, it
/// returns `false`.
///
- /// `any()` is short-circuting; in other words, it will stop processing
+ /// `any()` is short-circuiting; in other words, it will stop processing
/// as soon as it finds a `true`, given that no matter what else happens,
/// the result will also be `true`.
///
/// `true`, then `find()` returns `Some(element)`. If they all return
/// `false`, it returns `None`.
///
- /// `find()` is short-circuting; in other words, it will stop processing
+ /// `find()` is short-circuiting; in other words, it will stop processing
/// as soon as the closure returns `true`.
///
/// Because `find()` takes a reference, and many iterators iterate over
/// returns `true`, then `position()` returns `Some(index)`. If all of
/// them return `false`, it returns `None`.
///
- /// `position()` is short-circuting; in other words, it will stop
+ /// `position()` is short-circuiting; in other words, it will stop
/// processing as soon as it finds a `true`.
///
/// # Overflow Behavior
/// and if one of them returns `true`, then `rposition()` returns
/// `Some(index)`. If all of them return `false`, it returns `None`.
///
- /// `rposition()` is short-circuting; in other words, it will stop
+ /// `rposition()` is short-circuiting; in other words, it will stop
/// processing as soon as it finds a `true`.
///
/// # Examples
(ts, us)
}
- /// Creates an iterator which clone()s all of its elements.
+ /// Creates an iterator which `clone()`s all of its elements.
///
/// This is useful when you have an iterator over `&T`, but you need an
/// iterator over `T`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ }
+
+ /// Stores a value into the `bool` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
+ /// operation. The first describes the required ordering if the operation succeeds while the
+ /// second describes the required ordering when the operation fails. The failure ordering can't
+ /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.compare_exchange(true,
+ /// false,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// true);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(some_bool.compare_exchange(true, true,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// false);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange(&self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering) -> bool {
let current = if current { UINT_TRUE } else { 0 };
let new = if new { UINT_TRUE } else { 0 };
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) > 0 }
+ }
+
+ /// Stores a value into the `bool` if the current value is the same as the `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// returned value is a tuple of the existing value and a flag indicating whether the
+ /// new value was written.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if the operation
+ /// succeeds while the second describes the required ordering when the operation fails. The
+ /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+ /// success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let val = AtomicBool::new(false);
+ ///
+ /// let new = true;
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// let result = val.compare_exchange_weak(old, new,
+ /// Ordering::SeqCst,
+ /// Ordering::Relaxed);
+ /// if result.1 {
+ /// break;
+ /// } else {
+ /// old = result.0;
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange_weak(&self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering) -> (bool, bool) {
+ let current = if current { UINT_TRUE } else { 0 };
+ let new = if new { UINT_TRUE } else { 0 };
+
+ let result = unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
+ };
+ (result.0 > 0, result.1)
}
/// Logical "and" with a boolean value.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ }
+
+ /// Stores a value into the `isize` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
+ /// operation. The first describes the required ordering if the operation succeeds while the
+ /// second describes the required ordering when the operation fails. The failure ordering can't
+ /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.compare_exchange(5, 10,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// 5);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_isize.compare_exchange(6, 12,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// 10);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange(&self,
+ current: isize,
+ new: isize,
+ success: Ordering,
+ failure: Ordering) -> isize {
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the `isize` if the current value is the same as the `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// returned value is a tuple of the existing value and a flag indicating whether the
+ /// new value was written.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if the operation
+ /// succeeds while the second describes the required ordering when the operation fails. The
+ /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+ /// success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let val = AtomicIsize::new(4);
+ ///
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// let new = old * 2;
+ /// let result = val.compare_exchange_weak(old, new,
+ /// Ordering::SeqCst,
+ /// Ordering::Relaxed);
+ /// if result.1 {
+ /// break;
+ /// } else {
+ /// old = result.0;
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange_weak(&self,
+ current: isize,
+ new: isize,
+ success: Ordering,
+ failure: Ordering) -> (isize, bool) {
+ unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
}
/// Add an isize to the current value, returning the previous value.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ }
+
+ /// Stores a value into the `usize` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
+ /// operation. The first describes the required ordering if the operation succeeds while the
+ /// second describes the required ordering when the operation fails. The failure ordering can't
+ /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let some_usize = AtomicUsize::new(5);
+ ///
+ /// assert_eq!(some_usize.compare_exchange(5, 10,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// 5);
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_usize.compare_exchange(6, 12,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// 10);
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange(&self,
+ current: usize,
+ new: usize,
+ success: Ordering,
+ failure: Ordering) -> usize {
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the `usize` if the current value is the same as the `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// returned value is a tuple of the existing value and a flag indicating whether the
+ /// new value was written.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if the operation
+ /// succeeds while the second describes the required ordering when the operation fails. The
+ /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+ /// success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let val = AtomicUsize::new(4);
+ ///
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// let new = old * 2;
+ /// let result = val.compare_exchange_weak(old, new,
+ /// Ordering::SeqCst,
+ /// Ordering::Relaxed);
+ /// if result.1 {
+ /// break;
+ /// } else {
+ /// old = result.0;
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange_weak(&self,
+ current: usize,
+ new: usize,
+ success: Ordering,
+ failure: Ordering) -> (usize, bool) {
+ unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
}
/// Add to the current usize, returning the previous value.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ }
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
+ /// operation. The first describes the required ordering if the operation succeeds while the
+ /// second describes the required ordering when the operation fails. The failure ordering can't
+ /// be `Acquire` or `AcqRel` and must be equivalent or weaker than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ /// let another_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
+ /// Ordering::SeqCst, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange(&self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering) -> *mut T {
unsafe {
- atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
- new as usize, order) as *mut T
+ atomic_compare_exchange(self.p.get() as *mut usize, current as usize,
+ new as usize, success, failure) as *mut T
}
}
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// returned value is a tuple of the existing value and a flag indicating whether the
+ /// new value was written.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if the operation
+ /// succeeds while the second describes the required ordering when the operation fails. The
+ /// failure ordering can't be `Acquire` or `AcqRel` and must be equivalent or weaker than the
+ /// success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![feature(extended_compare_and_swap)]
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let some_ptr = AtomicPtr::new(&mut 5);
+ ///
+ /// let new = &mut 10;
+ /// let mut old = some_ptr.load(Ordering::Relaxed);
+ /// loop {
+ /// let result = some_ptr.compare_exchange_weak(old, new,
+ /// Ordering::SeqCst,
+ /// Ordering::Relaxed);
+ /// if result.1 {
+ /// break;
+ /// } else {
+ /// old = result.0;
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ pub fn compare_exchange_weak(&self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering) -> (*mut T, bool) {
+ let result = unsafe {
+ atomic_compare_exchange_weak(self.p.get() as *mut usize, current as usize,
+ new as usize, success, failure)
+ };
+ (result.0 as *mut T, result.1)
+ }
}
#[inline]
-unsafe fn atomic_store<T>(dst: *mut T, val: T, order:Ordering) {
+// Given a success memory ordering, returns the strongest failure ordering
+// that is legal for it: a failure ordering may not be Release or AcqRel,
+// nor stronger than the success ordering. Used to implement the legacy
+// `compare_and_swap` on top of `compare_exchange`.
+fn strongest_failure_ordering(order: Ordering) -> Ordering {
+ match order {
+ Release => Relaxed,
+ Relaxed => Relaxed,
+ SeqCst => SeqCst,
+ Acquire => Acquire,
+ AcqRel => Acquire,
+ }
+}
+
+#[inline]
+unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
match order {
Release => intrinsics::atomic_store_rel(dst, val),
Relaxed => intrinsics::atomic_store_relaxed(dst, val),
}
#[inline]
-unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
+unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_load_acq(dst),
Relaxed => intrinsics::atomic_load_relaxed(dst),
}
#[inline]
-unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering) -> T {
- match order {
+#[cfg(not(stage0))]
+// Dispatches to the cxchg intrinsic matching the (success, failure) ordering
+// pair. The failure ordering may not be Release or AcqRel, nor stronger than
+// the success ordering; invalid pairs panic with a message naming the
+// offending failure ordering.
+unsafe fn atomic_compare_exchange<T>(dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering) -> T {
+ match (success, failure) {
+ (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
+ (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
+ (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
+ (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
+ (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
+ (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
+ (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
+ (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
+ (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
+ (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+ _ => panic!("a failure ordering can't be stronger than a success ordering"),
+ }
+}
+
+#[inline]
+#[cfg(stage0)]
+unsafe fn atomic_compare_exchange<T>(dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ _: Ordering) -> T {
+ match success {
Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
Release => intrinsics::atomic_cxchg_rel(dst, old, new),
AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
}
}
+#[inline]
+#[cfg(not(stage0))]
+// Weak variant of `atomic_compare_exchange`: may spuriously fail, so it also
+// returns a success flag. Same ordering-pair rules apply; the panic messages
+// name the offending failure ordering (Release and AcqRel are never valid
+// failure orderings).
+unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering) -> (T, bool) {
+ match (success, failure) {
+ (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
+ (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
+ (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
+ (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
+ (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
+ (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
+ (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
+ (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
+ (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
+ (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+ _ => panic!("a failure ordering can't be stronger than a success ordering"),
+ }
+}
+
+#[inline]
+#[cfg(stage0)]
+// stage0 has no weak-cxchg intrinsics, so emulate the weak form with the
+// strong exchange; success is detected by comparing the returned previous
+// value against `old` (hence the Eq + Copy bounds).
+unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
+ old: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering) -> (T, bool)
+ where T: ::cmp::Eq + ::marker::Copy
+{
+ let result = atomic_compare_exchange(dst, old, new, success, failure);
+ (result, result == old)
+}
+
#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
target_vendor: "unknown".to_string(),
options: TargetOptions {
- features: "+v7,+vfp2,+neon".to_string(),
+ features: "+v7,+vfp3,+neon".to_string(),
cpu: "cortex-a8".to_string(),
.. base
}
CMP: ValueRef,
RHS: ValueRef,
Order: AtomicOrdering,
- FailureOrder: AtomicOrdering)
+ FailureOrder: AtomicOrdering,
+ Weak: Bool)
-> ValueRef;
pub fn LLVMBuildAtomicRMW(B: BuilderRef,
Op: AtomicBinOp,
pub fn AtomicCmpXchg(cx: Block, dst: ValueRef,
cmp: ValueRef, src: ValueRef,
order: AtomicOrdering,
- failure_order: AtomicOrdering) -> ValueRef {
- B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order)
+ failure_order: AtomicOrdering,
+ weak: llvm::Bool) -> ValueRef {
+ B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
}
pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
dst: ValueRef, src: ValueRef,
pub fn atomic_cmpxchg(&self, dst: ValueRef,
cmp: ValueRef, src: ValueRef,
order: AtomicOrdering,
- failure_order: AtomicOrdering) -> ValueRef {
+ failure_order: AtomicOrdering,
+ weak: llvm::Bool) -> ValueRef {
unsafe {
llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src,
- order, failure_order)
+ order, failure_order, weak)
}
}
pub fn atomic_rmw(&self, op: AtomicBinOp,
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
(_, name) if name.starts_with("atomic_") => {
let split: Vec<&str> = name.split('_').collect();
- assert!(split.len() >= 2, "Atomic intrinsic not correct format");
- let order = if split.len() == 2 {
- llvm::SequentiallyConsistent
- } else {
- match split[2] {
- "unordered" => llvm::Unordered,
- "relaxed" => llvm::Monotonic,
- "acq" => llvm::Acquire,
- "rel" => llvm::Release,
- "acqrel" => llvm::AcquireRelease,
+ let (order, failorder) = match split.len() {
+ 2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
+ 3 => match split[2] {
+ "unordered" => (llvm::Unordered, llvm::Unordered),
+ "relaxed" => (llvm::Monotonic, llvm::Monotonic),
+ "acq" => (llvm::Acquire, llvm::Acquire),
+ "rel" => (llvm::Release, llvm::Monotonic),
+ "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
+ "failrelaxed" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
+ (llvm::SequentiallyConsistent, llvm::Monotonic),
+ "failacq" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
+ (llvm::SequentiallyConsistent, llvm::Acquire),
_ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
- }
+ },
+ 4 => match (split[2], split[3]) {
+ ("acq", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
+ (llvm::Acquire, llvm::Monotonic),
+ ("acqrel", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
+ (llvm::AcquireRelease, llvm::Monotonic),
+ _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+ },
+ _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
};
match split[1] {
"cxchg" => {
- // See include/llvm/IR/Instructions.h for their implementation
- // of this, I assume that it's good enough for us to use for
- // now.
- let strongest_failure_ordering = match order {
- llvm::NotAtomic | llvm::Unordered =>
- ccx.sess().fatal("cmpxchg must be atomic"),
-
- llvm::Monotonic | llvm::Release =>
- llvm::Monotonic,
-
- llvm::Acquire | llvm::AcquireRelease =>
- llvm::Acquire,
-
- llvm::SequentiallyConsistent =>
- llvm::SequentiallyConsistent
- };
-
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
let src = from_arg_ty(bcx, llargs[2], tp_ty);
- let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
- strongest_failure_ordering);
+ let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
ExtractValue(bcx, res, 0)
}
+ "cxchgweak" => {
+ let tp_ty = *substs.types.get(FnSpace, 0);
+ let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+ let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
+ let src = from_arg_ty(bcx, llargs[2], tp_ty);
+ let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
+ let result = ExtractValue(bcx, val, 0);
+ let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
+ Store(bcx, result, StructGEP(bcx, llresult, 0));
+ Store(bcx, success, StructGEP(bcx, llresult, 1));
+ C_nil(ccx)
+ }
+
"load" => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
param(ccx, 0),
param(ccx, 0)),
param(ccx, 0)),
+ "cxchgweak" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
+ param(ccx, 0),
+ param(ccx, 0)),
+ tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
"load" => (1, vec!(tcx.mk_imm_ptr(param(ccx, 0))),
param(ccx, 0)),
"store" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)),
use externalfiles::ExternalHtml;
-use serialize::json::{self, ToJson};
+use serialize::json::{ToJson, Json, as_json};
use syntax::{abi, ast};
use syntax::feature_gate::UnstableFeatures;
use rustc::middle::cstore::LOCAL_CRATE;
path: String,
desc: String,
parent: Option<DefId>,
+ parent_idx: Option<usize>,
search_type: Option<IndexItemFunctionType>,
}
+impl ToJson for IndexItem {
+ fn to_json(&self) -> Json {
+ // Invariant: `parent` and `parent_idx` are set (or unset) together.
+ assert_eq!(self.parent.is_some(), self.parent_idx.is_some());
+
+ // Serialized as a compact fixed-order array:
+ // [ty, name, path, desc, parent_idx, search_type]
+ let mut data = Vec::with_capacity(6);
+ data.push((self.ty as usize).to_json());
+ data.push(self.name.to_json());
+ data.push(self.path.to_json());
+ data.push(self.desc.to_json());
+ data.push(self.parent_idx.to_json());
+ data.push(self.search_type.to_json());
+
+ Json::Array(data)
+ }
+}
+
/// A type used for the search index.
struct Type {
name: Option<String>,
}
-impl fmt::Display for Type {
- /// Formats type as {name: $name}.
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- // Wrapping struct fmt should never call us when self.name is None,
- // but just to be safe we write `null` in that case.
+impl ToJson for Type {
+ fn to_json(&self) -> Json {
match self.name {
- Some(ref n) => write!(f, "{{\"name\":\"{}\"}}", n),
- None => write!(f, "null")
+ Some(ref name) => {
+ let mut data = BTreeMap::new();
+ data.insert("name".to_owned(), name.to_json());
+ Json::Object(data)
+ },
+ None => Json::Null
}
}
}
output: Option<Type>
}
-impl fmt::Display for IndexItemFunctionType {
- /// Formats a full fn type as a JSON {inputs: [Type], outputs: Type/null}.
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+impl ToJson for IndexItemFunctionType {
+ fn to_json(&self) -> Json {
// If we couldn't figure out a type, just write `null`.
- if self.inputs.iter().any(|ref i| i.name.is_none()) ||
- (self.output.is_some() && self.output.as_ref().unwrap().name.is_none()) {
- return write!(f, "null")
+ if self.inputs.iter().chain(self.output.iter()).any(|ref i| i.name.is_none()) {
+ Json::Null
+ } else {
+ let mut data = BTreeMap::new();
+ data.insert("inputs".to_owned(), self.inputs.to_json());
+ data.insert("output".to_owned(), self.output.to_json());
+ Json::Object(data)
}
-
- let inputs: Vec<String> = self.inputs.iter().map(|ref t| {
- format!("{}", t)
- }).collect();
- try!(write!(f, "{{\"inputs\":[{}],\"output\":", inputs.join(",")));
-
- match self.output {
- Some(ref t) => try!(write!(f, "{}", t)),
- None => try!(write!(f, "null"))
- };
-
- Ok(try!(write!(f, "}}")))
}
}
cx.krate(krate)
}
+/// Build the search index from the collected metadata
fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String {
- // Build the search index from the collected metadata
let mut nodeid_to_pathid = HashMap::new();
- let mut pathid_to_nodeid = Vec::new();
- {
- let Cache { ref mut search_index,
- ref orphan_methods,
- ref mut paths, .. } = *cache;
-
- // Attach all orphan methods to the type's definition if the type
- // has since been learned.
- for &(did, ref item) in orphan_methods {
- match paths.get(&did) {
- Some(&(ref fqp, _)) => {
- // Needed to determine `self` type.
- let parent_basename = Some(fqp[fqp.len() - 1].clone());
- search_index.push(IndexItem {
- ty: shortty(item),
- name: item.name.clone().unwrap(),
- path: fqp[..fqp.len() - 1].join("::"),
- desc: Escape(&shorter(item.doc_value())).to_string(),
- parent: Some(did),
- search_type: get_index_search_type(&item, parent_basename),
- });
- },
- None => {}
- }
- }
-
- // Reduce `NodeId` in paths into smaller sequential numbers,
- // and prune the paths that do not appear in the index.
- for item in search_index.iter() {
- match item.parent {
- Some(nodeid) => {
- if !nodeid_to_pathid.contains_key(&nodeid) {
- let pathid = pathid_to_nodeid.len();
- nodeid_to_pathid.insert(nodeid, pathid);
- pathid_to_nodeid.push(nodeid);
- }
- }
- None => {}
- }
+ let mut crate_items = Vec::with_capacity(cache.search_index.len());
+ let mut crate_paths = Vec::<Json>::new();
+
+ let Cache { ref mut search_index,
+ ref orphan_methods,
+ ref mut paths, .. } = *cache;
+
+ // Attach all orphan methods to the type's definition if the type
+ // has since been learned.
+ for &(did, ref item) in orphan_methods {
+ match paths.get(&did) {
+ Some(&(ref fqp, _)) => {
+ // Needed to determine `self` type.
+ let parent_basename = Some(fqp[fqp.len() - 1].clone());
+ search_index.push(IndexItem {
+ ty: shortty(item),
+ name: item.name.clone().unwrap(),
+ path: fqp[..fqp.len() - 1].join("::"),
+ desc: Escape(&shorter(item.doc_value())).to_string(),
+ parent: Some(did),
+ parent_idx: None,
+ search_type: get_index_search_type(&item, parent_basename),
+ });
+ },
+ None => {}
}
- assert_eq!(nodeid_to_pathid.len(), pathid_to_nodeid.len());
}
- // Collect the index into a string
- let mut w = io::Cursor::new(Vec::new());
- write!(&mut w, r#"searchIndex['{}'] = {{"items":["#, krate.name).unwrap();
+ // Reduce `NodeId` in paths into smaller sequential numbers,
+ // and prune the paths that do not appear in the index.
+ let mut lastpath = String::new();
+ let mut lastpathid = 0usize;
- let mut lastpath = "".to_string();
- for (i, item) in cache.search_index.iter().enumerate() {
- // Omit the path if it is same to that of the prior item.
- let path;
- if lastpath == item.path {
- path = "";
- } else {
- lastpath = item.path.to_string();
- path = &item.path;
- };
+ for item in search_index {
+ item.parent_idx = item.parent.map(|nodeid| {
+ if nodeid_to_pathid.contains_key(&nodeid) {
+ *nodeid_to_pathid.get(&nodeid).unwrap()
+ } else {
+ let pathid = lastpathid;
+ nodeid_to_pathid.insert(nodeid, pathid);
+ lastpathid += 1;
- if i > 0 {
- write!(&mut w, ",").unwrap();
- }
- write!(&mut w, r#"[{},"{}","{}",{}"#,
- item.ty as usize, item.name, path,
- item.desc.to_json().to_string()).unwrap();
- match item.parent {
- Some(nodeid) => {
- let pathid = *nodeid_to_pathid.get(&nodeid).unwrap();
- write!(&mut w, ",{}", pathid).unwrap();
+ let &(ref fqp, short) = paths.get(&nodeid).unwrap();
+ crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json());
+ pathid
}
- None => write!(&mut w, ",null").unwrap()
- }
- match item.search_type {
- Some(ref t) => write!(&mut w, ",{}", t).unwrap(),
- None => write!(&mut w, ",null").unwrap()
- }
- write!(&mut w, "]").unwrap();
- }
-
- write!(&mut w, r#"],"paths":["#).unwrap();
+ });
- for (i, &did) in pathid_to_nodeid.iter().enumerate() {
- let &(ref fqp, short) = cache.paths.get(&did).unwrap();
- if i > 0 {
- write!(&mut w, ",").unwrap();
+ // Omit the parent path if it is the same as that of the prior item.
+ if lastpath == item.path {
+ item.path.clear();
+ } else {
+ lastpath = item.path.clone();
}
- write!(&mut w, r#"[{},"{}"]"#,
- short as usize, *fqp.last().unwrap()).unwrap();
+ crate_items.push(item.to_json());
}
- write!(&mut w, "]}};").unwrap();
+ let crate_doc = krate.module.as_ref().map(|module| {
+ Escape(&shorter(module.doc_value())).to_string()
+ }).unwrap_or(String::new());
- String::from_utf8(w.into_inner()).unwrap()
+ let mut crate_data = BTreeMap::new();
+ crate_data.insert("doc".to_owned(), Json::String(crate_doc));
+ crate_data.insert("items".to_owned(), Json::Array(crate_items));
+ crate_data.insert("paths".to_owned(), Json::Array(crate_paths));
+
+ // Collect the index into a string
+ format!("searchIndex[{}] = {};",
+ as_json(&krate.name),
+ Json::Object(crate_data))
}
fn write_shared(cx: &Context,
if !line.starts_with(key) {
continue
}
- if line.starts_with(&format!("{}['{}']", key, krate)) {
+ if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) {
continue
}
ret.push(line.to_string());
path: path.join("::").to_string(),
desc: Escape(&shorter(item.doc_value())).to_string(),
parent: parent,
+ parent_idx: None,
search_type: get_index_search_type(&item, parent_basename),
});
}
let js_dst = this.dst.join("sidebar-items.js");
let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst));
try_err!(write!(&mut js_out, "initSidebarItems({});",
- json::as_json(&items)), &js_dst);
+ as_json(&items)), &js_dst);
}
for item in m.items {
displayPath = "";
href = rootPath + item.path.replace(/::/g, '/') +
'/' + type + '.' + name + '.html';
+ } else if (type === "externcrate") {
+ displayPath = "";
+ href = rootPath + name + '/index.html';
} else if (item.parent !== undefined) {
var myparent = item.parent;
var anchor = '#' + type + '.' + name;
for (var crate in rawSearchIndex) {
if (!rawSearchIndex.hasOwnProperty(crate)) { continue; }
+ searchWords.push(crate);
+ searchIndex.push({
+ crate: crate,
+ ty: 1, // == ExternCrate
+ name: crate,
+ path: "",
+ desc: rawSearchIndex[crate].doc,
+ type: null,
+ });
+
// an array of [(Number) item type,
// (String) name,
// (String) full path or empty string for previous path,
}
.content a.primitive { color: #39a7bf; }
-.content span.mod, .content a.mod, block a.current.mod { color: #4d76ae; }
+.content span.externcrate, span.mod, .content a.mod, block a.current.mod { color: #4d76ae; }
.content span.fn, .content a.fn, .block a.current.fn,
.content span.method, .content a.method, .block a.current.method,
.content span.tymethod, .content a.tymethod, .block a.current.tymethod,
LLVMValueRef old,
LLVMValueRef source,
AtomicOrdering order,
- AtomicOrdering failure_order) {
- return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(target), unwrap(old),
- unwrap(source), order,
- failure_order
- ));
+ AtomicOrdering failure_order,
+ LLVMBool weak) {
+ AtomicCmpXchgInst* acxi = unwrap(B)->CreateAtomicCmpXchg(unwrap(target),
+ unwrap(old),
+ unwrap(source),
+ order,
+ failure_order);
+ acxi->setWeak(weak);
+ return wrap(acxi);
}
extern "C" LLVMValueRef LLVMBuildAtomicFence(LLVMBuilderRef B,
AtomicOrdering order,
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(extended_compare_and_swap)]
+use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT};
+use std::sync::atomic::Ordering::*;
+
+static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT;
+
+// Smoke test: call compare_exchange / compare_exchange_weak with every
+// (success, failure) ordering pair listed below so that trans must emit
+// each corresponding cmpxchg intrinsic variant.  Return values are
+// deliberately ignored — this only checks that codegen succeeds.
+fn main() {
+ // Make sure trans can emit all the intrinsics correctly
+ ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed);
+ ATOMIC.compare_exchange(0, 1, Acquire, Relaxed);
+ ATOMIC.compare_exchange(0, 1, Release, Relaxed);
+ ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed);
+ ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed);
+ ATOMIC.compare_exchange(0, 1, Acquire, Acquire);
+ ATOMIC.compare_exchange(0, 1, AcqRel, Acquire);
+ ATOMIC.compare_exchange(0, 1, SeqCst, Acquire);
+ ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst);
+ // Repeat the same ordering matrix for the weak variant, which maps to
+ // a cmpxchg marked `weak` (may fail spuriously).
+ ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed);
+ ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed);
+ ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed);
+ ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed);
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed);
+ ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire);
+ ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire);
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire);
+ ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst);
+}
pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
+ pub fn atomic_cxchgweak<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_acq<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+ pub fn atomic_cxchgweak_rel<T>(dst: *mut T, old: T, src: T) -> (T, bool);
+
pub fn atomic_load<T>(src: *const T) -> T;
pub fn atomic_load_acq<T>(src: *const T) -> T;
assert_eq!(rusti::atomic_xsub_acq(&mut *x, 1), 2);
assert_eq!(rusti::atomic_xsub_rel(&mut *x, 1), 1);
assert_eq!(*x, 0);
+
+ loop {
+ let res = rusti::atomic_cxchgweak(&mut *x, 0, 1);
+ assert_eq!(res.0, 0);
+ if res.1 {
+ break;
+ }
+ }
+ assert_eq!(*x, 1);
+
+ loop {
+ let res = rusti::atomic_cxchgweak_acq(&mut *x, 1, 2);
+ assert_eq!(res.0, 1);
+ if res.1 {
+ break;
+ }
+ }
+ assert_eq!(*x, 2);
+
+ loop {
+ let res = rusti::atomic_cxchgweak_rel(&mut *x, 2, 3);
+ assert_eq!(res.0, 2);
+ if res.1 {
+ break;
+ }
+ }
+ assert_eq!(*x, 3);
}
}