valopt release-channel "dev" "the name of the release channel to build"
valopt musl-root "/usr/local" "MUSL root installation directory"
+# Used on systems where "cc" and "ar" are unavailable
+valopt default-linker "cc" "the default linker"
+valopt default-ar "ar" "the default ar"
+
# Many of these are saved below during the "writing configuration" step
# (others are conditionally saved).
opt_nosave manage-submodules 1 "let the build manage the git submodules"
# an explanation of why this exists can be found on issue #2400
export CFG_COMPILER_HOST_TRIPLE
+# Used as defaults for the runtime ar and cc tools
+export CFG_DEFAULT_LINKER
+export CFG_DEFAULT_AR
+
# The standard libraries should be held up to a higher standard than any old
# code, make sure that these common warnings are denied by default. These can
# be overridden during development temporarily. For stage0, we allow warnings
for all pointer types (including smart pointers like Box and Rc). Unsize is
only implemented automatically, and enables the following transformations:
-* `[T, ..n]` => `[T]`
+* `[T; n]` => `[T]`
* `T` => `Trait` where `T: Trait`
* `Foo<..., T, ...>` => `Foo<..., U, ...>` where:
* `T: Unsize<U>`
But it's probably not the implementation you want.
Rust has traditionally avoided making traits unsafe because it makes Unsafe
-pervasive, which is not desirable. Send and Sync are unsafe is because thread
+pervasive, which is not desirable. The reason Send and Sync are unsafe is that thread
safety is a *fundamental property* that unsafe code cannot possibly hope to defend
against in the same way it would defend against a bad Ord implementation. The
only way to possibly defend against thread-unsafety would be to *not use
```rust
use std::thread;
-let result = thread::spawn(move || {
+let handle = thread::spawn(move || {
panic!("oops!");
-}).join();
+});
+
+let result = handle.join();
assert!(result.is_err());
```
-Our `Thread` gives us a `Result` back, which allows us to check if the thread
+The `join()` method on the handle gives us a `Result` back, which allows us to check if the thread
has panicked or not.
If you're just showing plain text, choose `text`.
It's important to choose the correct annotation here, because `rustdoc` uses it
-in an interesting way: It can be used to actually test your examples, so that
-they don't get out of date. If you have some C code but `rustdoc` thinks it's
-Rust because you left off the annotation, `rustdoc` will complain when trying to
-generate the documentation.
+in an interesting way: It can be used to actually test your examples in a
+library crate, so that they don't get out of date. If you have some C code but
+`rustdoc` thinks it's Rust because you left off the annotation, `rustdoc` will
+complain when trying to generate the documentation.
## Documentation as tests
$ cargo test
```
-That's right, `cargo test` tests embedded documentation too. However,
-`cargo test` will not test binary crates, only library ones. This is
+That's right, `cargo test` tests embedded documentation too. **However,
+`cargo test` will not test binary crates, only library ones.** This is
due to the way `rustdoc` works: it links against the library to be tested,
but with a binary, there’s nothing to link to.
Nothing is better than documentation with examples. Nothing is worse than
examples that don't actually work, because the code has changed since the
documentation has been written. To this end, Rust supports automatically
-running examples in your documentation. Here's a fleshed-out `src/lib.rs`
-with examples:
+running examples in your documentation (**note:** this only works in library
+crates, not binary crates). Here's a fleshed-out `src/lib.rs` with examples:
```rust,ignore
//! The `adder` crate provides functions that add numbers to other numbers.
use boxed::Box;
-use core::atomic;
-use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
+use core::sync::atomic;
+use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
-#[unstable(feature = "arc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
+#[unstable(feature = "arc_weak", reason = "needs FCP", issue = "27718")]
pub struct Weak<T: ?Sized> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
// the value usize::MAX acts as a sentinel for temporarily "locking" the
// ability to upgrade weak pointers or downgrade strong ones; this is used
- // to avoid races in `make_unique` and `get_mut`.
+ // to avoid races in `make_mut` and `get_mut`.
weak: atomic::AtomicUsize,
data: T,
};
Arc { _ptr: unsafe { NonZero::new(Box::into_raw(x)) } }
}
+
+ /// Unwraps the contained value if the `Arc<T>` has only one strong reference.
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// Otherwise, an `Err` is returned with the same `Arc<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(arc_unique)]
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new(3);
+ /// assert_eq!(Arc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Arc::new(4);
+ /// let _y = x.clone();
+ /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
+ /// ```
+ #[inline]
+ #[unstable(feature = "arc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ // See `drop` for why all these atomics are like this
+ if this.inner().strong.compare_and_swap(1, 0, Release) != 1 { return Err(this) }
+
+ atomic::fence(Acquire);
+
+ unsafe {
+ let ptr = *this._ptr;
+ let elem = ptr::read(&(*ptr).data);
+
+ // Make a weak pointer to clean up the implicit strong-weak reference
+ let _weak = Weak { _ptr: this._ptr };
+ mem::forget(this);
+
+ Ok(elem)
+ }
+ }
}
impl<T: ?Sized> Arc<T> {
///
/// ```
/// #![feature(arc_weak)]
- ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
- /// let weak_five = five.downgrade();
+ /// let weak_five = Arc::downgrade(&five);
/// ```
- #[unstable(feature = "arc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
- pub fn downgrade(&self) -> Weak<T> {
+ #[unstable(feature = "arc_weak", reason = "needs FCP", issue = "27718")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
loop {
// This Relaxed is OK because we're checking the value in the CAS
// below.
- let cur = self.inner().weak.load(Relaxed);
+ let cur = this.inner().weak.load(Relaxed);
// check if the weak counter is currently "locked"; if so, spin.
if cur == usize::MAX { continue }
// Unlike with Clone(), we need this to be an Acquire read to
// synchronize with the write coming from `is_unique`, so that the
// events prior to that write happen before this read.
- if self.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
- return Weak { _ptr: self._ptr }
+ if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
+ return Weak { _ptr: this._ptr }
}
}
}
/// Get the number of weak references to this value.
#[inline]
- #[unstable(feature = "arc_counts", issue = "27718")]
- pub fn weak_count(this: &Arc<T>) -> usize {
+ #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", issue = "27718")]
+ pub fn weak_count(this: &Self) -> usize {
this.inner().weak.load(SeqCst) - 1
}
/// Get the number of strong references to this value.
#[inline]
- #[unstable(feature = "arc_counts", issue = "27718")]
- pub fn strong_count(this: &Arc<T>) -> usize {
+ #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", issue = "27718")]
+ pub fn strong_count(this: &Self) -> usize {
this.inner().strong.load(SeqCst)
}
}
impl<T: Clone> Arc<T> {
- /// Make a mutable reference from the given `Arc<T>`.
+ #[unstable(feature = "arc_unique", reason = "renamed to Arc::make_mut", issue = "27718")]
+ #[deprecated(since = "1.4.0", reason = "renamed to Arc::make_mut")]
+ pub fn make_unique(this: &mut Self) -> &mut T {
+ Arc::make_mut(this)
+ }
+
+ /// Make a mutable reference into the given `Arc<T>` by cloning the inner
+ /// data unless the `Arc<T>` has exactly one strong reference and no weak
+ /// references.
///
- /// This is also referred to as a copy-on-write operation because the inner
- /// data is cloned if the (strong) reference count is greater than one. If
- /// we hold the only strong reference, any existing weak references will no
- /// longer be upgradeable.
+ /// This is also referred to as a copy-on-write.
///
/// # Examples
///
/// ```
/// #![feature(arc_unique)]
- ///
/// use std::sync::Arc;
///
- /// let mut five = Arc::new(5);
+ /// let mut data = Arc::new(5);
+ ///
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = data.clone(); // Won't clone inner data
+ /// *Arc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Note: data and other_data now point to different numbers
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
///
- /// let mut_five = Arc::make_unique(&mut five);
/// ```
#[inline]
- #[unstable(feature = "arc_unique", issue = "27718")]
- pub fn make_unique(this: &mut Arc<T>) -> &mut T {
+ #[unstable(feature = "arc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
// Note that we hold both a strong reference and a weak reference.
// Thus, releasing our strong reference only will not, by itself, cause
// the memory to be deallocated.
}
impl<T: ?Sized> Arc<T> {
- /// Returns a mutable reference to the contained value if the `Arc<T>` is unique.
- ///
- /// Returns `None` if the `Arc<T>` is not unique.
+ /// Returns a mutable reference to the contained value if the `Arc<T>` has
+ /// one strong reference and no weak references.
///
/// # Examples
///
/// ```
- /// #![feature(arc_unique, alloc)]
- ///
- /// extern crate alloc;
- /// # fn main() {
- /// use alloc::arc::Arc;
+ /// #![feature(arc_unique)]
+ /// use std::sync::Arc;
///
/// let mut x = Arc::new(3);
/// *Arc::get_mut(&mut x).unwrap() = 4;
///
/// let _y = x.clone();
/// assert!(Arc::get_mut(&mut x).is_none());
- /// # }
/// ```
#[inline]
- #[unstable(feature = "arc_unique", issue = "27718")]
- pub fn get_mut(this: &mut Arc<T>) -> Option<&mut T> {
+ #[unstable(feature = "arc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if this.is_unique() {
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
}
}
-#[unstable(feature = "arc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
impl<T: ?Sized> Weak<T> {
/// Upgrades a weak reference to a strong reference.
///
///
/// ```
/// #![feature(arc_weak)]
- ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
- /// let weak_five = five.downgrade();
+ /// let weak_five = Arc::downgrade(&five);
///
/// let strong_five: Option<Arc<_>> = weak_five.upgrade();
/// ```
+ #[unstable(feature = "arc_weak", reason = "needs FCP", issue = "27718")]
pub fn upgrade(&self) -> Option<Arc<T>> {
// We use a CAS loop to increment the strong count instead of a
// fetch_add because once the count hits 0 it must never be above 0.
}
}
-#[unstable(feature = "arc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
+#[unstable(feature = "arc_weak", reason = "needs FCP", issue = "27718")]
impl<T: ?Sized> Clone for Weak<T> {
/// Makes a clone of the `Weak<T>`.
///
///
/// ```
/// #![feature(arc_weak)]
- ///
/// use std::sync::Arc;
///
- /// let weak_five = Arc::new(5).downgrade();
+ /// let weak_five = Arc::downgrade(&Arc::new(5));
///
/// weak_five.clone();
/// ```
///
/// ```
/// #![feature(arc_weak)]
- ///
/// use std::sync::Arc;
///
/// {
/// let five = Arc::new(5);
- /// let weak_five = five.downgrade();
+ /// let weak_five = Arc::downgrade(&five);
///
/// // stuff
///
/// }
/// {
/// let five = Arc::new(5);
- /// let weak_five = five.downgrade();
+ /// let weak_five = Arc::downgrade(&five);
///
/// // stuff
///
assert!(Arc::get_mut(&mut x).is_none());
drop(y);
assert!(Arc::get_mut(&mut x).is_some());
- let _w = x.downgrade();
+ let _w = Arc::downgrade(&x);
assert!(Arc::get_mut(&mut x).is_none());
}
#[test]
- fn test_cowarc_clone_make_unique() {
+ fn try_unwrap() {
+ let x = Arc::new(3);
+ assert_eq!(Arc::try_unwrap(x), Ok(3));
+ let x = Arc::new(4);
+ let _y = x.clone();
+ assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
+ let x = Arc::new(5);
+ let _w = Arc::downgrade(&x);
+ assert_eq!(Arc::try_unwrap(x), Ok(5));
+ }
+
+ #[test]
+ fn test_cowarc_clone_make_mut() {
let mut cow0 = Arc::new(75);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
- assert!(75 == *Arc::make_unique(&mut cow0));
- assert!(75 == *Arc::make_unique(&mut cow1));
- assert!(75 == *Arc::make_unique(&mut cow2));
+ assert!(75 == *Arc::make_mut(&mut cow0));
+ assert!(75 == *Arc::make_mut(&mut cow1));
+ assert!(75 == *Arc::make_mut(&mut cow2));
- *Arc::make_unique(&mut cow0) += 1;
- *Arc::make_unique(&mut cow1) += 2;
- *Arc::make_unique(&mut cow2) += 3;
+ *Arc::make_mut(&mut cow0) += 1;
+ *Arc::make_mut(&mut cow1) += 2;
+ *Arc::make_mut(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(75 == *cow1);
assert!(75 == *cow2);
- *Arc::make_unique(&mut cow0) += 1;
-
+ *Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
#[test]
fn test_cowarc_clone_weak() {
let mut cow0 = Arc::new(75);
- let cow1_weak = cow0.downgrade();
+ let cow1_weak = Arc::downgrade(&cow0);
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
- *Arc::make_unique(&mut cow0) += 1;
+ *Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
#[test]
fn test_live() {
let x = Arc::new(5);
- let y = x.downgrade();
+ let y = Arc::downgrade(&x);
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Arc::new(5);
- let y = x.downgrade();
+ let y = Arc::downgrade(&x);
drop(x);
assert!(y.upgrade().is_none());
}
}
let a = Arc::new(Cycle { x: Mutex::new(None) });
- let b = a.clone().downgrade();
+ let b = Arc::downgrade(&a.clone());
*a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
fn drop_arc_weak() {
let mut canary = atomic::AtomicUsize::new(0);
let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
- let arc_weak = arc.downgrade();
+ let arc_weak = Arc::downgrade(&arc);
assert!(canary.load(Acquire) == 0);
drop(arc);
assert!(canary.load(Acquire) == 1);
fn test_strong_count() {
let a = Arc::new(0u32);
assert!(Arc::strong_count(&a) == 1);
- let w = a.downgrade();
+ let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
let b = w.upgrade().expect("");
assert!(Arc::strong_count(&b) == 2);
let a = Arc::new(0u32);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 0);
- let w = a.downgrade();
+ let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 1);
let x = w.clone();
let c = a.clone();
assert!(Arc::strong_count(&a) == 2);
assert!(Arc::weak_count(&a) == 0);
- let d = c.downgrade();
+ let d = Arc::downgrade(&c);
assert!(Arc::weak_count(&c) == 1);
assert!(Arc::strong_count(&c) == 2);
fn test_unsized() {
let x: Arc<[i32]> = Arc::new([1, 2, 3]);
assert_eq!(format!("{:?}", x), "[1, 2, 3]");
- let y = x.clone().downgrade();
+ let y = Arc::downgrade(&x.clone());
drop(x);
assert!(y.upgrade().is_none());
}
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject))]
#![no_std]
#![cfg_attr(not(stage0), needs_allocator)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// FIXME(27718): rc_counts stuff is useful internally, but was previously public
+#![allow(deprecated)]
+
//! Thread-local reference-counted boxes (the `Rc<T>` type).
//!
//! The `Rc<T>` type provides shared ownership of an immutable value.
//!
//! // Add the Gadgets to their Owner. To do this we mutably borrow from
//! // the RefCell holding the Owner's Gadgets.
-//! gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade());
-//! gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade());
+//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget1));
+//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget2));
//!
//! // Iterate over our Gadgets, printing their details out
//! for gadget_opt in gadget_owner.gadgets.borrow().iter() {
use core::hash::{Hasher, Hash};
use core::intrinsics::{assume, drop_in_place, abort};
use core::marker::{self, Unsize};
-use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
+use core::mem::{self, align_of_val, size_of_val, forget};
use core::nonzero::NonZero;
use core::ops::{CoerceUnsized, Deref};
use core::ptr;
}
}
- /// Unwraps the contained value if the `Rc<T>` is unique.
+ /// Unwraps the contained value if the `Rc<T>` has only one strong reference.
+ /// This will succeed even if there are outstanding weak references.
///
- /// If the `Rc<T>` is not unique, an `Err` is returned with the same
- /// `Rc<T>`.
+ /// Otherwise, an `Err` is returned with the same `Rc<T>`.
///
/// # Examples
///
/// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
/// ```
#[inline]
- #[unstable(feature = "rc_unique", issue = "27718")]
- pub fn try_unwrap(rc: Rc<T>) -> Result<T, Rc<T>> {
- if Rc::is_unique(&rc) {
+ #[unstable(feature = "rc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ if Rc::would_unwrap(&this) {
unsafe {
- let val = ptr::read(&*rc); // copy the contained object
- // destruct the box and skip our Drop
- // we can ignore the refcounts because we know we're unique
- deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
- align_of::<RcBox<T>>());
- forget(rc);
+ let val = ptr::read(&*this); // copy the contained object
+
+ // Indicate to Weaks that they can't be promoted by decrementing
+ // the strong count, and then remove the implicit "strong weak"
+ // pointer while also handling drop logic by just crafting a
+ // fake Weak.
+ this.dec_strong();
+ let _weak = Weak { _ptr: this._ptr };
+ forget(this);
Ok(val)
}
} else {
- Err(rc)
+ Err(this)
}
}
+
+ /// Checks if `Rc::try_unwrap` would return `Ok`.
+ #[unstable(feature = "rc_would_unwrap", reason = "just added for niche usecase",
+ issue = "27718")]
+ pub fn would_unwrap(this: &Self) -> bool {
+ Rc::strong_count(&this) == 1
+ }
}
impl<T: ?Sized> Rc<T> {
///
/// let five = Rc::new(5);
///
- /// let weak_five = five.downgrade();
+ /// let weak_five = Rc::downgrade(&five);
/// ```
- #[unstable(feature = "rc_weak",
- reason = "Weak pointers may not belong in this module",
- issue = "27718")]
- pub fn downgrade(&self) -> Weak<T> {
- self.inc_weak();
- Weak { _ptr: self._ptr }
+ #[unstable(feature = "rc_weak", reason = "needs FCP", issue = "27718")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
+ this.inc_weak();
+ Weak { _ptr: this._ptr }
}
/// Get the number of weak references to this value.
#[inline]
- #[unstable(feature = "rc_counts", issue = "27718")]
- pub fn weak_count(this: &Rc<T>) -> usize { this.weak() - 1 }
+ #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "27718")]
+ pub fn weak_count(this: &Self) -> usize { this.weak() - 1 }
/// Get the number of strong references to this value.
#[inline]
- #[unstable(feature = "rc_counts", issue= "27718")]
- pub fn strong_count(this: &Rc<T>) -> usize { this.strong() }
+ #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "27718")]
+ pub fn strong_count(this: &Self) -> usize { this.strong() }
/// Returns true if there are no other `Rc` or `Weak<T>` values that share
/// the same inner value.
/// # Examples
///
/// ```
- /// #![feature(rc_unique)]
+ /// #![feature(rc_counts)]
///
/// use std::rc::Rc;
///
/// assert!(Rc::is_unique(&five));
/// ```
#[inline]
- #[unstable(feature = "rc_unique", issue = "27718")]
- pub fn is_unique(rc: &Rc<T>) -> bool {
- Rc::weak_count(rc) == 0 && Rc::strong_count(rc) == 1
+ #[unstable(feature = "rc_counts", reason = "uniqueness has unclear meaning", issue = "27718")]
+ pub fn is_unique(this: &Self) -> bool {
+ Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
}
- /// Returns a mutable reference to the contained value if the `Rc<T>` is
- /// unique.
+ /// Returns a mutable reference to the contained value if the `Rc<T>` has
+ /// one strong reference and no weak references.
///
/// Returns `None` if the `Rc<T>` is not unique.
///
/// assert!(Rc::get_mut(&mut x).is_none());
/// ```
#[inline]
- #[unstable(feature = "rc_unique", issue = "27718")]
- pub fn get_mut(rc: &mut Rc<T>) -> Option<&mut T> {
- if Rc::is_unique(rc) {
- let inner = unsafe { &mut **rc._ptr };
+ #[unstable(feature = "rc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if Rc::is_unique(this) {
+ let inner = unsafe { &mut **this._ptr };
Some(&mut inner.value)
} else {
None
}
impl<T: Clone> Rc<T> {
- /// Make a mutable reference from the given `Rc<T>`.
+ #[inline]
+ #[unstable(feature = "rc_unique", reason = "renamed to Rc::make_mut", issue = "27718")]
+ #[deprecated(since = "1.4.0", reason = "renamed to Rc::make_mut")]
+ pub fn make_unique(&mut self) -> &mut T {
+ Rc::make_mut(self)
+ }
+
+ /// Make a mutable reference into the given `Rc<T>` by cloning the inner
+ /// data unless the `Rc<T>` has exactly one strong reference and no weak
+ /// references.
///
- /// This is also referred to as a copy-on-write operation because the inner
- /// data is cloned if the reference count is greater than one.
+ /// This is also referred to as a copy-on-write.
///
/// # Examples
///
/// ```
/// #![feature(rc_unique)]
- ///
/// use std::rc::Rc;
///
- /// let mut five = Rc::new(5);
+ /// let mut data = Rc::new(5);
+ ///
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = data.clone(); // Won't clone inner data
+ /// *Rc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Note: data and other_data now point to different numbers
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
///
- /// let mut_five = five.make_unique();
/// ```
#[inline]
- #[unstable(feature = "rc_unique", issue = "27718")]
- pub fn make_unique(&mut self) -> &mut T {
- if !Rc::is_unique(self) {
- *self = Rc::new((**self).clone())
+ #[unstable(feature = "rc_unique", reason = "needs FCP", issue = "27718")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ if Rc::strong_count(this) != 1 {
+ // Gotta clone the data, there are other Rcs
+ *this = Rc::new((**this).clone())
+ } else if Rc::weak_count(this) != 0 {
+ // Can just steal the data, all that's left is Weaks
+ unsafe {
+ let mut swap = Rc::new(ptr::read(&(**this._ptr).value));
+ mem::swap(this, &mut swap);
+ swap.dec_strong();
+ // Remove implicit strong-weak ref (no need to craft a fake
+ // Weak here -- we know other Weaks can clean up for us)
+ swap.dec_weak();
+ forget(swap);
+ }
}
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the `Rc<T>` itself to be `mut`, so we're returning the only possible
// reference to the inner value.
- let inner = unsafe { &mut **self._ptr };
+ let inner = unsafe { &mut **this._ptr };
&mut inner.value
}
}
unsafe {
let ptr = *self._ptr;
if !(*(&ptr as *const _ as *const *const ())).is_null() &&
- ptr as *const () as usize != mem::POST_DROP_USIZE {
+ ptr as *const () as usize != mem::POST_DROP_USIZE {
self.dec_strong();
if self.strong() == 0 {
// destroy the contained object
///
/// See the [module level documentation](./index.html) for more.
#[unsafe_no_drop_flag]
-#[unstable(feature = "rc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
+#[unstable(feature = "rc_weak", reason = "needs FCP", issue = "27718")]
pub struct Weak<T: ?Sized> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
-#[unstable(feature = "rc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
impl<T: ?Sized> Weak<T> {
-
/// Upgrades a weak reference to a strong reference.
///
/// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible.
///
/// let five = Rc::new(5);
///
- /// let weak_five = five.downgrade();
+ /// let weak_five = Rc::downgrade(&five);
///
/// let strong_five: Option<Rc<_>> = weak_five.upgrade();
/// ```
+ #[unstable(feature = "rc_weak", reason = "needs FCP", issue = "27718")]
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
///
/// {
/// let five = Rc::new(5);
- /// let weak_five = five.downgrade();
+ /// let weak_five = Rc::downgrade(&five);
///
/// // stuff
///
/// }
/// {
/// let five = Rc::new(5);
- /// let weak_five = five.downgrade();
+ /// let weak_five = Rc::downgrade(&five);
///
/// // stuff
///
unsafe {
let ptr = *self._ptr;
if !(*(&ptr as *const _ as *const *const ())).is_null() &&
- ptr as *const () as usize != mem::POST_DROP_USIZE {
+ ptr as *const () as usize != mem::POST_DROP_USIZE {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
}
}
-#[unstable(feature = "rc_weak",
- reason = "Weak pointers may not belong in this module.",
- issue = "27718")]
+#[unstable(feature = "rc_weak", reason = "needs FCP", issue = "27718")]
impl<T: ?Sized> Clone for Weak<T> {
/// Makes a clone of the `Weak<T>`.
///
/// use std::rc::Rc;
///
- /// let weak_five = Rc::new(5).downgrade();
+ /// let weak_five = Rc::downgrade(&Rc::new(5));
///
/// weak_five.clone();
/// ```
#[test]
fn test_live() {
let x = Rc::new(5);
- let y = x.downgrade();
+ let y = Rc::downgrade(&x);
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5);
- let y = x.downgrade();
+ let y = Rc::downgrade(&x);
drop(x);
assert!(y.upgrade().is_none());
}
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
- let b = a.clone().downgrade();
+ let b = Rc::downgrade(&a.clone());
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
assert!(!Rc::is_unique(&x));
drop(y);
assert!(Rc::is_unique(&x));
- let w = x.downgrade();
+ let w = Rc::downgrade(&x);
assert!(!Rc::is_unique(&x));
drop(w);
assert!(Rc::is_unique(&x));
fn test_strong_count() {
let a = Rc::new(0u32);
assert!(Rc::strong_count(&a) == 1);
- let w = a.downgrade();
+ let w = Rc::downgrade(&a);
assert!(Rc::strong_count(&a) == 1);
let b = w.upgrade().expect("upgrade of live rc failed");
assert!(Rc::strong_count(&b) == 2);
let a = Rc::new(0u32);
assert!(Rc::strong_count(&a) == 1);
assert!(Rc::weak_count(&a) == 0);
- let w = a.downgrade();
+ let w = Rc::downgrade(&a);
assert!(Rc::strong_count(&a) == 1);
assert!(Rc::weak_count(&a) == 1);
drop(w);
let _y = x.clone();
assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
let x = Rc::new(5);
- let _w = x.downgrade();
- assert_eq!(Rc::try_unwrap(x), Err(Rc::new(5)));
+ let _w = Rc::downgrade(&x);
+ assert_eq!(Rc::try_unwrap(x), Ok(5));
}
#[test]
assert!(Rc::get_mut(&mut x).is_none());
drop(y);
assert!(Rc::get_mut(&mut x).is_some());
- let _w = x.downgrade();
+ let _w = Rc::downgrade(&x);
assert!(Rc::get_mut(&mut x).is_none());
}
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
- assert!(75 == *cow0.make_unique());
- assert!(75 == *cow1.make_unique());
- assert!(75 == *cow2.make_unique());
+ assert!(75 == *Rc::make_mut(&mut cow0));
+ assert!(75 == *Rc::make_mut(&mut cow1));
+ assert!(75 == *Rc::make_mut(&mut cow2));
- *cow0.make_unique() += 1;
- *cow1.make_unique() += 2;
- *cow2.make_unique() += 3;
+ *Rc::make_mut(&mut cow0) += 1;
+ *Rc::make_mut(&mut cow1) += 2;
+ *Rc::make_mut(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(75 == *cow1);
assert!(75 == *cow2);
- *cow0.make_unique() += 1;
+ *Rc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75);
- let cow1_weak = cow0.downgrade();
+ let cow1_weak = Rc::downgrade(&cow0);
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
- *cow0.make_unique() += 1;
+ *Rc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject))]
#![allow(trivial_casts)]
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Atomic types
-//!
-//! Atomic types provide primitive shared-memory communication between
-//! threads, and are the building blocks of other concurrent
-//! types.
-//!
-//! This module defines atomic versions of a select number of primitive
-//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
-//! Atomic types present operations that, when used correctly, synchronize
-//! updates between threads.
-//!
-//! Each method takes an `Ordering` which represents the strength of
-//! the memory barrier for that operation. These orderings are the
-//! same as [LLVM atomic orderings][1].
-//!
-//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
-//!
-//! Atomic variables are safe to share between threads (they implement `Sync`)
-//! but they do not themselves provide the mechanism for sharing. The most
-//! common way to share an atomic variable is to put it into an `Arc` (an
-//! atomically-reference-counted shared pointer).
-//!
-//! Most atomic types may be stored in static variables, initialized using
-//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
-//! are often used for lazy global initialization.
-//!
-//!
-//! # Examples
-//!
-//! A simple spinlock:
-//!
-//! ```
-//! use std::sync::Arc;
-//! use std::sync::atomic::{AtomicUsize, Ordering};
-//! use std::thread;
-//!
-//! fn main() {
-//! let spinlock = Arc::new(AtomicUsize::new(1));
-//!
-//! let spinlock_clone = spinlock.clone();
-//! thread::spawn(move|| {
-//! spinlock_clone.store(0, Ordering::SeqCst);
-//! });
-//!
-//! // Wait for the other thread to release the lock
-//! while spinlock.load(Ordering::SeqCst) != 0 {}
-//! }
-//! ```
-//!
-//! Keep a global count of live threads:
-//!
-//! ```
-//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
-//!
-//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
-//!
-//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
-//! println!("live threads: {}", old_thread_count + 1);
-//! ```
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-use self::Ordering::*;
-
-use marker::{Send, Sync};
-
-use intrinsics;
-use cell::UnsafeCell;
-
-use default::Default;
-use fmt;
-
-/// A boolean type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicBool {
- v: UnsafeCell<usize>,
-}
-
-impl Default for AtomicBool {
- fn default() -> Self {
- Self::new(Default::default())
- }
-}
-
-unsafe impl Sync for AtomicBool {}
-
-/// A signed integer type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicIsize {
- v: UnsafeCell<isize>,
-}
-
-impl Default for AtomicIsize {
- fn default() -> Self {
- Self::new(Default::default())
- }
-}
-
-unsafe impl Sync for AtomicIsize {}
-
-/// An unsigned integer type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicUsize {
- v: UnsafeCell<usize>,
-}
-
-impl Default for AtomicUsize {
- fn default() -> Self {
- Self::new(Default::default())
- }
-}
-
-unsafe impl Sync for AtomicUsize {}
-
-/// A raw pointer type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicPtr<T> {
- p: UnsafeCell<*mut T>,
-}
-
-impl<T> Default for AtomicPtr<T> {
- fn default() -> AtomicPtr<T> {
- AtomicPtr::new(::ptr::null_mut())
- }
-}
-
-unsafe impl<T> Send for AtomicPtr<T> {}
-unsafe impl<T> Sync for AtomicPtr<T> {}
-
-/// Atomic memory orderings
-///
-/// Memory orderings limit the ways that both the compiler and CPU may reorder
-/// instructions around atomic operations. At its most restrictive,
-/// "sequentially consistent" atomics allow neither reads nor writes
-/// to be moved either before or after the atomic operation; on the other end
-/// "relaxed" atomics allow all reorderings.
-///
-/// Rust's memory orderings are [the same as
-/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
-#[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Copy, Clone)]
-pub enum Ordering {
- /// No ordering constraints, only atomic operations.
- #[stable(feature = "rust1", since = "1.0.0")]
- Relaxed,
- /// When coupled with a store, all previous writes become visible
- /// to another thread that performs a load with `Acquire` ordering
- /// on the same value.
- #[stable(feature = "rust1", since = "1.0.0")]
- Release,
- /// When coupled with a load, all subsequent loads will see data
- /// written before a store with `Release` ordering on the same value
- /// in another thread.
- #[stable(feature = "rust1", since = "1.0.0")]
- Acquire,
- /// When coupled with a load, uses `Acquire` ordering, and with a store
- /// `Release` ordering.
- #[stable(feature = "rust1", since = "1.0.0")]
- AcqRel,
- /// Like `AcqRel` with the additional guarantee that all threads see all
- /// sequentially consistent operations in the same order.
- #[stable(feature = "rust1", since = "1.0.0")]
- SeqCst,
-}
-
-/// An `AtomicBool` initialized to `false`.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
-/// An `AtomicIsize` initialized to `0`.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
-/// An `AtomicUsize` initialized to `0`.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
-
-// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
-const UINT_TRUE: usize = !0;
-
-impl AtomicBool {
- /// Creates a new `AtomicBool`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicBool;
- ///
- /// let atomic_true = AtomicBool::new(true);
- /// let atomic_false = AtomicBool::new(false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(v: bool) -> AtomicBool {
- AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
- }
-
- /// Loads a value from the bool.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let some_bool = AtomicBool::new(true);
- ///
- /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> bool {
- unsafe { atomic_load(self.v.get(), order) > 0 }
- }
-
- /// Stores a value into the bool.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let some_bool = AtomicBool::new(true);
- ///
- /// some_bool.store(false, Ordering::Relaxed);
- /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, val: bool, order: Ordering) {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_store(self.v.get(), val, order); }
- }
-
- /// Stores a value into the bool, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let some_bool = AtomicBool::new(true);
- ///
- /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
- /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_swap(self.v.get(), val, order) > 0 }
- }
-
- /// Stores a value into the `bool` if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let some_bool = AtomicBool::new(true);
- ///
- /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
- /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
- ///
- /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
- /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
- let current = if current { UINT_TRUE } else { 0 };
- let new = if new { UINT_TRUE } else { 0 };
-
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
- }
-
- /// Logical "and" with a boolean value.
- ///
- /// Performs a logical "and" operation on the current value and the argument `val`, and sets
- /// the new value to the result.
- ///
- /// Returns the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- ///
- /// let foo = AtomicBool::new(false);
- /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_and(self.v.get(), val, order) > 0 }
- }
-
- /// Logical "nand" with a boolean value.
- ///
- /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
- /// the new value to the result.
- ///
- /// Returns the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- ///
- /// let foo = AtomicBool::new(false);
- /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_nand(self.v.get(), val, order) > 0 }
- }
-
- /// Logical "or" with a boolean value.
- ///
- /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
- /// new value to the result.
- ///
- /// Returns the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- ///
- /// let foo = AtomicBool::new(false);
- /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_or(self.v.get(), val, order) > 0 }
- }
-
- /// Logical "xor" with a boolean value.
- ///
- /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
- /// the new value to the result.
- ///
- /// Returns the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicBool, Ordering};
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), true);
- ///
- /// let foo = AtomicBool::new(true);
- /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- ///
- /// let foo = AtomicBool::new(false);
- /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
- /// assert_eq!(foo.load(Ordering::SeqCst), false);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_xor(self.v.get(), val, order) > 0 }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl AtomicIsize {
- /// Creates a new `AtomicIsize`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicIsize;
- ///
- /// let atomic_forty_two = AtomicIsize::new(42);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(v: isize) -> AtomicIsize {
- AtomicIsize {v: UnsafeCell::new(v)}
- }
-
- /// Loads a value from the isize.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> isize {
- unsafe { atomic_load(self.v.get(), order) }
- }
-
- /// Stores a value into the isize.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// some_isize.store(10, Ordering::Relaxed);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, val: isize, order: Ordering) {
- unsafe { atomic_store(self.v.get(), val, order); }
- }
-
- /// Stores a value into the isize, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_swap(self.v.get(), val, order) }
- }
-
- /// Stores a value into the `isize` if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
- }
-
- /// Add an isize to the current value, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0);
- /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_add(self.v.get(), val, order) }
- }
-
- /// Subtract an isize from the current value, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0);
- /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), -10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_sub(self.v.get(), val, order) }
- }
-
- /// Bitwise and with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_and(self.v.get(), val, order) }
- }
-
- /// Bitwise or with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_or(self.v.get(), val, order) }
- }
-
- /// Bitwise xor with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_xor(self.v.get(), val, order) }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl AtomicUsize {
- /// Creates a new `AtomicUsize`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicUsize;
- ///
- /// let atomic_forty_two = AtomicUsize::new(42);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(v: usize) -> AtomicUsize {
- AtomicUsize { v: UnsafeCell::new(v) }
- }
-
- /// Loads a value from the usize.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> usize {
- unsafe { atomic_load(self.v.get(), order) }
- }
-
- /// Stores a value into the usize.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// some_usize.store(10, Ordering::Relaxed);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, val: usize, order: Ordering) {
- unsafe { atomic_store(self.v.get(), val, order); }
- }
-
- /// Stores a value into the usize, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize= AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_swap(self.v.get(), val, order) }
- }
-
- /// Stores a value into the `usize` if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
- unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
- }
-
- /// Add to the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0);
- /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_add(self.v.get(), val, order) }
- }
-
- /// Subtract from the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(10);
- /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_sub(self.v.get(), val, order) }
- }
-
- /// Bitwise and with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_and(self.v.get(), val, order) }
- }
-
- /// Bitwise or with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_or(self.v.get(), val, order) }
- }
-
- /// Bitwise xor with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_xor(self.v.get(), val, order) }
- }
-}
-
-impl<T> AtomicPtr<T> {
- /// Creates a new `AtomicPtr`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicPtr;
- ///
- /// let ptr = &mut 5;
- /// let atomic_ptr = AtomicPtr::new(ptr);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(p: *mut T) -> AtomicPtr<T> {
- AtomicPtr { p: UnsafeCell::new(p) }
- }
-
- /// Loads a value from the pointer.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicPtr, Ordering};
- ///
- /// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
- ///
- /// let value = some_ptr.load(Ordering::Relaxed);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> *mut T {
- unsafe {
- atomic_load(self.p.get() as *mut usize, order) as *mut T
- }
- }
-
- /// Stores a value into the pointer.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicPtr, Ordering};
- ///
- /// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
- ///
- /// let other_ptr = &mut 10;
- ///
- /// some_ptr.store(other_ptr, Ordering::Relaxed);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, ptr: *mut T, order: Ordering) {
- unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
- }
-
- /// Stores a value into the pointer, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicPtr, Ordering};
- ///
- /// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
- ///
- /// let other_ptr = &mut 10;
- ///
- /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
- unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
- }
-
- /// Stores a value into the pointer if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicPtr, Ordering};
- ///
- /// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
- ///
- /// let other_ptr = &mut 10;
- /// let another_ptr = &mut 10;
- ///
- /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
- unsafe {
- atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
- new as usize, order) as *mut T
- }
- }
-}
-
-#[inline]
-unsafe fn atomic_store<T>(dst: *mut T, val: T, order:Ordering) {
- match order {
- Release => intrinsics::atomic_store_rel(dst, val),
- Relaxed => intrinsics::atomic_store_relaxed(dst, val),
- SeqCst => intrinsics::atomic_store(dst, val),
- Acquire => panic!("there is no such thing as an acquire store"),
- AcqRel => panic!("there is no such thing as an acquire/release store"),
- }
-}
-
-#[inline]
-unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_load_acq(dst),
- Relaxed => intrinsics::atomic_load_relaxed(dst),
- SeqCst => intrinsics::atomic_load(dst),
- Release => panic!("there is no such thing as a release load"),
- AcqRel => panic!("there is no such thing as an acquire/release load"),
- }
-}
-
-#[inline]
-unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_xchg_acq(dst, val),
- Release => intrinsics::atomic_xchg_rel(dst, val),
- AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
- Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
- SeqCst => intrinsics::atomic_xchg(dst, val)
- }
-}
-
-/// Returns the old value (like __sync_fetch_and_add).
-#[inline]
-unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_xadd_acq(dst, val),
- Release => intrinsics::atomic_xadd_rel(dst, val),
- AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
- Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
- SeqCst => intrinsics::atomic_xadd(dst, val)
- }
-}
-
-/// Returns the old value (like __sync_fetch_and_sub).
-#[inline]
-unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_xsub_acq(dst, val),
- Release => intrinsics::atomic_xsub_rel(dst, val),
- AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
- Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
- SeqCst => intrinsics::atomic_xsub(dst, val)
- }
-}
-
-#[inline]
-unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
- Release => intrinsics::atomic_cxchg_rel(dst, old, new),
- AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
- Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
- SeqCst => intrinsics::atomic_cxchg(dst, old, new),
- }
-}
-
-#[inline]
-unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_and_acq(dst, val),
- Release => intrinsics::atomic_and_rel(dst, val),
- AcqRel => intrinsics::atomic_and_acqrel(dst, val),
- Relaxed => intrinsics::atomic_and_relaxed(dst, val),
- SeqCst => intrinsics::atomic_and(dst, val)
- }
-}
-
-#[inline]
-unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_nand_acq(dst, val),
- Release => intrinsics::atomic_nand_rel(dst, val),
- AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
- Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
- SeqCst => intrinsics::atomic_nand(dst, val)
- }
-}
-
-
-#[inline]
-unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_or_acq(dst, val),
- Release => intrinsics::atomic_or_rel(dst, val),
- AcqRel => intrinsics::atomic_or_acqrel(dst, val),
- Relaxed => intrinsics::atomic_or_relaxed(dst, val),
- SeqCst => intrinsics::atomic_or(dst, val)
- }
-}
-
-
-#[inline]
-unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_xor_acq(dst, val),
- Release => intrinsics::atomic_xor_rel(dst, val),
- AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
- Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
- SeqCst => intrinsics::atomic_xor(dst, val)
- }
-}
-
-
-/// An atomic fence.
-///
-/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
-/// fence 'B' with (at least) `Acquire` semantics, if and only if there exists
-/// atomic operations X and Y, both operating on some atomic object 'M' such
-/// that A is sequenced before X, Y is synchronized before B and Y observes
-/// the change to M. This provides a happens-before dependence between A and B.
-///
-/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
-/// with a fence.
-///
-/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
-/// and `Release` semantics, participates in the global program order of the
-/// other `SeqCst` operations and/or fences.
-///
-/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
-///
-/// # Panics
-///
-/// Panics if `order` is `Relaxed`.
-#[inline]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub fn fence(order: Ordering) {
- unsafe {
- match order {
- Acquire => intrinsics::atomic_fence_acq(),
- Release => intrinsics::atomic_fence_rel(),
- AcqRel => intrinsics::atomic_fence_acqrel(),
- SeqCst => intrinsics::atomic_fence(),
- Relaxed => panic!("there is no such thing as a relaxed fence")
- }
- }
-}
-
-macro_rules! impl_Debug {
- ($($t:ident)*) => ($(
- #[stable(feature = "atomic_debug", since = "1.3.0")]
- impl fmt::Debug for $t {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
- }
- }
- )*);
-}
-
-impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
-
-#[stable(feature = "atomic_debug", since = "1.3.0")]
-impl<T> fmt::Debug for AtomicPtr<T> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
- }
-}
/// ```
/// use std::mem;
///
- /// let v: &[u8] = unsafe { mem::transmute("L") };
- /// assert!(v == [76]);
+ /// let array: &[u8] = unsafe { mem::transmute("Rust") };
+ /// assert_eq!(array, [82, 117, 115, 116]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn transmute<T,U>(e: T) -> U;
+ pub fn transmute<T, U>(e: T) -> U;
/// Gives the address for the return value of the enclosing function.
///
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
- html_playground_url = "https://play.rust-lang.org/")]
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![doc(test(no_crate_inject))]
#![no_core]
pub mod any;
pub mod array;
-pub mod atomic;
+pub mod sync;
pub mod cell;
pub mod char;
pub mod panicking;
issue = "0")]
use prelude::v1::*;
-use num::ParseFloatError as PFE;
-use num::FloatErrorKind;
+use fmt;
+use str::FromStr;
+
use self::parse::{parse_decimal, Decimal, Sign};
use self::parse::ParseResult::{self, Valid, ShortcutToInf, ShortcutToZero};
use self::num::digits_to_big;
pub mod rawfp;
pub mod parse;
-/// Entry point for decimal-to-f32 conversion.
-pub fn to_f32(s: &str) -> Result<f32, PFE> {
- dec2flt(s)
+macro_rules! from_str_float_impl {
+ ($t:ty, $func:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl FromStr for $t {
+ type Err = ParseFloatError;
+
+ /// Converts a string in base 10 to a float.
+ /// Accepts an optional decimal exponent.
+ ///
+ /// This function accepts strings such as
+ ///
+ /// * '3.14'
+ /// * '-3.14'
+ /// * '2.5E10', or equivalently, '2.5e10'
+ /// * '2.5E-10'
+ /// * '.' (understood as 0)
+ /// * '5.'
+ /// * '.5', or, equivalently, '0.5'
+ /// * 'inf', '-inf', 'NaN'
+ ///
+ /// Leading and trailing whitespace represent an error.
+ ///
+ /// # Arguments
+ ///
+ /// * src - A string
+ ///
+ /// # Return value
+ ///
+ /// `Err(ParseFloatError)` if the string did not represent a valid
+ /// number. Otherwise, `Ok(n)` where `n` is the floating-point
+ /// number represented by `src`.
+ #[inline]
+ fn from_str(src: &str) -> Result<Self, ParseFloatError> {
+ dec2flt(src)
+ }
+ }
+ }
+}
+from_str_float_impl!(f32, to_f32);
+from_str_float_impl!(f64, to_f64);
+
+/// An error which can be returned when parsing a float.
+#[derive(Debug, Clone, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ParseFloatError {
+ kind: FloatErrorKind
+}
+
+#[derive(Debug, Clone, PartialEq)]
+enum FloatErrorKind {
+ Empty,
+ Invalid,
+}
+
+impl ParseFloatError {
+ #[unstable(feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "0")]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ FloatErrorKind::Empty => "cannot parse float from empty string",
+ FloatErrorKind::Invalid => "invalid float literal",
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseFloatError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+pub fn pfe_empty() -> ParseFloatError {
+ ParseFloatError { kind: FloatErrorKind::Empty }
}
-/// Entry point for decimal-to-f64 conversion.
-pub fn to_f64(s: &str) -> Result<f64, PFE> {
- dec2flt(s)
+pub fn pfe_invalid() -> ParseFloatError {
+ ParseFloatError { kind: FloatErrorKind::Invalid }
}
/// Split decimal string into sign and the rest, without inspecting or validating the rest.
}
/// Convert a decimal string into a floating point number.
-fn dec2flt<T: RawFloat>(s: &str) -> Result<T, PFE> {
+fn dec2flt<T: RawFloat>(s: &str) -> Result<T, ParseFloatError> {
if s.is_empty() {
- return Err(PFE { __kind: FloatErrorKind::Empty });
+ return Err(pfe_empty())
}
let (sign, s) = extract_sign(s);
let flt = match parse_decimal(s) {
ParseResult::Invalid => match s {
"inf" => T::infinity(),
"NaN" => T::nan(),
- _ => { return Err(PFE { __kind: FloatErrorKind::Invalid }); }
+ _ => { return Err(pfe_invalid()); }
}
};
/// The main workhorse for the decimal-to-float conversion: Orchestrate all the preprocessing
/// and figure out which algorithm should do the actual conversion.
-fn convert<T: RawFloat>(mut decimal: Decimal) -> Result<T, PFE> {
+fn convert<T: RawFloat>(mut decimal: Decimal) -> Result<T, ParseFloatError> {
simplify(&mut decimal);
if let Some(x) = trivial_cases(&decimal) {
return Ok(x);
// If we exceed this, perhaps while calculating `f * 10^e` in Algorithm R or Algorithm M,
// we'll crash. So we error out before getting too close, with a generous safety margin.
if max_digits > 375 {
- return Err(PFE { __kind: FloatErrorKind::Invalid });
+ return Err(pfe_invalid());
}
let f = digits_to_big(decimal.integral, decimal.fractional);
($T:ty) => {
fn from_str_radix(src: &str, radix: u32)
-> Result<$T, ParseFloatError> {
- use num::FloatErrorKind::*;
- use num::ParseFloatError as PFE;
+ use num::dec2flt::{pfe_empty, pfe_invalid};
// Special values
match src {
}
let (is_positive, src) = match src.slice_shift_char() {
- None => return Err(PFE { __kind: Empty }),
- Some(('-', "")) => return Err(PFE { __kind: Empty }),
+ None => return Err(pfe_empty()),
+ Some(('-', "")) => return Err(pfe_empty()),
Some(('-', src)) => (false, src),
Some((_, _)) => (true, src),
};
break; // start of fractional part
},
_ => {
- return Err(PFE { __kind: Invalid });
+ return Err(pfe_invalid())
},
},
}
break; // start of exponent
},
_ => {
- return Err(PFE { __kind: Invalid });
+ return Err(pfe_invalid())
},
},
}
let base = match c {
'E' | 'e' if radix == 10 => 10.0,
'P' | 'p' if radix == 16 => 2.0,
- _ => return Err(PFE { __kind: Invalid }),
+ _ => return Err(pfe_invalid()),
};
// Parse the exponent as decimal integer
Some(('-', src)) => (false, src.parse::<usize>()),
Some(('+', src)) => (true, src.parse::<usize>()),
Some((_, _)) => (true, src.parse::<usize>()),
- None => return Err(PFE { __kind: Invalid }),
+ None => return Err(pfe_invalid()),
};
match (is_positive, exp) {
(true, Ok(exp)) => base.powi(exp as i32),
(false, Ok(exp)) => 1.0 / base.powi(exp as i32),
- (_, Err(_)) => return Err(PFE { __kind: Invalid }),
+ (_, Err(_)) => return Err(pfe_invalid()),
}
},
None => 1.0, // no exponent
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 16-bits integers (`i16` type)
+//! The 16-bit signed integer type.
+//!
+//! *[See also the `i16` primitive type](../primitive.i16.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 32-bits integers (`i32` type)
+//! The 32-bit signed integer type.
+//!
+//! *[See also the `i32` primitive type](../primitive.i32.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 64-bits integers (`i64` type)
+//! The 64-bit signed integer type.
+//!
+//! *[See also the `i64` primitive type](../primitive.i64.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for signed 8-bits integers (`i8` type)
+//! The 8-bit signed integer type.
+//!
+//! *[See also the `i8` primitive type](../primitive.i8.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for pointer-sized signed integers (`isize` type)
+//! The pointer-sized signed integer type.
+//!
+//! *[See also the `isize` primitive type](../primitive.isize.html).*
#![stable(feature = "rust1", since = "1.0.0")]
fn to_radians(self) -> Self;
}
-macro_rules! from_str_float_impl {
- ($t:ty, $func:ident) => {
- #[stable(feature = "rust1", since = "1.0.0")]
- impl FromStr for $t {
- type Err = ParseFloatError;
-
- /// Converts a string in base 10 to a float.
- /// Accepts an optional decimal exponent.
- ///
- /// This function accepts strings such as
- ///
- /// * '3.14'
- /// * '-3.14'
- /// * '2.5E10', or equivalently, '2.5e10'
- /// * '2.5E-10'
- /// * '.' (understood as 0)
- /// * '5.'
- /// * '.5', or, equivalently, '0.5'
- /// * 'inf', '-inf', 'NaN'
- ///
- /// Leading and trailing whitespace represent an error.
- ///
- /// # Arguments
- ///
- /// * src - A string
- ///
- /// # Return value
- ///
- /// `Err(ParseFloatError)` if the string did not represent a valid
- /// number. Otherwise, `Ok(n)` where `n` is the floating-point
- /// number represented by `src`.
- #[inline]
- fn from_str(src: &str) -> Result<Self, ParseFloatError> {
- dec2flt::$func(src)
- }
- }
- }
-}
-from_str_float_impl!(f32, to_f32);
-from_str_float_impl!(f64, to_f64);
-
macro_rules! from_str_radix_int_impl {
($($t:ty)*) => {$(
#[stable(feature = "rust1", since = "1.0.0")]
}
}
-/// An error which can be returned when parsing a float.
-#[derive(Debug, Clone, PartialEq)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct ParseFloatError {
- #[doc(hidden)]
- #[unstable(feature = "float_error_internals",
- reason = "should not be exposed publicly",
- issue = "0")]
- pub __kind: FloatErrorKind
-}
-
-#[derive(Debug, Clone, PartialEq)]
-#[unstable(feature = "float_error_internals",
- reason = "should not be exposed publicly",
- issue = "0")]
-#[doc(hidden)]
-pub enum FloatErrorKind {
- Empty,
- Invalid,
-}
-
-impl ParseFloatError {
- #[doc(hidden)]
- pub fn __description(&self) -> &str {
- match self.__kind {
- FloatErrorKind::Empty => "cannot parse float from empty string",
- FloatErrorKind::Invalid => "invalid float literal",
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl fmt::Display for ParseFloatError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- self.__description().fmt(f)
- }
-}
+pub use num::dec2flt::ParseFloatError;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 16-bits integers (`u16` type)
+//! The 16-bit unsigned integer type.
+//!
+//! *[See also the `u16` primitive type](../primitive.u16.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 32-bits integers (`u32` type)
+//! The 32-bit unsigned integer type.
+//!
+//! *[See also the `u32` primitive type](../primitive.u32.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 64-bits integer (`u64` type)
+//! The 64-bit unsigned integer type.
+//!
+//! *[See also the `u64` primitive type](../primitive.u64.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for unsigned 8-bits integers (`u8` type)
+//! The 8-bit unsigned integer type.
+//!
+//! *[See also the `u8` primitive type](../primitive.u8.html).*
#![stable(feature = "rust1", since = "1.0.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Operations and constants for pointer-sized unsigned integers (`usize` type)
+//! The pointer-sized unsigned integer type.
+//!
+//! *[See also the `usize` primitive type](../primitive.usize.html).*
#![stable(feature = "rust1", since = "1.0.0")]
//! The libcore prelude
+#![stable(feature = "core_prelude", since = "1.4.0")]
+
pub mod v1;
//! well. This module is imported by default when `#![no_std]` is used in the
//! same manner as the standard library's prelude.
-#![unstable(feature = "core_prelude",
- reason = "the libcore prelude has not been scrutinized and \
- stabilized yet",
- issue = "27701")]
+#![stable(feature = "core_prelude", since = "1.4.0")]
// Reexported core operators
-pub use marker::{Copy, Send, Sized, Sync};
-pub use ops::{Drop, Fn, FnMut, FnOnce};
+#[doc(no_inline)] pub use marker::{Copy, Send, Sized, Sync};
+#[doc(no_inline)] pub use ops::{Drop, Fn, FnMut, FnOnce};
// Reexported functions
-pub use mem::drop;
+#[doc(no_inline)] pub use mem::drop;
// Reexported types and traits
-pub use char::CharExt;
-pub use clone::Clone;
-pub use cmp::{PartialEq, PartialOrd, Eq, Ord};
-pub use convert::{AsRef, AsMut, Into, From};
-pub use default::Default;
-pub use iter::IntoIterator;
-pub use iter::{Iterator, DoubleEndedIterator, Extend, ExactSizeIterator};
-pub use option::Option::{self, Some, None};
-pub use result::Result::{self, Ok, Err};
-pub use slice::SliceExt;
-pub use str::StrExt;
+#[doc(no_inline)] pub use clone::Clone;
+#[doc(no_inline)] pub use cmp::{PartialEq, PartialOrd, Eq, Ord};
+#[doc(no_inline)] pub use convert::{AsRef, AsMut, Into, From};
+#[doc(no_inline)] pub use default::Default;
+#[doc(no_inline)] pub use iter::{Iterator, Extend, IntoIterator};
+#[doc(no_inline)] pub use iter::{DoubleEndedIterator, ExactSizeIterator};
+#[doc(no_inline)] pub use option::Option::{self, Some, None};
+#[doc(no_inline)] pub use result::Result::{self, Ok, Err};
+
+// Reexported extension traits for primitive types
+#[doc(no_inline)] pub use slice::SliceExt;
+#[doc(no_inline)] pub use str::StrExt;
+#[doc(no_inline)] pub use char::CharExt;
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Atomic types
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent
+//! types.
+//!
+//! This module defines atomic versions of a select number of primitive
+//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [LLVM atomic orderings][1].
+//!
+//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
+//!
+//! Atomic variables are safe to share between threads (they implement `Sync`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
+//! are often used for lazy global initialization.
+//!
+//!
+//! # Examples
+//!
+//! A simple spinlock:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomic::{AtomicUsize, Ordering};
+//! use std::thread;
+//!
+//! fn main() {
+//! let spinlock = Arc::new(AtomicUsize::new(1));
+//!
+//! let spinlock_clone = spinlock.clone();
+//! thread::spawn(move|| {
+//! spinlock_clone.store(0, Ordering::SeqCst);
+//! });
+//!
+//! // Wait for the other thread to release the lock
+//! while spinlock.load(Ordering::SeqCst) != 0 {}
+//! }
+//! ```
+//!
+//! Keep a global count of live threads:
+//!
+//! ```
+//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+//!
+//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+//!
+//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
+//! println!("live threads: {}", old_thread_count + 1);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use self::Ordering::*;
+
+use marker::{Send, Sync};
+
+use intrinsics;
+use cell::UnsafeCell;
+
+use default::Default;
+use fmt;
+
+/// A boolean type which can be safely shared between threads.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct AtomicBool {
+ v: UnsafeCell<usize>,
+}
+
+impl Default for AtomicBool {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+unsafe impl Sync for AtomicBool {}
+
+/// A signed integer type which can be safely shared between threads.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct AtomicIsize {
+ v: UnsafeCell<isize>,
+}
+
+impl Default for AtomicIsize {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+unsafe impl Sync for AtomicIsize {}
+
+/// An unsigned integer type which can be safely shared between threads.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct AtomicUsize {
+ v: UnsafeCell<usize>,
+}
+
+impl Default for AtomicUsize {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+unsafe impl Sync for AtomicUsize {}
+
+/// A raw pointer type which can be safely shared between threads.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct AtomicPtr<T> {
+ p: UnsafeCell<*mut T>,
+}
+
+impl<T> Default for AtomicPtr<T> {
+ fn default() -> AtomicPtr<T> {
+ AtomicPtr::new(::ptr::null_mut())
+ }
+}
+
+unsafe impl<T> Send for AtomicPtr<T> {}
+unsafe impl<T> Sync for AtomicPtr<T> {}
+
+/// Atomic memory orderings
+///
+/// Memory orderings limit the ways that both the compiler and CPU may reorder
+/// instructions around atomic operations. At its most restrictive,
+/// "sequentially consistent" atomics allow neither reads nor writes
+/// to be moved either before or after the atomic operation; on the other end
+/// "relaxed" atomics allow all reorderings.
+///
+/// Rust's memory orderings are [the same as
+/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Copy, Clone)]
+pub enum Ordering {
+ /// No ordering constraints, only atomic operations.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Relaxed,
+ /// When coupled with a store, all previous writes become visible
+ /// to another thread that performs a load with `Acquire` ordering
+ /// on the same value.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Release,
+ /// When coupled with a load, all subsequent loads will see data
+ /// written before a store with `Release` ordering on the same value
+ /// in another thread.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Acquire,
+ /// When coupled with a load, uses `Acquire` ordering, and with a store
+ /// `Release` ordering.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ AcqRel,
+ /// Like `AcqRel` with the additional guarantee that all threads see all
+ /// sequentially consistent operations in the same order.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ SeqCst,
+}
+
+/// An `AtomicBool` initialized to `false`.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
+/// An `AtomicIsize` initialized to `0`.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
+/// An `AtomicUsize` initialized to `0`.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
+
+// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
+const UINT_TRUE: usize = !0;
+
+impl AtomicBool {
+ /// Creates a new `AtomicBool`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ ///
+ /// let atomic_true = AtomicBool::new(true);
+ /// let atomic_false = AtomicBool::new(false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(v: bool) -> AtomicBool {
+ AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
+ }
+
+ /// Loads a value from the bool.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> bool {
+ unsafe { atomic_load(self.v.get(), order) > 0 }
+ }
+
+ /// Stores a value into the bool.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// some_bool.store(false, Ordering::Relaxed);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: bool, order: Ordering) {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+
+ /// Stores a value into the bool, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap(&self, val: bool, order: Ordering) -> bool {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_swap(self.v.get(), val, order) > 0 }
+ }
+
+ /// Stores a value into the `bool` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
+ /// this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ ///
+ /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
+ /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ let current = if current { UINT_TRUE } else { 0 };
+ let new = if new { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
+ }
+
+ /// Logical "and" with a boolean value.
+ ///
+ /// Performs a logical "and" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_and(self.v.get(), val, order) > 0 }
+ }
+
+ /// Logical "nand" with a boolean value.
+ ///
+ /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_nand(self.v.get(), val, order) > 0 }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
+ /// new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_or(self.v.get(), val, order) > 0 }
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
+ /// the new value to the result.
+ ///
+ /// Returns the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), true);
+ ///
+ /// let foo = AtomicBool::new(true);
+ /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ ///
+ /// let foo = AtomicBool::new(false);
+ /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
+ /// assert_eq!(foo.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ let val = if val { UINT_TRUE } else { 0 };
+
+ unsafe { atomic_xor(self.v.get(), val, order) > 0 }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AtomicIsize {
+ /// Creates a new `AtomicIsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicIsize;
+ ///
+ /// let atomic_forty_two = AtomicIsize::new(42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(v: isize) -> AtomicIsize {
+ AtomicIsize {v: UnsafeCell::new(v)}
+ }
+
+ /// Loads a value from the isize.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> isize {
+ unsafe { atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the isize.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// some_isize.store(10, Ordering::Relaxed);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: isize, order: Ordering) {
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+
+ /// Stores a value into the isize, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap(&self, val: isize, order: Ordering) -> isize {
+ unsafe { atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the `isize` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
+ /// this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
+ }
+
+ /// Add an isize to the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0);
+ /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
+ unsafe { atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract an isize from the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0);
+ /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), -10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
+ unsafe { atomic_sub(self.v.get(), val, order) }
+ }
+
+    /// Bitwise and with the current isize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicIsize, Ordering};
+    ///
+    /// let foo = AtomicIsize::new(0b101101);
+    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
+        unsafe { atomic_and(self.v.get(), val, order) }
+    }
+
+    /// Bitwise or with the current isize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicIsize, Ordering};
+    ///
+    /// let foo = AtomicIsize::new(0b101101);
+    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
+        unsafe { atomic_or(self.v.get(), val, order) }
+    }
+
+    /// Bitwise xor with the current isize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicIsize, Ordering};
+    ///
+    /// let foo = AtomicIsize::new(0b101101);
+    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
+        unsafe { atomic_xor(self.v.get(), val, order) }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AtomicUsize {
+ /// Creates a new `AtomicUsize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicUsize;
+ ///
+ /// let atomic_forty_two = AtomicUsize::new(42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(v: usize) -> AtomicUsize {
+ AtomicUsize { v: UnsafeCell::new(v) }
+ }
+
+ /// Loads a value from the usize.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let some_usize = AtomicUsize::new(5);
+ ///
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> usize {
+ unsafe { atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the usize.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let some_usize = AtomicUsize::new(5);
+ ///
+ /// some_usize.store(10, Ordering::Relaxed);
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: usize, order: Ordering) {
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+
+    /// Stores a value into the usize, returning the old value.
+    ///
+    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let some_usize = AtomicUsize::new(5);
+    ///
+    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
+    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn swap(&self, val: usize, order: Ordering) -> usize {
+        unsafe { atomic_swap(self.v.get(), val, order) }
+    }
+
+ /// Stores a value into the `usize` if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
+ /// this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let some_usize = AtomicUsize::new(5);
+ ///
+ /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
+ /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
+ unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
+ }
+
+ /// Add to the current usize, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let foo = AtomicUsize::new(0);
+ /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
+ unsafe { atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract from the current usize, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ ///
+ /// let foo = AtomicUsize::new(10);
+ /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 0);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
+ unsafe { atomic_sub(self.v.get(), val, order) }
+ }
+
+    /// Bitwise and with the current usize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let foo = AtomicUsize::new(0b101101);
+    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
+        unsafe { atomic_and(self.v.get(), val, order) }
+    }
+
+    /// Bitwise or with the current usize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let foo = AtomicUsize::new(0b101101);
+    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
+        unsafe { atomic_or(self.v.get(), val, order) }
+    }
+
+    /// Bitwise xor with the current usize, returning the previous value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::{AtomicUsize, Ordering};
+    ///
+    /// let foo = AtomicUsize::new(0b101101);
+    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
+    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
+        unsafe { atomic_xor(self.v.get(), val, order) }
+    }
+}
+
+impl<T> AtomicPtr<T> {
+ /// Creates a new `AtomicPtr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// let ptr = &mut 5;
+ /// let atomic_ptr = AtomicPtr::new(ptr);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(p: *mut T) -> AtomicPtr<T> {
+ AtomicPtr { p: UnsafeCell::new(p) }
+ }
+
+ /// Loads a value from the pointer.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let value = some_ptr.load(Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> *mut T {
+ unsafe {
+ atomic_load(self.p.get() as *mut usize, order) as *mut T
+ }
+ }
+
+ /// Stores a value into the pointer.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// some_ptr.store(other_ptr, Ordering::Relaxed);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, ptr: *mut T, order: Ordering) {
+ unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
+ }
+
+ /// Stores a value into the pointer, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
+ unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
+ }
+
+ /// Stores a value into the pointer if the current value is the same as the `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// was updated.
+ ///
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
+ /// this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let some_ptr = AtomicPtr::new(ptr);
+ ///
+ /// let other_ptr = &mut 10;
+ /// let another_ptr = &mut 10;
+ ///
+ /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
+ unsafe {
+ atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
+ new as usize, order) as *mut T
+ }
+ }
+}
+
+#[inline]
+unsafe fn atomic_store<T>(dst: *mut T, val: T, order:Ordering) {
+ match order {
+ Release => intrinsics::atomic_store_rel(dst, val),
+ Relaxed => intrinsics::atomic_store_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_store(dst, val),
+ Acquire => panic!("there is no such thing as an acquire store"),
+ AcqRel => panic!("there is no such thing as an acquire/release store"),
+ }
+}
+
+#[inline]
+unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_load_acq(dst),
+ Relaxed => intrinsics::atomic_load_relaxed(dst),
+ SeqCst => intrinsics::atomic_load(dst),
+ Release => panic!("there is no such thing as a release load"),
+ AcqRel => panic!("there is no such thing as an acquire/release load"),
+ }
+}
+
+#[inline]
+unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_xchg_acq(dst, val),
+ Release => intrinsics::atomic_xchg_rel(dst, val),
+ AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_xchg(dst, val)
+ }
+}
+
+/// Returns the old value (like __sync_fetch_and_add).
+#[inline]
+unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_xadd_acq(dst, val),
+ Release => intrinsics::atomic_xadd_rel(dst, val),
+ AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_xadd(dst, val)
+ }
+}
+
+/// Returns the old value (like __sync_fetch_and_sub).
+#[inline]
+unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_xsub_acq(dst, val),
+ Release => intrinsics::atomic_xsub_rel(dst, val),
+ AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_xsub(dst, val)
+ }
+}
+
+#[inline]
+unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
+ Release => intrinsics::atomic_cxchg_rel(dst, old, new),
+ AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
+ Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
+ SeqCst => intrinsics::atomic_cxchg(dst, old, new),
+ }
+}
+
+#[inline]
+unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_and_acq(dst, val),
+ Release => intrinsics::atomic_and_rel(dst, val),
+ AcqRel => intrinsics::atomic_and_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_and_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_and(dst, val)
+ }
+}
+
+#[inline]
+unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_nand_acq(dst, val),
+ Release => intrinsics::atomic_nand_rel(dst, val),
+ AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_nand(dst, val)
+ }
+}
+
+
+#[inline]
+unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_or_acq(dst, val),
+ Release => intrinsics::atomic_or_rel(dst, val),
+ AcqRel => intrinsics::atomic_or_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_or_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_or(dst, val)
+ }
+}
+
+
+#[inline]
+unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match order {
+ Acquire => intrinsics::atomic_xor_acq(dst, val),
+ Release => intrinsics::atomic_xor_rel(dst, val),
+ AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
+ Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
+ SeqCst => intrinsics::atomic_xor(dst, val)
+ }
+}
+
+
+/// An atomic fence.
+///
+/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
+/// fence 'B' with (at least) `Acquire` semantics, if and only if there exists
+/// atomic operations X and Y, both operating on some atomic object 'M' such
+/// that A is sequenced before X, Y is synchronized before B and Y observes
+/// the change to M. This provides a happens-before dependence between A and B.
+///
+/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
+/// with a fence.
+///
+/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
+/// and `Release` semantics, participates in the global program order of the
+/// other `SeqCst` operations and/or fences.
+///
+/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
+///
+/// # Panics
+///
+/// Panics if `order` is `Relaxed`.
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn fence(order: Ordering) {
+ unsafe {
+ match order {
+ Acquire => intrinsics::atomic_fence_acq(),
+ Release => intrinsics::atomic_fence_rel(),
+ AcqRel => intrinsics::atomic_fence_acqrel(),
+ SeqCst => intrinsics::atomic_fence(),
+ Relaxed => panic!("there is no such thing as a relaxed fence")
+ }
+ }
+}
+
+macro_rules! impl_Debug {
+ ($($t:ident)*) => ($(
+ #[stable(feature = "atomic_debug", since = "1.3.0")]
+ impl fmt::Debug for $t {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
+ }
+ }
+ )*);
+}
+
+impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
+
+#[stable(feature = "atomic_debug", since = "1.3.0")]
+impl<T> fmt::Debug for AtomicPtr<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Synchronization primitives
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+pub mod atomic;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::atomic::*;
-use core::atomic::Ordering::SeqCst;
+use core::sync::atomic::*;
+use core::sync::atomic::Ordering::SeqCst;
#[test]
fn bool_() {
use std::{i64, f32, f64};
use test;
-use core::num::dec2flt::{to_f32, to_f64};
mod parse;
mod rawfp;
let inputs = &[stringify!($x).into(), format!("{:?}", x64), format!("{:e}", x64)];
for input in inputs {
if input != "inf" {
- assert_eq!(to_f64(input), Ok(x64));
- assert_eq!(to_f32(input), Ok(x32));
+ assert_eq!(input.parse(), Ok(x64));
+ assert_eq!(input.parse(), Ok(x32));
let neg_input = &format!("-{}", input);
- assert_eq!(to_f64(neg_input), Ok(-x64));
- assert_eq!(to_f32(neg_input), Ok(-x32));
+ assert_eq!(neg_input.parse(), Ok(-x64));
+ assert_eq!(neg_input.parse(), Ok(-x32));
}
}
})
#[test]
fn lonely_dot() {
- assert_eq!(to_f64("."), Ok(0.0));
+ assert_eq!(".".parse(), Ok(0.0));
}
#[test]
fn nan() {
- assert!(to_f64("NaN").unwrap().is_nan());
- assert!(to_f32("NaN").unwrap().is_nan());
+ assert!("NaN".parse::<f32>().unwrap().is_nan());
+ assert!("NaN".parse::<f64>().unwrap().is_nan());
}
#[test]
fn inf() {
- assert_eq!(to_f64("inf"), Ok(f64::INFINITY));
- assert_eq!(to_f64("-inf"), Ok(f64::NEG_INFINITY));
- assert_eq!(to_f32("inf"), Ok(f32::INFINITY));
- assert_eq!(to_f32("-inf"), Ok(f32::NEG_INFINITY));
+ assert_eq!("inf".parse(), Ok(f64::INFINITY));
+ assert_eq!("-inf".parse(), Ok(f64::NEG_INFINITY));
+ assert_eq!("inf".parse(), Ok(f32::INFINITY));
+ assert_eq!("-inf".parse(), Ok(f32::NEG_INFINITY));
}
#[test]
fn massive_exponent() {
let max = i64::MAX;
- assert_eq!(to_f64(&format!("1e{}000", max)), Ok(f64::INFINITY));
- assert_eq!(to_f64(&format!("1e-{}000", max)), Ok(0.0));
- assert_eq!(to_f64(&format!("1e{}000", max)), Ok(f64::INFINITY));
+ assert_eq!(format!("1e{}000", max).parse(), Ok(f64::INFINITY));
+ assert_eq!(format!("1e-{}000", max).parse(), Ok(0.0));
+ assert_eq!(format!("1e{}000", max).parse(), Ok(f64::INFINITY));
}
#[bench]
fn bench_0(b: &mut test::Bencher) {
- b.iter(|| to_f64("0.0"));
+ b.iter(|| "0.0".parse::<f64>());
}
#[bench]
fn bench_42(b: &mut test::Bencher) {
- b.iter(|| to_f64("42"));
+ b.iter(|| "42".parse::<f64>());
}
#[bench]
fn bench_huge_int(b: &mut test::Bencher) {
// 2^128 - 1
- b.iter(|| to_f64("170141183460469231731687303715884105727"));
+ b.iter(|| "170141183460469231731687303715884105727".parse::<f64>());
}
#[bench]
fn bench_short_decimal(b: &mut test::Bencher) {
- b.iter(|| to_f64("1234.5678"));
+ b.iter(|| "1234.5678".parse::<f64>());
}
#[bench]
fn bench_pi_long(b: &mut test::Bencher) {
- b.iter(|| to_f64("3.14159265358979323846264338327950288"));
+ b.iter(|| "3.14159265358979323846264338327950288".parse::<f64>());
}
#[bench]
fn bench_pi_short(b: &mut test::Bencher) {
- b.iter(|| to_f64("3.141592653589793"))
+ b.iter(|| "3.141592653589793".parse::<f64>())
}
#[bench]
fn bench_1e150(b: &mut test::Bencher) {
- b.iter(|| to_f64("1e150"));
+ b.iter(|| "1e150".parse::<f64>());
}
#[bench]
fn bench_long_decimal_and_exp(b: &mut test::Bencher) {
- b.iter(|| to_f64("727501488517303786137132964064381141071e-123"));
+ b.iter(|| "727501488517303786137132964064381141071e-123".parse::<f64>());
}
#[bench]
fn bench_min_subnormal(b: &mut test::Bencher) {
- b.iter(|| to_f64("5e-324"));
+ b.iter(|| "5e-324".parse::<f64>());
}
#[bench]
fn bench_min_normal(b: &mut test::Bencher) {
- b.iter(|| to_f64("2.2250738585072014e-308"));
+ b.iter(|| "2.2250738585072014e-308".parse::<f64>());
}
#[bench]
fn bench_max(b: &mut test::Bencher) {
- b.iter(|| to_f64("1.7976931348623157e308"));
+ b.iter(|| "1.7976931348623157e308".parse::<f64>());
}
use core::$T_i::*;
use num;
use core::ops::{BitOr, BitAnd, BitXor, Shl, Shr, Not};
+ use std::str::FromStr;
#[test]
fn test_overflows() {
assert!((10 as $T).checked_div(2) == Some(5));
assert!((5 as $T).checked_div(0) == None);
}
-}
+ fn from_str<T: FromStr>(t: &str) -> Option<T> {
+ FromStr::from_str(t).ok()
+ }
+
+ #[test]
+ pub fn test_from_str() {
+ assert_eq!(from_str::<$T>("0"), Some(0 as $T));
+ assert_eq!(from_str::<$T>("3"), Some(3 as $T));
+ assert_eq!(from_str::<$T>("10"), Some(10 as $T));
+ assert_eq!(from_str::<u32>("123456789"), Some(123456789 as u32));
+ assert_eq!(from_str::<$T>("00100"), Some(100 as $T));
+
+ assert_eq!(from_str::<$T>(""), None);
+ assert_eq!(from_str::<$T>(" "), None);
+ assert_eq!(from_str::<$T>("x"), None);
+ }
+
+ #[test]
+ pub fn test_parse_bytes() {
+ assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
+ assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
+ assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
+ assert_eq!(u16::from_str_radix("123", 16), Ok(291 as u16));
+ assert_eq!(u16::from_str_radix("ffff", 16), Ok(65535 as u16));
+ assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));
+
+ assert_eq!($T::from_str_radix("Z", 10).ok(), None::<$T>);
+ assert_eq!($T::from_str_radix("_", 2).ok(), None::<$T>);
+ }
+}
)}
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
- html_playground_url = "https://play.rust-lang.org/")]
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![cfg_attr(test, feature(test))]
//! Bindings for the C standard library and other platform libraries
}
fn lookup_and_handle_definition(&mut self, id: &ast::NodeId) {
+ use middle::ty::TypeVariants::{TyEnum, TyStruct};
+
+ // If `bar` is a trait item, make sure to mark Foo as alive in `Foo::bar`
+ self.tcx.tables.borrow().item_substs.get(id)
+ .and_then(|substs| substs.substs.self_ty())
+ .map(|ty| match ty.sty {
+ TyEnum(tyid, _) | TyStruct(tyid, _) => self.check_def_id(tyid.did),
+ _ => (),
+ });
+
self.tcx.def_map.borrow().get(id).map(|def| {
match def.full_def() {
def::DefConst(_) | def::DefAssociatedConst(..) => {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! This file defines
-
+//! This file handles the relationships between free regions --
+//! meaning lifetime parameters. Ordinarily, free regions are
+//! unrelated to one another, but they can be related via implied or
+//! explicit bounds. In that case, we track the bounds using the
+//! `TransitiveRelation` type and use that to decide when one free
+//! region outlives another and so forth.
+
+use middle::ty::{self, FreeRegion, Region};
use middle::wf::ImpliedBound;
-use middle::ty::{self, FreeRegion};
-use util::common::can_reach;
-use util::nodemap::{FnvHashMap, FnvHashSet};
+use rustc_data_structures::transitive_relation::TransitiveRelation;
#[derive(Clone)]
pub struct FreeRegionMap {
- /// `map` maps from a free region `a` to a list of
- /// free regions `bs` such that `a <= b for all b in bs`
- map: FnvHashMap<FreeRegion, Vec<FreeRegion>>,
- /// regions that are required to outlive (and therefore be
- /// equal to) 'static.
- statics: FnvHashSet<FreeRegion>
+ // Stores the relation `a < b`, where `a` and `b` are regions.
+ relation: TransitiveRelation<Region>
}
impl FreeRegionMap {
pub fn new() -> FreeRegionMap {
- FreeRegionMap { map: FnvHashMap(), statics: FnvHashSet() }
+ FreeRegionMap { relation: TransitiveRelation::new() }
}
pub fn relate_free_regions_from_implied_bounds<'tcx>(&mut self,
}
fn relate_to_static(&mut self, sup: FreeRegion) {
- self.statics.insert(sup);
+ self.relation.add(ty::ReStatic, ty::ReFree(sup));
}
fn relate_free_regions(&mut self, sub: FreeRegion, sup: FreeRegion) {
- let mut sups = self.map.entry(sub).or_insert(Vec::new());
- if !sups.contains(&sup) {
- sups.push(sup);
- }
+ self.relation.add(ty::ReFree(sub), ty::ReFree(sup))
}
/// Determines whether two free regions have a subregion relationship
/// it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
/// (that is, the user can give two different names to the same lifetime).
pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
- can_reach(&self.map, sub, sup) || self.is_static(&sup)
+ let result = sub == sup || {
+ let sub = ty::ReFree(sub);
+ let sup = ty::ReFree(sup);
+ self.relation.contains(&sub, &sup) || self.relation.contains(&ty::ReStatic, &sup)
+ };
+ debug!("sub_free_region(sub={:?}, sup={:?}) = {:?}", sub, sup, result);
+ result
+ }
+
+ pub fn lub_free_regions(&self, fr_a: FreeRegion, fr_b: FreeRegion) -> Region {
+ let r_a = ty::ReFree(fr_a);
+ let r_b = ty::ReFree(fr_b);
+ let result = if fr_a == fr_b { r_a } else {
+ match self.relation.postdom_upper_bound(&r_a, &r_b) {
+ None => ty::ReStatic,
+ Some(r) => *r,
+ }
+ };
+ debug!("lub_free_regions(fr_a={:?}, fr_b={:?}) = {:?}", fr_a, fr_b, result);
+ result
}
/// Determines whether one region is a subregion of another. This is intended to run *after
sub_region: ty::Region,
super_region: ty::Region)
-> bool {
- debug!("is_subregion_of(sub_region={:?}, super_region={:?})",
- sub_region, super_region);
-
- sub_region == super_region || {
+ let result = sub_region == super_region || {
match (sub_region, super_region) {
(ty::ReEmpty, _) |
(_, ty::ReStatic) =>
(ty::ReScope(sub_scope), ty::ReScope(super_scope)) =>
tcx.region_maps.is_subscope_of(sub_scope, super_scope),
- (ty::ReScope(sub_scope), ty::ReFree(ref fr)) =>
- tcx.region_maps.is_subscope_of(sub_scope, fr.scope.to_code_extent()),
+ (ty::ReScope(sub_scope), ty::ReFree(fr)) =>
+ tcx.region_maps.is_subscope_of(sub_scope, fr.scope.to_code_extent()) ||
+ self.is_static(fr),
(ty::ReFree(sub_fr), ty::ReFree(super_fr)) =>
self.sub_free_region(sub_fr, super_fr),
- (ty::ReStatic, ty::ReFree(ref sup_fr)) => self.is_static(sup_fr),
+ (ty::ReStatic, ty::ReFree(sup_fr)) =>
+ self.is_static(sup_fr),
_ =>
false,
}
- }
+ };
+ debug!("is_subregion_of(sub_region={:?}, super_region={:?}) = {:?}",
+ sub_region, super_region, result);
+ result
}
/// Determines whether this free-region is required to be 'static
- pub fn is_static(&self, super_region: &ty::FreeRegion) -> bool {
+ pub fn is_static(&self, super_region: ty::FreeRegion) -> bool {
debug!("is_static(super_region={:?})", super_region);
- self.statics.iter().any(|s| can_reach(&self.map, *s, *super_region))
+ self.relation.contains(&ty::ReStatic, &ty::ReFree(super_region))
}
}
+
+#[cfg(test)]
+fn free_region(index: u32) -> FreeRegion {
+ use middle::region::DestructionScopeData;
+ FreeRegion { scope: DestructionScopeData::new(0),
+ bound_region: ty::BoundRegion::BrAnon(index) }
+}
+
+#[test]
+fn lub() {
+ // a very VERY basic test, but see the tests in
+ // TransitiveRelation, which are much more thorough.
+ let frs: Vec<_> = (0..3).map(|i| free_region(i)).collect();
+ let mut map = FreeRegionMap::new();
+ map.relate_free_regions(frs[0], frs[2]);
+ map.relate_free_regions(frs[1], frs[2]);
+ assert_eq!(map.lub_free_regions(frs[0], frs[1]), ty::ReFree(frs[2]));
+}
+
ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))
}
- (ReFree(ref a_fr), ReFree(ref b_fr)) => {
- self.lub_free_regions(free_regions, a_fr, b_fr)
+ (ReFree(a_fr), ReFree(b_fr)) => {
+ free_regions.lub_free_regions(a_fr, b_fr)
}
// For these types, we cannot define any additional
}
}
- /// Computes a region that encloses both free region arguments. Guarantee that if the same two
- /// regions are given as argument, in any order, a consistent result is returned.
- fn lub_free_regions(&self,
- free_regions: &FreeRegionMap,
- a: &FreeRegion,
- b: &FreeRegion)
- -> ty::Region
- {
- return match a.cmp(b) {
- Less => helper(self, free_regions, a, b),
- Greater => helper(self, free_regions, b, a),
- Equal => ty::ReFree(*a)
- };
-
- fn helper(_this: &RegionVarBindings,
- free_regions: &FreeRegionMap,
- a: &FreeRegion,
- b: &FreeRegion) -> ty::Region
- {
- if free_regions.sub_free_region(*a, *b) {
- ty::ReFree(*b)
- } else if free_regions.sub_free_region(*b, *a) {
- ty::ReFree(*a)
- } else {
- ty::ReStatic
- }
- }
- }
-
fn glb_concrete_regions(&self,
free_regions: &FreeRegionMap,
a: Region,
b));
}
- (ReFree(ref fr), ReScope(s_id)) |
- (ReScope(s_id), ReFree(ref fr)) => {
+ (ReFree(fr), ReScope(s_id)) |
+ (ReScope(s_id), ReFree(fr)) => {
let s = ReScope(s_id);
// Free region is something "at least as big as
// `fr.scope_id`." If we find that the scope `fr.scope_id` is bigger
return v.flag;
}
-/// K: Eq + Hash<S>, V, S, H: Hasher<S>
-///
-/// Determines whether there exists a path from `source` to `destination`. The
-/// graph is defined by the `edges_map`, which maps from a node `S` to a list of
-/// its adjacent nodes `T`.
-///
-/// Efficiency note: This is implemented in an inefficient way because it is
-/// typically invoked on very small graphs. If the graphs become larger, a more
-/// efficient graph representation and algorithm would probably be advised.
-pub fn can_reach<T, S>(edges_map: &HashMap<T, Vec<T>, S>, source: T,
- destination: T) -> bool
- where S: HashState, T: Hash + Eq + Clone,
-{
- if source == destination {
- return true;
- }
-
- // Do a little breadth-first-search here. The `queue` list
- // doubles as a way to detect if we've seen a particular FR
- // before. Note that we expect this graph to be an *extremely
- // shallow* tree.
- let mut queue = vec!(source);
- let mut i = 0;
- while i < queue.len() {
- match edges_map.get(&queue[i]) {
- Some(edges) => {
- for target in edges {
- if *target == destination {
- return true;
- }
-
- if !queue.iter().any(|x| x == target) {
- queue.push((*target).clone());
- }
- }
- }
- None => {}
- }
- i += 1;
- }
- return false;
-}
-
/// Memoizes a one-argument closure using the given RefCell containing
/// a type implementing MutableMap to serve as a cache.
///
fn default() -> TargetOptions {
TargetOptions {
data_layout: String::new(),
- linker: "cc".to_string(),
- ar: "ar".to_string(),
+ linker: option_env!("CFG_DEFAULT_LINKER").unwrap_or("cc").to_string(),
+ ar: option_env!("CFG_DEFAULT_AR").unwrap_or("ar").to_string(),
pre_link_args: Vec::new(),
post_link_args: Vec::new(),
cpu: "generic".to_string(),
impl BitVector {
pub fn new(num_bits: usize) -> BitVector {
- let num_words = (num_bits + 63) / 64;
+ let num_words = u64s(num_bits);
BitVector { data: vec![0; num_words] }
}
- fn word_mask(&self, bit: usize) -> (usize, u64) {
- let word = bit / 64;
- let mask = 1 << (bit % 64);
- (word, mask)
- }
-
pub fn contains(&self, bit: usize) -> bool {
- let (word, mask) = self.word_mask(bit);
+ let (word, mask) = word_mask(bit);
(self.data[word] & mask) != 0
}
pub fn insert(&mut self, bit: usize) -> bool {
- let (word, mask) = self.word_mask(bit);
+ let (word, mask) = word_mask(bit);
let data = &mut self.data[word];
let value = *data;
*data = value | mask;
(value | mask) != value
}
+
+ pub fn insert_all(&mut self, all: &BitVector) -> bool {
+ assert!(self.data.len() == all.data.len());
+ let mut changed = false;
+ for (i, j) in self.data.iter_mut().zip(&all.data) {
+ let value = *i;
+ *i = value | *j;
+ if value != *i { changed = true; }
+ }
+ changed
+ }
+
+    /// Grow the vector so it can hold at least `num_bits` bits, padding
+    /// with zeroed words. Calling this with a size smaller than or equal
+    /// to the current capacity is a harmless no-op; the vector is never
+    /// shrunk.
+    pub fn grow(&mut self, num_bits: usize) {
+        let num_words = u64s(num_bits);
+        // Bug fix: the subtraction was reversed (`self.data.len() - num_words`),
+        // which underflows `usize` — and panics in debug builds — whenever the
+        // vector actually needs to grow. `saturating_sub` also makes a
+        // smaller-or-equal request a no-op instead of an error.
+        let extra_words = num_words.saturating_sub(self.data.len());
+        self.data.extend((0..extra_words).map(|_| 0));
+    }
+}
+
+/// A "bit matrix" is basically a square matrix of booleans
+/// represented as one gigantic bitvector. In other words, it is as if
+/// you have N bitvectors, each of length N. Note that `elements` here is `N`.
+#[derive(Clone)]
+pub struct BitMatrix {
+ elements: usize,
+ vector: Vec<u64>,
+}
+
+impl BitMatrix {
+ // Create a new `elements x elements` matrix, initially empty.
+ pub fn new(elements: usize) -> BitMatrix {
+ // For every element, we need one bit for every other
+ // element. Round up to an even number of u64s.
+ let u64s_per_elem = u64s(elements);
+ BitMatrix {
+ elements: elements,
+ vector: vec![0; elements * u64s_per_elem]
+ }
+ }
+
+ /// The range of bits for a given element.
+ fn range(&self, element: usize) -> (usize, usize) {
+ let u64s_per_elem = u64s(self.elements);
+ let start = element * u64s_per_elem;
+ (start, start + u64s_per_elem)
+ }
+
+    /// Set the bit `(source, target)`; returns true if the bit was not
+    /// already set (i.e. the matrix changed).
+    pub fn add(&mut self, source: usize, target: usize) -> bool {
+        let (start, _) = self.range(source);
+        let (word, mask) = word_mask(target);
+        // The binding itself is never reassigned, so `mut` on the binding is
+        // unnecessary (and trips the deny-by-default `unused_mut` lint); the
+        // `&mut` reborrow is what permits the element write below.
+        let vector = &mut self.vector[..];
+        let v1 = vector[start+word];
+        let v2 = v1 | mask;
+        vector[start+word] = v2;
+        v1 != v2
+    }
+
+ /// Do the bits from `source` contain `target`?
+ ///
+ /// Put another way, if the matrix represents (transitive)
+ /// reachability, can `source` reach `target`?
+ pub fn contains(&self, source: usize, target: usize) -> bool {
+ let (start, _) = self.range(source);
+ let (word, mask) = word_mask(target);
+ (self.vector[start+word] & mask) != 0
+ }
+
+ /// Returns those indices that are reachable from both `a` and
+ /// `b`. This is an O(n) operation where `n` is the number of
+ /// elements (somewhat independent from the actual size of the
+ /// intersection, in particular).
+ pub fn intersection(&self, a: usize, b: usize) -> Vec<usize> {
+ let (a_start, a_end) = self.range(a);
+ let (b_start, b_end) = self.range(b);
+ let mut result = Vec::with_capacity(self.elements);
+ for (base, (i, j)) in (a_start..a_end).zip(b_start..b_end).enumerate() {
+ let mut v = self.vector[i] & self.vector[j];
+ for bit in 0..64 {
+ if v == 0 { break; }
+ if v & 0x1 != 0 { result.push(base*64 + bit); }
+ v >>= 1;
+ }
+ }
+ result
+ }
+
+ /// Add the bits from `read` to the bits from `write`,
+ /// return true if anything changed.
+ ///
+    /// This is used when computing transitive reachability, because
+    /// if you have an edge `write -> read`, then `write` can
+    /// reach everything that `read` can (and
+    /// potentially more).
+ pub fn merge(&mut self, read: usize, write: usize) -> bool {
+ let (read_start, read_end) = self.range(read);
+ let (write_start, write_end) = self.range(write);
+ let vector = &mut self.vector[..];
+ let mut changed = false;
+ for (read_index, write_index) in
+ (read_start..read_end).zip(write_start..write_end)
+ {
+ let v1 = vector[write_index];
+ let v2 = v1 | vector[read_index];
+ vector[write_index] = v2;
+ changed = changed | (v1 != v2);
+ }
+ changed
+ }
+}
+
+fn u64s(elements: usize) -> usize {
+ (elements + 63) / 64
+}
+
+fn word_mask(index: usize) -> (usize, u64) {
+ let word = index / 64;
+ let mask = 1 << (index % 64);
+ (word, mask)
+}
+
+#[test]
+fn union_two_vecs() {
+ let mut vec1 = BitVector::new(65);
+ let mut vec2 = BitVector::new(65);
+ assert!(vec1.insert(3));
+ assert!(!vec1.insert(3));
+ assert!(vec2.insert(5));
+ assert!(vec2.insert(64));
+ assert!(vec1.insert_all(&vec2));
+ assert!(!vec1.insert_all(&vec2));
+ assert!(vec1.contains(3));
+ assert!(!vec1.contains(4));
+ assert!(vec1.contains(5));
+ assert!(!vec1.contains(63));
+ assert!(vec1.contains(64));
+}
+
+#[test]
+fn grow() {
+ let mut vec1 = BitVector::new(65);
+ assert!(vec1.insert(3));
+ assert!(!vec1.insert(3));
+ assert!(vec1.insert(5));
+ assert!(vec1.insert(64));
+ vec1.grow(128);
+ assert!(vec1.contains(3));
+ assert!(vec1.contains(5));
+ assert!(vec1.contains(64));
+ assert!(!vec1.contains(126));
+}
+
+#[test]
+fn matrix_intersection() {
+ let mut vec1 = BitMatrix::new(200);
+
+ // (*) Elements reachable from both 2 and 65.
+
+ vec1.add(2, 3);
+ vec1.add(2, 6);
+ vec1.add(2, 10); // (*)
+ vec1.add(2, 64); // (*)
+ vec1.add(2, 65);
+ vec1.add(2, 130);
+ vec1.add(2, 160); // (*)
+
+ vec1.add(64, 133);
+
+ vec1.add(65, 2);
+ vec1.add(65, 8);
+ vec1.add(65, 10); // (*)
+ vec1.add(65, 64); // (*)
+ vec1.add(65, 68);
+ vec1.add(65, 133);
+ vec1.add(65, 160); // (*)
+
+ let intersection = vec1.intersection(2, 64);
+ assert!(intersection.is_empty());
+
+ let intersection = vec1.intersection(2, 65);
+ assert_eq!(intersection, &[10, 64, 160]);
}
#[macro_use] extern crate log;
extern crate serialize as rustc_serialize; // used by deriving
-pub mod snapshot_vec;
-pub mod graph;
pub mod bitvec;
+pub mod graph;
pub mod ivar;
+pub mod snapshot_vec;
+pub mod transitive_relation;
pub mod unify;
// See comments in src/librustc/lib.rs
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use bitvec::BitMatrix;
+use std::cell::RefCell;
+use std::fmt::Debug;
+use std::mem;
+
+#[derive(Clone)]
+pub struct TransitiveRelation<T:Debug+PartialEq> {
+ // List of elements. This is used to map from a T to a usize. We
+ // expect domain to be small so just use a linear list versus a
+ // hashmap or something.
+ elements: Vec<T>,
+
+    // List of base edges in the graph. Required to compute the
+    // transitive closure.
+ edges: Vec<Edge>,
+
+ // This is a cached transitive closure derived from the edges.
+    // Currently, we build it lazily and just throw out any existing
+ // copy whenever a new edge is added. (The RefCell is to permit
+ // the lazy computation.) This is kind of silly, except for the
+ // fact its size is tied to `self.elements.len()`, so I wanted to
+ // wait before building it up to avoid reallocating as new edges
+ // are added with new elements. Perhaps better would be to ask the
+ // user for a batch of edges to minimize this effect, but I
+ // already wrote the code this way. :P -nmatsakis
+ closure: RefCell<Option<BitMatrix>>
+}
+
+#[derive(Clone, PartialEq, PartialOrd)]
+struct Index(usize);
+
+#[derive(Clone, PartialEq)]
+struct Edge {
+ source: Index,
+ target: Index,
+}
+
+impl<T:Debug+PartialEq> TransitiveRelation<T> {
+ pub fn new() -> TransitiveRelation<T> {
+ TransitiveRelation { elements: vec![],
+ edges: vec![],
+ closure: RefCell::new(None) }
+ }
+
+ fn index(&self, a: &T) -> Option<Index> {
+ self.elements.iter().position(|e| *e == *a).map(Index)
+ }
+
+ fn add_index(&mut self, a: T) -> Index {
+ match self.index(&a) {
+ Some(i) => i,
+ None => {
+ self.elements.push(a);
+
+ // if we changed the dimensions, clear the cache
+ *self.closure.borrow_mut() = None;
+
+ Index(self.elements.len() - 1)
+ }
+ }
+ }
+
+ /// Indicate that `a < b` (where `<` is this relation)
+ pub fn add(&mut self, a: T, b: T) {
+ let a = self.add_index(a);
+ let b = self.add_index(b);
+ let edge = Edge { source: a, target: b };
+ if !self.edges.contains(&edge) {
+ self.edges.push(edge);
+
+ // added an edge, clear the cache
+ *self.closure.borrow_mut() = None;
+ }
+ }
+
+    /// Check whether `a < b` (transitively)
+ pub fn contains(&self, a: &T, b: &T) -> bool {
+ match (self.index(a), self.index(b)) {
+ (Some(a), Some(b)) =>
+ self.with_closure(|closure| closure.contains(a.0, b.0)),
+ (None, _) | (_, None) =>
+ false,
+ }
+ }
+
+ /// Picks what I am referring to as the "postdominating"
+ /// upper-bound for `a` and `b`. This is usually the least upper
+ /// bound, but in cases where there is no single least upper
+ /// bound, it is the "mutual immediate postdominator", if you
+ /// imagine a graph where `a < b` means `a -> b`.
+ ///
+ /// This function is needed because region inference currently
+ /// requires that we produce a single "UB", and there is no best
+ /// choice for the LUB. Rather than pick arbitrarily, I pick a
+ /// less good, but predictable choice. This should help ensure
+ /// that region inference yields predictable results (though it
+ /// itself is not fully sufficient).
+ ///
+ /// Examples are probably clearer than any prose I could write
+ /// (there are corresponding tests below, btw). In each case,
+ /// the query is `postdom_upper_bound(a, b)`:
+ ///
+    /// ```text
+ /// // returns Some(x), which is also LUB
+ /// a -> a1 -> x
+ /// ^
+ /// |
+ /// b -> b1 ---+
+ ///
+ /// // returns Some(x), which is not LUB (there is none)
+ /// // diagonal edges run left-to-right
+ /// a -> a1 -> x
+ /// \/ ^
+ /// /\ |
+ /// b -> b1 ---+
+ ///
+ /// // returns None
+ /// a -> a1
+ /// b -> b1
+ /// ```
+ pub fn postdom_upper_bound(&self, a: &T, b: &T) -> Option<&T> {
+ let mut mubs = self.minimal_upper_bounds(a, b);
+ loop {
+ match mubs.len() {
+ 0 => return None,
+ 1 => return Some(mubs[0]),
+ _ => {
+ let m = mubs.pop().unwrap();
+ let n = mubs.pop().unwrap();
+ mubs.extend(self.minimal_upper_bounds(n, m));
+ }
+ }
+ }
+ }
+
+ /// Returns the set of bounds `X` such that:
+ ///
+ /// - `a < X` and `b < X`
+ /// - there is no `Y != X` such that `a < Y` and `Y < X`
+ /// - except for the case where `X < a` (i.e., a strongly connected
+ /// component in the graph). In that case, the smallest
+ /// representative of the SCC is returned (as determined by the
+ /// internal indices).
+ ///
+ /// Note that this set can, in principle, have any size.
+ pub fn minimal_upper_bounds(&self, a: &T, b: &T) -> Vec<&T> {
+ let (mut a, mut b) = match (self.index(a), self.index(b)) {
+ (Some(a), Some(b)) => (a, b),
+ (None, _) | (_, None) => { return vec![]; }
+ };
+
+ // in some cases, there are some arbitrary choices to be made;
+ // it doesn't really matter what we pick, as long as we pick
+ // the same thing consistently when queried, so ensure that
+ // (a, b) are in a consistent relative order
+ if a > b {
+ mem::swap(&mut a, &mut b);
+ }
+
+ let lub_indices = self.with_closure(|closure| {
+ // Easy case is when either a < b or b < a:
+ if closure.contains(a.0, b.0) {
+ return vec![b.0];
+ }
+ if closure.contains(b.0, a.0) {
+ return vec![a.0];
+ }
+
+ // Otherwise, the tricky part is that there may be some c
+ // where a < c and b < c. In fact, there may be many such
+ // values. So here is what we do:
+ //
+ // 1. Find the vector `[X | a < X && b < X]` of all values
+ // `X` where `a < X` and `b < X`. In terms of the
+ // graph, this means all values reachable from both `a`
+ // and `b`. Note that this vector is also a set, but we
+ // use the term vector because the order matters
+ // to the steps below.
+ // - This vector contains upper bounds, but they are
+ // not minimal upper bounds. So you may have e.g.
+ // `[x, y, tcx, z]` where `x < tcx` and `y < tcx` and
+ // `z < x` and `z < y`:
+ //
+ // z --+---> x ----+----> tcx
+ // | |
+ // | |
+ // +---> y ----+
+ //
+ // In this case, we really want to return just `[z]`.
+ // The following steps below achieve this by gradually
+ // reducing the list.
+ // 2. Pare down the vector using `pare_down`. This will
+ // remove elements from the vector that can be reached
+ // by an earlier element.
+ // - In the example above, this would convert `[x, y,
+ // tcx, z]` to `[x, y, z]`. Note that `x` and `y` are
+ // still in the vector; this is because while `z < x`
+ // (and `z < y`) holds, `z` comes after them in the
+ // vector.
+ // 3. Reverse the vector and repeat the pare down process.
+ // - In the example above, we would reverse to
+ // `[z, y, x]` and then pare down to `[z]`.
+ // 4. Reverse once more just so that we yield a vector in
+ // increasing order of index. Not necessary, but why not.
+ //
+ // I believe this algorithm yields a minimal set. The
+ // argument is that, after step 2, we know that no element
+ // can reach its successors (in the vector, not the graph).
+ // After step 3, we know that no element can reach any of
+        // its predecessors (because of step 2) nor successors
+ // (because we just called `pare_down`)
+
+ let mut candidates = closure.intersection(a.0, b.0); // (1)
+ pare_down(&mut candidates, closure); // (2)
+ candidates.reverse(); // (3a)
+ pare_down(&mut candidates, closure); // (3b)
+ candidates
+ });
+
+ lub_indices.into_iter()
+ .rev() // (4)
+ .map(|i| &self.elements[i])
+ .collect()
+ }
+
+ fn with_closure<OP,R>(&self, op: OP) -> R
+ where OP: FnOnce(&BitMatrix) -> R
+ {
+ let mut closure_cell = self.closure.borrow_mut();
+ let mut closure = closure_cell.take();
+ if closure.is_none() {
+ closure = Some(self.compute_closure());
+ }
+ let result = op(closure.as_ref().unwrap());
+ *closure_cell = closure;
+ result
+ }
+
+ fn compute_closure(&self) -> BitMatrix {
+ let mut matrix = BitMatrix::new(self.elements.len());
+ let mut changed = true;
+ while changed {
+ changed = false;
+ for edge in self.edges.iter() {
+ // add an edge from S -> T
+ changed |= matrix.add(edge.source.0, edge.target.0);
+
+ // add all outgoing edges from T into S
+ changed |= matrix.merge(edge.target.0, edge.source.0);
+ }
+ }
+ matrix
+ }
+}
+
+/// Pare down is used as a step in the LUB computation. It edits the
+/// candidates array in place by removing any element j for which
+/// there exists an earlier element i<j such that i -> j. That is,
+/// after you run `pare_down`, you know that for all elements that
+/// remain in candidates, they cannot reach any of the elements that
+/// come after them.
+///
+/// Examples follow. Assume that a -> b -> c and x -> y -> z.
+///
+/// - Input: `[a, b, x]`. Output: `[a, x]`.
+/// - Input: `[b, a, x]`. Output: `[b, a, x]`.
+/// - Input: `[a, x, b, y]`. Output: `[a, x]`.
+fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix) {
+ let mut i = 0;
+ while i < candidates.len() {
+ let candidate_i = candidates[i];
+ i += 1;
+
+ let mut j = i;
+ let mut dead = 0;
+ while j < candidates.len() {
+ let candidate_j = candidates[j];
+ if closure.contains(candidate_i, candidate_j) {
+ // If `i` can reach `j`, then we can remove `j`. So just
+ // mark it as dead and move on; subsequent indices will be
+ // shifted into its place.
+ dead += 1;
+ } else {
+ candidates[j - dead] = candidate_j;
+ }
+ j += 1;
+ }
+ candidates.truncate(j - dead);
+ }
+}
+
+#[test]
+fn test_one_step() {
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "b");
+ relation.add("a", "c");
+ assert!(relation.contains(&"a", &"c"));
+ assert!(relation.contains(&"a", &"b"));
+ assert!(!relation.contains(&"b", &"a"));
+ assert!(!relation.contains(&"a", &"d"));
+}
+
+#[test]
+fn test_many_steps() {
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "b");
+ relation.add("a", "c");
+ relation.add("a", "f");
+
+ relation.add("b", "c");
+ relation.add("b", "d");
+ relation.add("b", "e");
+
+ relation.add("e", "g");
+
+ assert!(relation.contains(&"a", &"b"));
+ assert!(relation.contains(&"a", &"c"));
+ assert!(relation.contains(&"a", &"d"));
+ assert!(relation.contains(&"a", &"e"));
+ assert!(relation.contains(&"a", &"f"));
+ assert!(relation.contains(&"a", &"g"));
+
+ assert!(relation.contains(&"b", &"g"));
+
+ assert!(!relation.contains(&"a", &"x"));
+ assert!(!relation.contains(&"b", &"f"));
+}
+
+#[test]
+fn mubs_triange() {
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "tcx");
+ relation.add("b", "tcx");
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"tcx"]);
+}
+
+#[test]
+fn mubs_best_choice1() {
+ // 0 -> 1 <- 3
+ // | ^ |
+ // | | |
+ // +--> 2 <--+
+ //
+    // mubs(0,3) = [2]
+
+ // This tests a particular state in the algorithm, in which we
+ // need the second pare down call to get the right result (after
+ // intersection, we have [1, 2], but 2 -> 1).
+
+ let mut relation = TransitiveRelation::new();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("2", "1");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"2"]);
+}
+
+#[test]
+fn mubs_best_choice2() {
+ // 0 -> 1 <- 3
+ // | | |
+ // | v |
+ // +--> 2 <--+
+ //
+    // mubs(0,3) = [1]
+
+    // Like the preceding test, but in this case intersection is [2,
+ // 1], and hence we rely on the first pare down call.
+
+ let mut relation = TransitiveRelation::new();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("1", "2");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1"]);
+}
+
+#[test]
+fn mubs_no_best_choice() {
+ // in this case, the intersection yields [1, 2], and the "pare
+ // down" calls find nothing to remove.
+ let mut relation = TransitiveRelation::new();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1", &"2"]);
+}
+
+#[test]
+fn mubs_best_choice_scc() {
+ let mut relation = TransitiveRelation::new();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("1", "2");
+ relation.add("2", "1");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds(&"0", &"3"), vec![&"1"]);
+}
+
+#[test]
+fn pdub_crisscross() {
+ // diagonal edges run left-to-right
+ // a -> a1 -> x
+ // \/ ^
+ // /\ |
+ // b -> b1 ---+
+
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "a1");
+ relation.add("a", "b1");
+ relation.add("b", "a1");
+ relation.add("b", "b1");
+ relation.add("a1", "x");
+ relation.add("b1", "x");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
+ assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+}
+
+#[test]
+fn pdub_crisscross_more() {
+ // diagonal edges run left-to-right
+ // a -> a1 -> a2 -> a3 -> x
+ // \/ \/ ^
+ // /\ /\ |
+ // b -> b1 -> b2 ---------+
+
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "a1");
+ relation.add("a", "b1");
+ relation.add("b", "a1");
+ relation.add("b", "b1");
+
+ relation.add("a1", "a2");
+ relation.add("a1", "b2");
+ relation.add("b1", "a2");
+ relation.add("b1", "b2");
+
+ relation.add("a2", "a3");
+
+ relation.add("a3", "x");
+ relation.add("b2", "x");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]);
+ assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"), vec![&"a2", &"b2"]);
+ assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+}
+
+#[test]
+fn pdub_lub() {
+ // a -> a1 -> x
+ // ^
+ // |
+ // b -> b1 ---+
+
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "a1");
+ relation.add("b", "b1");
+ relation.add("a1", "x");
+ relation.add("b1", "x");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"x"]);
+ assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x"));
+}
+
+#[test]
+fn mubs_intermediate_node_on_one_side_only() {
+ // a -> c -> d
+ // ^
+ // |
+ // b
+
+ // "digraph { a -> c -> d; b -> d; }",
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("b", "d");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"d"]);
+}
+
+#[test]
+fn mubs_scc_1() {
+ // +-------------+
+ // | +----+ |
+ // | v | |
+ // a -> c -> d <-+
+ // ^
+ // |
+ // b
+
+ // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "c");
+ relation.add("a", "d");
+ relation.add("b", "d");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_2() {
+ // +----+
+ // v |
+ // a -> c -> d
+ // ^ ^
+ // | |
+ // +--- b
+
+ // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "c");
+ relation.add("b", "d");
+ relation.add("b", "c");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_3() {
+ // +---------+
+ // v |
+ // a -> c -> d -> e
+ // ^ ^
+ // | |
+ // b ---+
+
+ // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "e");
+ relation.add("e", "c");
+ relation.add("b", "d");
+ relation.add("b", "e");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
+
+#[test]
+fn mubs_scc_4() {
+ // +---------+
+ // v |
+ // a -> c -> d -> e
+ // | ^ ^
+ // +---------+ |
+ // |
+ // b ---+
+
+ // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
+ let mut relation = TransitiveRelation::new();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "e");
+ relation.add("e", "c");
+ relation.add("a", "d");
+ relation.add("b", "e");
+
+ assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]);
+}
extern crate test;
use self::test::Bencher;
-use std::collections::HashSet;
use unify::{UnifyKey, UnificationTable};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
"vsqrtq_f32" => plain!("llvm.sqrt.v4f32", (f32x4) -> f32x4),
"vsqrtq_f64" => plain!("llvm.sqrt.v2f64", (f64x2) -> f64x2),
- "vrsqrteq_f32" => p!("vrsqrte.v4f32", (f32x4) -> f32x4),
- "vrsqrteq_f64" => p!("vrsqrte.v2f64", (f64x2) -> f64x2),
- "vrecpeq_f32" => p!("vrecpe.v4f32", (f32x4) -> f32x4),
- "vrecpeq_f64" => p!("vrecpe.v2f64", (f64x2) -> f64x2),
+ "vrsqrteq_f32" => p!("frsqrte.v4f32", (f32x4) -> f32x4),
+ "vrsqrteq_f64" => p!("frsqrte.v2f64", (f64x2) -> f64x2),
+ "vrecpeq_f32" => p!("frecpe.v4f32", (f32x4) -> f32x4),
+ "vrecpeq_f64" => p!("frecpe.v2f64", (f64x2) -> f64x2),
"vmaxq_f32" => p!("fmax.v4f32", (f32x4, f32x4) -> f32x4),
"vmaxq_f64" => p!("fmax.v2f64", (f64x2, f64x2) -> f64x2),
"vminq_f32" => p!("fmin.v4f32", (f32x4, f32x4) -> f32x4),
"vminq_f64" => p!("fmin.v2f64", (f64x2, f64x2) -> f64x2),
+
+ "vqtbl1q_u8" => p!("tbl1.v16i8", (i8x16, i8x16) -> i8x16),
+ "vqtbl1q_s8" => p!("tbl1.v16i8", (i8x16, i8x16) -> i8x16),
_ => return None,
})
}
}
fn get_parent_link(&mut self, parent: &Rc<Module>, name: Name) -> ParentLink {
- ModuleParentLink(parent.downgrade(), name)
+ ModuleParentLink(Rc::downgrade(parent), name)
}
/// Constructs the reduced graph for one item.
if let Some(crate_id) = self.session.cstore.find_extern_mod_stmt_cnum(item.id) {
let def_id = DefId { krate: crate_id, node: 0 };
self.external_exports.insert(def_id);
- let parent_link = ModuleParentLink(parent.downgrade(), name);
+ let parent_link = ModuleParentLink(Rc::downgrade(parent), name);
let external_module = Rc::new(Module::new(parent_link,
Some(def_id),
NormalModuleKind,
block_id);
let new_module = Rc::new(Module::new(
- BlockParentLink(parent.downgrade(), block_id),
+ BlockParentLink(Rc::downgrade(parent), block_id),
None,
AnonymousModuleKind,
false,
#![feature(associated_consts)]
#![feature(borrow_state)]
-#![feature(rc_weak)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
#![feature(slice_splits)]
#![feature(staged_api)]
+#![feature(rc_weak)]
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
#![feature(path_relative_from)]
#![feature(path_relative_from)]
#![feature(quote)]
-#![feature(rc_weak)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
#![feature(staged_api)]
#![feature(unicode)]
#![feature(unicode)]
#![feature(vec_push_all)]
+#![feature(rc_weak)]
#![allow(trivial_casts)]
let node = Rc::new(NamespaceTreeNode {
name: name,
scope: scope,
- parent: parent_node.map(|parent| parent.downgrade()),
+ parent: parent_node.map(|parent| Rc::downgrade(&parent)),
});
debug_context(cx).namespace_map.borrow_mut()
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject))]
#![no_std]
pub feature: String,
pub since: String,
pub deprecated_since: String,
- pub reason: String
+ pub reason: String,
+ pub issue: Option<u32>
}
impl Clean<Stability> for attr::Stability {
|istr| istr.to_string()),
reason: self.reason.as_ref().map_or("".to_string(),
|interned| interned.to_string()),
+ issue: self.issue,
}
}
}
|istr| istr.to_string()),
reason: self.reason.as_ref().map_or("".to_string(),
|interned| interned.to_string()),
+ issue: self.issue,
}
}
}
pub render_redirect_pages: bool,
/// All the passes that were run on this crate.
pub passes: HashSet<String>,
+ /// The base-URL of the issue tracker for when an item has been tagged with
+ /// an issue number.
+ pub issue_tracker_base_url: Option<String>,
}
/// Indicates where an external crate can be found.
},
include_sources: true,
render_redirect_pages: false,
+ issue_tracker_base_url: None,
};
try!(mkdir(&cx.dst));
}
});
}
+ clean::NameValue(ref x, ref s)
+ if "issue_tracker_base_url" == *x => {
+ cx.issue_tracker_base_url = Some(s.to_string());
+ }
clean::Word(ref x)
if "html_no_source" == *x => {
cx.include_sources = false;
item_module(fmt, self.cx, self.item, &m.items)
}
clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) =>
- item_function(fmt, self.item, f),
+ item_function(fmt, self.cx, self.item, f),
clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t),
- clean::StructItem(ref s) => item_struct(fmt, self.item, s),
- clean::EnumItem(ref e) => item_enum(fmt, self.item, e),
- clean::TypedefItem(ref t, _) => item_typedef(fmt, self.item, t),
- clean::MacroItem(ref m) => item_macro(fmt, self.item, m),
- clean::PrimitiveItem(ref p) => item_primitive(fmt, self.item, p),
+ clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s),
+ clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e),
+ clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t),
+ clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m),
+ clean::PrimitiveItem(ref p) => item_primitive(fmt, self.cx, self.item, p),
clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) =>
- item_static(fmt, self.item, i),
- clean::ConstantItem(ref c) => item_constant(fmt, self.item, c),
+ item_static(fmt, self.cx, self.item, i),
+ clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c),
_ => Ok(())
}
}
markdown::plain_summary_line(&line[..])
}
-fn document(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result {
- if let Some(s) = short_stability(item, true) {
+fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
+ if let Some(s) = short_stability(item, cx, true) {
try!(write!(w, "<div class='stability'>{}</div>", s));
}
if let Some(s) = item.doc_value() {
fn item_module(w: &mut fmt::Formatter, cx: &Context,
item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
- try!(document(w, item));
+ try!(document(w, cx, item));
let mut indices = (0..items.len()).filter(|i| {
!cx.ignore_private_item(&items[*i])
_ => {
if myitem.name.is_none() { continue }
- let stab_docs = if let Some(s) = short_stability(myitem, false) {
+ let stab_docs = if let Some(s) = short_stability(myitem, cx, false) {
format!("[{}]", s)
} else {
String::new()
write!(w, "</table>")
}
-fn short_stability(item: &clean::Item, show_reason: bool) -> Option<String> {
+fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Option<String> {
item.stability.as_ref().and_then(|stab| {
let reason = if show_reason && !stab.reason.is_empty() {
format!(": {}", stab.reason)
};
format!("Deprecated{}{}", since, Markdown(&reason))
} else if stab.level == attr::Unstable {
- format!("Unstable{}", Markdown(&reason))
+ let unstable_extra = if show_reason {
+ match (!stab.feature.is_empty(), &cx.issue_tracker_base_url, stab.issue) {
+ (true, &Some(ref tracker_url), Some(issue_no)) =>
+ format!(" (<code>{}</code> <a href=\"{}{}\">#{}</a>)",
+ Escape(&stab.feature), tracker_url, issue_no, issue_no),
+ (false, &Some(ref tracker_url), Some(issue_no)) =>
+ format!(" (<a href=\"{}{}\">#{}</a>)", Escape(&tracker_url), issue_no,
+ issue_no),
+ (true, _, _) =>
+ format!(" (<code>{}</code>)", Escape(&stab.feature)),
+ _ => String::new(),
+ }
+ } else {
+ String::new()
+ };
+ format!("Unstable{}{}", unstable_extra, Markdown(&reason))
} else {
return None
};
}
}
-fn item_constant(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
c: &clean::Constant) -> fmt::Result {
try!(write!(w, "<pre class='rust const'>{vis}const \
{name}: {typ}{init}</pre>",
name = it.name.as_ref().unwrap(),
typ = c.type_,
init = Initializer(&c.expr)));
- document(w, it)
+ document(w, cx, it)
}
-fn item_static(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
s: &clean::Static) -> fmt::Result {
try!(write!(w, "<pre class='rust static'>{vis}static {mutability}\
{name}: {typ}{init}</pre>",
name = it.name.as_ref().unwrap(),
typ = s.type_,
init = Initializer(&s.expr)));
- document(w, it)
+ document(w, cx, it)
}
-fn item_function(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
f: &clean::Function) -> fmt::Result {
try!(write!(w, "<pre class='rust fn'>{vis}{unsafety}{abi}{constness}fn \
{name}{generics}{decl}{where_clause}</pre>",
generics = f.generics,
where_clause = WhereClause(&f.generics),
decl = f.decl));
- document(w, it)
+ document(w, cx, it)
}
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
try!(write!(w, "</pre>"));
// Trait documentation
- try!(document(w, it));
+ try!(document(w, cx, it));
- fn trait_item(w: &mut fmt::Formatter, m: &clean::Item)
+ fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item)
-> fmt::Result {
try!(write!(w, "<h3 id='{ty}.{name}' class='method stab {stab}'><code>",
ty = shortty(m),
stab = m.stability_class()));
try!(render_assoc_item(w, m, AssocItemLink::Anchor));
try!(write!(w, "</code></h3>"));
- try!(document(w, m));
+ try!(document(w, cx, m));
Ok(())
}
<div class='methods'>
"));
for t in &types {
- try!(trait_item(w, *t));
+ try!(trait_item(w, cx, *t));
}
try!(write!(w, "</div>"));
}
<div class='methods'>
"));
for t in &consts {
- try!(trait_item(w, *t));
+ try!(trait_item(w, cx, *t));
}
try!(write!(w, "</div>"));
}
<div class='methods'>
"));
for m in &required {
- try!(trait_item(w, *m));
+ try!(trait_item(w, cx, *m));
}
try!(write!(w, "</div>"));
}
<div class='methods'>
"));
for m in &provided {
- try!(trait_item(w, *m));
+ try!(trait_item(w, cx, *m));
}
try!(write!(w, "</div>"));
}
// If there are methods directly on this trait object, render them here.
- try!(render_assoc_items(w, it.def_id, AssocItemRender::All));
+ try!(render_assoc_items(w, cx, it.def_id, AssocItemRender::All));
let cache = cache();
try!(write!(w, "
}
}
-fn item_struct(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
s: &clean::Struct) -> fmt::Result {
try!(write!(w, "<pre class='rust struct'>"));
try!(render_attributes(w, it));
true));
try!(write!(w, "</pre>"));
- try!(document(w, it));
+ try!(document(w, cx, it));
let mut fields = s.fields.iter().filter(|f| {
match f.inner {
clean::StructFieldItem(clean::HiddenStructField) => false,
<code>{name}</code></td><td>",
stab = field.stability_class(),
name = field.name.as_ref().unwrap()));
- try!(document(w, field));
+ try!(document(w, cx, field));
try!(write!(w, "</td></tr>"));
}
try!(write!(w, "</table>"));
}
}
- render_assoc_items(w, it.def_id, AssocItemRender::All)
+ render_assoc_items(w, cx, it.def_id, AssocItemRender::All)
}
-fn item_enum(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
e: &clean::Enum) -> fmt::Result {
try!(write!(w, "<pre class='rust enum'>"));
try!(render_attributes(w, it));
}
try!(write!(w, "</pre>"));
- try!(document(w, it));
+ try!(document(w, cx, it));
if !e.variants.is_empty() {
try!(write!(w, "<h2 class='variants'>Variants</h2>\n<table>"));
for variant in &e.variants {
try!(write!(w, "<tr><td id='variant.{name}'><code>{name}</code></td><td>",
name = variant.name.as_ref().unwrap()));
- try!(document(w, variant));
+ try!(document(w, cx, variant));
match variant.inner {
clean::VariantItem(ref var) => {
match var.kind {
<code>{f}</code></td><td>",
v = variant.name.as_ref().unwrap(),
f = field.name.as_ref().unwrap()));
- try!(document(w, field));
+ try!(document(w, cx, field));
try!(write!(w, "</td></tr>"));
}
try!(write!(w, "</table>"));
try!(write!(w, "</table>"));
}
- try!(render_assoc_items(w, it.def_id, AssocItemRender::All));
+ try!(render_assoc_items(w, cx, it.def_id, AssocItemRender::All));
Ok(())
}
}
fn render_assoc_items(w: &mut fmt::Formatter,
+ cx: &Context,
it: ast::DefId,
what: AssocItemRender) -> fmt::Result {
let c = cache();
}
};
for i in &non_trait {
- try!(render_impl(w, i, AssocItemLink::Anchor, render_header));
+ try!(render_impl(w, cx, i, AssocItemLink::Anchor, render_header));
}
}
if let AssocItemRender::DerefFor { .. } = what {
}
});
if let Some(impl_) = deref_impl {
- try!(render_deref_methods(w, impl_));
+ try!(render_deref_methods(w, cx, impl_));
}
try!(write!(w, "<h2 id='implementations'>Trait \
Implementations</h2>"));
});
for i in &manual {
let did = i.trait_did().unwrap();
- try!(render_impl(w, i, AssocItemLink::GotoSource(did), true));
+ try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true));
}
if !derived.is_empty() {
try!(write!(w, "<h3 id='derived_implementations'>\
</h3>"));
for i in &derived {
let did = i.trait_did().unwrap();
- try!(render_impl(w, i, AssocItemLink::GotoSource(did), true));
+ try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true));
}
}
}
Ok(())
}
-fn render_deref_methods(w: &mut fmt::Formatter, impl_: &Impl) -> fmt::Result {
+fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl) -> fmt::Result {
let deref_type = impl_.impl_.trait_.as_ref().unwrap();
let target = impl_.impl_.items.iter().filter_map(|item| {
match item.inner {
}).next().expect("Expected associated type binding");
let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target };
match *target {
- clean::ResolvedPath { did, .. } => render_assoc_items(w, did, what),
+ clean::ResolvedPath { did, .. } => render_assoc_items(w, cx, did, what),
_ => {
if let Some(prim) = target.primitive_type() {
if let Some(c) = cache().primitive_locations.get(&prim) {
let did = ast::DefId { krate: *c, node: prim.to_node_id() };
- try!(render_assoc_items(w, did, what));
+ try!(render_assoc_items(w, cx, did, what));
}
}
Ok(())
// Render_header is false when we are rendering a `Deref` impl and true
// otherwise. If render_header is false, we will avoid rendering static
// methods, since they are not accessible for the type implementing `Deref`
-fn render_impl(w: &mut fmt::Formatter, i: &Impl, link: AssocItemLink,
+fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink,
render_header: bool) -> fmt::Result {
if render_header {
try!(write!(w, "<h3 class='impl'><code>{}</code></h3>", i.impl_));
}
}
- fn doctraititem(w: &mut fmt::Formatter, item: &clean::Item,
+ fn doctraititem(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item,
link: AssocItemLink, render_static: bool) -> fmt::Result {
match item.inner {
clean::MethodItem(..) | clean::TyMethodItem(..) => {
if is_static_method(item) && !render_static {
Ok(())
} else {
- document(w, item)
+ document(w, cx, item)
}
} else {
Ok(())
try!(write!(w, "<div class='impl-items'>"));
for trait_item in &i.impl_.items {
- try!(doctraititem(w, trait_item, link, render_header));
+ try!(doctraititem(w, cx, trait_item, link, render_header));
}
fn render_default_items(w: &mut fmt::Formatter,
+ cx: &Context,
did: ast::DefId,
t: &clean::Trait,
i: &clean::Impl,
None => {}
}
- try!(doctraititem(w, trait_item, AssocItemLink::GotoSource(did), render_static));
+ try!(doctraititem(w, cx, trait_item, AssocItemLink::GotoSource(did), render_static));
}
Ok(())
}
// for them work.
if let Some(clean::ResolvedPath { did, .. }) = i.impl_.trait_ {
if let Some(t) = cache().traits.get(&did) {
- try!(render_default_items(w, did, t, &i.impl_, render_header));
+ try!(render_default_items(w, cx, did, t, &i.impl_, render_header));
}
}
Ok(())
}
-fn item_typedef(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
t: &clean::Typedef) -> fmt::Result {
try!(write!(w, "<pre class='rust typedef'>type {}{}{where_clause} = {type_};</pre>",
it.name.as_ref().unwrap(),
where_clause = WhereClause(&t.generics),
type_ = t.type_));
- document(w, it)
+ document(w, cx, it)
}
impl<'a> fmt::Display for Sidebar<'a> {
}
}
-fn item_macro(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
t: &clean::Macro) -> fmt::Result {
try!(w.write_str(&highlight::highlight(&t.source,
Some("macro"),
None)));
- document(w, it)
+ document(w, cx, it)
}
-fn item_primitive(w: &mut fmt::Formatter,
+fn item_primitive(w: &mut fmt::Formatter, cx: &Context,
it: &clean::Item,
_p: &clean::PrimitiveType) -> fmt::Result {
- try!(document(w, it));
- render_assoc_items(w, it.def_id, AssocItemRender::All)
+ try!(document(w, cx, it));
+ render_assoc_items(w, cx, it.def_id, AssocItemRender::All)
}
fn get_basic_keywords() -> &'static str {
margin-left: 20px;
}
+.content .stability code {
+ font-size: 90%;
+}
+
nav {
border-bottom: 1px solid #e0e0e0;
padding-bottom: 10px;
background: transparent;
}
-.docblock a {
+.docblock a, .stability a {
color: #4e8bca;
}
-.docblock a:hover {
+/* Underline doc and stability links only on hover; the resting color is set
+   by the `.docblock a, .stability a` rule above. */
+.docblock a:hover, .stability a:hover {
    text-decoration: underline;
}
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject, attr(deny(warnings))),
test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))))]
// Don't link to std. We are std.
#![no_std]
-#![allow(trivial_casts)]
#![deny(missing_docs)]
#[cfg(test)] extern crate test;
// imported by the compiler (via our #[no_std] attribute) In this case we just
// add a new crate name so we can attach the reexports to it.
#[macro_reexport(assert, assert_eq, debug_assert, debug_assert_eq,
- unreachable, unimplemented, write, writeln)]
+ unreachable, unimplemented, write, writeln)]
extern crate core as __core;
#[macro_use]
pub use core_collections::slice;
pub use core_collections::str;
pub use core_collections::string;
-#[stable(feature = "rust1", since = "1.0.0")]
pub use core_collections::vec;
pub use rustc_unicode::char;
/* Primitive types */
-// NB: slice and str are primitive types too, but their module docs + primitive doc pages
-// are inlined from the public re-exports of core_collections::{slice, str} above.
-
-#[path = "num/float_macros.rs"]
-#[macro_use]
-mod float_macros;
-
-#[path = "num/int_macros.rs"]
-#[macro_use]
-mod int_macros;
-
-#[path = "num/uint_macros.rs"]
-#[macro_use]
-mod uint_macros;
-
-#[path = "num/isize.rs"] pub mod isize;
-#[path = "num/i8.rs"] pub mod i8;
-#[path = "num/i16.rs"] pub mod i16;
-#[path = "num/i32.rs"] pub mod i32;
-#[path = "num/i64.rs"] pub mod i64;
-
-#[path = "num/usize.rs"] pub mod usize;
-#[path = "num/u8.rs"] pub mod u8;
-#[path = "num/u16.rs"] pub mod u16;
-#[path = "num/u32.rs"] pub mod u32;
-#[path = "num/u64.rs"] pub mod u64;
+// NB: slice and str are primitive types too, but their module docs + primitive
+// doc pages are inlined from the public re-exports of core_collections::{slice,
+// str} above.
+
+pub use core::isize;
+pub use core::i8;
+pub use core::i16;
+pub use core::i32;
+pub use core::i64;
+
+pub use core::usize;
+pub use core::u8;
+pub use core::u16;
+pub use core::u32;
+pub use core::u64;
#[path = "num/f32.rs"] pub mod f32;
#[path = "num/f64.rs"] pub mod f64;
)
}
+#[cfg(test)]
+macro_rules! assert_approx_eq {
+ ($a:expr, $b:expr) => ({
+ let (a, b) = (&$a, &$b);
+ assert!((*a - *b).abs() < 1.0e-6,
+ "{} is not approximately equal to {}", *a, *b);
+ })
+}
+
/// Built-in macros to the compiler itself.
///
/// These macros do not have any corresponding definition with a `macro_rules!`
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![doc(hidden)]
-
-macro_rules! assert_approx_eq {
- ($a:expr, $b:expr) => ({
- let (a, b) = (&$a, &$b);
- assert!((*a - *b).abs() < 1.0e-6,
- "{} is not approximately equal to {}", *a, *b);
- })
-}
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 16-bit signed integer type.
-//!
-//! *[See also the `i16` primitive type](../primitive.i16.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::i16::{BITS, BYTES, MIN, MAX};
-
-int_module! { i16 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 32-bit signed integer type.
-//!
-//! *[See also the `i32` primitive type](../primitive.i32.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::i32::{BITS, BYTES, MIN, MAX};
-
-int_module! { i32 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 64-bit signed integer type.
-//!
-//! *[See also the `i64` primitive type](../primitive.i64.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::i64::{BITS, BYTES, MIN, MAX};
-
-int_module! { i64 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 8-bit signed integer type.
-//!
-//! *[See also the `i8` primitive type](../primitive.i8.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::i8::{BITS, BYTES, MIN, MAX};
-
-int_module! { i8 }
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![doc(hidden)]
-
-macro_rules! int_module { ($T:ty) => (
-
-) }
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The pointer-sized signed integer type.
-//!
-//! *[See also the `isize` primitive type](../primitive.isize.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::isize::{BITS, BYTES, MIN, MAX};
-
-int_module! { isize }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 16-bit unsigned integer type.
-//!
-//! *[See also the `u16` primitive type](../primitive.u16.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::u16::{BITS, BYTES, MIN, MAX};
-
-uint_module! { u16 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 32-bit unsigned integer type.
-//!
-//! *[See also the `u32` primitive type](../primitive.u32.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::u32::{BITS, BYTES, MIN, MAX};
-
-uint_module! { u32 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 64-bit unsigned integer type.
-//!
-//! *[See also the `u64` primitive type](../primitive.u64.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::u64::{BITS, BYTES, MIN, MAX};
-
-uint_module! { u64 }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The 8-bit unsigned integer type.
-//!
-//! *[See also the `u8` primitive type](../primitive.u8.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::u8::{BITS, BYTES, MIN, MAX};
-
-uint_module! { u8 }
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![doc(hidden)]
-
-macro_rules! uint_module { ($T:ident) => (
-
-#[cfg(test)]
-mod tests {
- use prelude::v1::*;
-
- fn from_str<T: ::str::FromStr>(t: &str) -> Option<T> {
- ::str::FromStr::from_str(t).ok()
- }
-
- #[test]
- pub fn test_from_str() {
- assert_eq!(from_str::<$T>("0"), Some(0 as $T));
- assert_eq!(from_str::<$T>("3"), Some(3 as $T));
- assert_eq!(from_str::<$T>("10"), Some(10 as $T));
- assert_eq!(from_str::<u32>("123456789"), Some(123456789 as u32));
- assert_eq!(from_str::<$T>("00100"), Some(100 as $T));
-
- assert_eq!(from_str::<$T>(""), None);
- assert_eq!(from_str::<$T>(" "), None);
- assert_eq!(from_str::<$T>("x"), None);
- }
-
- #[test]
- pub fn test_parse_bytes() {
- assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
- assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
- assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
- assert_eq!(u16::from_str_radix("123", 16), Ok(291 as u16));
- assert_eq!(u16::from_str_radix("ffff", 16), Ok(65535 as u16));
- assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));
-
- assert_eq!($T::from_str_radix("Z", 10).ok(), None::<$T>);
- assert_eq!($T::from_str_radix("_", 2).ok(), None::<$T>);
- }
-}
-
-) }
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The pointer-sized unsigned integer type.
-//!
-//! *[See also the `usize` primitive type](../primitive.usize.html).*
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-pub use core::usize::{BITS, BYTES, MIN, MAX};
-
-uint_module! { usize }
use sys::time::SteadyTime;
use time::Duration;
+/// A type indicating whether a timed wait on a condition variable returned
+/// due to a time out or not.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+#[unstable(feature = "wait_timeout", reason = "newly added", issue = "27772")]
+pub struct WaitTimeoutResult(bool);
+
+impl WaitTimeoutResult {
+ /// Returns whether the wait was known to have timed out.
+ #[unstable(feature = "wait_timeout", reason = "newly added", issue = "27772")]
+ pub fn timed_out(&self) -> bool {
+ self.0
+ }
+}
+
/// A Condition Variable
///
/// Condition variables represent the ability to block a thread such that it
/// preemption or platform differences that may not cause the maximum
/// amount of time waited to be precisely `dur`.
///
- /// The returned boolean is `false` only if the timeout is known
- /// to have elapsed.
+ /// The returned `WaitTimeoutResult` value indicates if the timeout is
+ /// known to have elapsed.
///
/// Like `wait`, the lock specified will be re-acquired when this function
/// returns, regardless of whether the timeout elapsed or not.
issue = "27772")]
pub fn wait_timeout<'a, T>(&self, guard: MutexGuard<'a, T>,
dur: Duration)
- -> LockResult<(MutexGuard<'a, T>, bool)> {
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
me.inner.wait_timeout(guard, dur)
guard: MutexGuard<'a, T>,
dur: Duration,
f: F)
- -> LockResult<(MutexGuard<'a, T>, bool)>
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
where F: FnMut(LockResult<&mut T>) -> bool {
unsafe {
let me: &'static Condvar = &*(self as *const _);
issue = "27717")]
pub fn wait_timeout_ms<'a, T>(&'static self, guard: MutexGuard<'a, T>, ms: u32)
-> LockResult<(MutexGuard<'a, T>, bool)> {
- self.wait_timeout(guard, Duration::from_millis(ms as u64))
+ match self.wait_timeout(guard, Duration::from_millis(ms as u64)) {
+ Ok((guard, timed_out)) => Ok((guard, !timed_out.timed_out())),
+ Err(poison) => {
+ let (guard, timed_out) = poison.into_inner();
+ Err(PoisonError::new((guard, !timed_out.timed_out())))
+ }
+ }
}
/// Waits on this condition variable for a notification, timing out after a
pub fn wait_timeout<'a, T>(&'static self,
guard: MutexGuard<'a, T>,
timeout: Duration)
- -> LockResult<(MutexGuard<'a, T>, bool)> {
- let (poisoned, success) = unsafe {
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
+ let (poisoned, result) = unsafe {
let lock = mutex::guard_lock(&guard);
self.verify(lock);
let success = self.inner.wait_timeout(lock, timeout);
- (mutex::guard_poison(&guard).get(), success)
+ (mutex::guard_poison(&guard).get(), WaitTimeoutResult(!success))
};
if poisoned {
- Err(PoisonError::new((guard, success)))
+ Err(PoisonError::new((guard, result)))
} else {
- Ok((guard, success))
+ Ok((guard, result))
}
}
guard: MutexGuard<'a, T>,
dur: Duration,
mut f: F)
- -> LockResult<(MutexGuard<'a, T>, bool)>
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
where F: FnMut(LockResult<&mut T>) -> bool {
// This could be made more efficient by pushing the implementation into
// sys::condvar
let now = SteadyTime::now();
let consumed = &now - &start;
let guard = guard_result.unwrap_or_else(|e| e.into_inner());
- let (new_guard_result, no_timeout) = if consumed > dur {
- (Ok(guard), false)
+ let (new_guard_result, timed_out) = if consumed > dur {
+ (Ok(guard), WaitTimeoutResult(true))
} else {
match self.wait_timeout(guard, dur - consumed) {
- Ok((new_guard, no_timeout)) => (Ok(new_guard), no_timeout),
+ Ok((new_guard, timed_out)) => (Ok(new_guard), timed_out),
Err(err) => {
let (new_guard, no_timeout) = err.into_inner();
(Err(PoisonError::new(new_guard)), no_timeout)
}
};
guard_result = new_guard_result;
- if !no_timeout {
+ if timed_out.timed_out() {
let result = f(guard_result
.as_mut()
.map(|g| &mut **g)
.map_err(|e| PoisonError::new(&mut **e.get_mut())));
+ let result = WaitTimeoutResult(!result);
return poison::map_result(guard_result, |g| (g, result));
}
}
- poison::map_result(guard_result, |g| (g, true))
+ poison::map_result(guard_result, |g| (g, WaitTimeoutResult(false)))
}
/// Wakes up one blocked thread on this condvar.
static S: AtomicUsize = AtomicUsize::new(0);
let g = M.lock().unwrap();
- let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
+ let (g, timed_out) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
false
}).unwrap();
- assert!(!success);
+ assert!(timed_out.timed_out());
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let mut state = 0;
let day = 24 * 60 * 60;
- let (_g, success) = C.wait_timeout_with(g, Duration::new(day, 0), |_| {
+ let (_g, timed_out) = C.wait_timeout_with(g, Duration::new(day, 0), |_| {
assert_eq!(state, S.load(Ordering::SeqCst));
tx.send(()).unwrap();
state += 1;
_ => true,
}
}).unwrap();
- assert!(success);
+ assert!(!timed_out.timed_out());
}
#[test]
#![stable(feature = "rust1", since = "1.0.0")]
pub use alloc::arc::{Arc, Weak};
-pub use core::atomic;
+pub use core::sync::atomic;
pub use self::barrier::{Barrier, BarrierWaitResult};
-pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
+pub use self::condvar::{Condvar, StaticCondvar, WaitTimeoutResult, CONDVAR_INIT};
pub use self::mutex::MUTEX_INIT;
pub use self::mutex::{Mutex, MutexGuard, StaticMutex};
pub use self::once::{Once, ONCE_INIT};
#[stable(feature = "rust1", since = "1.0.0")]
pub mod prelude {
#[doc(no_inline)]
- pub use super::io::{RawFd, AsRawFd, FromRawFd};
+ pub use super::io::{RawFd, AsRawFd, FromRawFd, IntoRawFd};
#[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
#[doc(no_inline)]
pub use super::io::{RawSocket, RawHandle, AsRawSocket, AsRawHandle};
#[doc(no_inline)]
- pub use super::io::{FromRawSocket, FromRawHandle};
+ pub use super::io::{FromRawSocket, FromRawHandle, IntoRawSocket, IntoRawHandle};
#[doc(no_inline)] #[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
UnsuffixedIntLit(Sign)
}
-impl LitIntType {
- pub fn suffix_len(&self) -> usize {
- match *self {
- UnsuffixedIntLit(_) => 0,
- SignedIntLit(s, _) => s.suffix_len(),
- UnsignedIntLit(u) => u.suffix_len()
- }
- }
-}
-
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum Lit_ {
/// A string literal (`"foo"`)
}
impl IntTy {
- pub fn suffix_len(&self) -> usize {
- match *self {
- TyIs | TyI8 => 2,
- TyI16 | TyI32 | TyI64 => 3,
- }
- }
pub fn bit_width(&self) -> Option<usize> {
Some(match *self {
TyIs => return None,
}
impl UintTy {
- pub fn suffix_len(&self) -> usize {
- match *self {
- TyUs | TyU8 => 2,
- TyU16 | TyU32 | TyU64 => 3,
- }
- }
pub fn bit_width(&self) -> Option<usize> {
Some(match *self {
TyUs => return None,
}
impl FloatTy {
- pub fn suffix_len(&self) -> usize {
- match *self {
- TyF32 | TyF64 => 3, // add F128 handling here
- }
- }
pub fn bit_width(&self) -> usize {
match *self {
TyF32 => 32,
self.pclose()
}
+ pub fn check_expr_bin_needs_paren(&mut self, sub_expr: &ast::Expr,
+ binop: ast::BinOp) -> bool {
+ match sub_expr.node {
+ ast::ExprBinary(ref sub_op, _, _) => {
+ if ast_util::operator_prec(sub_op.node) <
+ ast_util::operator_prec(binop.node) {
+ true
+ } else {
+ false
+ }
+ }
+ _ => true
+ }
+ }
+
pub fn print_expr_maybe_paren(&mut self, expr: &ast::Expr) -> io::Result<()> {
let needs_par = needs_parentheses(expr);
if needs_par {
op: ast::BinOp,
lhs: &ast::Expr,
rhs: &ast::Expr) -> io::Result<()> {
- try!(self.print_expr(lhs));
+ if self.check_expr_bin_needs_paren(lhs, op) {
+ try!(self.print_expr_maybe_paren(lhs));
+ } else {
+ try!(self.print_expr(lhs));
+ }
try!(space(&mut self.s));
try!(self.word_space(ast_util::binop_to_string(op.node)));
- self.print_expr(rhs)
+ if self.check_expr_bin_needs_paren(rhs, op) {
+ self.print_expr_maybe_paren(rhs)
+ } else {
+ self.print_expr(rhs)
+ }
}
fn print_expr_unary(&mut self,
try!(self.print_literal(&**lit));
}
ast::ExprCast(ref expr, ref ty) => {
- try!(self.print_expr(&**expr));
+ if let ast::ExprCast(..) = expr.node {
+ try!(self.print_expr(&**expr));
+ } else {
+ try!(self.print_expr_maybe_paren(&**expr));
+ }
try!(space(&mut self.s));
try!(self.word_space("as"));
try!(self.print_type(&**ty));
fn assert_clone<T>() where T : Clone { }
fn main() {
- assert_clone::<foo::core::atomic::AtomicBool>();
+ assert_clone::<foo::core::sync::atomic::AtomicBool>();
//~^ ERROR the trait `foo::core::clone::Clone` is not implemented for the type `foo::core::
-}
\ No newline at end of file
+}
fn assert_clone<T>() where T : Clone { }
fn main() {
- assert_clone::<bar::atomic::AtomicBool>();
- //~^ ERROR the trait `bar::clone::Clone` is not implemented for the type `bar::atomic::
-}
\ No newline at end of file
+ assert_clone::<bar::sync::atomic::AtomicBool>();
+ //~^ ERROR the trait `bar::clone::Clone` is not implemented for the type `bar::sync::atomic::
+}
fn assert_clone<T>() where T : Clone { }
fn main() {
- assert_clone::<foo::core::atomic::AtomicBool>();
- //~^ ERROR the trait `core::clone::Clone` is not implemented for the type `core::atomic::
-}
\ No newline at end of file
+ assert_clone::<foo::core::sync::atomic::AtomicBool>();
+ //~^ ERROR the trait `core::clone::Clone` is not implemented for the type `core::sync::atomic::
+}
let b: Rc<Baz> = a.clone();
assert_eq!(b.get(), 42);
- let c: Weak<i32> = a.downgrade();
+ let c: Weak<i32> = Rc::downgrade(&a);
let d: Weak<Baz> = c.clone();
let _c = b.clone();
let a: Rc<RefCell<i32>> = Rc::new(RefCell::new(42));
let b: Rc<RefCell<Baz>> = a.clone();
assert_eq!(b.borrow().get(), 42);
- let c: Weak<RefCell<Baz>> = a.downgrade();
+ // FIXME
+ let c: Weak<RefCell<Baz>> = Rc::downgrade(&a) as Weak<_>;
}
pub fn main() {
unsafe {
thread::spawn(move|| {
- let i = 100;
- rust_dbg_call(callback, mem::transmute(&i));
- }).join();
+ let i: isize = 100;
+ rust_dbg_call(callback_isize, mem::transmute(&i));
+ }).join().unwrap();
+
+ thread::spawn(move|| {
+ let i: i32 = 100;
+ rust_dbg_call(callback_i32, mem::transmute(&i));
+ }).join().unwrap();
+
+ thread::spawn(move|| {
+ let i: i64 = 100;
+ rust_dbg_call(callback_i64, mem::transmute(&i));
+ }).join().unwrap();
}
}
-extern fn callback(data: libc::uintptr_t) {
+extern fn callback_isize(data: libc::uintptr_t) {
unsafe {
let data: *const isize = mem::transmute(data);
assert_eq!(*data, 100);
}
}
+
+extern fn callback_i64(data: libc::uintptr_t) {
+ unsafe {
+ let data: *const i64 = mem::transmute(data);
+ assert_eq!(*data, 100);
+ }
+}
+
+extern fn callback_i32(data: libc::uintptr_t) {
+ unsafe {
+ let data: *const i32 = mem::transmute(data);
+ assert_eq!(*data, 100);
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+#![deny(dead_code)]
+
+// use different types / traits to test all combinations
+
+trait Const {
+ const C: ();
+}
+
+trait StaticFn {
+ fn sfn();
+}
+
+struct ConstStruct;
+struct StaticFnStruct;
+
+enum ConstEnum {}
+enum StaticFnEnum {}
+
+struct AliasedConstStruct;
+struct AliasedStaticFnStruct;
+
+enum AliasedConstEnum {}
+enum AliasedStaticFnEnum {}
+
+type AliasConstStruct = AliasedConstStruct;
+type AliasStaticFnStruct = AliasedStaticFnStruct;
+type AliasConstEnum = AliasedConstEnum;
+type AliasStaticFnEnum = AliasedStaticFnEnum;
+
+macro_rules! impl_Const {($($T:ident),*) => {$(
+ impl Const for $T {
+ const C: () = ();
+ }
+)*}}
+
+macro_rules! impl_StaticFn {($($T:ident),*) => {$(
+ impl StaticFn for $T {
+ fn sfn() {}
+ }
+)*}}
+
+impl_Const!(ConstStruct, ConstEnum, AliasedConstStruct, AliasedConstEnum);
+impl_StaticFn!(StaticFnStruct, StaticFnEnum, AliasedStaticFnStruct, AliasedStaticFnEnum);
+
+fn main() {
+ let _ = ConstStruct::C;
+ let _ = ConstEnum::C;
+
+ StaticFnStruct::sfn();
+ StaticFnEnum::sfn();
+
+ let _ = AliasConstStruct::C;
+ let _ = AliasConstEnum::C;
+
+ AliasStaticFnStruct::sfn();
+ AliasStaticFnEnum::sfn();
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for issue #27583. Unclear how useful this will be
+// going forward, since the issue in question was EXTREMELY sensitive
+// to compiler internals (like the precise numbering of nodes), but
+// what the hey.
+
+#![allow(warnings)]
+
+use std::cell::Cell;
+use std::marker::PhantomData;
+
+pub trait Delegate<'tcx> { }
+
+pub struct InferCtxt<'a, 'tcx: 'a> {
+ x: PhantomData<&'a Cell<&'tcx ()>>
+}
+
+pub struct MemCategorizationContext<'t, 'a: 't, 'tcx : 'a> {
+ x: &'t InferCtxt<'a, 'tcx>,
+}
+
+pub struct ExprUseVisitor<'d, 't, 'a: 't, 'tcx:'a+'d> {
+ typer: &'t InferCtxt<'a, 'tcx>,
+ mc: MemCategorizationContext<'t, 'a, 'tcx>,
+ delegate: &'d mut (Delegate<'tcx>+'d),
+}
+
+impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
+ pub fn new(delegate: &'d mut Delegate<'tcx>,
+ typer: &'t InferCtxt<'a, 'tcx>)
+ -> ExprUseVisitor<'d,'t,'a,'tcx>
+ {
+ ExprUseVisitor {
+ typer: typer,
+ mc: MemCategorizationContext::new(typer),
+ delegate: delegate,
+ }
+ }
+}
+
+impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> {
+ pub fn new(typer: &'t InferCtxt<'a, 'tcx>) -> MemCategorizationContext<'t, 'a, 'tcx> {
+ MemCategorizationContext { x: typer }
+ }
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we recognize that if you have
+//
+// 'a : 'static
+//
+// then
+//
+// 'a : 'b
+
+fn test<'a,'b>(x: &'a i32) -> &'b i32
+ where 'a: 'static
+{
+ x
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(staged_api)]
+#![staged_api]
+#![doc(issue_tracker_base_url = "http://issue_url/")]
+
+// @has issue_27759/unstable/index.html
+// @has - '<code>test</code>'
+// @has - '<a href="http://issue_url/27759">#27759</a>'
+#[unstable(feature="test", issue="27759")]
+pub mod unstable {
+ // @has issue_27759/unstable/fn.issue.html
+ // @has - '<code>test_function</code>'
+ // @has - '<a href="http://issue_url/1234567890">#1234567890</a>'
+ #[unstable(feature="test_function", issue="1234567890")]
+ pub fn issue() {}
+}