CFG_STATIC_LIB_NAME_x86_64-unknown-dragonfly=lib$(1).a
CFG_LIB_GLOB_x86_64-unknown-dragonfly=lib$(1)-*.so
CFG_LIB_DSYM_GLOB_x86_64-unknown-dragonfly=$(1)-*.dylib.dSYM
-CFG_JEMALLOC_CFLAGS_x86_64-unknown-dragonfly := -I/usr/include -I/usr/local/include $(CFLAGS)
-CFG_GCCISH_CFLAGS_x86_64-unknown-dragonfly := -Wall -Werror -g -fPIC -I/usr/include -I/usr/local/include $(CFLAGS)
-CFG_GCCISH_LINK_FLAGS_x86_64-unknown-dragonfly := -shared -fPIC -g -pthread -lrt
+CFG_JEMALLOC_CFLAGS_x86_64-unknown-dragonfly := -m64 -I/usr/include -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_CFLAGS_x86_64-unknown-dragonfly := -Wall -Werror -g -fPIC -m64 -I/usr/include -I/usr/local/include $(CFLAGS)
+CFG_GCCISH_LINK_FLAGS_x86_64-unknown-dragonfly := -shared -fPIC -g -pthread -lrt -m64
CFG_GCCISH_DEF_FLAG_x86_64-unknown-dragonfly := -Wl,--export-dynamic,--dynamic-list=
CFG_GCCISH_PRE_LIB_FLAGS_x86_64-unknown-dragonfly := -Wl,-whole-archive
CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-dragonfly := -Wl,-no-whole-archive
$(filter-out rustc_borrowck, \
$(filter-out rustc_resolve, \
$(filter-out rustc_driver, \
- $(filter-out syntax, $(CRATES))))))))
+ $(filter-out log, \
+ $(filter-out regex, \
+ $(filter-out regex_macros, \
+ $(filter-out getopts, \
+ $(filter-out time, \
+ $(filter-out syntax, $(CRATES)))))))))))))
COMPILER_DOC_CRATES := rustc rustc_trans rustc_borrowck rustc_resolve \
rustc_typeck rustc_driver syntax
$$(RSINPUTS_$(1)) \
$$(RUSTDOC_EXE) \
$$(foreach dep,$$(RUST_DEPS_$(1)), \
- $$(TLIB2_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.$$(dep) \
+ $$(TLIB2_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.$$(dep)) \
+ $$(foreach dep,$$(filter $$(DOC_CRATES), $$(RUST_DEPS_$(1))), \
doc/$$(dep)/)
else
LIB_DOC_DEP_$(1) = $$(CRATEFILE_$(1)) $$(RSINPUTS_$(1))
AR="$$(AR_$(1))" \
RANLIB="$$(AR_$(1)) s" \
CPPFLAGS="-I $(S)src/rt/" \
- EXTRA_CFLAGS="-g1"
+ EXTRA_CFLAGS="-g1 -ffunction-sections -fdata-sections"
$$(Q)$$(MAKE) -C "$$(JEMALLOC_BUILD_DIR_$(1))" build_lib_static
ifeq ($$(CFG_DISABLE_JEMALLOC),)
re: &Regex) -> Option<(WhichLine, ExpectedError)> {
re.captures(line).and_then(|caps| {
let adjusts = caps.name("adjusts").unwrap_or("").len();
- let kind = caps.name("kind").unwrap_or("").to_ascii_lower();
+ let kind = caps.name("kind").unwrap_or("").to_ascii_lowercase();
let msg = caps.name("msg").unwrap_or("").trim().to_string();
let follow = caps.name("follow").unwrap_or("").len() > 0;
#[cfg(target_os = "windows")]
use util;
+#[cfg(target_os = "windows")]
+use std::ascii::AsciiExt;
use std::io::File;
use std::io::fs::PathExtensions;
use std::io::fs;
format!("{}:{}:", testfile.display(), ee.line)
}).collect::<Vec<String> >();
- #[cfg(target_os = "windows")]
- fn to_lower( s : &str ) -> String {
- let i = s.chars();
- let c : Vec<char> = i.map( |c| {
- if c.is_ascii() {
- c.to_ascii().to_lowercase().as_char()
- } else {
- c
- }
- } ).collect();
- String::from_chars(c.as_slice())
- }
-
#[cfg(windows)]
fn prefix_matches( line : &str, prefix : &str ) -> bool {
- to_lower(line).as_slice().starts_with(to_lower(prefix).as_slice())
+ line.to_ascii_lowercase().starts_with(prefix.to_ascii_lowercase().as_slice())
}
#[cfg(unix)]
let mut m = [1i, 2i, 3i]; // mut m: [int, ..3]
```
-You can create an array with a given number of elements, all initialized to the
-same value, with `[val, ..N]` syntax. The compiler ensures that arrays are
-always initialized.
-
There's a shorthand for initializing each element of an array to the same
value. In this example, each element of `a` will be initialized to `0i`:
Cargo gets this information from your environment. If it's not correct, go ahead
and fix that.
-Finally, Cargo generated a hello, world for us. Check out `src/main.rs`:
+Finally, Cargo generated a "Hello, world!" for us. Check out `src/main.rs`:
```{rust}
fn main() {
for i in range(0u, 3u) {
let number = numbers.clone();
Thread::spawn(move || {
- let mut array = number.lock();
+ let mut array = number.lock().unwrap();
(*array)[i] += 1;
* `stringify!` : pretty-print the Rust expression given as an argument
* `include!` : include the Rust expression in the given file
* `include_str!` : include the contents of the given file as a string
-* `include_bin!` : include the contents of the given file as a binary blob
+* `include_bytes!` : include the contents of the given file as a binary blob
* `error!`, `warn!`, `info!`, `debug!` : provide diagnostic information.
All of the above extensions are expressions with values.
done
}
+# Fallback used when mktemp(1) is unavailable: (re)creates a fixed-name
+# temporary install directory and echoes its path on stdout.
+create_tmp_dir() {
+    local TMP_DIR=./rustup-tmp-install
+
+    rm -Rf "${TMP_DIR}"
+    need_ok "failed to remove temporary installation directory"
+
+    mkdir -p "${TMP_DIR}"
+    need_ok "failed to create temporary installation directory"
+
+    echo $TMP_DIR
+}
+
probe_need CFG_CURL curl
probe_need CFG_TAR tar
probe_need CFG_FILE file
CFG_INSTALL_FLAGS="${CFG_INSTALL_FLAGS} --prefix=${CFG_PREFIX}"
fi
-CFG_TMP_DIR="./rustup-tmp-install"
+CFG_TMP_DIR=$(mktemp -d 2>/dev/null \
+ || mktemp -d -t 'rustup-tmp-install' 2>/dev/null \
+ || create_tmp_dir)
RUST_URL="https://static.rust-lang.org/dist"
RUST_PACKAGE_NAME=rust-nightly
msg "Downloading ${remote_tarball} to ${local_tarball}"
- mkdir -p "${CFG_TMP_DIR}"
- need_ok "failed to create create download directory"
-
"${CFG_CURL}" -f -o "${local_tarball}" "${remote_tarball}"
if [ $? -ne 0 ]
then
"macos": ["bin/rustc"],
"winnt": ["bin/rustc.exe"],
"freebsd": ["bin/rustc"],
+ "dragonfly": ["bin/rustc"],
}
winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll",
return "macos"
if os_name == "freebsd":
return "freebsd"
+ if os_name == "dragonfly":
+ return "dragonfly"
return "linux"
def get_cpu(triple):
//! let five = five.clone();
//!
//! Thread::spawn(move || {
-//! let mut number = five.lock();
+//! let mut number = five.lock().unwrap();
//!
//! *number += 1;
//!
use core::kinds::{Sync, Send};
use core::mem::{min_align_of, size_of, drop};
use core::mem;
+use core::nonzero::NonZero;
use core::ops::{Drop, Deref};
use core::option::Option;
use core::option::Option::{Some, None};
-use core::ptr::RawPtr;
-use core::ptr;
+use core::ptr::{mod, PtrExt};
use heap::deallocate;
/// An atomically reference counted wrapper for shared state.
pub struct Arc<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut ArcInner<T>,
+ _ptr: NonZero<*mut ArcInner<T>>,
}
+unsafe impl<T: Sync + Send> Send for Arc<T> { }
+unsafe impl<T: Sync + Send> Sync for Arc<T> { }
+
+
/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles
pub struct Weak<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut ArcInner<T>,
+ _ptr: NonZero<*mut ArcInner<T>>,
}
+unsafe impl<T: Sync + Send> Send for Weak<T> { }
+unsafe impl<T: Sync + Send> Sync for Weak<T> { }
+
struct ArcInner<T> {
strong: atomic::AtomicUint,
weak: atomic::AtomicUint,
data: T,
}
-impl<T: Sync + Send> Arc<T> {
+unsafe impl<T: Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: Sync + Send> Sync for ArcInner<T> {}
+
+impl<T> Arc<T> {
/// Constructs a new `Arc<T>`.
///
/// # Examples
weak: atomic::AtomicUint::new(1),
data: data,
};
- Arc { _ptr: unsafe { mem::transmute(x) } }
+ Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } }
}
/// Downgrades the `Arc<T>` to a `Weak<T>` reference.
// pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync`
// because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer
// to these contents.
- unsafe { &*self._ptr }
+ unsafe { &**self._ptr }
}
}
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the Arc itself to be `mut`, so we're returning the only
// possible reference to the inner data.
- let inner = unsafe { &mut *self._ptr };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.data
}
}
fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
// it is guaranteed to be zeroed after the first if it's run more than once)
- if self._ptr.is_null() { return }
+ let ptr = *self._ptr;
+ if ptr.is_null() { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
// unless we are going to delete the object. This same logic applies to the below
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
atomic::fence(atomic::Acquire);
- unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
+ unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
#[inline]
fn inner(&self) -> &ArcInner<T> {
// See comments above for why this is "safe"
- unsafe { &*self._ptr }
+ unsafe { &**self._ptr }
}
}
/// } // implicit drop
/// ```
fn drop(&mut self) {
+ let ptr = *self._ptr;
+
// see comments above for why this check is here
- if self._ptr.is_null() { return }
+ if ptr.is_null() { return }
// If we find out that we were the last weak pointer, then its time to deallocate the data
// entirely. See the discussion in Arc::drop() about the memory orderings
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
atomic::fence(atomic::Acquire);
- unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
+ unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
use std::str::Str;
use std::sync::atomic;
use std::task;
+ use std::kinds::Send;
use std::vec::Vec;
use super::{Arc, Weak, weak_count, strong_count};
use std::sync::Mutex;
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = a.clone().downgrade();
- *a.x.lock() = Some(b);
+ *a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
}
use core::kinds::Sized;
use core::mem;
use core::option::Option;
+use core::ptr::Unique;
use core::raw::TraitObject;
use core::result::Result;
use core::result::Result::{Ok, Err};
/// A type that represents a uniquely-owned value.
#[lang = "owned_box"]
#[unstable = "custom allocators will add an additional type parameter (with default)"]
-pub struct Box<T>(*mut T);
+pub struct Box<T>(Unique<T>);
#[stable]
impl<T: Default> Default for Box<T> {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::ptr::RawPtr;
+use core::ptr::PtrExt;
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
mod test {
extern crate test;
use self::test::Bencher;
- use core::ptr::RawPtr;
+ use core::ptr::PtrExt;
use heap;
#[test]
use core::hash::{mod, Hash};
use core::kinds::marker;
use core::mem::{transmute, min_align_of, size_of, forget};
+use core::nonzero::NonZero;
use core::ops::{Deref, Drop};
use core::option::Option;
use core::option::Option::{Some, None};
-use core::ptr;
-use core::ptr::RawPtr;
+use core::ptr::{mod, PtrExt};
use core::result::Result;
use core::result::Result::{Ok, Err};
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with field accesses of the contained
// type via Deref
- _ptr: *mut RcBox<T>,
+ _ptr: NonZero<*mut RcBox<T>>,
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
// there is an implicit weak pointer owned by all the strong pointers, which
// ensures that the weak destructor never frees the allocation while the strong
// destructor is running, even if the weak pointer is stored inside the strong one.
- _ptr: transmute(box RcBox {
+ _ptr: NonZero::new(transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
- }),
+ })),
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
let val = ptr::read(&*rc); // copy the contained object
// destruct the box and skip our Drop
// we can ignore the refcounts because we know we're unique
- deallocate(rc._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>());
forget(rc);
Ok(val)
#[experimental]
pub fn get_mut<'a, T>(rc: &'a mut Rc<T>) -> Option<&'a mut T> {
if is_unique(rc) {
- let inner = unsafe { &mut *rc._ptr };
+ let inner = unsafe { &mut **rc._ptr };
Some(&mut inner.value)
} else {
None
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only
// possible reference to the inner value.
- let inner = unsafe { &mut *self._ptr };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.value
}
}
/// ```
fn drop(&mut self) {
unsafe {
- if !self._ptr.is_null() {
+ let ptr = *self._ptr;
+ if !ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(&**self); // destroy the contained object
self.dec_weak();
if self.weak() == 0 {
- deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut RcBox<T>,
+ _ptr: NonZero<*mut RcBox<T>>,
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
/// ```
fn drop(&mut self) {
unsafe {
- if !self._ptr.is_null() {
+ let ptr = *self._ptr;
+ if !ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all the strong pointers
// have disappeared.
if self.weak() == 0 {
- deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> { unsafe { &(*self._ptr) } }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> { unsafe { &(*self._ptr) } }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
#[cfg(test)]
//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have `O(log n)` time complexity. Checking the largest
-//! element is `O(1)`. Converting a vector to a priority queue can be done in-place, and has `O(n)`
-//! complexity. A priority queue can also be converted to a sorted vector in-place, allowing it to
+//! element is `O(1)`. Converting a vector to a binary heap can be done in-place, and has `O(n)`
+//! complexity. A binary heap can also be converted to a sorted vector in-place, allowing it to
//! be used for an `O(n log n)` in-place heapsort.
//!
//! # Examples
//!
-//! This is a larger example which implements [Dijkstra's algorithm][dijkstra]
+//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
-//! It showcases how to use the `BinaryHeap` with custom types.
+//! It shows how to use `BinaryHeap` with custom types.
//!
//! [dijkstra]: http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: http://en.wikipedia.org/wiki/Shortest_path_problem
//! #[deriving(Copy, Eq, PartialEq)]
//! struct State {
//! cost: uint,
-//! position: uint
+//! position: uint,
//! }
//!
//! // The priority queue depends on `Ord`.
//! // Each node is represented as an `uint`, for a shorter implementation.
//! struct Edge {
//! node: uint,
-//! cost: uint
+//! cost: uint,
//! }
//!
//! // Dijkstra's shortest path algorithm.
//!
//! // Start at `start` and use `dist` to track the current shortest distance
-//! // to each node. This implementation isn't memory efficient as it may leave duplicate
+//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
//! // nodes in the queue. It also uses `uint::MAX` as a sentinel value,
//! // for a simpler implementation.
//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: uint, goal: uint) -> uint {
//! let mut heap = BinaryHeap::new();
//!
//! // We're at `start`, with a zero cost
-//! dist[start] = 0u;
-//! heap.push(State { cost: 0u, position: start });
+//! dist[start] = 0;
+//! heap.push(State { cost: 0, position: start });
//!
//! // Examine the frontier with lower cost nodes first (min-heap)
-//! loop {
-//! let State { cost, position } = match heap.pop() {
-//! None => break, // empty
-//! Some(s) => s
-//! };
-//!
+//! while let Some(State { cost, position }) = heap.pop() {
//! // Alternatively we could have continued to find all shortest paths
-//! if position == goal { return cost }
+//! if position == goal { return cost; }
//!
//! // Important as we may have already found a better way
-//! if cost > dist[position] { continue }
+//! if cost > dist[position] { continue; }
//!
//! // For each node we can reach, see if we can find a way with
//! // a lower cost going through this node
//! fn main() {
//! // This is the directed graph we're going to use.
//! // The node numbers correspond to the different states,
-//! // and the edge weights symbolises the cost of moving
+//! // and the edge weights symbolize the cost of moving
//! // from one node to another.
//! // Note that the edges are one-way.
//! //
//! //
//! // The graph is represented as an adjacency list where each index,
//! // corresponding to a node value, has a list of outgoing edges.
-//! // Chosen for it's efficiency.
+//! // Chosen for its efficiency.
//! let graph = vec![
//! // Node 0
//! vec![Edge { node: 2, cost: 10 },
///
/// ```
/// use std::collections::BinaryHeap;
- /// let heap: BinaryHeap<uint> = BinaryHeap::new();
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(4u);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
- pub fn new() -> BinaryHeap<T> { BinaryHeap{data: vec!(),} }
+ pub fn new() -> BinaryHeap<T> { BinaryHeap { data: vec![] } }
/// Creates an empty `BinaryHeap` with a specific capacity.
/// This preallocates enough memory for `capacity` elements,
///
/// ```
/// use std::collections::BinaryHeap;
- /// let heap: BinaryHeap<uint> = BinaryHeap::with_capacity(10u);
+ /// let mut heap = BinaryHeap::with_capacity(10);
+ /// heap.push(4u);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn with_capacity(capacity: uint) -> BinaryHeap<T> {
/// use std::collections::BinaryHeap;
/// let heap = BinaryHeap::from_vec(vec![9i, 1, 2, 7, 3, 2]);
/// ```
- pub fn from_vec(xs: Vec<T>) -> BinaryHeap<T> {
- let mut q = BinaryHeap{data: xs,};
- let mut n = q.len() / 2;
+ pub fn from_vec(vec: Vec<T>) -> BinaryHeap<T> {
+ let mut heap = BinaryHeap { data: vec };
+ let mut n = heap.len() / 2;
while n > 0 {
n -= 1;
- q.siftdown(n)
+ heap.sift_down(n);
}
- q
+ heap
}
- /// An iterator visiting all values in underlying vector, in
+ /// Returns an iterator visiting all values in the underlying vector, in
/// arbitrary order.
///
/// # Examples
}
/// Creates a consuming iterator, that is, one that moves each value out of
- /// the binary heap in arbitrary order. The binary heap cannot be used
+ /// the binary heap in arbitrary order. The binary heap cannot be used
/// after calling this.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- /// let pq = BinaryHeap::from_vec(vec![1i, 2, 3, 4]);
+ /// let heap = BinaryHeap::from_vec(vec![1i, 2, 3, 4]);
///
/// // Print 1, 2, 3, 4 in arbitrary order
- /// for x in pq.into_iter() {
+ /// for x in heap.into_iter() {
/// // x has type int, not &int
/// println!("{}", x);
/// }
IntoIter { iter: self.data.into_iter() }
}
- /// Returns the greatest item in a queue, or `None` if it is empty.
+ /// Returns the greatest item in the binary heap, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let mut heap = BinaryHeap::new();
/// assert_eq!(heap.peek(), None);
///
/// heap.push(1i);
- /// heap.push(5i);
- /// heap.push(2i);
- /// assert_eq!(heap.peek(), Some(&5i));
+ /// heap.push(5);
+ /// heap.push(2);
+ /// assert_eq!(heap.peek(), Some(&5));
///
/// ```
#[stable]
self.data.get(0)
}
- /// Returns the number of elements the queue can hold without reallocating.
+ /// Returns the number of elements the binary heap can hold without reallocating.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
- /// let heap: BinaryHeap<uint> = BinaryHeap::with_capacity(100u);
- /// assert!(heap.capacity() >= 100u);
+ /// let mut heap = BinaryHeap::with_capacity(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4u);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn capacity(&self) -> uint { self.data.capacity() }
///
/// ```
/// use std::collections::BinaryHeap;
- ///
- /// let mut heap: BinaryHeap<uint> = BinaryHeap::new();
- /// heap.reserve_exact(100u);
- /// assert!(heap.capacity() >= 100u);
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve_exact(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4u);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
- pub fn reserve_exact(&mut self, additional: uint) { self.data.reserve_exact(additional) }
+ pub fn reserve_exact(&mut self, additional: uint) {
+ self.data.reserve_exact(additional);
+ }
/// Reserves capacity for at least `additional` more elements to be inserted in the
/// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
///
/// ```
/// use std::collections::BinaryHeap;
- ///
- /// let mut heap: BinaryHeap<uint> = BinaryHeap::new();
- /// heap.reserve(100u);
- /// assert!(heap.capacity() >= 100u);
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4u);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn reserve(&mut self, additional: uint) {
- self.data.reserve(additional)
+ self.data.reserve(additional);
}
/// Discards as much additional capacity as possible.
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn shrink_to_fit(&mut self) {
- self.data.shrink_to_fit()
+ self.data.shrink_to_fit();
}
- /// Removes the greatest item from a queue and returns it, or `None` if it
+ /// Removes the greatest item from the binary heap and returns it, or `None` if it
/// is empty.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let mut heap = BinaryHeap::from_vec(vec![1i, 3]);
///
- /// assert_eq!(heap.pop(), Some(3i));
- /// assert_eq!(heap.pop(), Some(1i));
+ /// assert_eq!(heap.pop(), Some(3));
+ /// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn pop(&mut self) -> Option<T> {
- match self.data.pop() {
- None => { None }
- Some(mut item) => {
- if !self.is_empty() {
- swap(&mut item, &mut self.data[0]);
- self.siftdown(0);
- }
- Some(item)
+ self.data.pop().map(|mut item| {
+ if !self.is_empty() {
+ swap(&mut item, &mut self.data[0]);
+ self.sift_down(0);
}
- }
+ item
+ })
}
- /// Pushes an item onto the queue.
+ /// Pushes an item onto the binary heap.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let mut heap = BinaryHeap::new();
/// heap.push(3i);
- /// heap.push(5i);
- /// heap.push(1i);
+ /// heap.push(5);
+ /// heap.push(1);
///
/// assert_eq!(heap.len(), 3);
- /// assert_eq!(heap.peek(), Some(&5i));
+ /// assert_eq!(heap.peek(), Some(&5));
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn push(&mut self, item: T) {
let old_len = self.len();
self.data.push(item);
- self.siftup(0, old_len);
+ self.sift_up(0, old_len);
}
- /// Pushes an item onto a queue then pops the greatest item off the queue in
+ /// Pushes an item onto the binary heap, then pops the greatest item off the binary heap in
/// an optimized fashion.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let mut heap = BinaryHeap::new();
/// heap.push(1i);
- /// heap.push(5i);
+ /// heap.push(5);
///
- /// assert_eq!(heap.push_pop(3i), 5);
- /// assert_eq!(heap.push_pop(9i), 9);
+ /// assert_eq!(heap.push_pop(3), 5);
+ /// assert_eq!(heap.push_pop(9), 9);
/// assert_eq!(heap.len(), 2);
- /// assert_eq!(heap.peek(), Some(&3i));
+ /// assert_eq!(heap.peek(), Some(&3));
/// ```
pub fn push_pop(&mut self, mut item: T) -> T {
match self.data.get_mut(0) {
},
}
- self.siftdown(0);
+ self.sift_down(0);
item
}
- /// Pops the greatest item off a queue then pushes an item onto the queue in
- /// an optimized fashion. The push is done regardless of whether the queue
+ /// Pops the greatest item off the binary heap, then pushes an item onto the binary heap in
+ /// an optimized fashion. The push is done regardless of whether the binary heap
/// was empty.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let mut heap = BinaryHeap::new();
///
/// assert_eq!(heap.replace(1i), None);
- /// assert_eq!(heap.replace(3i), Some(1i));
+ /// assert_eq!(heap.replace(3), Some(1));
/// assert_eq!(heap.len(), 1);
- /// assert_eq!(heap.peek(), Some(&3i));
+ /// assert_eq!(heap.peek(), Some(&3));
/// ```
pub fn replace(&mut self, mut item: T) -> Option<T> {
if !self.is_empty() {
swap(&mut item, &mut self.data[0]);
- self.siftdown(0);
+ self.sift_down(0);
Some(item)
} else {
self.push(item);
///
/// ```
/// use std::collections::BinaryHeap;
- ///
/// let heap = BinaryHeap::from_vec(vec![1i, 2, 3, 4, 5, 6, 7]);
/// let vec = heap.into_vec();
///
while end > 1 {
end -= 1;
self.data.swap(0, end);
- self.siftdown_range(0, end)
+ self.sift_down_range(0, end);
}
self.into_vec()
}
- // The implementations of siftup and siftdown use unsafe blocks in
+ // The implementations of sift_up and sift_down use unsafe blocks in
// order to move an element out of the vector (leaving behind a
// zeroed element), shift along the others and move it back into the
- // vector over the junk element. This reduces the constant factor
+ // vector over the junk element. This reduces the constant factor
// compared to using swaps, which involves twice as many moves.
- fn siftup(&mut self, start: uint, mut pos: uint) {
+ fn sift_up(&mut self, start: uint, mut pos: uint) {
unsafe {
let new = replace(&mut self.data[pos], zeroed());
while pos > start {
let parent = (pos - 1) >> 1;
- if new > self.data[parent] {
- let x = replace(&mut self.data[parent], zeroed());
- ptr::write(&mut self.data[pos], x);
- pos = parent;
- continue
- }
- break
+
+ if new <= self.data[parent] { break; }
+
+ let x = replace(&mut self.data[parent], zeroed());
+ ptr::write(&mut self.data[pos], x);
+ pos = parent;
}
ptr::write(&mut self.data[pos], new);
}
}
- fn siftdown_range(&mut self, mut pos: uint, end: uint) {
+ fn sift_down_range(&mut self, mut pos: uint, end: uint) {
unsafe {
let start = pos;
let new = replace(&mut self.data[pos], zeroed());
}
ptr::write(&mut self.data[pos], new);
- self.siftup(start, pos);
+ self.sift_up(start, pos);
}
}
- fn siftdown(&mut self, pos: uint) {
+ fn sift_down(&mut self, pos: uint) {
let len = self.len();
- self.siftdown_range(pos, len);
+ self.sift_down_range(pos, len);
}
- /// Returns the length of the queue.
+ /// Returns the length of the binary heap.
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn len(&self) -> uint { self.data.len() }
- /// Returns true if the queue contains no elements
+ /// Checks if the binary heap is empty.
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn is_empty(&self) -> bool { self.len() == 0 }
- /// Clears the queue, returning an iterator over the removed elements.
+ /// Clears the binary heap, returning an iterator over the removed elements.
#[inline]
#[unstable = "matches collection reform specification, waiting for dust to settle"]
- pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
- Drain {
- iter: self.data.drain(),
- }
+ pub fn drain(&mut self) -> Drain<T> {
+ Drain { iter: self.data.drain() }
}
- /// Drops all items from the queue.
+ /// Drops all items from the binary heap.
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn clear(&mut self) { self.drain(); }
}
}
impl<T> Copy for Rawlink<T> {}
+unsafe impl<T:'static+Send> Send for Rawlink<T> {}
+unsafe impl<T:Send+Sync> Sync for Rawlink<T> {}
struct Node<T> {
next: Link<T>,
/// Convert the `Rawlink` into an Option value
fn resolve_immut<'a>(&self) -> Option<&'a T> {
unsafe {
- self.p.as_ref()
+ mem::transmute(self.p.as_ref())
}
}
impl<'a, A> ExactSizeIterator<&'a mut A> for IterMut<'a, A> {}
/// Allows mutating a `DList` while iterating.
+#[deprecated = "Trait is deprecated, use inherent methods on the iterator instead"]
pub trait ListInsertion<A> {
/// Inserts `elt` just after to the element most recently returned by
/// `.next()`
}
}
-impl<'a, A> ListInsertion<A> for IterMut<'a, A> {
+impl<'a, A> IterMut<'a, A> {
+ /// Inserts `elt` just after the element most recently returned by `.next()`.
+ /// The inserted element does not appear in the iteration.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::collections::DList;
+ ///
+ /// let mut list: DList<int> = vec![1, 3, 4].into_iter().collect();
+ ///
+ /// {
+ /// let mut it = list.iter_mut();
+ /// assert_eq!(it.next().unwrap(), &1);
+ /// // insert `2` after `1`
+ /// it.insert_next(2);
+ /// }
+ /// {
+ /// let vec: Vec<int> = list.into_iter().collect();
+ /// assert_eq!(vec, vec![1i, 2, 3, 4]);
+ /// }
+ /// ```
#[inline]
- fn insert_next(&mut self, elt: A) {
+ pub fn insert_next(&mut self, elt: A) {
self.insert_next_node(box Node::new(elt))
}
+ /// Provides a reference to the next element, without changing the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::collections::DList;
+ ///
+ /// let mut list: DList<int> = vec![1, 2, 3].into_iter().collect();
+ ///
+ /// let mut it = list.iter_mut();
+ /// assert_eq!(it.next().unwrap(), &1);
+ /// assert_eq!(it.peek_next().unwrap(), &2);
+ /// // We just peeked at 2, so it was not consumed from the iterator.
+ /// assert_eq!(it.next().unwrap(), &2);
+ /// ```
#[inline]
- fn peek_next(&mut self) -> Option<&mut A> {
+ pub fn peek_next(&mut self) -> Option<&mut A> {
if self.nelem == 0 {
return None
}
use test::Bencher;
use test;
- use super::{DList, Node, ListInsertion};
+ use super::{DList, Node};
pub fn check_links<T>(list: &DList<T>) {
let mut len = 0u;
if contiguous {
let (empty, buf) = buf.split_at_mut(0);
- (buf[mut tail..head], empty)
+ (buf.slice_mut(tail, head), empty)
} else {
let (mid, right) = buf.split_at_mut(tail);
let (left, _) = mid.split_at_mut(head);
use core::kinds::Sized;
use core::mem::size_of;
use core::mem;
-use core::ops::FnMut;
+use core::ops::{FnMut,SliceMut};
use core::prelude::{Clone, Greater, Iterator, IteratorExt, Less, None, Option};
-use core::prelude::{Ord, Ordering, RawPtr, Some, range};
+use core::prelude::{Ord, Ordering, PtrExt, Some, range};
use core::ptr;
use core::slice as core_slice;
use self::Direction::*;
#[inline]
fn move_from(&mut self, mut src: Vec<T>, start: uint, end: uint) -> uint {
- for (a, b) in self.iter_mut().zip(src[mut start..end].iter_mut()) {
+ for (a, b) in self.iter_mut().zip(src.slice_mut(start, end).iter_mut()) {
mem::swap(a, b);
}
cmp::min(self.len(), end-start)
#[unstable = "trait is unstable"]
impl<T> BorrowFromMut<Vec<T>> for [T] {
- fn borrow_from_mut(owned: &mut Vec<T>) -> &mut [T] { owned[mut] }
+ fn borrow_from_mut(owned: &mut Vec<T>) -> &mut [T] { owned.as_mut_slice_() }
}
#[unstable = "trait is unstable"]
assert!(a == [7i,2,3,4]);
let mut a = [1i,2,3,4,5];
let b = vec![5i,6,7,8,9,0];
- assert_eq!(a[mut 2..4].move_from(b,1,6), 2);
+ assert_eq!(a.slice_mut(2, 4).move_from(b,1,6), 2);
assert!(a == [1i,2,6,7,5]);
}
#[test]
fn test_reverse_part() {
let mut values = [1i,2,3,4,5];
- values[mut 1..4].reverse();
+ values.slice_mut(1, 4).reverse();
assert!(values == [1,4,3,2,5]);
}
fn test_bytes_set_memory() {
use slice::bytes::MutableByteVector;
let mut values = [1u8,2,3,4,5];
- values[mut 0..5].set_memory(0xAB);
+ values.slice_mut(0, 5).set_memory(0xAB);
assert!(values == [0xAB, 0xAB, 0xAB, 0xAB, 0xAB]);
- values[mut 2..4].set_memory(0xFF);
+ values.slice_mut(2, 4).set_memory(0xFF);
assert!(values == [0xAB, 0xAB, 0xFF, 0xFF, 0xAB]);
}
pub use core::str::{from_utf8, CharEq, Chars, CharIndices};
pub use core::str::{Bytes, CharSplits, is_utf8};
-pub use core::str::{CharSplitsN, Lines, LinesAny, MatchIndices, StrSplits};
+pub use core::str::{CharSplitsN, Lines, LinesAny, MatchIndices, StrSplits, SplitStr};
pub use core::str::{CharRange};
pub use core::str::{FromStr, from_str, Utf8Error};
pub use core::str::Str;
pub use core::str::{from_utf8_unchecked, from_c_str};
pub use unicode::str::{Words, Graphemes, GraphemeIndices};
+pub use core::str::{Split, SplitTerminator};
+pub use core::str::{SplitN, RSplitN};
// FIXME(conventions): ensure bit/char conventions are followed by str's API
/// // not found, so no change.
/// assert_eq!(s.replace("cookie monster", "little lamb"), s);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[stable]
fn replace(&self, from: &str, to: &str) -> String {
let mut result = String::new();
let mut last_end = 0;
}
}
- /// Returns true if one string contains another
+ /// Returns true if a string contains a string pattern.
///
/// # Arguments
///
- /// - needle - The string to look for
+ /// - pat - The string pattern to look for
///
/// # Example
///
/// ```rust
/// assert!("bananas".contains("nana"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn contains(&self, needle: &str) -> bool {
- core_str::StrExt::contains(self[], needle)
+ #[stable]
+ fn contains(&self, pat: &str) -> bool {
+ core_str::StrExt::contains(self[], pat)
}
- /// Returns true if a string contains a char.
+ /// Returns true if a string contains a char pattern.
///
/// # Arguments
///
- /// - needle - The char to look for
+ /// - pat - The char pattern to look for
///
/// # Example
///
/// ```rust
/// assert!("hello".contains_char('e'));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn contains_char(&self, needle: char) -> bool {
- core_str::StrExt::contains_char(self[], needle)
+ #[unstable = "might get removed in favour of a more generic contains()"]
+ fn contains_char<P: CharEq>(&self, pat: P) -> bool {
+ core_str::StrExt::contains_char(self[], pat)
}
/// An iterator over the characters of `self`. Note, this iterates
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`.
+ /// matched by the pattern `pat`.
///
/// # Example
///
/// let v: Vec<&str> = "".split('X').collect();
/// assert_eq!(v, vec![""]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- core_str::StrExt::split(self[], sep)
+ #[stable]
+ fn split<P: CharEq>(&self, pat: P) -> Split<P> {
+ core_str::StrExt::split(self[], pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`, restricted to splitting at most `count`
+ /// matched by the pattern `pat`, restricted to splitting at most `count`
/// times.
///
/// # Example
/// let v: Vec<&str> = "".splitn(1, 'X').collect();
/// assert_eq!(v, vec![""]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn splitn<Sep: CharEq>(&self, count: uint, sep: Sep) -> CharSplitsN<Sep> {
- core_str::StrExt::splitn(self[], count, sep)
+ #[stable]
+ fn splitn<P: CharEq>(&self, count: uint, pat: P) -> SplitN<P> {
+ core_str::StrExt::splitn(self[], count, pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`.
+ /// matched by the pattern `pat`.
///
/// Equivalent to `split`, except that the trailing substring
/// is skipped if empty (terminator semantics).
/// let v: Vec<&str> = "lionXXtigerXleopard".split('X').rev().collect();
/// assert_eq!(v, vec!["leopard", "tiger", "", "lion"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split_terminator<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- core_str::StrExt::split_terminator(self[], sep)
+ #[unstable = "might get removed"]
+ fn split_terminator<P: CharEq>(&self, pat: P) -> SplitTerminator<P> {
+ core_str::StrExt::split_terminator(self[], pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`, starting from the end of the string.
+ /// matched by the pattern `pat`, starting from the end of the string.
/// Restricted to splitting at most `count` times.
///
/// # Example
/// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(2, 'X').collect();
/// assert_eq!(v, vec!["leopard", "tiger", "lionX"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn rsplitn<Sep: CharEq>(&self, count: uint, sep: Sep) -> CharSplitsN<Sep> {
- core_str::StrExt::rsplitn(self[], count, sep)
+ #[stable]
+ fn rsplitn<P: CharEq>(&self, count: uint, pat: P) -> RSplitN<P> {
+ core_str::StrExt::rsplitn(self[], count, pat)
}
/// An iterator over the start and end indices of the disjoint
- /// matches of `sep` within `self`.
+ /// matches of the pattern `pat` within `self`.
///
/// That is, each returned value `(start, end)` satisfies
/// `self.slice(start, end) == sep`. For matches of `sep` within
/// let v: Vec<(uint, uint)> = "ababa".match_indices("aba").collect();
/// assert_eq!(v, vec![(0, 3)]); // only the first `aba`
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
- core_str::StrExt::match_indices(self[], sep)
+ #[unstable = "might have its iterator type changed"]
+ fn match_indices<'a>(&'a self, pat: &'a str) -> MatchIndices<'a> {
+ core_str::StrExt::match_indices(self[], pat)
}
- /// An iterator over the substrings of `self` separated by `sep`.
+ /// An iterator over the substrings of `self` separated by the pattern `sep`.
///
/// # Example
///
/// let v: Vec<&str> = "1abcabc2".split_str("abc").collect();
/// assert_eq!(v, vec!["1", "", "2"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split_str<'a>(&'a self, s: &'a str) -> StrSplits<'a> {
- core_str::StrExt::split_str(self[], s)
+ #[unstable = "might get removed in the future in favor of a more generic split()"]
+ fn split_str<'a>(&'a self, pat: &'a str) -> StrSplits<'a> {
+ core_str::StrExt::split_str(self[], pat)
}
/// An iterator over the lines of a string (subsequences separated
core_str::StrExt::slice_unchecked(self[], begin, end)
}
- /// Returns true if `needle` is a prefix of the string.
+ /// Returns true if the pattern `pat` is a prefix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".starts_with("ba"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn starts_with(&self, needle: &str) -> bool {
- core_str::StrExt::starts_with(self[], needle)
+ #[stable]
+ fn starts_with(&self, pat: &str) -> bool {
+ core_str::StrExt::starts_with(self[], pat)
}
- /// Returns true if `needle` is a suffix of the string.
+ /// Returns true if the pattern `pat` is a suffix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".ends_with("nana"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn ends_with(&self, needle: &str) -> bool {
- core_str::StrExt::ends_with(self[], needle)
+ #[stable]
+ fn ends_with(&self, pat: &str) -> bool {
+ core_str::StrExt::ends_with(self[], pat)
}
- /// Returns a string with characters that match `to_trim` removed from the left and the right.
+ /// Returns a string with all pre- and suffixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_chars('1'), "foo1bar");
+ /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_chars(x), "foo1bar");
- /// assert_eq!("123foo1bar123".trim_chars(|&: c: char| c.is_numeric()), "foo1bar");
+ /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar");
+ /// assert_eq!("123foo1bar123".trim_matches(|&: c: char| c.is_numeric()), "foo1bar");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_chars(self[], to_trim)
+ #[stable]
+ fn trim_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_matches(self[], pat)
}
- /// Returns a string with leading `chars_to_trim` removed.
+ /// Deprecated
+ #[deprecated = "Replaced by `trim_matches`"]
+ fn trim_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_matches(to_trim)
+ }
+
+ /// Returns a string with all prefixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_left_chars('1'), "foo1bar11");
+ /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_left_chars(x), "foo1bar12");
- /// assert_eq!("123foo1bar123".trim_left_chars(|&: c: char| c.is_numeric()), "foo1bar123");
+ /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12");
+ /// assert_eq!("123foo1bar123".trim_left_matches(|&: c: char| c.is_numeric()), "foo1bar123");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_left_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_left_chars(self[], to_trim)
+ #[stable]
+ fn trim_left_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_left_matches(self[], pat)
}
- /// Returns a string with trailing `chars_to_trim` removed.
+ /// Deprecated
+ #[deprecated = "Replaced by `trim_left_matches`"]
+ fn trim_left_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_left_matches(to_trim)
+ }
+
+ /// Returns a string with all suffixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_right_chars('1'), "11foo1bar");
+ /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_right_chars(x), "12foo1bar");
- /// assert_eq!("123foo1bar123".trim_right_chars(|&: c: char| c.is_numeric()), "123foo1bar");
+ /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar");
+ /// assert_eq!("123foo1bar123".trim_right_matches(|&: c: char| c.is_numeric()), "123foo1bar");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_right_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_right_chars(self[], to_trim)
+ #[stable]
+ fn trim_right_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_right_matches(self[], pat)
+ }
+
+ /// Deprecated
+ #[deprecated = "Replaced by `trim_right_matches`"]
+ fn trim_right_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_right_matches(to_trim)
}
/// Check that `index`-th byte lies at the start and/or end of a
}
/// Returns the byte index of the first character of `self` that
- /// matches `search`.
+ /// matches the pattern `pat`.
///
/// # Return value
///
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.find(x), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn find<C: CharEq>(&self, search: C) -> Option<uint> {
- core_str::StrExt::find(self[], search)
+ #[stable]
+ fn find<P: CharEq>(&self, pat: P) -> Option<uint> {
+ core_str::StrExt::find(self[], pat)
}
/// Returns the byte index of the last character of `self` that
- /// matches `search`.
+ /// matches the pattern `pat`.
///
/// # Return value
///
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.rfind(x), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn rfind<C: CharEq>(&self, search: C) -> Option<uint> {
- core_str::StrExt::rfind(self[], search)
+ #[stable]
+ fn rfind<P: CharEq>(&self, pat: P) -> Option<uint> {
+ core_str::StrExt::rfind(self[], pat)
}
/// Returns the byte index of the first matching substring
/// assert_eq!(s.find_str("老虎 L"), Some(6));
/// assert_eq!(s.find_str("muffin man"), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[unstable = "might get removed in favor of a more generic find in the future"]
fn find_str(&self, needle: &str) -> Option<uint> {
core_str::StrExt::find_str(self[], needle)
}
/// assert!(string.subslice_offset(lines[1]) == 2); // &"b"
/// assert!(string.subslice_offset(lines[2]) == 4); // &"c"
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[unstable = "awaiting convention about comparability of arbitrary slices"]
fn subslice_offset(&self, inner: &str) -> uint {
core_str::StrExt::subslice_offset(self[], inner)
}
#[cfg(test)]
mod tests {
- use std::iter::AdditiveIterator;
- use std::iter::range;
- use std::default::Default;
- use std::char::Char;
- use std::clone::Clone;
- use std::cmp::{Ord, PartialOrd, Equiv};
- use std::cmp::Ordering::{Equal, Greater, Less};
- use std::option::Option::{mod, Some, None};
- use std::result::Result::{Ok, Err};
- use std::ptr::RawPtr;
- use std::iter::{Iterator, IteratorExt, DoubleEndedIteratorExt};
+ use prelude::*;
- use super::*;
+ use core::default::Default;
+ use core::iter::AdditiveIterator;
+ use super::{eq_slice, from_utf8, is_utf8, is_utf16, raw};
+ use super::truncate_utf16_at_nul;
use super::MaybeOwned::{Owned, Slice};
use std::slice::{AsSlice, SliceExt};
use string::{String, ToString};
#[cfg(test)]
mod bench {
+ use super::*;
use prelude::*;
use test::Bencher;
use test::black_box;
- use super::*;
#[bench]
fn char_iterator(b: &mut Bencher) {
}
impl<T: fmt::Show> ToString for T {
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ fn to_string(&self) -> String {
+ let mut buf = Vec::<u8>::new();
+ let _ = fmt::write(&mut buf, format_args!("{}", *self));
+ String::from_utf8(buf).unwrap()
+ }
+ // NOTE(stage0): Remove method after a snapshot
+ #[cfg(stage0)]
fn to_string(&self) -> String {
let mut buf = Vec::<u8>::new();
let _ = format_args!(|args| fmt::write(&mut buf, args), "{}", self);
use core::iter::repeat;
use core::kinds::marker::{ContravariantLifetime, InvariantType};
use core::mem;
+use core::nonzero::NonZero;
use core::num::{Int, UnsignedInt};
use core::ops;
use core::ptr;
#[unsafe_no_drop_flag]
#[stable]
pub struct Vec<T> {
- ptr: *mut T,
+ ptr: NonZero<*mut T>,
len: uint,
cap: uint,
}
+unsafe impl<T: Send> Send for Vec<T> { }
+unsafe impl<T: Sync> Sync for Vec<T> { }
+
/// A clone-on-write vector
pub type CowVec<'a, T> = Cow<'a, Vec<T>, [T]>;
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
- Vec { ptr: EMPTY as *mut T, len: 0, cap: 0 }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
#[stable]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
- Vec { ptr: EMPTY as *mut T, len: 0, cap: uint::MAX }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: uint::MAX }
} else if capacity == 0 {
Vec::new()
} else {
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
- Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
+ Vec { ptr: unsafe { NonZero::new(ptr as *mut T) }, len: 0, cap: capacity }
}
}
#[unstable = "needs finalization"]
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
capacity: uint) -> Vec<T> {
- Vec { ptr: ptr, len: length, cap: capacity }
+ Vec { ptr: NonZero::new(ptr), len: length, cap: capacity }
}
/// Creates a vector by copying the elements from a raw pointer.
if self.len == 0 {
if self.cap != 0 {
unsafe {
- dealloc(self.ptr, self.cap)
+ dealloc(*self.ptr, self.cap)
}
self.cap = 0;
}
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
- self.ptr = reallocate(self.ptr as *mut u8,
- self.cap * mem::size_of::<T>(),
- self.len * mem::size_of::<T>(),
- mem::min_align_of::<T>()) as *mut T;
- if self.ptr.is_null() { ::alloc::oom() }
+ let ptr = reallocate(*self.ptr as *mut u8,
+ self.cap * mem::size_of::<T>(),
+ self.len * mem::size_of::<T>(),
+ mem::min_align_of::<T>()) as *mut T;
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = self.len;
}
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr as *const T,
+ data: *self.ptr as *const T,
len: self.len,
})
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
- let ptr = self.ptr;
+ let ptr = *self.ptr;
let cap = self.cap;
- let begin = self.ptr as *const T;
+ let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
(ptr as uint + self.len()) as *const T
} else {
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size { panic!("capacity overflow") }
unsafe {
- self.ptr = alloc_or_realloc(self.ptr, old_size, size);
- if self.ptr.is_null() { ::alloc::oom() }
+ let ptr = alloc_or_realloc(*self.ptr, old_size, size);
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = max(self.cap, 2) * 2;
}
unsafe {
- let end = (self.ptr as *const T).offset(self.len as int) as *mut T;
+ let end = (*self.ptr).offset(self.len as int);
ptr::write(&mut *end, value);
self.len += 1;
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
unsafe {
- let begin = self.ptr as *const T;
+ let begin = *self.ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
- (self.ptr as uint + self.len()) as *const T
+ (*self.ptr as uint + self.len()) as *const T
} else {
- self.ptr.offset(self.len() as int) as *const T
+ (*self.ptr).offset(self.len() as int) as *const T
};
self.set_len(0);
Drain {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
- self.ptr = alloc_or_realloc(self.ptr, self.cap * mem::size_of::<T>(), size);
- if self.ptr.is_null() { ::alloc::oom() }
+ let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size);
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = capacity;
}
fn as_slice<'a>(&'a self) -> &'a [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr as *const T,
+ data: *self.ptr as *const T,
len: self.len
})
}
for x in self.iter() {
ptr::read(x);
}
- dealloc(self.ptr, self.cap)
+ dealloc(*self.ptr, self.cap)
}
}
}
for _x in self { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
mem::forget(self);
- Vec { ptr: allocation, cap: cap, len: 0 }
+ Vec { ptr: NonZero::new(allocation), cap: cap, len: 0 }
}
}
self.v.capacity()
}
+    /// Reserves capacity for the given `VecMap` to contain `len` distinct keys.
+    /// In the case of `VecMap` this means reallocations will not occur as long
+    /// as all inserted keys are less than `len`.
+    ///
+    /// The collection may reserve more space to avoid frequent reallocations.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::VecMap;
+    /// let mut map: VecMap<&str> = VecMap::new();
+    /// map.reserve_len(10);
+    /// assert!(map.capacity() >= 10);
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn reserve_len(&mut self, len: uint) {
+        let cur_len = self.v.len();
+        // `Vec::reserve` takes *additional* capacity, so request only the
+        // difference; keys below `cur_len` already have backing slots.
+        // When `len == cur_len` this is a no-op `reserve(0)`.
+        if len >= cur_len {
+            self.v.reserve(len - cur_len);
+        }
+    }
+
+    /// Reserves the minimum capacity for the given `VecMap` to contain `len` distinct keys.
+    /// In the case of `VecMap` this means reallocations will not occur as long as all inserted
+    /// keys are less than `len`.
+    ///
+    /// Note that the allocator may give the collection more space than it requests.
+    /// Therefore capacity cannot be relied upon to be precisely minimal. Prefer
+    /// `reserve_len` if future insertions are expected.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::VecMap;
+    /// let mut map: VecMap<&str> = VecMap::new();
+    /// map.reserve_len_exact(10);
+    /// assert!(map.capacity() >= 10);
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn reserve_len_exact(&mut self, len: uint) {
+        let cur_len = self.v.len();
+        // Same additional-capacity arithmetic as `reserve_len`, but using
+        // `reserve_exact` to avoid amortized over-allocation.
+        if len >= cur_len {
+            self.v.reserve_exact(len - cur_len);
+        }
+    }
+
+
/// Returns an iterator visiting all keys in ascending order by the keys.
/// The iterator's element type is `uint`.
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub use self::Ordering::*;
+use kinds::Sync;
+
use intrinsics;
use cell::UnsafeCell;
v: UnsafeCell<uint>,
}
+unsafe impl Sync for AtomicBool {}
+
/// A signed integer type which can be safely shared between threads.
#[stable]
pub struct AtomicInt {
v: UnsafeCell<int>,
}
+unsafe impl Sync for AtomicInt {}
+
/// An unsigned integer type which can be safely shared between threads.
#[stable]
pub struct AtomicUint {
v: UnsafeCell<uint>,
}
+unsafe impl Sync for AtomicUint {}
+
/// A raw pointer type which can be safely shared between threads.
#[stable]
pub struct AtomicPtr<T> {
p: UnsafeCell<uint>,
}
+unsafe impl<T> Sync for AtomicPtr<T> {}
+
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// but can be overridden to reuse the resources of `a` to avoid unnecessary
/// allocations.
#[inline(always)]
- #[unstable = "this function rarely unused"]
+ #[unstable = "this function is rarely used"]
fn clone_from(&mut self, source: &Self) {
*self = source.clone()
}
_ => ()
}
- buf[mut ..end].reverse();
+ buf.slice_to_mut(end).reverse();
// Remember start of the fractional digits.
// Points one beyond end of buf if none get generated,
impl<'a> fmt::FormatWriter for Filler<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- slice::bytes::copy_memory(self.buf[mut *self.end..],
+ slice::bytes::copy_memory(self.buf.slice_from_mut(*self.end),
bytes);
*self.end += bytes.len();
Ok(())
let mut filler = Filler { buf: &mut buf, end: &mut end };
match sign {
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ SignNeg => {
+ let _ = fmt::write(&mut filler, format_args!("{:-}", exp));
+ }
+ // NOTE(stage0): Remove match arm after a snapshot
+ #[cfg(stage0)]
SignNeg => {
let _ = format_args!(|args| {
fmt::write(&mut filler, args)
/// This function will return an instance of `FormatError` on error.
fn write(&mut self, bytes: &[u8]) -> Result;
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ /// Glue for usage of the `write!` macro with implementers of this trait.
+ ///
+ /// This method should generally not be invoked manually, but rather through
+ /// the `write!` macro itself.
+ fn write_fmt(&mut self, args: Arguments) -> Result { write(self, args) }
+
+ // NOTE(stage0): Remove method after a snapshot
+ #[cfg(stage0)]
/// Glue for usage of the `write!` macro with implementers of this trait.
///
/// This method should generally not be invoked manually, but rather through
/// macro validates the format string at compile-time so usage of the `write`
/// and `format` functions can be safely performed.
#[stable]
+#[deriving(Copy)]
pub struct Arguments<'a> {
// Format string pieces to print.
pieces: &'a [&'a str],
}
impl<'a> Show for Arguments<'a> {
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ fn fmt(&self, fmt: &mut Formatter) -> Result {
+ write(fmt.buf, *self)
+ }
+
+ // NOTE(stage0): Remove method after a snapshot
+ #[cfg(stage0)]
fn fmt(&self, fmt: &mut Formatter) -> Result {
write(fmt.buf, self)
}
}
};
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// The `write` function takes an output stream, a precompiled format string,
+/// and a list of arguments. The arguments will be formatted according to the
+/// specified format string into the output stream provided.
+///
+/// # Arguments
+///
+///   * output - the buffer to write output to
+///   * args - the precompiled arguments generated by `format_args!`
+#[experimental = "libcore and I/O have yet to be reconciled, and this is an \
+                  implementation detail which should not otherwise be exported"]
+pub fn write(output: &mut FormatWriter, args: Arguments) -> Result {
+    let mut formatter = Formatter {
+        flags: 0,
+        width: None,
+        precision: None,
+        buf: output,
+        align: rt::AlignUnknown,
+        fill: ' ',
+        args: args.args,
+        curarg: args.args.iter(),
+    };
+
+    // Literal string pieces alternate with formatted arguments; any final
+    // piece left after the argument loop is flushed at the end.
+    let mut pieces = args.pieces.iter();
+
+    match args.fmt {
+        None => {
+            // We can use default formatting parameters for all arguments.
+            for _ in range(0, args.args.len()) {
+                try!(formatter.buf.write(pieces.next().unwrap().as_bytes()));
+                try!(formatter.run(&DEFAULT_ARGUMENT));
+            }
+        }
+        Some(fmt) => {
+            // Every spec has a corresponding argument that is preceded by
+            // a string piece.
+            for (arg, piece) in fmt.iter().zip(pieces.by_ref()) {
+                try!(formatter.buf.write(piece.as_bytes()));
+                try!(formatter.run(arg));
+            }
+        }
+    }
+
+    // There can be only one trailing string piece left.
+    match pieces.next() {
+        Some(piece) => {
+            try!(formatter.buf.write(piece.as_bytes()));
+        }
+        None => {}
+    }
+
+    Ok(())
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
/// The `write` function takes an output stream, a precompiled format string,
/// and a list of arguments. The arguments will be formatted according to the
/// specified format string into the output stream provided.
self.buf.write(data)
}
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ /// Writes some formatted information into this instance
+ #[unstable = "reconciling core and I/O may alter this definition"]
+ pub fn write_fmt(&mut self, fmt: Arguments) -> Result {
+ write(self.buf, fmt)
+ }
+
+ // NOTE(stage0): Remove method after a snapshot
+ #[cfg(stage0)]
/// Writes some formatted information into this instance
#[unstable = "reconciling core and I/O may alter this definition"]
pub fn write_fmt(&mut self, fmt: &Arguments) -> Result {
/// Types able to be transferred across task boundaries.
#[lang="send"]
-pub trait Send for Sized? : 'static {
+pub unsafe trait Send for Sized? : 'static {
// empty.
}
/// reference; not doing this is undefined behaviour (for example,
/// `transmute`-ing from `&T` to `&mut T` is illegal).
#[lang="sync"]
-pub trait Sync for Sized? {
+pub unsafe trait Sync for Sized? {
// Empty
}
//! distribution.
//!
//! * `rust_begin_unwind` - This function takes three arguments, a
-//! `&fmt::Arguments`, a `&str`, and a `uint`. These three arguments dictate
+//! `fmt::Arguments`, a `&str`, and a `uint`. These three arguments dictate
//! the panic message, the file at which panic was invoked, and the line.
//! It is up to consumers of this core library to define this panic
//! function; it is only required to never return.
pub mod intrinsics;
pub mod mem;
+pub mod nonzero;
pub mod ptr;
/* Core language traits */
#![macro_escape]
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Entry point of task panic, for details, see std::macros
+#[macro_export]
+macro_rules! panic {
+    () => (
+        panic!("explicit panic")
+    );
+    ($msg:expr) => ({
+        // NOTE(review): message/file/line are bundled into one static tuple so
+        // `panic` takes a single reference — presumably to keep the call-site
+        // code minimal; confirm against `core::panicking::panic`.
+        static _MSG_FILE_LINE: (&'static str, &'static str, uint) = ($msg, file!(), line!());
+        ::core::panicking::panic(&_MSG_FILE_LINE)
+    });
+    ($fmt:expr, $($arg:tt)*) => ({
+        // The leading _'s are to avoid dead code warnings if this is
+        // used inside a dead function. Just `#[allow(dead_code)]` is
+        // insufficient, since the user may have `#[forbid(dead_code)]`,
+        // which cannot be overridden.
+        static _FILE_LINE: (&'static str, uint) = (file!(), line!());
+        ::core::panicking::panic_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
+    });
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Entry point of task panic, for details, see std::macros
#[macro_export]
macro_rules! panic {
);
}
-/// Runtime assertion, only without `--cfg ndebug`
-#[macro_export]
-macro_rules! debug_assert {
- ($(a:tt)*) => ({
- if cfg!(not(ndebug)) {
- assert!($($a)*);
- }
- })
-}
-
/// Runtime assertion for equality, for details see std::macros
#[macro_export]
macro_rules! assert_eq {
})
}
-/// Runtime assertion, disableable at compile time
+/// Runtime assertion, disableable at compile time with `--cfg ndebug`
#[macro_export]
macro_rules! debug_assert {
($($arg:tt)*) => (if cfg!(not(ndebug)) { assert!($($arg)*); })
($e:expr) => (match $e { Ok(e) => e, Err(e) => return Err(e) })
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Writing a formatted string into a writer
+#[macro_export]
+macro_rules! write {
+    // NOTE(review): the `&mut *$dst` reborrow presumably avoids moving `$dst`
+    // when it is itself a `&mut` expression — confirm against callers.
+    ($dst:expr, $($arg:tt)*) => ((&mut *$dst).write_fmt(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Writing a formatted string into a writer
#[macro_export]
macro_rules! write {
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Exposes the NonZero lang item which provides optimization hints.
+
+use ops::Deref;
+
+/// Unsafe trait to indicate what types are usable with the NonZero struct
+// Implementing this trait asserts that a zero bit pattern is representable
+// (and thus excludable) for the type; hence the impls below are `unsafe`.
+pub unsafe trait Zeroable {}
+
+unsafe impl<T> Zeroable for *const T {}
+unsafe impl<T> Zeroable for *mut T {}
+unsafe impl Zeroable for int {}
+unsafe impl Zeroable for uint {}
+unsafe impl Zeroable for i8 {}
+unsafe impl Zeroable for u8 {}
+unsafe impl Zeroable for i16 {}
+unsafe impl Zeroable for u16 {}
+unsafe impl Zeroable for i32 {}
+unsafe impl Zeroable for u32 {}
+unsafe impl Zeroable for i64 {}
+unsafe impl Zeroable for u64 {}
+
+/// A wrapper type for raw pointers and integers that will never be
+/// NULL or 0 that might allow certain optimizations.
+#[lang="non_zero"]
+#[deriving(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Show)]
+#[experimental]
+pub struct NonZero<T: Zeroable>(T);
+
+impl<T: Zeroable> NonZero<T> {
+    /// Create an instance of NonZero with the provided value.
+    /// You must indeed ensure that the value is actually "non-zero".
+    #[inline(always)]
+    pub unsafe fn new(inner: T) -> NonZero<T> {
+        // No runtime check: upholding the non-zero invariant is the caller's
+        // `unsafe` obligation (see the doc comment above).
+        NonZero(inner)
+    }
+}
+
+impl<T: Zeroable> Deref<T> for NonZero<T> {
+    /// Borrows the wrapped value, so `*nz` yields the inner pointer/integer.
+    #[inline]
+    fn deref<'a>(&'a self) -> &'a T {
+        let NonZero(ref inner) = *self;
+        inner
+    }
+}
}
}
+/// A range which is only bounded above.
+#[deriving(Copy)]
+// NOTE(review): registered as the `range_to` lang item — presumably the
+// compiler's target for upper-bounded range syntax; confirm in the compiler.
+#[lang="range_to"]
+pub struct RangeTo<Idx> {
+    /// The upper bound of the range (exclusive).
+    pub end: Idx,
+}
+
/// The `Deref` trait is used to specify the functionality of dereferencing
/// operations like `*v`.
//! interface for panicking is:
//!
//! ```ignore
-//! fn panic_impl(fmt: &fmt::Arguments, &(&'static str, uint)) -> !;
+//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, uint)) -> !;
//! ```
//!
//! This definition allows for panicking with any general message, but it does not
#![allow(dead_code, missing_docs)]
use fmt;
-use intrinsics;
+// NOTE(stage0): Remove import after a snapshot
+#[cfg(stage0)] use intrinsics;
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+#[cold] #[inline(never)] // this is the slow path, always
+#[lang="panic"]
+pub fn panic(expr_file_line: &(&'static str, &'static str, uint)) -> ! {
+ let (expr, file, line) = *expr_file_line;
+ panic_fmt(format_args!("{}", expr), &(file, line))
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
#[cold] #[inline(never)] // this is the slow path, always
#[lang="panic"]
pub fn panic(expr_file_line: &(&'static str, &'static str, uint)) -> ! {
unsafe { intrinsics::abort() }
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+#[cold] #[inline(never)]
+#[lang="panic_bounds_check"]
+fn panic_bounds_check(file_line: &(&'static str, uint),
+ index: uint, len: uint) -> ! {
+ panic_fmt(format_args!("index out of bounds: the len is {} but the index is {}",
+ len, index), file_line)
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
#[cold] #[inline(never)]
#[lang="panic_bounds_check"]
fn panic_bounds_check(file_line: &(&'static str, uint),
unsafe { intrinsics::abort() }
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+#[cold] #[inline(never)]
+pub fn panic_fmt(fmt: fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
+ #[allow(improper_ctypes)]
+ extern {
+ #[lang = "panic_fmt"]
+ fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: uint) -> !;
+ }
+ let (file, line) = *file_line;
+ unsafe { panic_impl(fmt, file, line) }
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
#[cold] #[inline(never)]
pub fn panic_fmt(fmt: &fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
#[allow(improper_ctypes)]
pub use num::{ToPrimitive, FromPrimitive};
pub use option::Option;
pub use option::Option::{Some, None};
-pub use ptr::RawPtr;
+pub use ptr::{PtrExt, MutPtrExt};
pub use result::Result;
pub use result::Result::{Ok, Err};
pub use str::{Str, StrExt};
//! typically limited to a few patterns.
//!
//! Use the [`null` function](fn.null.html) to create null pointers,
-//! the [`is_null`](trait.RawPtr.html#tymethod.is_null)
-//! and [`is_not_null`](trait.RawPtr.html#method.is_not_null)
-//! methods of the [`RawPtr` trait](trait.RawPtr.html) to check for null.
-//! The `RawPtr` trait is imported by the prelude, so `is_null` etc.
-//! work everywhere. The `RawPtr` also defines the `offset` method,
+//! the [`is_null`](trait.PtrExt.html#tymethod.is_null)
+//! methods of the [`PtrExt` trait](trait.PtrExt.html) to check for null.
+//! The `PtrExt` trait is imported by the prelude, so `is_null` etc.
+//! work everywhere. The `PtrExt` also defines the `offset` method,
//! for pointer math.
//!
//! # Common ways to create unsafe pointers
//! but C APIs hand out a lot of pointers generally, so are a common source
//! of unsafe pointers in Rust.
+#![stable]
+
use mem;
use clone::Clone;
use intrinsics;
-use option::Option;
-use option::Option::{Some, None};
+use option::Option::{mod, Some, None};
+use kinds::{Send, Sync};
use cmp::{PartialEq, Eq, Ord, PartialOrd, Equiv};
-use cmp::Ordering;
-use cmp::Ordering::{Less, Equal, Greater};
+use cmp::Ordering::{mod, Less, Equal, Greater};
// FIXME #19649: instrinsic docs don't render, so these have no docs :(
#[experimental = "uncertain about naming and semantics"]
pub use intrinsics::set_memory;
+
/// Creates a null raw pointer.
///
/// # Examples
/// assert!(p.is_null());
/// ```
#[inline]
-#[unstable = "may need a different name after pending changes to pointer types"]
+#[stable]
pub fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
/// assert!(p.is_null());
/// ```
#[inline]
-#[unstable = "may need a different name after pending changes to pointer types"]
+#[stable]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
-/// Zeroes out `count * size_of::<T>` bytes of memory at `dst`. `count` may be `0`.
+/// Zeroes out `count * size_of::<T>` bytes of memory at `dst`. `count` may be
+/// `0`.
///
/// # Safety
///
-/// Beyond accepting a raw pointer, this is unsafe because it will not drop the contents of `dst`,
-/// and may be used to create invalid instances of `T`.
+/// Beyond accepting a raw pointer, this is unsafe because it will not drop the
+/// contents of `dst`, and may be used to create invalid instances of `T`.
#[inline]
-#[experimental = "uncertain about naming and semantics"]
-#[allow(experimental)]
+#[unstable = "may play a larger role in std::ptr future extensions"]
pub unsafe fn zero_memory<T>(dst: *mut T, count: uint) {
set_memory(dst, 0, count);
}
/// Swaps the values at two mutable locations of the same type, without
-/// deinitialising either. They may overlap, unlike `mem::swap` which is otherwise
-/// equivalent.
+/// deinitialising either. They may overlap, unlike `mem::swap` which is
+/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(mem::transmute(dest), &mut src); // cannot overlap
src
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
#[inline(always)]
-#[unstable]
+#[stable]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping_memory(&mut tmp, src, 1);
///
/// This is unsafe for the same reasons that `read` is unsafe.
#[inline(always)]
-#[experimental]
-#[allow(experimental)]
+#[unstable = "may play a larger role in std::ptr future extensions"]
pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
tmp
}
-/// Overwrites a memory location with the given value without reading or dropping
-/// the old value.
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
///
/// # Safety
///
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
-/// This is appropriate for initializing uninitialized memory, or overwritting memory
-/// that has previously been `read` from.
+/// This is appropriate for initializing uninitialized memory, or overwritting
+/// memory that has previously been `read` from.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
/// Methods on raw pointers
-pub trait RawPtr<T> {
- /// Returns a null raw pointer.
+#[stable]
+pub trait PtrExt<T> {
+ /// Returns the null pointer.
+ #[deprecated = "call ptr::null instead"]
fn null() -> Self;
/// Returns true if the pointer is null.
- fn is_null(&self) -> bool;
+ #[stable]
+ fn is_null(self) -> bool;
- /// Returns true if the pointer is not null.
- fn is_not_null(&self) -> bool { !self.is_null() }
+ /// Returns true if the pointer is not equal to the null pointer.
+ #[deprecated = "use !p.is_null() instead"]
+ fn is_not_null(self) -> bool { !self.is_null() }
- /// Returns the address of the pointer.
- fn to_uint(&self) -> uint;
+ /// Returns true if the pointer is not null.
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint;
- /// Returns `None` if the pointer is null, or else returns a reference to the
- /// value wrapped in `Some`.
+ /// Returns `None` if the pointer is null, or else returns a reference to
+ /// the value wrapped in `Some`.
///
/// # Safety
///
- /// While this method and its mutable counterpart are useful for null-safety,
- /// it is important to note that this is still an unsafe operation because
- /// the returned value could be pointing to invalid memory.
+ /// While this method and its mutable counterpart are useful for
+ /// null-safety, it is important to note that this is still an unsafe
+ /// operation because the returned value could be pointing to invalid
+ /// memory.
+ #[unstable = "Option is not clearly the right return type, and we may want \
+ to tie the return lifetime to a borrow of the raw pointer"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T>;
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
///
/// # Safety
///
- /// The offset must be in-bounds of the object, or one-byte-past-the-end. Otherwise
- /// `offset` invokes Undefined Behaviour, regardless of whether the pointer is used.
+ /// The offset must be in-bounds of the object, or one-byte-past-the-end.
+ /// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
+ /// the pointer is used.
+ #[stable]
unsafe fn offset(self, count: int) -> Self;
}
/// Methods on mutable raw pointers
-pub trait RawMutPtr<T>{
- /// Returns `None` if the pointer is null, or else returns a mutable reference
- /// to the value wrapped in `Some`.
+#[stable]
+pub trait MutPtrExt<T>{
+ /// Returns `None` if the pointer is null, or else returns a mutable
+ /// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
+ #[unstable = "Option is not clearly the right return type, and we may want \
+ to tie the return lifetime to a borrow of the raw pointer"]
unsafe fn as_mut<'a>(&self) -> Option<&'a mut T>;
}
-impl<T> RawPtr<T> for *const T {
+#[stable]
+impl<T> PtrExt<T> for *const T {
#[inline]
+ #[deprecated = "call ptr::null instead"]
fn null() -> *const T { null() }
#[inline]
- fn is_null(&self) -> bool { *self == RawPtr::null() }
+ #[stable]
+ fn is_null(self) -> bool { self as uint == 0 }
#[inline]
- fn to_uint(&self) -> uint { *self as uint }
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint { self as uint }
#[inline]
+ #[stable]
unsafe fn offset(self, count: int) -> *const T {
intrinsics::offset(self, count)
}
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
}
}
-impl<T> RawPtr<T> for *mut T {
+#[stable]
+impl<T> PtrExt<T> for *mut T {
#[inline]
+ #[deprecated = "call ptr::null instead"]
fn null() -> *mut T { null_mut() }
#[inline]
- fn is_null(&self) -> bool { *self == RawPtr::null() }
+ #[stable]
+ fn is_null(self) -> bool { self as uint == 0 }
#[inline]
- fn to_uint(&self) -> uint { *self as uint }
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint { self as uint }
#[inline]
+ #[stable]
unsafe fn offset(self, count: int) -> *mut T {
intrinsics::offset(self as *const T, count) as *mut T
}
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
}
}
-impl<T> RawMutPtr<T> for *mut T {
+#[stable]
+impl<T> MutPtrExt<T> for *mut T {
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> {
if self.is_null() {
None
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
+
+/// A wrapper around a raw `*mut T` that indicates that the possessor
+/// of this wrapper owns the referent. This in turn implies that the
+/// `Unique<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a
+/// raw `*mut T` (which conveys no particular ownership semantics).
+/// Useful for building abstractions like `Vec<T>` or `Box<T>`, which
+/// internally use raw pointers to manage the memory that they own.
+#[unstable = "recently added to this module"]
+pub struct Unique<T>(pub *mut T);
+
+/// `Unique` pointers are `Send` if `T` is `Send` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `Unique` must enforce it.
+#[unstable = "recently added to this module"]
+unsafe impl<T:Send> Send for Unique<T> { }
+
+/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `Unique` must enforce it.
+#[unstable = "recently added to this module"]
+unsafe impl<T:Sync> Sync for Unique<T> { }
+
+impl<T> Unique<T> {
+ /// Returns a null Unique.
+ #[unstable = "recently added to this module"]
+ pub fn null() -> Unique<T> {
+ Unique(null_mut())
+ }
+
+ /// Return an (unsafe) pointer into the memory owned by `self`.
+ #[unstable = "recently added to this module"]
+ pub unsafe fn offset(self, offset: int) -> *mut T {
+ self.0.offset(offset)
+ }
+}
use option::Option;
use option::Option::{None, Some};
use ptr;
-use ptr::RawPtr;
+use ptr::PtrExt;
use mem;
use mem::size_of;
use kinds::{Sized, marker};
fn as_mut_slice(&mut self) -> &mut [T] { self }
fn slice_mut(&mut self, start: uint, end: uint) -> &mut [T] {
- self[mut start..end]
+ ops::SliceMut::slice_or_fail_mut(self, &start, &end)
}
#[inline]
fn slice_from_mut(&mut self, start: uint) -> &mut [T] {
- self[mut start..]
+ ops::SliceMut::slice_from_or_fail_mut(self, &start)
}
#[inline]
fn slice_to_mut(&mut self, end: uint) -> &mut [T] {
- self[mut ..end]
+ ops::SliceMut::slice_to_or_fail_mut(self, &end)
}
#[inline]
fn split_at_mut(&mut self, mid: uint) -> (&mut [T], &mut [T]) {
unsafe {
let self2: &mut [T] = mem::transmute_copy(&self);
- (self[mut ..mid], self2[mut mid..])
+
+ (ops::SliceMut::slice_to_or_fail_mut(self, &mid),
+ ops::SliceMut::slice_from_or_fail_mut(self2, &mid))
}
}
#[inline]
fn tail_mut(&mut self) -> &mut [T] {
- let len = self.len();
- self[mut 1..len]
+ self.slice_from_mut(1)
}
#[inline]
fn init_mut(&mut self) -> &mut [T] {
let len = self.len();
- self[mut 0..len - 1]
+ self.slice_to_mut(len-1)
}
#[inline]
self.swap(j, i-1);
// Step 4: Reverse the (previously) weakly decreasing part
- self[mut i..].reverse();
+ self.slice_from_mut(i).reverse();
true
}
}
// Step 2: Reverse the weakly increasing part
- self[mut i..].reverse();
+ self.slice_from_mut(i).reverse();
// Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
let mut j = self.len() - 1;
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
- self.v = tail[mut 1..];
+ self.v = tail.slice_from_mut(1);
Some(head)
}
}
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
- Some(tail[mut 1..])
+ Some(tail.slice_from_mut(1))
}
}
}
#[deprecated]
pub mod raw {
use mem::transmute;
- use ptr::RawPtr;
+ use ptr::PtrExt;
use raw::Slice;
use ops::FnOnce;
use option::Option;
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// ignore-lexer-test FIXME #15679
-
-//! String manipulation
-//!
-//! For more details, see std::str
-
-#![doc(primitive = "str")]
-
-use self::Searcher::{Naive, TwoWay, TwoWayLong};
-
-use clone::Clone;
-use cmp::{mod, Eq};
-use default::Default;
-use iter::range;
-use iter::{DoubleEndedIteratorExt, ExactSizeIterator};
-use iter::{Map, Iterator, IteratorExt, DoubleEndedIterator};
-use kinds::Sized;
-use mem;
-use num::Int;
-use ops::{Fn, FnMut};
-use option::Option::{mod, None, Some};
-use ptr::RawPtr;
-use raw::{Repr, Slice};
-use result::Result::{mod, Ok, Err};
-use slice::{mod, SliceExt};
-use uint;
-
-/// A trait to abstract the idea of creating a new instance of a type from a
-/// string.
-// FIXME(#17307): there should be an `E` associated type for a `Result` return
-#[unstable = "will return a Result once associated types are working"]
-pub trait FromStr {
- /// Parses a string `s` to return an optional value of this type. If the
- /// string is ill-formatted, the None is returned.
- fn from_str(s: &str) -> Option<Self>;
-}
-
-/// A utility function that just calls FromStr::from_str
-#[deprecated = "call the .parse() method on the string instead"]
-pub fn from_str<A: FromStr>(s: &str) -> Option<A> {
- FromStr::from_str(s)
-}
-
-impl FromStr for bool {
- /// Parse a `bool` from a string.
- ///
- /// Yields an `Option<bool>`, because `s` may or may not actually be parseable.
- ///
- /// # Examples
- ///
- /// ```rust
- /// assert_eq!("true".parse(), Some(true));
- /// assert_eq!("false".parse(), Some(false));
- /// assert_eq!("not even a boolean".parse::<bool>(), None);
- /// ```
- #[inline]
- fn from_str(s: &str) -> Option<bool> {
- match s {
- "true" => Some(true),
- "false" => Some(false),
- _ => None,
- }
- }
-}
-
-/*
-Section: Creating a string
-*/
-
-/// Errors which can occur when attempting to interpret a byte slice as a `str`.
-#[deriving(Copy, Eq, PartialEq, Clone)]
-pub enum Utf8Error {
- /// An invalid byte was detected at the byte offset given.
- ///
- /// The offset is guaranteed to be in bounds of the slice in question, and
- /// the byte at the specified offset was the first invalid byte in the
- /// sequence detected.
- InvalidByte(uint),
-
- /// The byte slice was invalid because more bytes were needed but no more
- /// bytes were available.
- TooShort,
-}
-
-/// Converts a slice of bytes to a string slice without performing any
-/// allocations.
-///
-/// Once the slice has been validated as utf-8, it is transmuted in-place and
-/// returned as a '&str' instead of a '&[u8]'
-///
-/// # Failure
-///
-/// Returns `Err` if the slice is not utf-8 with a description as to why the
-/// provided slice is not utf-8.
-pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
- try!(run_utf8_validation_iterator(&mut v.iter()));
- Ok(unsafe { from_utf8_unchecked(v) })
-}
-
-/// Converts a slice of bytes to a string slice without checking
-/// that the string contains valid UTF-8.
-#[stable]
-pub unsafe fn from_utf8_unchecked<'a>(v: &'a [u8]) -> &'a str {
- mem::transmute(v)
-}
-
-/// Constructs a static string slice from a given raw pointer.
-///
-/// This function will read memory starting at `s` until it finds a 0, and then
-/// transmute the memory up to that point as a string slice, returning the
-/// corresponding `&'static str` value.
-///
-/// This function is unsafe because the caller must ensure the C string itself
-/// has the static lifetime and that the memory `s` is valid up to and including
-/// the first null byte.
-///
-/// # Panics
-///
-/// This function will panic if the string pointed to by `s` is not valid UTF-8.
-#[unstable = "may change location based on the outcome of the c_str module"]
-pub unsafe fn from_c_str(s: *const i8) -> &'static str {
- let s = s as *const u8;
- let mut len = 0u;
- while *s.offset(len as int) != 0 {
- len += 1u;
- }
- let v: &'static [u8] = ::mem::transmute(Slice { data: s, len: len });
- from_utf8(v).ok().expect("from_c_str passed invalid utf-8 data")
-}
-
-/// Something that can be used to compare against a character
-#[unstable = "definition may change as pattern-related methods are stabilized"]
-pub trait CharEq {
- /// Determine if the splitter should split at the given character
- fn matches(&mut self, char) -> bool;
- /// Indicate if this is only concerned about ASCII characters,
- /// which can allow for a faster implementation.
- fn only_ascii(&self) -> bool;
-}
-
-impl CharEq for char {
- #[inline]
- fn matches(&mut self, c: char) -> bool { *self == c }
-
- #[inline]
- fn only_ascii(&self) -> bool { (*self as uint) < 128 }
-}
-
-impl<F> CharEq for F where F: FnMut(char) -> bool {
- #[inline]
- fn matches(&mut self, c: char) -> bool { (*self)(c) }
-
- #[inline]
- fn only_ascii(&self) -> bool { false }
-}
-
-impl<'a> CharEq for &'a [char] {
- #[inline]
- fn matches(&mut self, c: char) -> bool {
- self.iter().any(|&mut m| m.matches(c))
- }
-
- #[inline]
- fn only_ascii(&self) -> bool {
- self.iter().all(|m| m.only_ascii())
- }
-}
-
-/*
-Section: Iterators
-*/
-
-/// Iterator for the char (representing *Unicode Scalar Values*) of a string
-///
-/// Created with the method `.chars()`.
-#[deriving(Clone, Copy)]
-pub struct Chars<'a> {
- iter: slice::Iter<'a, u8>
-}
-
-// Return the initial codepoint accumulator for the first byte.
-// The first byte is special, only want bottom 5 bits for width 2, 4 bits
-// for width 3, and 3 bits for width 4
-macro_rules! utf8_first_byte {
- ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
-}
-
-// return the value of $ch updated with continuation byte $byte
-macro_rules! utf8_acc_cont_byte {
- ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & CONT_MASK) as u32)
-}
-
-macro_rules! utf8_is_cont_byte {
- ($byte:expr) => (($byte & !CONT_MASK) == TAG_CONT_U8)
-}
-
-#[inline]
-fn unwrap_or_0(opt: Option<&u8>) -> u8 {
- match opt {
- Some(&byte) => byte,
- None => 0,
- }
-}
-
-impl<'a> Iterator<char> for Chars<'a> {
- #[inline]
- fn next(&mut self) -> Option<char> {
- // Decode UTF-8, using the valid UTF-8 invariant
- let x = match self.iter.next() {
- None => return None,
- Some(&next_byte) if next_byte < 128 => return Some(next_byte as char),
- Some(&next_byte) => next_byte,
- };
-
- // Multibyte case follows
- // Decode from a byte combination out of: [[[x y] z] w]
- // NOTE: Performance is sensitive to the exact formulation here
- let init = utf8_first_byte!(x, 2);
- let y = unwrap_or_0(self.iter.next());
- let mut ch = utf8_acc_cont_byte!(init, y);
- if x >= 0xE0 {
- // [[x y z] w] case
- // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
- let z = unwrap_or_0(self.iter.next());
- let y_z = utf8_acc_cont_byte!((y & CONT_MASK) as u32, z);
- ch = init << 12 | y_z;
- if x >= 0xF0 {
- // [x y z w] case
- // use only the lower 3 bits of `init`
- let w = unwrap_or_0(self.iter.next());
- ch = (init & 7) << 18 | utf8_acc_cont_byte!(y_z, w);
- }
- }
-
- // str invariant says `ch` is a valid Unicode Scalar Value
- unsafe {
- Some(mem::transmute(ch))
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) {
- let (len, _) = self.iter.size_hint();
- (len.saturating_add(3) / 4, Some(len))
- }
-}
-
-impl<'a> DoubleEndedIterator<char> for Chars<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<char> {
- let w = match self.iter.next_back() {
- None => return None,
- Some(&back_byte) if back_byte < 128 => return Some(back_byte as char),
- Some(&back_byte) => back_byte,
- };
-
- // Multibyte case follows
- // Decode from a byte combination out of: [x [y [z w]]]
- let mut ch;
- let z = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(z, 2);
- if utf8_is_cont_byte!(z) {
- let y = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(y, 3);
- if utf8_is_cont_byte!(y) {
- let x = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(x, 4);
- ch = utf8_acc_cont_byte!(ch, y);
- }
- ch = utf8_acc_cont_byte!(ch, z);
- }
- ch = utf8_acc_cont_byte!(ch, w);
-
- // str invariant says `ch` is a valid Unicode Scalar Value
- unsafe {
- Some(mem::transmute(ch))
- }
- }
-}
-
-/// External iterator for a string's characters and their byte offsets.
-/// Use with the `std::iter` module.
-#[deriving(Clone)]
-pub struct CharIndices<'a> {
- front_offset: uint,
- iter: Chars<'a>,
-}
-
-impl<'a> Iterator<(uint, char)> for CharIndices<'a> {
- #[inline]
- fn next(&mut self) -> Option<(uint, char)> {
- let (pre_len, _) = self.iter.iter.size_hint();
- match self.iter.next() {
- None => None,
- Some(ch) => {
- let index = self.front_offset;
- let (len, _) = self.iter.iter.size_hint();
- self.front_offset += pre_len - len;
- Some((index, ch))
- }
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) {
- self.iter.size_hint()
- }
-}
-
-impl<'a> DoubleEndedIterator<(uint, char)> for CharIndices<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<(uint, char)> {
- match self.iter.next_back() {
- None => None,
- Some(ch) => {
- let (len, _) = self.iter.iter.size_hint();
- let index = self.front_offset + len;
- Some((index, ch))
- }
- }
- }
-}
-
-/// External iterator for a string's bytes.
-/// Use with the `std::iter` module.
-#[stable]
-#[deriving(Clone)]
-pub struct Bytes<'a> {
- inner: Map<&'a u8, u8, slice::Iter<'a, u8>, BytesFn>,
-}
-
-/// A temporary new type wrapper that ensures that the `Bytes` iterator
-/// is cloneable.
-#[deriving(Copy)]
-struct BytesFn(fn(&u8) -> u8);
-
-impl<'a> Fn(&'a u8) -> u8 for BytesFn {
- extern "rust-call" fn call(&self, (ptr,): (&'a u8,)) -> u8 {
- (self.0)(ptr)
- }
-}
-
-impl Clone for BytesFn {
- fn clone(&self) -> BytesFn { *self }
-}
-
-/// An iterator over the substrings of a string, separated by `sep`.
-#[deriving(Clone)]
-pub struct CharSplits<'a, Sep> {
- /// The slice remaining to be iterated
- string: &'a str,
- sep: Sep,
- /// Whether an empty string at the end is allowed
- allow_trailing_empty: bool,
- only_ascii: bool,
- finished: bool,
-}
-
-/// An iterator over the substrings of a string, separated by `sep`,
-/// splitting at most `count` times.
-#[deriving(Clone)]
-pub struct CharSplitsN<'a, Sep> {
- iter: CharSplits<'a, Sep>,
- /// The number of splits remaining
- count: uint,
- invert: bool,
-}
-
-/// An iterator over the lines of a string, separated by `\n`.
-#[stable]
-pub struct Lines<'a> {
- inner: CharSplits<'a, char>,
-}
-
-/// An iterator over the lines of a string, separated by either `\n` or (`\r\n`).
-#[stable]
-pub struct LinesAny<'a> {
- inner: Map<&'a str, &'a str, Lines<'a>, fn(&str) -> &str>,
-}
-
-impl<'a, Sep> CharSplits<'a, Sep> {
- #[inline]
- fn get_end(&mut self) -> Option<&'a str> {
- if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) {
- self.finished = true;
- Some(self.string)
- } else {
- None
- }
- }
-}
-
-impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.finished { return None }
-
- let mut next_split = None;
- if self.only_ascii {
- for (idx, byte) in self.string.bytes().enumerate() {
- if self.sep.matches(byte as char) && byte < 128u8 {
- next_split = Some((idx, idx + 1));
- break;
- }
- }
- } else {
- for (idx, ch) in self.string.char_indices() {
- if self.sep.matches(ch) {
- next_split = Some((idx, self.string.char_range_at(idx).next));
- break;
- }
- }
- }
- match next_split {
- Some((a, b)) => unsafe {
- let elt = self.string.slice_unchecked(0, a);
- self.string = self.string.slice_unchecked(b, self.string.len());
- Some(elt)
- },
- None => self.get_end(),
- }
- }
-}
-
-impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str>
-for CharSplits<'a, Sep> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> {
- if self.finished { return None }
-
- if !self.allow_trailing_empty {
- self.allow_trailing_empty = true;
- match self.next_back() {
- Some(elt) if !elt.is_empty() => return Some(elt),
- _ => if self.finished { return None }
- }
- }
- let len = self.string.len();
- let mut next_split = None;
-
- if self.only_ascii {
- for (idx, byte) in self.string.bytes().enumerate().rev() {
- if self.sep.matches(byte as char) && byte < 128u8 {
- next_split = Some((idx, idx + 1));
- break;
- }
- }
- } else {
- for (idx, ch) in self.string.char_indices().rev() {
- if self.sep.matches(ch) {
- next_split = Some((idx, self.string.char_range_at(idx).next));
- break;
- }
- }
- }
- match next_split {
- Some((a, b)) => unsafe {
- let elt = self.string.slice_unchecked(b, len);
- self.string = self.string.slice_unchecked(0, a);
- Some(elt)
- },
- None => { self.finished = true; Some(self.string) }
- }
- }
-}
-
-impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplitsN<'a, Sep> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.count != 0 {
- self.count -= 1;
- if self.invert { self.iter.next_back() } else { self.iter.next() }
- } else {
- self.iter.get_end()
- }
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using naive search
-#[deriving(Clone)]
-struct NaiveSearcher {
- position: uint
-}
-
-impl NaiveSearcher {
- fn new() -> NaiveSearcher {
- NaiveSearcher { position: 0 }
- }
-
- fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
- while self.position + needle.len() <= haystack.len() {
- if haystack[self.position .. self.position + needle.len()] == needle {
- let match_pos = self.position;
- self.position += needle.len(); // add 1 for all matches
- return Some((match_pos, match_pos + needle.len()));
- } else {
- self.position += 1;
- }
- }
- None
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using two-way search
-#[deriving(Clone)]
-struct TwoWaySearcher {
- // constants
- crit_pos: uint,
- period: uint,
- byteset: u64,
-
- // variables
- position: uint,
- memory: uint
-}
-
-/*
- This is the Two-Way search algorithm, which was introduced in the paper:
- Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
-
- Here's some background information.
-
- A *word* is a string of symbols. The *length* of a word should be a familiar
- notion, and here we denote it for any word x by |x|.
- (We also allow for the possibility of the *empty word*, a word of length zero).
-
- If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
- *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
- For example, both 1 and 2 are periods for the string "aa". As another example,
- the only period of the string "abcd" is 4.
-
- We denote by period(x) the *smallest* period of x (provided that x is non-empty).
- This is always well-defined since every non-empty word x has at least one period,
- |x|. We sometimes call this *the period* of x.
-
- If u, v and x are words such that x = uv, where uv is the concatenation of u and
- v, then we say that (u, v) is a *factorization* of x.
-
- Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
- that both of the following hold
-
- - either w is a suffix of u or u is a suffix of w
- - either w is a prefix of v or v is a prefix of w
-
- then w is said to be a *repetition* for the factorization (u, v).
-
- Just to unpack this, there are four possibilities here. Let w = "abc". Then we
- might have:
-
- - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
- - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
- - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
- - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
-
- Note that the word vu is a repetition for any factorization (u,v) of x = uv,
- so every factorization has at least one repetition.
-
- If x is a string and (u, v) is a factorization for x, then a *local period* for
- (u, v) is an integer r such that there is some word w such that |w| = r and w is
- a repetition for (u, v).
-
- We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
- call this *the local period* of (u, v). Provided that x = uv is non-empty, this
- is well-defined (because each non-empty word has at least one factorization, as
- noted above).
-
- It can be proven that the following is an equivalent definition of a local period
- for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
- all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
- defined. (i.e. i > 0 and i + r < |x|).
-
- Using the above reformulation, it is easy to prove that
-
- 1 <= local_period(u, v) <= period(uv)
-
- A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
- *critical factorization*.
-
- The algorithm hinges on the following theorem, which is stated without proof:
-
- **Critical Factorization Theorem** Any word x has at least one critical
- factorization (u, v) such that |u| < period(x).
-
- The purpose of maximal_suffix is to find such a critical factorization.
-
-*/
-impl TwoWaySearcher {
- fn new(needle: &[u8]) -> TwoWaySearcher {
- let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
- let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
-
- let crit_pos;
- let period;
- if crit_pos1 > crit_pos2 {
- crit_pos = crit_pos1;
- period = period1;
- } else {
- crit_pos = crit_pos2;
- period = period2;
- }
-
- // This isn't in the original algorithm, as far as I'm aware.
- let byteset = needle.iter()
- .fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
-
- // A particularly readable explanation of what's going on here can be found
- // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
- // see the code for "Algorithm CP" on p. 323.
- //
- // What's going on is we have some critical factorization (u, v) of the
- // needle, and we want to determine whether u is a suffix of
- // v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
- // "Algorithm CP2", which is optimized for when the period of the needle
- // is large.
- if needle[..crit_pos] == needle[period.. period + crit_pos] {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: period,
- byteset: byteset,
-
- position: 0,
- memory: 0
- }
- } else {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
- byteset: byteset,
-
- position: 0,
- memory: uint::MAX // Dummy value to signify that the period is long
- }
- }
- }
-
- // One of the main ideas of Two-Way is that we factorize the needle into
- // two halves, (u, v), and begin trying to find v in the haystack by scanning
- // left to right. If v matches, we try to match u by scanning right to left.
- // How far we can jump when we encounter a mismatch is all based on the fact
- // that (u, v) is a critical factorization for the needle.
- #[inline]
- fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
- 'search: loop {
- // Check that we have room to search in
- if self.position + needle.len() > haystack.len() {
- return None;
- }
-
- // Quickly skip by large portions unrelated to our substring
- if (self.byteset >>
- ((haystack[self.position + needle.len() - 1] & 0x3f)
- as uint)) & 1 == 0 {
- self.position += needle.len();
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
-
- // See if the right part of the needle matches
- let start = if long_period { self.crit_pos }
- else { cmp::max(self.crit_pos, self.memory) };
- for i in range(start, needle.len()) {
- if needle[i] != haystack[self.position + i] {
- self.position += i - self.crit_pos + 1;
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
- }
-
- // See if the left part of the needle matches
- let start = if long_period { 0 } else { self.memory };
- for i in range(start, self.crit_pos).rev() {
- if needle[i] != haystack[self.position + i] {
- self.position += self.period;
- if !long_period {
- self.memory = needle.len() - self.period;
- }
- continue 'search;
- }
- }
-
- // We have found a match!
- let match_pos = self.position;
- self.position += needle.len(); // add self.period for all matches
- if !long_period {
- self.memory = 0; // set to needle.len() - self.period for all matches
- }
- return Some((match_pos, match_pos + needle.len()));
- }
- }
-
- // Computes a critical factorization (u, v) of `arr`.
- // Specifically, returns (i, p), where i is the starting index of v in some
- // critical factorization (u, v) and p = period(v)
- #[inline]
- fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
- let mut left = -1; // Corresponds to i in the paper
- let mut right = 0; // Corresponds to j in the paper
- let mut offset = 1; // Corresponds to k in the paper
- let mut period = 1; // Corresponds to p in the paper
-
- while right + offset < arr.len() {
- let a;
- let b;
- if reversed {
- a = arr[left + offset];
- b = arr[right + offset];
- } else {
- a = arr[right + offset];
- b = arr[left + offset];
- }
- if a < b {
- // Suffix is smaller, period is entire prefix so far.
- right += offset;
- offset = 1;
- period = right - left;
- } else if a == b {
- // Advance through repetition of the current period.
- if offset == period {
- right += offset;
- offset = 1;
- } else {
- offset += 1;
- }
- } else {
- // Suffix is larger, start over from current location.
- left = right;
- right += 1;
- offset = 1;
- period = 1;
- }
- }
- (left + 1, period)
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using a dynamically chosen search algorithm
-#[deriving(Clone)]
-enum Searcher {
- Naive(NaiveSearcher),
- TwoWay(TwoWaySearcher),
- TwoWayLong(TwoWaySearcher)
-}
-
-impl Searcher {
- fn new(haystack: &[u8], needle: &[u8]) -> Searcher {
- // FIXME: Tune this.
- // FIXME(#16715): This unsigned integer addition will probably not
- // overflow because that would mean that the memory almost solely
- // consists of the needle. Needs #16715 to be formally fixed.
- if needle.len() + 20 > haystack.len() {
- Naive(NaiveSearcher::new())
- } else {
- let searcher = TwoWaySearcher::new(needle);
- if searcher.memory == uint::MAX { // If the period is long
- TwoWayLong(searcher)
- } else {
- TwoWay(searcher)
- }
- }
- }
-}
-
-/// An iterator over the start and end indices of the matches of a
-/// substring within a larger string
-#[deriving(Clone)]
-pub struct MatchIndices<'a> {
- // constants
- haystack: &'a str,
- needle: &'a str,
- searcher: Searcher
-}
-
-/// An iterator over the substrings of a string separated by a given
-/// search string
-#[deriving(Clone)]
-pub struct StrSplits<'a> {
- it: MatchIndices<'a>,
- last_end: uint,
- finished: bool
-}
-
-impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> {
- #[inline]
- fn next(&mut self) -> Option<(uint, uint)> {
- match self.searcher {
- Naive(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes()),
- TwoWay(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
- TwoWayLong(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true)
- }
- }
-}
-
-impl<'a> Iterator<&'a str> for StrSplits<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.finished { return None; }
-
- match self.it.next() {
- Some((from, to)) => {
- let ret = Some(self.it.haystack.slice(self.last_end, from));
- self.last_end = to;
- ret
- }
- None => {
- self.finished = true;
- Some(self.it.haystack.slice(self.last_end, self.it.haystack.len()))
- }
- }
- }
-}
-
-/*
-Section: Comparing strings
-*/
-
-// share the implementation of the lang-item vs. non-lang-item
-// eq_slice.
-/// NOTE: This function is (ab)used in rustc::middle::trans::_match
-/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
-#[inline]
-fn eq_slice_(a: &str, b: &str) -> bool {
- #[allow(improper_ctypes)]
- extern { fn memcmp(s1: *const i8, s2: *const i8, n: uint) -> i32; }
- a.len() == b.len() && unsafe {
- memcmp(a.as_ptr() as *const i8,
- b.as_ptr() as *const i8,
- a.len()) == 0
- }
-}
-
-/// Bytewise slice equality
-/// NOTE: This function is (ab)used in rustc::middle::trans::_match
-/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
-#[lang="str_eq"]
-#[inline]
-fn eq_slice(a: &str, b: &str) -> bool {
- eq_slice_(a, b)
-}
-
-/*
-Section: Misc
-*/
-
-/// Walk through `iter` checking that it's a valid UTF-8 sequence,
-/// returning `true` in that case, or, if it is invalid, `false` with
-/// `iter` reset such that it is pointing at the first byte in the
-/// invalid sequence.
-#[inline(always)]
-fn run_utf8_validation_iterator(iter: &mut slice::Iter<u8>)
- -> Result<(), Utf8Error> {
- let whole = iter.as_slice();
- loop {
- // save the current thing we're pointing at.
- let old = *iter;
-
- // restore the iterator we had at the start of this codepoint.
- macro_rules! err (() => { {
- *iter = old;
- return Err(Utf8Error::InvalidByte(whole.len() - iter.as_slice().len()))
- } });
- macro_rules! next ( () => {
- match iter.next() {
- Some(a) => *a,
- // we needed data, but there was none: error!
- None => return Err(Utf8Error::TooShort),
- }
- });
-
- let first = match iter.next() {
- Some(&b) => b,
- // we're at the end of the iterator and a codepoint
- // boundary at the same time, so this string is valid.
- None => return Ok(())
- };
-
- // ASCII characters are always valid, so only large
- // bytes need more examination.
- if first >= 128 {
- let w = UTF8_CHAR_WIDTH[first as uint] as uint;
- let second = next!();
- // 2-byte encoding is for codepoints \u{0080} to \u{07ff}
- // first C2 80 last DF BF
- // 3-byte encoding is for codepoints \u{0800} to \u{ffff}
- // first E0 A0 80 last EF BF BF
- // excluding surrogates codepoints \u{d800} to \u{dfff}
- // ED A0 80 to ED BF BF
- // 4-byte encoding is for codepoints \u{1000}0 to \u{10ff}ff
- // first F0 90 80 80 last F4 8F BF BF
- //
- // Use the UTF-8 syntax from the RFC
- //
- // https://tools.ietf.org/html/rfc3629
- // UTF8-1 = %x00-7F
- // UTF8-2 = %xC2-DF UTF8-tail
- // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
- // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
- // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
- // %xF4 %x80-8F 2( UTF8-tail )
- match w {
- 2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
- 3 => {
- match (first, second, next!() & !CONT_MASK) {
- (0xE0 , 0xA0 ... 0xBF, TAG_CONT_U8) |
- (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
- (0xED , 0x80 ... 0x9F, TAG_CONT_U8) |
- (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
- _ => err!()
- }
- }
- 4 => {
- match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
- (0xF0 , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF4 , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
- _ => err!()
- }
- }
- _ => err!()
- }
- }
- }
-}
-
-/// Determines if a vector of bytes contains valid UTF-8.
-#[deprecated = "call from_utf8 instead"]
-pub fn is_utf8(v: &[u8]) -> bool {
- run_utf8_validation_iterator(&mut v.iter()).is_ok()
-}
-
-/// Deprecated function
-#[deprecated = "this function will be removed"]
-pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
- match v.iter().position(|c| *c == 0) {
- // don't include the 0
- Some(i) => v[..i],
- None => v
- }
-}
-
-// https://tools.ietf.org/html/rfc3629
-static UTF8_CHAR_WIDTH: [u8, ..256] = [
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
-0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
-2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
-3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
-4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
-];
-
-/// Given a first byte, determine how many bytes are in this UTF-8 character
-#[inline]
-#[deprecated = "this function has moved to libunicode"]
-pub fn utf8_char_width(b: u8) -> uint {
- return UTF8_CHAR_WIDTH[b as uint] as uint;
-}
-
-/// Struct that contains a `char` and the index of the first byte of
-/// the next `char` in a string. This can be used as a data structure
-/// for iterating over the UTF-8 bytes of a string.
-#[deriving(Copy)]
-#[unstable = "naming is uncertain with container conventions"]
-pub struct CharRange {
- /// Current `char`
- pub ch: char,
- /// Index of the first byte of the next `char`
- pub next: uint,
-}
-
-/// Mask of the value bits of a continuation byte
-const CONT_MASK: u8 = 0b0011_1111u8;
-/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte
-const TAG_CONT_U8: u8 = 0b1000_0000u8;
-
-/// Unsafe operations
-#[deprecated]
-pub mod raw {
- use ptr::RawPtr;
- use raw::Slice;
- use slice::SliceExt;
- use str::StrExt;
-
- /// Converts a slice of bytes to a string slice without checking
- /// that the string contains valid UTF-8.
- #[deprecated = "renamed to str::from_utf8_unchecked"]
- pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
- super::from_utf8_unchecked(v)
- }
-
- /// Form a slice from a C string. Unsafe because the caller must ensure the
- /// C string has the static lifetime, or else the return value may be
- /// invalidated later.
- #[deprecated = "renamed to str::from_c_str"]
- pub unsafe fn c_str_to_static_slice(s: *const i8) -> &'static str {
- let s = s as *const u8;
- let mut curr = s;
- let mut len = 0u;
- while *curr != 0u8 {
- len += 1u;
- curr = s.offset(len as int);
- }
- let v = Slice { data: s, len: len };
- super::from_utf8(::mem::transmute(v)).unwrap()
- }
-
- /// Takes a bytewise (not UTF-8) slice from a string.
- ///
- /// Returns the substring from [`begin`..`end`).
- ///
- /// # Panics
- ///
- /// If begin is greater than end.
- /// If end is greater than the length of the string.
- #[inline]
- #[deprecated = "call the slice_unchecked method instead"]
- pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
- assert!(begin <= end);
- assert!(end <= s.len());
- s.slice_unchecked(begin, end)
- }
-
- /// Takes a bytewise (not UTF-8) slice from a string.
- ///
- /// Returns the substring from [`begin`..`end`).
- ///
- /// Caller must check slice boundaries!
- #[inline]
- #[deprecated = "this has moved to a method on `str` directly"]
- pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
- s.slice_unchecked(begin, end)
- }
-}
-
-/*
-Section: Trait implementations
-*/
-
-#[allow(missing_docs)]
-pub mod traits {
- use cmp::{Ordering, Ord, PartialEq, PartialOrd, Equiv, Eq};
- use cmp::Ordering::{Less, Equal, Greater};
- use iter::IteratorExt;
- use option::Option;
- use option::Option::Some;
- use ops;
- use str::{Str, StrExt, eq_slice};
-
- impl Ord for str {
- #[inline]
- fn cmp(&self, other: &str) -> Ordering {
- for (s_b, o_b) in self.bytes().zip(other.bytes()) {
- match s_b.cmp(&o_b) {
- Greater => return Greater,
- Less => return Less,
- Equal => ()
- }
- }
-
- self.len().cmp(&other.len())
- }
- }
-
- impl PartialEq for str {
- #[inline]
- fn eq(&self, other: &str) -> bool {
- eq_slice(self, other)
- }
- #[inline]
- fn ne(&self, other: &str) -> bool { !(*self).eq(other) }
- }
-
- impl Eq for str {}
-
- impl PartialOrd for str {
- #[inline]
- fn partial_cmp(&self, other: &str) -> Option<Ordering> {
- Some(self.cmp(other))
- }
- }
-
- #[allow(deprecated)]
- #[deprecated = "Use overloaded `core::cmp::PartialEq`"]
- impl<S: Str> Equiv<S> for str {
- #[inline]
- fn equiv(&self, other: &S) -> bool { eq_slice(self, other.as_slice()) }
- }
-
- impl ops::Slice<uint, str> for str {
- #[inline]
- fn as_slice_<'a>(&'a self) -> &'a str {
- self
- }
-
- #[inline]
- fn slice_from_or_fail<'a>(&'a self, from: &uint) -> &'a str {
- self.slice_from(*from)
- }
-
- #[inline]
- fn slice_to_or_fail<'a>(&'a self, to: &uint) -> &'a str {
- self.slice_to(*to)
- }
-
- #[inline]
- fn slice_or_fail<'a>(&'a self, from: &uint, to: &uint) -> &'a str {
- self.slice(*from, *to)
- }
- }
-}
-
-/// Any string that can be represented as a slice
-#[unstable = "Instead of taking this bound generically, this trait will be \
- replaced with one of slicing syntax, deref coercions, or \
- a more generic conversion trait"]
-pub trait Str for Sized? {
- /// Work with `self` as a slice.
- fn as_slice<'a>(&'a self) -> &'a str;
-}
-
-#[allow(deprecated)]
-impl Str for str {
- #[inline]
- fn as_slice<'a>(&'a self) -> &'a str { self }
-}
-
-#[allow(deprecated)]
-impl<'a, Sized? S> Str for &'a S where S: Str {
- #[inline]
- fn as_slice(&self) -> &str { Str::as_slice(*self) }
-}
-
-/// Methods for string slices
-#[allow(missing_docs)]
-pub trait StrExt for Sized? {
- // NB there are no docs here are they're all located on the StrExt trait in
- // libcollections, not here.
-
- fn contains(&self, needle: &str) -> bool;
- fn contains_char(&self, needle: char) -> bool;
- fn chars<'a>(&'a self) -> Chars<'a>;
- fn bytes<'a>(&'a self) -> Bytes<'a>;
- fn char_indices<'a>(&'a self) -> CharIndices<'a>;
- fn split<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
- fn splitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
- fn split_terminator<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
- fn rsplitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a>;
- fn split_str<'a>(&'a self, &'a str) -> StrSplits<'a>;
- fn lines<'a>(&'a self) -> Lines<'a>;
- fn lines_any<'a>(&'a self) -> LinesAny<'a>;
- fn char_len(&self) -> uint;
- fn slice<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- fn slice_from<'a>(&'a self, begin: uint) -> &'a str;
- fn slice_to<'a>(&'a self, end: uint) -> &'a str;
- fn slice_chars<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- unsafe fn slice_unchecked<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- fn starts_with(&self, needle: &str) -> bool;
- fn ends_with(&self, needle: &str) -> bool;
- fn trim_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn trim_left_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn trim_right_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn is_char_boundary(&self, index: uint) -> bool;
- fn char_range_at(&self, start: uint) -> CharRange;
- fn char_range_at_reverse(&self, start: uint) -> CharRange;
- fn char_at(&self, i: uint) -> char;
- fn char_at_reverse(&self, i: uint) -> char;
- fn as_bytes<'a>(&'a self) -> &'a [u8];
- fn find<C: CharEq>(&self, search: C) -> Option<uint>;
- fn rfind<C: CharEq>(&self, search: C) -> Option<uint>;
- fn find_str(&self, &str) -> Option<uint>;
- fn slice_shift_char<'a>(&'a self) -> Option<(char, &'a str)>;
- fn subslice_offset(&self, inner: &str) -> uint;
- fn as_ptr(&self) -> *const u8;
- fn len(&self) -> uint;
- fn is_empty(&self) -> bool;
-}
-
-#[inline(never)]
-fn slice_error_fail(s: &str, begin: uint, end: uint) -> ! {
- assert!(begin <= end);
- panic!("index {} and/or {} in `{}` do not lie on character boundary",
- begin, end, s);
-}
-
-impl StrExt for str {
- #[inline]
- fn contains(&self, needle: &str) -> bool {
- self.find_str(needle).is_some()
- }
-
- #[inline]
- fn contains_char(&self, needle: char) -> bool {
- self.find(needle).is_some()
- }
-
- #[inline]
- fn chars(&self) -> Chars {
- Chars{iter: self.as_bytes().iter()}
- }
-
- #[inline]
- fn bytes(&self) -> Bytes {
- fn deref(&x: &u8) -> u8 { x }
-
- Bytes { inner: self.as_bytes().iter().map(BytesFn(deref)) }
- }
-
- #[inline]
- fn char_indices(&self) -> CharIndices {
- CharIndices { front_offset: 0, iter: self.chars() }
- }
-
- #[inline]
- fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- CharSplits {
- string: self,
- only_ascii: sep.only_ascii(),
- sep: sep,
- allow_trailing_empty: true,
- finished: false,
- }
- }
-
- #[inline]
- fn splitn<Sep: CharEq>(&self, count: uint, sep: Sep)
- -> CharSplitsN<Sep> {
- CharSplitsN {
- iter: self.split(sep),
- count: count,
- invert: false,
- }
- }
-
- #[inline]
- fn split_terminator<Sep: CharEq>(&self, sep: Sep)
- -> CharSplits<Sep> {
- CharSplits {
- allow_trailing_empty: false,
- ..self.split(sep)
- }
- }
-
- #[inline]
- fn rsplitn<Sep: CharEq>(&self, count: uint, sep: Sep)
- -> CharSplitsN<Sep> {
- CharSplitsN {
- iter: self.split(sep),
- count: count,
- invert: true,
- }
- }
-
- #[inline]
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
- assert!(!sep.is_empty());
- MatchIndices {
- haystack: self,
- needle: sep,
- searcher: Searcher::new(self.as_bytes(), sep.as_bytes())
- }
- }
-
- #[inline]
- fn split_str<'a>(&'a self, sep: &'a str) -> StrSplits<'a> {
- StrSplits {
- it: self.match_indices(sep),
- last_end: 0,
- finished: false
- }
- }
-
- #[inline]
- fn lines(&self) -> Lines {
- Lines { inner: self.split_terminator('\n') }
- }
-
- fn lines_any(&self) -> LinesAny {
- fn f(line: &str) -> &str {
- let l = line.len();
- if l > 0 && line.as_bytes()[l - 1] == b'\r' { line.slice(0, l - 1) }
- else { line }
- }
-
- let f: fn(&str) -> &str = f; // coerce to fn pointer
- LinesAny { inner: self.lines().map(f) }
- }
-
- #[inline]
- fn char_len(&self) -> uint { self.chars().count() }
-
- #[inline]
- fn slice(&self, begin: uint, end: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if begin <= end &&
- self.is_char_boundary(begin) &&
- self.is_char_boundary(end) {
- unsafe { self.slice_unchecked(begin, end) }
- } else {
- slice_error_fail(self, begin, end)
- }
- }
-
- #[inline]
- fn slice_from(&self, begin: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(begin) {
- unsafe { self.slice_unchecked(begin, self.len()) }
- } else {
- slice_error_fail(self, begin, self.len())
- }
- }
-
- #[inline]
- fn slice_to(&self, end: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(end) {
- unsafe { self.slice_unchecked(0, end) }
- } else {
- slice_error_fail(self, 0, end)
- }
- }
-
- fn slice_chars(&self, begin: uint, end: uint) -> &str {
- assert!(begin <= end);
- let mut count = 0;
- let mut begin_byte = None;
- let mut end_byte = None;
-
- // This could be even more efficient by not decoding,
- // only finding the char boundaries
- for (idx, _) in self.char_indices() {
- if count == begin { begin_byte = Some(idx); }
- if count == end { end_byte = Some(idx); break; }
- count += 1;
- }
- if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
- if end_byte.is_none() && count == end { end_byte = Some(self.len()) }
-
- match (begin_byte, end_byte) {
- (None, _) => panic!("slice_chars: `begin` is beyond end of string"),
- (_, None) => panic!("slice_chars: `end` is beyond end of string"),
- (Some(a), Some(b)) => unsafe { self.slice_unchecked(a, b) }
- }
- }
-
- #[inline]
- unsafe fn slice_unchecked(&self, begin: uint, end: uint) -> &str {
- mem::transmute(Slice {
- data: self.as_ptr().offset(begin as int),
- len: end - begin,
- })
- }
-
- #[inline]
- fn starts_with(&self, needle: &str) -> bool {
- let n = needle.len();
- self.len() >= n && needle.as_bytes() == self.as_bytes()[..n]
- }
-
- #[inline]
- fn ends_with(&self, needle: &str) -> bool {
- let (m, n) = (self.len(), needle.len());
- m >= n && needle.as_bytes() == self.as_bytes()[m-n..]
- }
-
- #[inline]
- fn trim_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- let cur = match self.find(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(i) => unsafe { self.slice_unchecked(i, self.len()) }
- };
- match cur.rfind(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(i) => {
- let right = cur.char_range_at(i).next;
- unsafe { cur.slice_unchecked(0, right) }
- }
- }
- }
-
- #[inline]
- fn trim_left_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- match self.find(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(first) => unsafe { self.slice_unchecked(first, self.len()) }
- }
- }
-
- #[inline]
- fn trim_right_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- match self.rfind(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(last) => {
- let next = self.char_range_at(last).next;
- unsafe { self.slice_unchecked(0u, next) }
- }
- }
- }
-
- #[inline]
- fn is_char_boundary(&self, index: uint) -> bool {
- if index == self.len() { return true; }
- match self.as_bytes().get(index) {
- None => false,
- Some(&b) => b < 128u8 || b >= 192u8,
- }
- }
-
- #[inline]
- fn char_range_at(&self, i: uint) -> CharRange {
- if self.as_bytes()[i] < 128u8 {
- return CharRange {ch: self.as_bytes()[i] as char, next: i + 1 };
- }
-
- // Multibyte case is a fn to allow char_range_at to inline cleanly
- fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
- let mut val = s.as_bytes()[i] as u32;
- let w = UTF8_CHAR_WIDTH[val as uint] as uint;
- assert!((w != 0));
-
- val = utf8_first_byte!(val, w);
- val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
-
- return CharRange {ch: unsafe { mem::transmute(val) }, next: i + w};
- }
-
- return multibyte_char_range_at(self, i);
- }
-
- #[inline]
- fn char_range_at_reverse(&self, start: uint) -> CharRange {
- let mut prev = start;
-
- prev = prev.saturating_sub(1);
- if self.as_bytes()[prev] < 128 {
- return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
- }
-
- // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
- fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
- // while there is a previous byte == 10......
- while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
- i -= 1u;
- }
-
- let mut val = s.as_bytes()[i] as u32;
- let w = UTF8_CHAR_WIDTH[val as uint] as uint;
- assert!((w != 0));
-
- val = utf8_first_byte!(val, w);
- val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
-
- return CharRange {ch: unsafe { mem::transmute(val) }, next: i};
- }
-
- return multibyte_char_range_at_reverse(self, prev);
- }
-
- #[inline]
- fn char_at(&self, i: uint) -> char {
- self.char_range_at(i).ch
- }
-
- #[inline]
- fn char_at_reverse(&self, i: uint) -> char {
- self.char_range_at_reverse(i).ch
- }
-
- #[inline]
- fn as_bytes(&self) -> &[u8] {
- unsafe { mem::transmute(self) }
- }
-
- fn find<C: CharEq>(&self, mut search: C) -> Option<uint> {
- if search.only_ascii() {
- self.bytes().position(|b| search.matches(b as char))
- } else {
- for (index, c) in self.char_indices() {
- if search.matches(c) { return Some(index); }
- }
- None
- }
- }
-
- fn rfind<C: CharEq>(&self, mut search: C) -> Option<uint> {
- if search.only_ascii() {
- self.bytes().rposition(|b| search.matches(b as char))
- } else {
- for (index, c) in self.char_indices().rev() {
- if search.matches(c) { return Some(index); }
- }
- None
- }
- }
-
- fn find_str(&self, needle: &str) -> Option<uint> {
- if needle.is_empty() {
- Some(0)
- } else {
- self.match_indices(needle)
- .next()
- .map(|(start, _end)| start)
- }
- }
-
- #[inline]
- fn slice_shift_char(&self) -> Option<(char, &str)> {
- if self.is_empty() {
- None
- } else {
- let CharRange {ch, next} = self.char_range_at(0u);
- let next_s = unsafe { self.slice_unchecked(next, self.len()) };
- Some((ch, next_s))
- }
- }
-
- fn subslice_offset(&self, inner: &str) -> uint {
- let a_start = self.as_ptr() as uint;
- let a_end = a_start + self.len();
- let b_start = inner.as_ptr() as uint;
- let b_end = b_start + inner.len();
-
- assert!(a_start <= b_start);
- assert!(b_end <= a_end);
- b_start - a_start
- }
-
- #[inline]
- fn as_ptr(&self) -> *const u8 {
- self.repr().data
- }
-
- #[inline]
- fn len(&self) -> uint { self.repr().len }
-
- #[inline]
- fn is_empty(&self) -> bool { self.len() == 0 }
-}
-
-#[stable]
-impl<'a> Default for &'a str {
- #[stable]
- fn default() -> &'a str { "" }
-}
-
-impl<'a> Iterator<&'a str> for Lines<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<&'a str> for Lines<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
-}
-impl<'a> Iterator<&'a str> for LinesAny<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<&'a str> for LinesAny<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
-}
-impl<'a> Iterator<u8> for Bytes<'a> {
- #[inline]
- fn next(&mut self) -> Option<u8> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<u8> for Bytes<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<u8> { self.inner.next_back() }
-}
-impl<'a> ExactSizeIterator<u8> for Bytes<'a> {}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15679
+
+//! String manipulation
+//!
+//! For more details, see std::str
+
+#![doc(primitive = "str")]
+
+use self::Searcher::{Naive, TwoWay, TwoWayLong};
+
+use cmp::{mod, Eq};
+use default::Default;
+use iter::range;
+use iter::{DoubleEndedIteratorExt, ExactSizeIterator};
+use iter::{Map, Iterator, IteratorExt, DoubleEndedIterator};
+use kinds::Sized;
+use mem;
+use num::Int;
+use ops::{Fn, FnMut};
+use option::Option::{mod, None, Some};
+use ptr::PtrExt;
+use raw::{Repr, Slice};
+use result::Result::{mod, Ok, Err};
+use slice::{mod, SliceExt};
+use uint;
+
+macro_rules! delegate_iter {
+ (exact $te:ty in $ti:ty) => {
+ delegate_iter!{$te in $ti}
+ impl<'a> ExactSizeIterator<$te> for $ti {
+ #[inline]
+ fn rposition<P>(&mut self, predicate: P) -> Option<uint> where P: FnMut($te) -> bool{
+ self.0.rposition(predicate)
+ }
+ #[inline]
+ fn len(&self) -> uint {
+ self.0.len()
+ }
+ }
+ };
+ ($te:ty in $ti:ty) => {
+ impl<'a> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ impl<'a> DoubleEndedIterator<$te> for $ti {
+ #[inline]
+ fn next_back(&mut self) -> Option<$te> {
+ self.0.next_back()
+ }
+ }
+ };
+ (pattern $te:ty in $ti:ty) => {
+ impl<'a, P: CharEq> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ impl<'a, P: CharEq> DoubleEndedIterator<$te> for $ti {
+ #[inline]
+ fn next_back(&mut self) -> Option<$te> {
+ self.0.next_back()
+ }
+ }
+ };
+ (pattern forward $te:ty in $ti:ty) => {
+ impl<'a, P: CharEq> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ }
+}
+
+/// A trait to abstract the idea of creating a new instance of a type from a
+/// string.
+// FIXME(#17307): there should be an `E` associated type for a `Result` return
+#[unstable = "will return a Result once associated types are working"]
+pub trait FromStr {
+ /// Parses a string `s` to return an optional value of this type. If the
+ /// string is ill-formatted, the None is returned.
+ fn from_str(s: &str) -> Option<Self>;
+}
+
+/// A utility function that just calls FromStr::from_str
+#[deprecated = "call the .parse() method on the string instead"]
+pub fn from_str<A: FromStr>(s: &str) -> Option<A> {
+ FromStr::from_str(s)
+}
+
+impl FromStr for bool {
+ /// Parse a `bool` from a string.
+ ///
+ /// Yields an `Option<bool>`, because `s` may or may not actually be parseable.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// assert_eq!("true".parse(), Some(true));
+ /// assert_eq!("false".parse(), Some(false));
+ /// assert_eq!("not even a boolean".parse::<bool>(), None);
+ /// ```
+ #[inline]
+ fn from_str(s: &str) -> Option<bool> {
+ match s {
+ "true" => Some(true),
+ "false" => Some(false),
+ _ => None,
+ }
+ }
+}
+
+/*
+Section: Creating a string
+*/
+
+/// Errors which can occur when attempting to interpret a byte slice as a `str`.
+#[deriving(Copy, Eq, PartialEq, Clone)]
+pub enum Utf8Error {
+ /// An invalid byte was detected at the byte offset given.
+ ///
+ /// The offset is guaranteed to be in bounds of the slice in question, and
+ /// the byte at the specified offset was the first invalid byte in the
+ /// sequence detected.
+ InvalidByte(uint),
+
+ /// The byte slice was invalid because more bytes were needed but no more
+ /// bytes were available.
+ TooShort,
+}
+
+/// Converts a slice of bytes to a string slice without performing any
+/// allocations.
+///
+/// Once the slice has been validated as utf-8, it is transmuted in-place and
+/// returned as a `&str` instead of a `&[u8]`
+///
+/// # Failure
+///
+/// Returns `Err` if the slice is not utf-8 with a description as to why the
+/// provided slice is not utf-8.
+pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
+ try!(run_utf8_validation_iterator(&mut v.iter()));
+ Ok(unsafe { from_utf8_unchecked(v) })
+}
+
+/// Converts a slice of bytes to a string slice without checking
+/// that the string contains valid UTF-8.
+#[stable]
+pub unsafe fn from_utf8_unchecked<'a>(v: &'a [u8]) -> &'a str {
+ mem::transmute(v)
+}
+
+/// Constructs a static string slice from a given raw pointer.
+///
+/// This function will read memory starting at `s` until it finds a 0, and then
+/// transmute the memory up to that point as a string slice, returning the
+/// corresponding `&'static str` value.
+///
+/// This function is unsafe because the caller must ensure the C string itself
+/// has the static lifetime and that the memory `s` is valid up to and including
+/// the first null byte.
+///
+/// # Panics
+///
+/// This function will panic if the string pointed to by `s` is not valid UTF-8.
+#[unstable = "may change location based on the outcome of the c_str module"]
+pub unsafe fn from_c_str(s: *const i8) -> &'static str {
+ let s = s as *const u8;
+ let mut len = 0u;
+ while *s.offset(len as int) != 0 {
+ len += 1u;
+ }
+ let v: &'static [u8] = ::mem::transmute(Slice { data: s, len: len });
+ from_utf8(v).ok().expect("from_c_str passed invalid utf-8 data")
+}
+
+/// Something that can be used to compare against a character
+#[unstable = "definition may change as pattern-related methods are stabilized"]
+pub trait CharEq {
+ /// Determine if the splitter should split at the given character
+ fn matches(&mut self, char) -> bool;
+ /// Indicate if this is only concerned about ASCII characters,
+ /// which can allow for a faster implementation.
+ fn only_ascii(&self) -> bool;
+}
+
+impl CharEq for char {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool { *self == c }
+
+ #[inline]
+ fn only_ascii(&self) -> bool { (*self as uint) < 128 }
+}
+
+impl<F> CharEq for F where F: FnMut(char) -> bool {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool { (*self)(c) }
+
+ #[inline]
+ fn only_ascii(&self) -> bool { false }
+}
+
+impl<'a> CharEq for &'a [char] {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ self.iter().any(|&mut m| m.matches(c))
+ }
+
+ #[inline]
+ fn only_ascii(&self) -> bool {
+ self.iter().all(|m| m.only_ascii())
+ }
+}
+
+/*
+Section: Iterators
+*/
+
+/// Iterator for the char (representing *Unicode Scalar Values*) of a string
+///
+/// Created with the method `.chars()`.
+#[deriving(Clone, Copy)]
+pub struct Chars<'a> {
+ iter: slice::Iter<'a, u8>
+}
+
+// Return the initial codepoint accumulator for the first byte.
+// The first byte is special, only want bottom 5 bits for width 2, 4 bits
+// for width 3, and 3 bits for width 4
+macro_rules! utf8_first_byte {
+ ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
+}
+
+// return the value of $ch updated with continuation byte $byte
+macro_rules! utf8_acc_cont_byte {
+ ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & CONT_MASK) as u32)
+}
+
+macro_rules! utf8_is_cont_byte {
+ ($byte:expr) => (($byte & !CONT_MASK) == TAG_CONT_U8)
+}
+
+#[inline]
+fn unwrap_or_0(opt: Option<&u8>) -> u8 {
+ match opt {
+ Some(&byte) => byte,
+ None => 0,
+ }
+}
+
+impl<'a> Iterator<char> for Chars<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ // Decode UTF-8, using the valid UTF-8 invariant
+ let x = match self.iter.next() {
+ None => return None,
+ Some(&next_byte) if next_byte < 128 => return Some(next_byte as char),
+ Some(&next_byte) => next_byte,
+ };
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [[[x y] z] w]
+ // NOTE: Performance is sensitive to the exact formulation here
+ let init = utf8_first_byte!(x, 2);
+ let y = unwrap_or_0(self.iter.next());
+ let mut ch = utf8_acc_cont_byte!(init, y);
+ if x >= 0xE0 {
+ // [[x y z] w] case
+ // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
+ let z = unwrap_or_0(self.iter.next());
+ let y_z = utf8_acc_cont_byte!((y & CONT_MASK) as u32, z);
+ ch = init << 12 | y_z;
+ if x >= 0xF0 {
+ // [x y z w] case
+ // use only the lower 3 bits of `init`
+ let w = unwrap_or_0(self.iter.next());
+ ch = (init & 7) << 18 | utf8_acc_cont_byte!(y_z, w);
+ }
+ }
+
+ // str invariant says `ch` is a valid Unicode Scalar Value
+ unsafe {
+ Some(mem::transmute(ch))
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ let (len, _) = self.iter.size_hint();
+ (len.saturating_add(3) / 4, Some(len))
+ }
+}
+
+impl<'a> DoubleEndedIterator<char> for Chars<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ let w = match self.iter.next_back() {
+ None => return None,
+ Some(&back_byte) if back_byte < 128 => return Some(back_byte as char),
+ Some(&back_byte) => back_byte,
+ };
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [x [y [z w]]]
+ let mut ch;
+ let z = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(z, 2);
+ if utf8_is_cont_byte!(z) {
+ let y = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(y, 3);
+ if utf8_is_cont_byte!(y) {
+ let x = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(x, 4);
+ ch = utf8_acc_cont_byte!(ch, y);
+ }
+ ch = utf8_acc_cont_byte!(ch, z);
+ }
+ ch = utf8_acc_cont_byte!(ch, w);
+
+ // str invariant says `ch` is a valid Unicode Scalar Value
+ unsafe {
+ Some(mem::transmute(ch))
+ }
+ }
+}
+
+/// External iterator for a string's characters and their byte offsets.
+/// Use with the `std::iter` module.
+#[deriving(Clone)]
+pub struct CharIndices<'a> {
+ front_offset: uint,
+ iter: Chars<'a>,
+}
+
+impl<'a> Iterator<(uint, char)> for CharIndices<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<(uint, char)> {
+ let (pre_len, _) = self.iter.iter.size_hint();
+ match self.iter.next() {
+ None => None,
+ Some(ch) => {
+ let index = self.front_offset;
+ let (len, _) = self.iter.iter.size_hint();
+ self.front_offset += pre_len - len;
+ Some((index, ch))
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<'a> DoubleEndedIterator<(uint, char)> for CharIndices<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<(uint, char)> {
+ match self.iter.next_back() {
+ None => None,
+ Some(ch) => {
+ let (len, _) = self.iter.iter.size_hint();
+ let index = self.front_offset + len;
+ Some((index, ch))
+ }
+ }
+ }
+}
+
+/// External iterator for a string's bytes.
+/// Use with the `std::iter` module.
+///
+/// Created with `StrExt::bytes`
+#[stable]
+#[deriving(Clone)]
+pub struct Bytes<'a>(Map<&'a u8, u8, slice::Iter<'a, u8>, BytesDeref>);
+delegate_iter!{exact u8 in Bytes<'a>}
+
+/// A temporary fn new type that ensures that the `Bytes` iterator
+/// is cloneable.
+#[deriving(Copy, Clone)]
+struct BytesDeref;
+
+impl<'a> Fn(&'a u8) -> u8 for BytesDeref {
+ #[inline]
+ extern "rust-call" fn call(&self, (ptr,): (&'a u8,)) -> u8 {
+ *ptr
+ }
+}
+
+/// An iterator over the substrings of a string, separated by `sep`.
+#[deriving(Clone)]
+#[deprecated = "Type is now named `Split` or `SplitTerminator`"]
+pub struct CharSplits<'a, Sep> {
+ /// The slice remaining to be iterated
+ string: &'a str,
+ sep: Sep,
+ /// Whether an empty string at the end is allowed
+ allow_trailing_empty: bool,
+ only_ascii: bool,
+ finished: bool,
+}
+
+/// An iterator over the substrings of a string, separated by `sep`,
+/// splitting at most `count` times.
+#[deriving(Clone)]
+#[deprecated = "Type is now named `SplitN` or `RSplitN`"]
+pub struct CharSplitsN<'a, Sep> {
+ iter: CharSplits<'a, Sep>,
+ /// The number of splits remaining
+ count: uint,
+ invert: bool,
+}
+
+/// An iterator over the lines of a string, separated by `\n`.
+#[stable]
+pub struct Lines<'a> {
+ inner: CharSplits<'a, char>,
+}
+
+/// An iterator over the lines of a string, separated by either `\n` or (`\r\n`).
+#[stable]
+pub struct LinesAny<'a> {
+ inner: Map<&'a str, &'a str, Lines<'a>, fn(&str) -> &str>,
+}
+
+impl<'a, Sep> CharSplits<'a, Sep> {
+ #[inline]
+ fn get_end(&mut self) -> Option<&'a str> {
+ if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) {
+ self.finished = true;
+ Some(self.string)
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.finished { return None }
+
+ let mut next_split = None;
+ if self.only_ascii {
+ for (idx, byte) in self.string.bytes().enumerate() {
+ if self.sep.matches(byte as char) && byte < 128u8 {
+ next_split = Some((idx, idx + 1));
+ break;
+ }
+ }
+ } else {
+ for (idx, ch) in self.string.char_indices() {
+ if self.sep.matches(ch) {
+ next_split = Some((idx, self.string.char_range_at(idx).next));
+ break;
+ }
+ }
+ }
+ match next_split {
+ Some((a, b)) => unsafe {
+ let elt = self.string.slice_unchecked(0, a);
+ self.string = self.string.slice_unchecked(b, self.string.len());
+ Some(elt)
+ },
+ None => self.get_end(),
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str>
+for CharSplits<'a, Sep> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ if self.finished { return None }
+
+ if !self.allow_trailing_empty {
+ self.allow_trailing_empty = true;
+ match self.next_back() {
+ Some(elt) if !elt.is_empty() => return Some(elt),
+ _ => if self.finished { return None }
+ }
+ }
+ let len = self.string.len();
+ let mut next_split = None;
+
+ if self.only_ascii {
+ for (idx, byte) in self.string.bytes().enumerate().rev() {
+ if self.sep.matches(byte as char) && byte < 128u8 {
+ next_split = Some((idx, idx + 1));
+ break;
+ }
+ }
+ } else {
+ for (idx, ch) in self.string.char_indices().rev() {
+ if self.sep.matches(ch) {
+ next_split = Some((idx, self.string.char_range_at(idx).next));
+ break;
+ }
+ }
+ }
+ match next_split {
+ Some((a, b)) => unsafe {
+ let elt = self.string.slice_unchecked(b, len);
+ self.string = self.string.slice_unchecked(0, a);
+ Some(elt)
+ },
+ None => { self.finished = true; Some(self.string) }
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplitsN<'a, Sep> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.count != 0 {
+ self.count -= 1;
+ if self.invert { self.iter.next_back() } else { self.iter.next() }
+ } else {
+ self.iter.get_end()
+ }
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using naive search
+#[deriving(Clone)]
+struct NaiveSearcher {
+ position: uint
+}
+
+impl NaiveSearcher {
+ fn new() -> NaiveSearcher {
+ NaiveSearcher { position: 0 }
+ }
+
+ fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
+ while self.position + needle.len() <= haystack.len() {
+ if haystack[self.position .. self.position + needle.len()] == needle {
+ let match_pos = self.position;
+ self.position += needle.len(); // add 1 for all matches
+ return Some((match_pos, match_pos + needle.len()));
+ } else {
+ self.position += 1;
+ }
+ }
+ None
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using two-way search
+#[deriving(Clone)]
+struct TwoWaySearcher {
+ // constants
+ crit_pos: uint,
+ period: uint,
+ byteset: u64,
+
+ // variables
+ position: uint,
+ memory: uint
+}
+
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because each non-empty word has at least one factorization, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
+ defined. (i.e. i > 0 and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+*/
+impl TwoWaySearcher {
+ fn new(needle: &[u8]) -> TwoWaySearcher {
+ let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
+ let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
+
+ let crit_pos;
+ let period;
+ if crit_pos1 > crit_pos2 {
+ crit_pos = crit_pos1;
+ period = period1;
+ } else {
+ crit_pos = crit_pos2;
+ period = period2;
+ }
+
+ // This isn't in the original algorithm, as far as I'm aware.
+ let byteset = needle.iter()
+ .fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
+
+ // A particularly readable explanation of what's going on here can be found
+ // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
+ // see the code for "Algorithm CP" on p. 323.
+ //
+ // What's going on is we have some critical factorization (u, v) of the
+ // needle, and we want to determine whether u is a suffix of
+ // v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
+ // "Algorithm CP2", which is optimized for when the period of the needle
+ // is large.
+ if needle[..crit_pos] == needle[period.. period + crit_pos] {
+ TwoWaySearcher {
+ crit_pos: crit_pos,
+ period: period,
+ byteset: byteset,
+
+ position: 0,
+ memory: 0
+ }
+ } else {
+ TwoWaySearcher {
+ crit_pos: crit_pos,
+ period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
+ byteset: byteset,
+
+ position: 0,
+ memory: uint::MAX // Dummy value to signify that the period is long
+ }
+ }
+ }
+
+ // One of the main ideas of Two-Way is that we factorize the needle into
+ // two halves, (u, v), and begin trying to find v in the haystack by scanning
+ // left to right. If v matches, we try to match u by scanning right to left.
+ // How far we can jump when we encounter a mismatch is all based on the fact
+ // that (u, v) is a critical factorization for the needle.
+ #[inline]
+ fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
+ 'search: loop {
+ // Check that we have room to search in
+ if self.position + needle.len() > haystack.len() {
+ return None;
+ }
+
+ // Quickly skip by large portions unrelated to our substring
+ if (self.byteset >>
+ ((haystack[self.position + needle.len() - 1] & 0x3f)
+ as uint)) & 1 == 0 {
+ self.position += needle.len();
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+
+ // See if the right part of the needle matches
+ let start = if long_period { self.crit_pos }
+ else { cmp::max(self.crit_pos, self.memory) };
+ for i in range(start, needle.len()) {
+ if needle[i] != haystack[self.position + i] {
+ self.position += i - self.crit_pos + 1;
+ if !long_period {
+ self.memory = 0;
+ }
+ continue 'search;
+ }
+ }
+
+ // See if the left part of the needle matches
+ let start = if long_period { 0 } else { self.memory };
+ for i in range(start, self.crit_pos).rev() {
+ if needle[i] != haystack[self.position + i] {
+ self.position += self.period;
+ if !long_period {
+ self.memory = needle.len() - self.period;
+ }
+ continue 'search;
+ }
+ }
+
+ // We have found a match!
+ let match_pos = self.position;
+ self.position += needle.len(); // add self.period for all matches
+ if !long_period {
+ self.memory = 0; // set to needle.len() - self.period for all matches
+ }
+ return Some((match_pos, match_pos + needle.len()));
+ }
+ }
+
+ // Computes a critical factorization (u, v) of `arr`.
+ // Specifically, returns (i, p), where i is the starting index of v in some
+ // critical factorization (u, v) and p = period(v)
+ #[inline]
+ fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
+ let mut left = -1; // Corresponds to i in the paper
+ let mut right = 0; // Corresponds to j in the paper
+ let mut offset = 1; // Corresponds to k in the paper
+ let mut period = 1; // Corresponds to p in the paper
+
+ while right + offset < arr.len() {
+ let a;
+ let b;
+ if reversed {
+ a = arr[left + offset];
+ b = arr[right + offset];
+ } else {
+ a = arr[right + offset];
+ b = arr[left + offset];
+ }
+ if a < b {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset;
+ offset = 1;
+ period = right - left;
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset == period {
+ right += offset;
+ offset = 1;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 1;
+ period = 1;
+ }
+ }
+ (left + 1, period)
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using a dynamically chosen search algorithm
+#[deriving(Clone)]
+enum Searcher {
+ Naive(NaiveSearcher),
+ TwoWay(TwoWaySearcher),
+ TwoWayLong(TwoWaySearcher)
+}
+
+impl Searcher {
+ fn new(haystack: &[u8], needle: &[u8]) -> Searcher {
+ // FIXME: Tune this.
+ // FIXME(#16715): This unsigned integer addition will probably not
+ // overflow because that would mean that the memory almost solely
+ // consists of the needle. Needs #16715 to be formally fixed.
+ if needle.len() + 20 > haystack.len() {
+ Naive(NaiveSearcher::new())
+ } else {
+ let searcher = TwoWaySearcher::new(needle);
+ if searcher.memory == uint::MAX { // If the period is long
+ TwoWayLong(searcher)
+ } else {
+ TwoWay(searcher)
+ }
+ }
+ }
+}
+
+/// An iterator over the start and end indices of the matches of a
+/// substring within a larger string
+#[deriving(Clone)]
+pub struct MatchIndices<'a> {
+ // constants
+ haystack: &'a str,
+ needle: &'a str,
+ searcher: Searcher
+}
+
+/// An iterator over the substrings of a string separated by a given
+/// search string
+#[deriving(Clone)]
+#[unstable = "Type might get removed"]
+pub struct SplitStr<'a> {
+ it: MatchIndices<'a>,
+ last_end: uint,
+ finished: bool
+}
+
+/// Deprecated
+#[deprecated = "Type is now named `SplitStr`"]
+pub type StrSplits<'a> = SplitStr<'a>;
+
+impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<(uint, uint)> {
+ match self.searcher {
+ Naive(ref mut searcher)
+ => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes()),
+ TwoWay(ref mut searcher)
+ => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
+ TwoWayLong(ref mut searcher)
+ => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true)
+ }
+ }
+}
+
+impl<'a> Iterator<&'a str> for SplitStr<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.finished { return None; }
+
+ match self.it.next() {
+ Some((from, to)) => {
+ let ret = Some(self.it.haystack.slice(self.last_end, from));
+ self.last_end = to;
+ ret
+ }
+ None => {
+ self.finished = true;
+ Some(self.it.haystack.slice(self.last_end, self.it.haystack.len()))
+ }
+ }
+ }
+}
+
+
+/*
+Section: Comparing strings
+*/
+
+// share the implementation of the lang-item vs. non-lang-item
+// eq_slice.
+/// NOTE: This function is (ab)used in rustc::middle::trans::_match
+/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
+#[inline]
+fn eq_slice_(a: &str, b: &str) -> bool {
+ #[allow(improper_ctypes)]
+ extern { fn memcmp(s1: *const i8, s2: *const i8, n: uint) -> i32; }
+ a.len() == b.len() && unsafe {
+ memcmp(a.as_ptr() as *const i8,
+ b.as_ptr() as *const i8,
+ a.len()) == 0
+ }
+}
+
+/// Bytewise slice equality
+/// NOTE: This function is (ab)used in rustc::middle::trans::_match
+/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
+#[lang="str_eq"]
+#[inline]
+fn eq_slice(a: &str, b: &str) -> bool {
+ eq_slice_(a, b)
+}
+
+/*
+Section: Misc
+*/
+
+/// Walk through `iter` checking that it's a valid UTF-8 sequence,
+/// returning `Ok(())` in that case, or, if it is invalid, an `Err` with
+/// `iter` reset such that it is pointing at the first byte in the
+/// invalid sequence.
+#[inline(always)]
+fn run_utf8_validation_iterator(iter: &mut slice::Iter<u8>)
+ -> Result<(), Utf8Error> {
+ let whole = iter.as_slice();
+ loop {
+ // save the current thing we're pointing at.
+ let old = *iter;
+
+ // restore the iterator we had at the start of this codepoint.
+ macro_rules! err (() => { {
+ *iter = old;
+ return Err(Utf8Error::InvalidByte(whole.len() - iter.as_slice().len()))
+ } });
+ macro_rules! next ( () => {
+ match iter.next() {
+ Some(a) => *a,
+ // we needed data, but there was none: error!
+ None => return Err(Utf8Error::TooShort),
+ }
+ });
+
+ let first = match iter.next() {
+ Some(&b) => b,
+ // we're at the end of the iterator and a codepoint
+ // boundary at the same time, so this string is valid.
+ None => return Ok(())
+ };
+
+ // ASCII characters are always valid, so only large
+ // bytes need more examination.
+ if first >= 128 {
+ let w = UTF8_CHAR_WIDTH[first as uint] as uint;
+ let second = next!();
+ // 2-byte encoding is for codepoints \u{0080} to \u{07ff}
+ // first C2 80 last DF BF
+ // 3-byte encoding is for codepoints \u{0800} to \u{ffff}
+ // first E0 A0 80 last EF BF BF
+            //        excluding surrogate codepoints  \u{d800} to  \u{dfff}
+ // ED A0 80 to ED BF BF
+            // 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
+ // first F0 90 80 80 last F4 8F BF BF
+ //
+ // Use the UTF-8 syntax from the RFC
+ //
+ // https://tools.ietf.org/html/rfc3629
+ // UTF8-1 = %x00-7F
+ // UTF8-2 = %xC2-DF UTF8-tail
+ // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
+ // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
+ // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
+ // %xF4 %x80-8F 2( UTF8-tail )
+ match w {
+ 2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
+ 3 => {
+ match (first, second, next!() & !CONT_MASK) {
+ (0xE0 , 0xA0 ... 0xBF, TAG_CONT_U8) |
+ (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
+ (0xED , 0x80 ... 0x9F, TAG_CONT_U8) |
+ (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
+ _ => err!()
+ }
+ }
+ 4 => {
+ match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
+ (0xF0 , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
+ (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
+ (0xF4 , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
+ _ => err!()
+ }
+ }
+ _ => err!()
+ }
+ }
+ }
+}
+
+/// Determines if a vector of bytes contains valid UTF-8.
+#[deprecated = "call from_utf8 instead"]
+pub fn is_utf8(v: &[u8]) -> bool {
+ run_utf8_validation_iterator(&mut v.iter()).is_ok()
+}
+
+/// Deprecated function
+#[deprecated = "this function will be removed"]
+pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
+ match v.iter().position(|c| *c == 0) {
+ // don't include the 0
+ Some(i) => v[..i],
+ None => v
+ }
+}
+
+// https://tools.ietf.org/html/rfc3629
+static UTF8_CHAR_WIDTH: [u8, ..256] = [
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
+0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
+4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
+];
+
+/// Given a first byte, determine how many bytes are in this UTF-8 character
+#[inline]
+#[deprecated = "this function has moved to libunicode"]
+pub fn utf8_char_width(b: u8) -> uint {
+ return UTF8_CHAR_WIDTH[b as uint] as uint;
+}
+
+/// Struct that contains a `char` and the index of the first byte of
+/// the next `char` in a string. This can be used as a data structure
+/// for iterating over the UTF-8 bytes of a string.
+#[deriving(Copy)]
+#[unstable = "naming is uncertain with container conventions"]
+pub struct CharRange {
+ /// Current `char`
+ pub ch: char,
+ /// Index of the first byte of the next `char`
+ pub next: uint,
+}
+
+/// Mask of the value bits of a continuation byte
+const CONT_MASK: u8 = 0b0011_1111u8;
+/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte
+const TAG_CONT_U8: u8 = 0b1000_0000u8;
+
+/// Unsafe operations
+#[deprecated]
+pub mod raw {
+ use ptr::PtrExt;
+ use raw::Slice;
+ use slice::SliceExt;
+ use str::StrExt;
+
+ /// Converts a slice of bytes to a string slice without checking
+ /// that the string contains valid UTF-8.
+ #[deprecated = "renamed to str::from_utf8_unchecked"]
+ pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
+ super::from_utf8_unchecked(v)
+ }
+
+ /// Form a slice from a C string. Unsafe because the caller must ensure the
+ /// C string has the static lifetime, or else the return value may be
+ /// invalidated later.
+ #[deprecated = "renamed to str::from_c_str"]
+ pub unsafe fn c_str_to_static_slice(s: *const i8) -> &'static str {
+ let s = s as *const u8;
+ let mut curr = s;
+ let mut len = 0u;
+ while *curr != 0u8 {
+ len += 1u;
+ curr = s.offset(len as int);
+ }
+ let v = Slice { data: s, len: len };
+ super::from_utf8(::mem::transmute(v)).unwrap()
+ }
+
+ /// Takes a bytewise (not UTF-8) slice from a string.
+ ///
+ /// Returns the substring from [`begin`..`end`).
+ ///
+ /// # Panics
+ ///
+ /// If begin is greater than end.
+ /// If end is greater than the length of the string.
+ #[inline]
+ #[deprecated = "call the slice_unchecked method instead"]
+ pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
+ assert!(begin <= end);
+ assert!(end <= s.len());
+ s.slice_unchecked(begin, end)
+ }
+
+ /// Takes a bytewise (not UTF-8) slice from a string.
+ ///
+ /// Returns the substring from [`begin`..`end`).
+ ///
+ /// Caller must check slice boundaries!
+ #[inline]
+ #[deprecated = "this has moved to a method on `str` directly"]
+ pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
+ s.slice_unchecked(begin, end)
+ }
+}
+
+/*
+Section: Trait implementations
+*/
+
+#[allow(missing_docs)]
+pub mod traits {
+ use cmp::{Ordering, Ord, PartialEq, PartialOrd, Equiv, Eq};
+ use cmp::Ordering::{Less, Equal, Greater};
+ use iter::IteratorExt;
+ use option::Option;
+ use option::Option::Some;
+ use ops;
+ use str::{Str, StrExt, eq_slice};
+
+ impl Ord for str {
+ #[inline]
+ fn cmp(&self, other: &str) -> Ordering {
+ for (s_b, o_b) in self.bytes().zip(other.bytes()) {
+ match s_b.cmp(&o_b) {
+ Greater => return Greater,
+ Less => return Less,
+ Equal => ()
+ }
+ }
+
+ self.len().cmp(&other.len())
+ }
+ }
+
+ impl PartialEq for str {
+ #[inline]
+ fn eq(&self, other: &str) -> bool {
+ eq_slice(self, other)
+ }
+ #[inline]
+ fn ne(&self, other: &str) -> bool { !(*self).eq(other) }
+ }
+
+ impl Eq for str {}
+
+ impl PartialOrd for str {
+ #[inline]
+ fn partial_cmp(&self, other: &str) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ #[allow(deprecated)]
+ #[deprecated = "Use overloaded `core::cmp::PartialEq`"]
+ impl<S: Str> Equiv<S> for str {
+ #[inline]
+ fn equiv(&self, other: &S) -> bool { eq_slice(self, other.as_slice()) }
+ }
+
+ impl ops::Slice<uint, str> for str {
+ #[inline]
+ fn as_slice_<'a>(&'a self) -> &'a str {
+ self
+ }
+
+ #[inline]
+ fn slice_from_or_fail<'a>(&'a self, from: &uint) -> &'a str {
+ self.slice_from(*from)
+ }
+
+ #[inline]
+ fn slice_to_or_fail<'a>(&'a self, to: &uint) -> &'a str {
+ self.slice_to(*to)
+ }
+
+ #[inline]
+ fn slice_or_fail<'a>(&'a self, from: &uint, to: &uint) -> &'a str {
+ self.slice(*from, *to)
+ }
+ }
+}
+
+/// Any string that can be represented as a slice
+#[unstable = "Instead of taking this bound generically, this trait will be \
+ replaced with one of slicing syntax, deref coercions, or \
+ a more generic conversion trait"]
+pub trait Str for Sized? {
+ /// Work with `self` as a slice.
+ fn as_slice<'a>(&'a self) -> &'a str;
+}
+
+#[allow(deprecated)]
+impl Str for str {
+ #[inline]
+ fn as_slice<'a>(&'a self) -> &'a str { self }
+}
+
+#[allow(deprecated)]
+impl<'a, Sized? S> Str for &'a S where S: Str {
+ #[inline]
+ fn as_slice(&self) -> &str { Str::as_slice(*self) }
+}
+
+// Newtype wrappers around the internal CharSplits(N) iterators so the public
+// return types are distinct. `delegate_iter!` (defined elsewhere in this
+// module — presumably forwards the Iterator impls to the wrapped field;
+// TODO confirm against the macro definition) supplies the iteration.
+/// Return type of `StrExt::split`
+#[deriving(Clone)]
+#[stable]
+pub struct Split<'a, P>(CharSplits<'a, P>);
+delegate_iter!{pattern &'a str in Split<'a, P>}
+
+/// Return type of `StrExt::split_terminator`
+#[deriving(Clone)]
+#[unstable = "might get removed in favour of a constructor method on Split"]
+pub struct SplitTerminator<'a, P>(CharSplits<'a, P>);
+delegate_iter!{pattern &'a str in SplitTerminator<'a, P>}
+
+/// Return type of `StrExt::splitn`
+#[deriving(Clone)]
+#[stable]
+pub struct SplitN<'a, P>(CharSplitsN<'a, P>);
+delegate_iter!{pattern forward &'a str in SplitN<'a, P>}
+
+/// Return type of `StrExt::rsplitn`
+#[deriving(Clone)]
+#[stable]
+pub struct RSplitN<'a, P>(CharSplitsN<'a, P>);
+delegate_iter!{pattern forward &'a str in RSplitN<'a, P>}
+
+/// Methods for string slices
+#[allow(missing_docs)]
+pub trait StrExt for Sized? {
+    // NB there are no docs here as they're all located on the StrExt trait in
+    // libcollections, not here.
+
+    fn contains(&self, pat: &str) -> bool;
+    fn contains_char<P: CharEq>(&self, pat: P) -> bool;
+    fn chars<'a>(&'a self) -> Chars<'a>;
+    fn bytes<'a>(&'a self) -> Bytes<'a>;
+    fn char_indices<'a>(&'a self) -> CharIndices<'a>;
+    fn split<'a, P: CharEq>(&'a self, pat: P) -> Split<'a, P>;
+    fn splitn<'a, P: CharEq>(&'a self, count: uint, pat: P) -> SplitN<'a, P>;
+    fn split_terminator<'a, P: CharEq>(&'a self, pat: P) -> SplitTerminator<'a, P>;
+    fn rsplitn<'a, P: CharEq>(&'a self, count: uint, pat: P) -> RSplitN<'a, P>;
+    fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a>;
+    fn split_str<'a>(&'a self, pat: &'a str) -> SplitStr<'a>;
+    fn lines<'a>(&'a self) -> Lines<'a>;
+    fn lines_any<'a>(&'a self) -> LinesAny<'a>;
+    fn char_len(&self) -> uint;
+    fn slice<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    fn slice_from<'a>(&'a self, begin: uint) -> &'a str;
+    fn slice_to<'a>(&'a self, end: uint) -> &'a str;
+    fn slice_chars<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    unsafe fn slice_unchecked<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    fn starts_with(&self, pat: &str) -> bool;
+    fn ends_with(&self, pat: &str) -> bool;
+    fn trim_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn trim_left_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn trim_right_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn is_char_boundary(&self, index: uint) -> bool;
+    fn char_range_at(&self, start: uint) -> CharRange;
+    fn char_range_at_reverse(&self, start: uint) -> CharRange;
+    fn char_at(&self, i: uint) -> char;
+    fn char_at_reverse(&self, i: uint) -> char;
+    fn as_bytes<'a>(&'a self) -> &'a [u8];
+    fn find<P: CharEq>(&self, pat: P) -> Option<uint>;
+    fn rfind<P: CharEq>(&self, pat: P) -> Option<uint>;
+    fn find_str(&self, pat: &str) -> Option<uint>;
+    fn slice_shift_char<'a>(&'a self) -> Option<(char, &'a str)>;
+    fn subslice_offset(&self, inner: &str) -> uint;
+    fn as_ptr(&self) -> *const u8;
+    fn len(&self) -> uint;
+    fn is_empty(&self) -> bool;
+}
+
+// Cold panic path shared by the checked slicing methods. Marked
+// #[inline(never)] so the error formatting stays out of callers' hot paths.
+#[inline(never)]
+fn slice_error_fail(s: &str, begin: uint, end: uint) -> ! {
+    assert!(begin <= end);
+    panic!("index {} and/or {} in `{}` do not lie on character boundary",
+          begin, end, s);
+}
+
+// Primitive implementations of the StrExt methods for `str` itself.
+impl StrExt for str {
+    #[inline]
+    fn contains(&self, needle: &str) -> bool {
+        self.find_str(needle).is_some()
+    }
+
+    #[inline]
+    fn contains_char<P: CharEq>(&self, pat: P) -> bool {
+        self.find(pat).is_some()
+    }
+
+    #[inline]
+    fn chars(&self) -> Chars {
+        Chars{iter: self.as_bytes().iter()}
+    }
+
+    #[inline]
+    fn bytes(&self) -> Bytes {
+        Bytes(self.as_bytes().iter().map(BytesDeref))
+    }
+
+    #[inline]
+    fn char_indices(&self) -> CharIndices {
+        CharIndices { front_offset: 0, iter: self.chars() }
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplits
+    fn split<P: CharEq>(&self, pat: P) -> Split<P> {
+        Split(CharSplits {
+            string: self,
+            only_ascii: pat.only_ascii(),
+            sep: pat,
+            allow_trailing_empty: true,
+            finished: false,
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplitsN
+    fn splitn<P: CharEq>(&self, count: uint, pat: P) -> SplitN<P> {
+        SplitN(CharSplitsN {
+            iter: self.split(pat).0,
+            count: count,
+            invert: false,
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplits
+    fn split_terminator<P: CharEq>(&self, pat: P) -> SplitTerminator<P> {
+        // Same as split() but a trailing separator yields no empty tail.
+        SplitTerminator(CharSplits {
+            allow_trailing_empty: false,
+            ..self.split(pat).0
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplitsN
+    fn rsplitn<P: CharEq>(&self, count: uint, pat: P) -> RSplitN<P> {
+        RSplitN(CharSplitsN {
+            iter: self.split(pat).0,
+            count: count,
+            invert: true,
+        })
+    }
+
+    #[inline]
+    fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
+        // An empty needle would match at every position; forbid it.
+        assert!(!sep.is_empty());
+        MatchIndices {
+            haystack: self,
+            needle: sep,
+            searcher: Searcher::new(self.as_bytes(), sep.as_bytes())
+        }
+    }
+
+    #[inline]
+    fn split_str<'a>(&'a self, sep: &'a str) -> SplitStr<'a> {
+        SplitStr {
+            it: self.match_indices(sep),
+            last_end: 0,
+            finished: false
+        }
+    }
+
+    #[inline]
+    fn lines(&self) -> Lines {
+        Lines { inner: self.split_terminator('\n').0 }
+    }
+
+    fn lines_any(&self) -> LinesAny {
+        // Strip a trailing '\r' so CRLF line endings behave like LF.
+        fn f(line: &str) -> &str {
+            let l = line.len();
+            if l > 0 && line.as_bytes()[l - 1] == b'\r' { line.slice(0, l - 1) }
+            else { line }
+        }
+
+        let f: fn(&str) -> &str = f; // coerce to fn pointer
+        LinesAny { inner: self.lines().map(f) }
+    }
+
+    #[inline]
+    fn char_len(&self) -> uint { self.chars().count() }
+
+    #[inline]
+    fn slice(&self, begin: uint, end: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if begin <= end &&
+           self.is_char_boundary(begin) &&
+           self.is_char_boundary(end) {
+            unsafe { self.slice_unchecked(begin, end) }
+        } else {
+            slice_error_fail(self, begin, end)
+        }
+    }
+
+    #[inline]
+    fn slice_from(&self, begin: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if self.is_char_boundary(begin) {
+            unsafe { self.slice_unchecked(begin, self.len()) }
+        } else {
+            slice_error_fail(self, begin, self.len())
+        }
+    }
+
+    #[inline]
+    fn slice_to(&self, end: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if self.is_char_boundary(end) {
+            unsafe { self.slice_unchecked(0, end) }
+        } else {
+            slice_error_fail(self, 0, end)
+        }
+    }
+
+    fn slice_chars(&self, begin: uint, end: uint) -> &str {
+        // Indices here are CHARACTER counts, not byte offsets; a single
+        // forward scan converts them to byte positions.
+        assert!(begin <= end);
+        let mut count = 0;
+        let mut begin_byte = None;
+        let mut end_byte = None;
+
+        // This could be even more efficient by not decoding,
+        // only finding the char boundaries
+        for (idx, _) in self.char_indices() {
+            if count == begin { begin_byte = Some(idx); }
+            if count == end { end_byte = Some(idx); break; }
+            count += 1;
+        }
+        if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
+        if end_byte.is_none() && count == end { end_byte = Some(self.len()) }
+
+        match (begin_byte, end_byte) {
+            (None, _) => panic!("slice_chars: `begin` is beyond end of string"),
+            (_, None) => panic!("slice_chars: `end` is beyond end of string"),
+            (Some(a), Some(b)) => unsafe { self.slice_unchecked(a, b) }
+        }
+    }
+
+    // Caller must guarantee begin <= end <= len and that both indices lie on
+    // character boundaries; no checks are performed here.
+    #[inline]
+    unsafe fn slice_unchecked(&self, begin: uint, end: uint) -> &str {
+        mem::transmute(Slice {
+            data: self.as_ptr().offset(begin as int),
+            len: end - begin,
+        })
+    }
+
+    // Byte-wise prefix compare is valid: UTF-8 string equality is byte equality.
+    #[inline]
+    fn starts_with(&self, needle: &str) -> bool {
+        let n = needle.len();
+        self.len() >= n && needle.as_bytes() == self.as_bytes()[..n]
+    }
+
+    #[inline]
+    fn ends_with(&self, needle: &str) -> bool {
+        let (m, n) = (self.len(), needle.len());
+        m >= n && needle.as_bytes() == self.as_bytes()[m-n..]
+    }
+
+    // Trim from both ends: find the first and last chars NOT matching `pat`
+    // and slice between them; an all-matching string trims to "".
+    #[inline]
+    fn trim_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        let cur = match self.find(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(i) => unsafe { self.slice_unchecked(i, self.len()) }
+        };
+        match cur.rfind(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(i) => {
+                let right = cur.char_range_at(i).next;
+                unsafe { cur.slice_unchecked(0, right) }
+            }
+        }
+    }
+
+    #[inline]
+    fn trim_left_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        match self.find(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(first) => unsafe { self.slice_unchecked(first, self.len()) }
+        }
+    }
+
+    #[inline]
+    fn trim_right_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        match self.rfind(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(last) => {
+                let next = self.char_range_at(last).next;
+                unsafe { self.slice_unchecked(0u, next) }
+            }
+        }
+    }
+
+    #[inline]
+    fn is_char_boundary(&self, index: uint) -> bool {
+        if index == self.len() { return true; }
+        // Bytes 128..192 (0b10xxxxxx) are UTF-8 continuation bytes; any other
+        // byte value starts a new character.
+        match self.as_bytes().get(index) {
+            None => false,
+            Some(&b) => b < 128u8 || b >= 192u8,
+        }
+    }
+
+    #[inline]
+    fn char_range_at(&self, i: uint) -> CharRange {
+        // ASCII fast path: a single byte below 128 is a whole character.
+        if self.as_bytes()[i] < 128u8 {
+            return CharRange {ch: self.as_bytes()[i] as char, next: i + 1 };
+        }
+
+        // Multibyte case is a fn to allow char_range_at to inline cleanly
+        fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
+            let mut val = s.as_bytes()[i] as u32;
+            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
+            assert!((w != 0));
+
+            val = utf8_first_byte!(val, w);
+            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
+            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
+            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
+
+            return CharRange {ch: unsafe { mem::transmute(val) }, next: i + w};
+        }
+
+        return multibyte_char_range_at(self, i);
+    }
+
+    #[inline]
+    fn char_range_at_reverse(&self, start: uint) -> CharRange {
+        let mut prev = start;
+
+        prev = prev.saturating_sub(1);
+        if self.as_bytes()[prev] < 128 {
+            return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
+        }
+
+        // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
+        fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
+            // while there is a previous byte == 10......
+            while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
+                i -= 1u;
+            }
+
+            let mut val = s.as_bytes()[i] as u32;
+            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
+            assert!((w != 0));
+
+            val = utf8_first_byte!(val, w);
+            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
+            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
+            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
+
+            return CharRange {ch: unsafe { mem::transmute(val) }, next: i};
+        }
+
+        return multibyte_char_range_at_reverse(self, prev);
+    }
+
+    #[inline]
+    fn char_at(&self, i: uint) -> char {
+        self.char_range_at(i).ch
+    }
+
+    #[inline]
+    fn char_at_reverse(&self, i: uint) -> char {
+        self.char_range_at_reverse(i).ch
+    }
+
+    // &str and &[u8] share the same slice representation, so this
+    // reinterprets the slice without copying.
+    #[inline]
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { mem::transmute(self) }
+    }
+
+    fn find<P: CharEq>(&self, mut pat: P) -> Option<uint> {
+        // ASCII-only patterns can scan raw bytes; otherwise decode chars.
+        if pat.only_ascii() {
+            self.bytes().position(|b| pat.matches(b as char))
+        } else {
+            for (index, c) in self.char_indices() {
+                if pat.matches(c) { return Some(index); }
+            }
+            None
+        }
+    }
+
+    fn rfind<P: CharEq>(&self, mut pat: P) -> Option<uint> {
+        if pat.only_ascii() {
+            self.bytes().rposition(|b| pat.matches(b as char))
+        } else {
+            for (index, c) in self.char_indices().rev() {
+                if pat.matches(c) { return Some(index); }
+            }
+            None
+        }
+    }
+
+    fn find_str(&self, needle: &str) -> Option<uint> {
+        // Empty needle matches at offset 0 (match_indices forbids it).
+        if needle.is_empty() {
+            Some(0)
+        } else {
+            self.match_indices(needle)
+                .next()
+                .map(|(start, _end)| start)
+        }
+    }
+
+    #[inline]
+    fn slice_shift_char(&self) -> Option<(char, &str)> {
+        if self.is_empty() {
+            None
+        } else {
+            let CharRange {ch, next} = self.char_range_at(0u);
+            let next_s = unsafe { self.slice_unchecked(next, self.len()) };
+            Some((ch, next_s))
+        }
+    }
+
+    // Pointer arithmetic: asserts that `inner` lies entirely within `self`
+    // and returns its byte offset from the start of `self`.
+    fn subslice_offset(&self, inner: &str) -> uint {
+        let a_start = self.as_ptr() as uint;
+        let a_end = a_start + self.len();
+        let b_start = inner.as_ptr() as uint;
+        let b_end = b_start + inner.len();
+
+        assert!(a_start <= b_start);
+        assert!(b_end <= a_end);
+        b_start - a_start
+    }
+
+    #[inline]
+    fn as_ptr(&self) -> *const u8 {
+        self.repr().data
+    }
+
+    #[inline]
+    fn len(&self) -> uint { self.repr().len }
+
+    #[inline]
+    fn is_empty(&self) -> bool { self.len() == 0 }
+}
+
+#[stable]
+impl<'a> Default for &'a str {
+    #[stable]
+    // The default string slice is the empty string.
+    fn default() -> &'a str { "" }
+}
+
+// Lines and LinesAny are thin wrappers; both iterator impls simply forward
+// to the inner adapter, preserving its size_hint.
+impl<'a> Iterator<&'a str> for Lines<'a> {
+    #[inline]
+    fn next(&mut self) -> Option<&'a str> { self.inner.next() }
+    #[inline]
+    fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+}
+impl<'a> DoubleEndedIterator<&'a str> for Lines<'a> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
+}
+impl<'a> Iterator<&'a str> for LinesAny<'a> {
+    #[inline]
+    fn next(&mut self) -> Option<&'a str> { self.inner.next() }
+    #[inline]
+    fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+}
+impl<'a> DoubleEndedIterator<&'a str> for LinesAny<'a> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
+}
mod hash;
mod iter;
mod mem;
+mod nonzero;
mod num;
mod ops;
mod option;
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::nonzero::NonZero;
+use core::option::Option;
+use core::option::Option::{Some, None};
+use std::mem::size_of;
+
+#[test]
+fn test_create_nonzero_instance() {
+    // NonZero::new is unsafe: the caller must guarantee the value is nonzero.
+    let _a = unsafe {
+        NonZero::new(21i)
+    };
+}
+
+#[test]
+fn test_size_nonzero_in_option() {
+    // The nonzero niche lets Option<NonZero<u32>> reuse the zero value as
+    // None, so the Option adds no extra discriminant space.
+    assert_eq!(size_of::<NonZero<u32>>(), size_of::<Option<NonZero<u32>>>());
+}
+
+#[test]
+fn test_match_on_nonzero_option() {
+    let a = Some(unsafe {
+        NonZero::new(42i)
+    });
+    match a {
+        Some(val) => assert_eq!(*val, 42),
+        None => panic!("unexpected None while matching on Some(NonZero(_))")
+    }
+
+    match unsafe { Some(NonZero::new(43i)) } {
+        Some(val) => assert_eq!(*val, 43),
+        None => panic!("unexpected None while matching on Some(NonZero(_))")
+    }
+}
+
+// The remaining tests match on Option<T> for heap-backed T (Vec, Rc, Arc,
+// String) — presumably exercising the discriminant optimization for such
+// types; TODO confirm intent against the commit that added this file.
+#[test]
+fn test_match_option_empty_vec() {
+    let a: Option<Vec<int>> = Some(vec![]);
+    match a {
+        None => panic!("unexpected None while matching on Some(vec![])"),
+        _ => {}
+    }
+}
+
+#[test]
+fn test_match_option_vec() {
+    let a = Some(vec![1i, 2, 3, 4]);
+    match a {
+        Some(v) => assert_eq!(v, vec![1i, 2, 3, 4]),
+        None => panic!("unexpected None while matching on Some(vec![1, 2, 3, 4])")
+    }
+}
+
+#[test]
+fn test_match_option_rc() {
+    use std::rc::Rc;
+
+    let five = Rc::new(5i);
+    match Some(five) {
+        Some(r) => assert_eq!(*r, 5i),
+        None => panic!("unexpected None while matching on Some(Rc::new(5))")
+    }
+}
+
+#[test]
+fn test_match_option_arc() {
+    use std::sync::Arc;
+
+    let five = Arc::new(5i);
+    match Some(five) {
+        Some(a) => assert_eq!(*a, 5i),
+        None => panic!("unexpected None while matching on Some(Arc::new(5))")
+    }
+}
+
+#[test]
+fn test_match_option_empty_string() {
+    let a = Some(String::new());
+    match a {
+        None => panic!("unexpected None while matching on Some(String::new())"),
+        _ => {}
+    }
+}
+
+#[test]
+fn test_match_option_string() {
+    let five = "Five".into_string();
+    match Some(five) {
+        Some(s) => assert_eq!(s, "Five"),
+        None => panic!("unexpected None while matching on Some(String { ... })")
+    }
+}
// except according to those terms.
use test::Bencher;
-use core::ops::{Range, FullRange, RangeFrom};
+use core::ops::{Range, FullRange, RangeFrom, RangeTo};
// Overhead of dtors
assert!(count == 10);
}
+#[test]
+fn test_range_to() {
+ // Not much to test.
+ let _ = RangeTo { end: 42u };
+}
+
#[test]
fn test_full_range() {
// Not much to test.
extern crate libc;
-use std::c_vec::CVec;
use libc::{c_void, size_t, c_int};
+use std::c_vec::CVec;
+use std::ptr::Unique;
#[link(name = "miniz", kind = "static")]
extern {
&mut outsz,
flags);
if !res.is_null() {
- Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
+ let res = Unique(res);
+ Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
&mut outsz,
flags);
if !res.is_null() {
- Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
+ let res = Unique(res);
+ Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
fn test_input(g: LabelledGraph) -> IoResult<String> {
let mut writer = Vec::new();
render(&g, &mut writer).unwrap();
- (&mut writer[]).read_to_string()
+ (&mut writer.as_slice()).read_to_string()
}
// All of the tests use raw-strings as the format for the expected outputs,
edge(1, 3, ";"), edge(2, 3, ";" )));
render(&g, &mut writer).unwrap();
- let r = (&mut writer[]).read_to_string();
+ let r = (&mut writer.as_slice()).read_to_string();
assert_eq!(r.unwrap(),
r#"digraph syntax_tree {
use std::default::Default;
use std::fmt;
-use std::iter::FromIterator;
use std::path::BytesContainer;
use std::slice;
pub type sighandler_t = size_t;
}
pub mod bsd44 {
+ use types::common::c95::{c_void};
use types::os::arch::c95::{c_char, c_int, c_uint};
pub type socklen_t = u32;
pub sun_family: sa_family_t,
pub sun_path: [c_char, ..104]
}
+ #[repr(C)]
+ #[deriving(Copy)] pub struct ifaddrs {
+ pub ifa_next: *mut ifaddrs,
+ pub ifa_name: *mut c_char,
+ pub ifa_flags: c_uint,
+ pub ifa_addr: *mut sockaddr,
+ pub ifa_netmask: *mut sockaddr,
+ pub ifa_dstaddr: *mut sockaddr,
+ pub ifa_data: *mut c_void
+ }
+
}
}
}
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
/// This function is called directly by the compiler when using the logging
/// macros. This function does not take into account whether the log level
/// specified is active or not, it will always log something if this method is
/// It is not recommended to call this function directly, rather it should be
/// invoked through the logging family of macros.
#[doc(hidden)]
-pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
+pub fn log(level: u32, loc: &'static LogLocation, args: fmt::Arguments) {
// Test the literal string from args against the current filter, if there
// is one.
match unsafe { FILTER.as_ref() } {
set_logger(logger);
}
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
+/// This function is called directly by the compiler when using the logging
+/// macros. This function does not take into account whether the log level
+/// specified is active or not, it will always log something if this method is
+/// called.
+///
+/// It is not recommended to call this function directly, rather it should be
+/// invoked through the logging family of macros.
+#[doc(hidden)]
+pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
+ // Test the literal string from args against the current filter, if there
+ // is one.
+ match unsafe { FILTER.as_ref() } {
+ Some(filter) if !filter.is_match(args.to_string().as_slice()) => return,
+ _ => {}
+ }
+
+ // Completely remove the local logger from TLS in case anyone attempts to
+ // frob the slot while we're doing the logging. This will destroy any logger
+ // set during logging.
+ let mut logger = LOCAL_LOGGER.with(|s| {
+ s.borrow_mut().take()
+ }).unwrap_or_else(|| {
+ box DefaultLogger { handle: io::stderr() } as Box<Logger + Send>
+ });
+ logger.log(&LogRecord {
+ level: LogLevel(level),
+ args: *args,
+ file: loc.file,
+ module_path: loc.module_path,
+ line: loc.line,
+ });
+ set_logger(logger);
+}
+
/// Getter for the global log level. This is a function so that it can be called
/// safely
#[doc(hidden)]
pub level: LogLevel,
/// The arguments from the log line.
- pub args: &'a fmt::Arguments<'a>,
+ pub args: fmt::Arguments<'a>,
/// The file of where the LogRecord originated.
pub file: &'a str,
#![macro_escape]
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// The standard logging macro
+///
+/// This macro will generically log over a provided level (of type u32) with a
+/// format!-based argument list. See documentation in `std::fmt` for details on
+/// how to use the syntax.
+///
+/// # Example
+///
+/// ```
+/// #![feature(phase)]
+/// #[phase(plugin, link)] extern crate log;
+///
+/// fn main() {
+/// log!(log::WARN, "this is a warning {}", "message");
+/// log!(log::DEBUG, "this is a debug message");
+/// log!(6, "this is a custom logging level: {level}", level=6u);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=warn ./main
+/// WARN:main: this is a warning message
+/// ```
+///
+/// ```{.bash}
+/// $ RUST_LOG=debug ./main
+/// DEBUG:main: this is a debug message
+/// WARN:main: this is a warning message
+/// ```
+///
+/// ```{.bash}
+/// $ RUST_LOG=6 ./main
+/// DEBUG:main: this is a debug message
+/// WARN:main: this is a warning message
+/// 6:main: this is a custom logging level: 6
+/// ```
+#[macro_export]
+macro_rules! log {
+ ($lvl:expr, $($arg:tt)+) => ({
+ static LOC: ::log::LogLocation = ::log::LogLocation {
+ line: line!(),
+ file: file!(),
+ module_path: module_path!(),
+ };
+ let lvl = $lvl;
+ if log_enabled!(lvl) {
+ ::log::log(lvl, &LOC, format_args!($($arg)+))
+ }
+ })
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// The standard logging macro
///
/// This macro will generically log over a provided level (of type u32) with a
// Do the necessary writes
if left.len() > 0 {
- slice::bytes::copy_memory(self.buf[mut self.pos..], left);
+ slice::bytes::copy_memory(self.buf.slice_from_mut(self.pos), left);
}
if right.len() > 0 {
self.buf.push_all(right);
}
fn push_repeater(&mut self, c: char) -> Result<(), Error> {
- if self.stack.len() == 0 {
- return self.err(
- "A repeat operator must be preceded by a valid expression.")
+ match self.stack.last() {
+ Some(&Expr(..)) => (),
+ // self.stack is empty, or the top item is not an Expr
+ _ => return self.err("A repeat operator must be preceded by a valid expression."),
}
let rep: Repeater = match c {
'?' => ZeroOne, '*' => ZeroMore, '+' => OneMore,
noparse!{fail_range_end_no_begin, r"[a-\A]"}
noparse!{fail_range_end_no_end, r"[a-\z]"}
noparse!{fail_range_end_no_boundary, r"[a-\b]"}
+noparse!{fail_repeat_no_expr, r"-|+"}
macro_rules! mat {
($name:ident, $re:expr, $text:expr, $($loc:tt)+) => (
E0173,
E0174,
E0177,
- E0178
+ E0178,
+ E0179
}
fn is_camel_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
if ident.get().is_empty() { return true; }
- let ident = ident.get().trim_chars('_');
+ let ident = ident.get().trim_matches('_');
// start with a non-lowercase letter rather than non-uppercase
// ones (some scripts don't have a concept of upper/lowercase)
fn is_snake_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
if ident.get().is_empty() { return true; }
- let ident = ident.get().trim_left_chars('\'');
- let ident = ident.trim_chars('_');
+ let ident = ident.get().trim_left_matches('\'');
+ let ident = ident.trim_matches('_');
let mut allow_underscore = true;
ident.chars().all(|c| {
}
ty::mk_struct(cx.tcx,
ast_util::local_def(item.id),
- Substs::empty())
+ cx.tcx.mk_substs(Substs::empty()))
}
ast::ItemEnum(_, ref ast_generics) => {
if ast_generics.is_parameterized() {
}
ty::mk_enum(cx.tcx,
ast_util::local_def(item.id),
- Substs::empty())
+ cx.tcx.mk_substs(Substs::empty()))
}
_ => return,
};
if self.is_internal(cx, item.span) { return }
match item.node {
- ast::ItemTrait(_, _, _, ref supertraits, _) => {
+ ast::ItemTrait(_, _, ref supertraits, _) => {
for t in supertraits.iter() {
- if let ast::TraitTyParamBound(ref t) = *t {
+ if let ast::TraitTyParamBound(ref t, _) = *t {
let id = ty::trait_ref_to_def_id(cx.tcx, &t.trait_ref);
self.lint(cx, id, t.trait_ref.path.span);
}
impl Lint {
/// Get the lint's name, with ASCII letters converted to lowercase.
pub fn name_lower(&self) -> String {
- self.name.to_ascii_lower()
+ self.name.to_ascii_lowercase()
}
}
tcx)
}
+pub fn get_trait_name(cstore: &cstore::CStore, def: ast::DefId) -> ast::Name {
+ let cdata = cstore.get_crate_data(def.krate);
+ decoder::get_trait_name(cstore.intr.clone(),
+ &*cdata,
+ def.node)
+}
+
pub fn get_trait_item_name_and_kind(cstore: &cstore::CStore, def: ast::DefId)
-> (ast::Name, def::TraitItemKind) {
let cdata = cstore.get_crate_data(def.krate);
impl_items
}
+pub fn get_trait_name(intr: Rc<IdentInterner>,
+ cdata: Cmd,
+ id: ast::NodeId)
+ -> ast::Name {
+ let doc = lookup_item(id, cdata.data());
+ item_name(&*intr, doc)
+}
+
pub fn get_trait_item_name_and_kind(intr: Rc<IdentInterner>,
cdata: Cmd,
id: ast::NodeId)
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as uint);
let doc = reader::get_doc(rp_doc, tag_region_param_def_index);
- let index = reader::doc_as_u64(doc) as uint;
+ let index = reader::doc_as_u64(doc) as u32;
let mut bounds = Vec::new();
reader::tagged_docs(rp_doc, tag_items_data_region, |p| {
}
}
}
- ast::ItemTrait(_, _, _, _, ref ms) => {
+ ast::ItemTrait(_, _, _, ref ms) => {
add_to_index(item, rbml_w, index);
rbml_w.start_tag(tag_items_data_item);
encode_def_id(rbml_w, def_id);
let types =
parse_vec_per_param_space(st, |st| parse_ty(st, |x,y| conv(x,y)));
- return subst::Substs { types: types,
- regions: regions };
+ subst::Substs { types: types,
+ regions: regions }
}
fn parse_region_substs(st: &mut PState, conv: conv_did) -> subst::RegionSubsts {
fn parse_bound_region(st: &mut PState, conv: conv_did) -> ty::BoundRegion {
match next(st) {
'a' => {
- let id = parse_uint(st);
+ let id = parse_u32(st);
assert_eq!(next(st), '|');
ty::BrAnon(id)
}
ty::BrNamed(def, ident.name)
}
'f' => {
- let id = parse_uint(st);
+ let id = parse_u32(st);
assert_eq!(next(st), '|');
ty::BrFresh(id)
}
match next(st) {
'b' => {
assert_eq!(next(st), '[');
- let id = ty::DebruijnIndex::new(parse_uint(st));
+ let id = ty::DebruijnIndex::new(parse_u32(st));
assert_eq!(next(st), '|');
let br = parse_bound_region(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
assert_eq!(next(st), '|');
let space = parse_param_space(st);
assert_eq!(next(st), '|');
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let nm = token::str_to_ident(parse_str(st, ']')[]);
ty::ReEarlyBound(node_id, space, index, nm.name)
-> ty::TraitRef<'tcx> {
let def = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
- ty::TraitRef {def_id: def, substs: substs}
+ ty::TraitRef {def_id: def, substs: st.tcx.mk_substs(substs)}
}
fn parse_ty<'a, 'tcx>(st: &mut PState<'a, 'tcx>, conv: conv_did) -> Ty<'tcx> {
let def = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_enum(st.tcx, def, substs);
+ return ty::mk_enum(st.tcx, def, st.tcx.mk_substs(substs));
}
'x' => {
assert_eq!(next(st), '[');
'p' => {
let did = parse_def(st, TypeParameter, |x,y| conv(x,y));
debug!("parsed ty_param: did={}", did);
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let space = parse_param_space(st);
assert_eq!(next(st), '|');
'&' => {
let r = parse_region(st, |x,y| conv(x,y));
let mt = parse_mt(st, |x,y| conv(x,y));
- return ty::mk_rptr(st.tcx, r, mt);
+ return ty::mk_rptr(st.tcx, st.tcx.mk_region(r), mt);
}
'V' => {
let t = parse_ty(st, |x,y| conv(x,y));
}
'F' => {
let def_id = parse_def(st, NominalType, |x,y| conv(x,y));
- return ty::mk_bare_fn(st.tcx, Some(def_id), parse_bare_fn_ty(st, |x,y| conv(x,y)));
+ return ty::mk_bare_fn(st.tcx, Some(def_id),
+ st.tcx.mk_bare_fn(parse_bare_fn_ty(st, |x,y| conv(x,y))));
}
'G' => {
- return ty::mk_bare_fn(st.tcx, None, parse_bare_fn_ty(st, |x,y| conv(x,y)));
+ return ty::mk_bare_fn(st.tcx, None,
+ st.tcx.mk_bare_fn(parse_bare_fn_ty(st, |x,y| conv(x,y))));
}
'#' => {
let pos = parse_hex(st);
let did = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_struct(st.tcx, did, substs);
+ return ty::mk_struct(st.tcx, did, st.tcx.mk_substs(substs));
}
'k' => {
assert_eq!(next(st), '[');
let region = parse_region(st, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_unboxed_closure(st.tcx, did, region, substs);
+ return ty::mk_unboxed_closure(st.tcx, did,
+ st.tcx.mk_region(region), st.tcx.mk_substs(substs));
}
'e' => {
return ty::mk_err();
};
}
+fn parse_u32(st: &mut PState) -> u32 {
+ let n = parse_uint(st);
+ let m = n as u32;
+ assert_eq!(m as uint, n);
+ m
+}
+
fn parse_param_space(st: &mut PState) -> subst::ParamSpace {
subst::ParamSpace::from_uint(parse_uint(st))
}
let def_id = parse_def(st, NominalType, |x,y| conv(x,y));
let space = parse_param_space(st);
assert_eq!(next(st), '|');
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let associated_with = parse_opt(st, |st| {
parse_def(st, NominalType, |x,y| conv(x,y))
ast::TyF64 => mywrite!(w, "MF"),
}
}
- ty::ty_enum(def, ref substs) => {
+ ty::ty_enum(def, substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
- enc_region(w, cx, r);
+ enc_region(w, cx, *r);
enc_mt(w, cx, mt);
}
ty::ty_vec(t, sz) => {
mywrite!(w, "f");
enc_closure_ty(w, cx, &**f);
}
- ty::ty_bare_fn(Some(def_id), ref f) => {
+ ty::ty_bare_fn(Some(def_id), f) => {
mywrite!(w, "F");
mywrite!(w, "{}|", (cx.ds)(def_id));
enc_bare_fn_ty(w, cx, f);
}
- ty::ty_bare_fn(None, ref f) => {
+ ty::ty_bare_fn(None, f) => {
mywrite!(w, "G");
enc_bare_fn_ty(w, cx, f);
}
ty::ty_param(ParamTy {space, idx: id, def_id: did}) => {
mywrite!(w, "p{}|{}|{}|", (cx.ds)(did), id, space.to_uint())
}
- ty::ty_struct(def, ref substs) => {
+ ty::ty_struct(def, substs) => {
mywrite!(w, "a[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
- ty::ty_unboxed_closure(def, region, ref substs) => {
+ ty::ty_unboxed_closure(def, region, substs) => {
mywrite!(w, "k[{}|", (cx.ds)(def));
- enc_region(w, cx, region);
+ enc_region(w, cx, *region);
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
pub fn enc_trait_ref<'a, 'tcx>(w: &mut SeekableMemWriter, cx: &ctxt<'a, 'tcx>,
s: &ty::TraitRef<'tcx>) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
- enc_substs(w, cx, &s.substs);
+ enc_substs(w, cx, s.substs);
}
pub fn enc_trait_store(w: &mut SeekableMemWriter, cx: &ctxt, s: ty::TraitStore) {
self.call(expr, pred, &**l, Some(&**r).into_iter())
}
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- self.call(expr,
- pred,
- &**base,
- start.iter().chain(end.iter()).map(|x| &**x))
- }
-
ast::ExprRange(ref start, ref end) => {
- let fields = Some(&**start).into_iter()
+ let fields = start.as_ref().map(|e| &**e).into_iter()
.chain(end.as_ref().map(|e| &**e).into_iter());
self.straightline(expr, pred, fields)
}
use middle::traits;
use middle::mem_categorization as mc;
use middle::expr_use_visitor as euv;
+use util::common::ErrorReported;
use util::nodemap::NodeSet;
use syntax::ast;
let ty = ty::node_id_to_type(self.tcx, e.id);
let infcx = infer::new_infer_ctxt(self.tcx);
let mut fulfill_cx = traits::FulfillmentContext::new();
- fulfill_cx.register_builtin_bound(self.tcx, ty, ty::BoundSync,
- traits::ObligationCause::dummy());
- let env = ty::empty_parameter_environment();
- if !fulfill_cx.select_all_or_error(&infcx, &env, self.tcx).is_ok() {
- self.tcx.sess.span_err(e.span, "shared static items must have a \
- type which implements Sync");
+ match traits::poly_trait_ref_for_builtin_bound(self.tcx, ty::BoundSync, ty) {
+ Ok(trait_ref) => {
+ let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
+ fulfill_cx.register_trait_ref(self.tcx, trait_ref, cause);
+ let env = ty::empty_parameter_environment();
+ match fulfill_cx.select_all_or_error(&infcx, &env, self.tcx) {
+ Ok(()) => { },
+ Err(ref errors) => {
+ traits::report_fulfillment_errors(&infcx, errors);
+ }
+ }
+ }
+ Err(ErrorReported) => { }
}
}
}
DefAssociatedPath(TyParamProvenance, ast::Ident),
DefTrait(ast::DefId),
DefPrimTy(ast::PrimTy),
- DefTyParam(ParamSpace, ast::DefId, uint),
+ DefTyParam(ParamSpace, ast::DefId, u32),
DefUse(ast::DefId),
DefUpvar(ast::NodeId, // id of closed over local
ast::NodeId, // expr node that creates the closure
param_env: ParameterEnvironment<'tcx>,
}
-// If the TYPER results in an error, it's because the type check
-// failed (or will fail, when the error is uncovered and reported
-// during writeback). In this case, we just ignore this part of the
-// code.
-//
-// Note that this macro appears similar to try!(), but, unlike try!(),
-// it does not propagate the error.
-macro_rules! return_if_err {
- ($inp: expr) => (
- match $inp {
- Ok(v) => v,
- Err(()) => return
- }
- )
-}
-
/// Whether the elements of an overloaded operation are passed by value or by reference
enum PassArgs {
ByValue,
decl: &ast::FnDecl,
body: &ast::Block) {
for arg in decl.inputs.iter() {
- let arg_ty = return_if_err!(self.typer.node_ty(arg.pat.id));
+ let arg_ty = self.typer.node_ty(arg.pat.id);
let fn_body_scope = region::CodeExtent::from_node_id(body.id);
let arg_cmt = self.mc.cat_rvalue(
pub fn consume_expr(&mut self, expr: &ast::Expr) {
debug!("consume_expr(expr={})", expr.repr(self.tcx()));
- let cmt = return_if_err!(self.mc.cat_expr(expr));
+ let cmt = self.mc.cat_expr(expr);
self.delegate_consume(expr.id, expr.span, cmt);
self.walk_expr(expr);
}
assignment_expr: &ast::Expr,
expr: &ast::Expr,
mode: MutateMode) {
- let cmt = return_if_err!(self.mc.cat_expr(expr));
+ let cmt = self.mc.cat_expr(expr);
self.delegate.mutate(assignment_expr.id, assignment_expr.span, cmt, mode);
self.walk_expr(expr);
}
debug!("borrow_expr(expr={}, r={}, bk={})",
expr.repr(self.tcx()), r.repr(self.tcx()), bk.repr(self.tcx()));
- let cmt = return_if_err!(self.mc.cat_expr(expr));
+ let cmt = self.mc.cat_expr(expr);
self.delegate.borrow(expr.id, expr.span, cmt, r, bk, cause);
// Note: Unlike consume, we can ignore ExprParen. cat_expr
}
ast::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs]
- if !self.walk_overloaded_operator(expr, &**lhs, vec![&**rhs], PassArgs::ByRef) {
- self.select_from_expr(&**lhs);
- self.consume_expr(&**rhs);
+ match rhs.node {
+ ast::ExprRange(ref start, ref end) => {
+ // Hacked slicing syntax (KILLME).
+ let args = match (start, end) {
+ (&Some(ref e1), &Some(ref e2)) => vec![&**e1, &**e2],
+ (&Some(ref e), &None) => vec![&**e],
+ (&None, &Some(ref e)) => vec![&**e],
+ (&None, &None) => Vec::new()
+ };
+ let overloaded =
+ self.walk_overloaded_operator(expr, &**lhs, args, PassArgs::ByRef);
+ assert!(overloaded);
+ }
+ _ => {
+ if !self.walk_overloaded_operator(expr,
+ &**lhs,
+ vec![&**rhs],
+ PassArgs::ByRef) {
+ self.select_from_expr(&**lhs);
+ self.consume_expr(&**rhs);
+ }
+ }
}
}
- ast::ExprSlice(ref base, ref start, ref end, _) => { // base[start..end]
- let args = match (start, end) {
- (&Some(ref e1), &Some(ref e2)) => vec![&**e1, &**e2],
- (&Some(ref e), &None) => vec![&**e],
- (&None, &Some(ref e)) => vec![&**e],
- (&None, &None) => Vec::new()
- };
- let overloaded =
- self.walk_overloaded_operator(expr, &**base, args, PassArgs::ByRef);
- assert!(overloaded);
- }
-
ast::ExprRange(ref start, ref end) => {
- self.consume_expr(&**start);
+ start.as_ref().map(|e| self.consume_expr(&**e));
end.as_ref().map(|e| self.consume_expr(&**e));
}
}
ast::ExprMatch(ref discr, ref arms, _) => {
- let discr_cmt = return_if_err!(self.mc.cat_expr(&**discr));
+ let discr_cmt = self.mc.cat_expr(&**discr);
self.borrow_expr(&**discr, ty::ReEmpty, ty::ImmBorrow, MatchDiscriminant);
// treatment of the discriminant is handled while walking the arms.
// Fetch the type of the value that the iteration yields to
// produce the pattern's categorized mutable type.
- let pattern_type = return_if_err!(self.typer.node_ty(pat.id));
+ let pattern_type = self.typer.node_ty(pat.id);
let blk_scope = region::CodeExtent::from_node_id(blk.id);
let pat_cmt = self.mc.cat_rvalue(pat.id,
pat.span,
}
fn walk_callee(&mut self, call: &ast::Expr, callee: &ast::Expr) {
- let callee_ty = ty::expr_ty_adjusted(self.tcx(), callee);
+ let callee_ty = self.typer.expr_ty_adjusted(callee);
debug!("walk_callee: callee={} callee_ty={}",
callee.repr(self.tcx()), callee_ty.repr(self.tcx()));
let call_scope = region::CodeExtent::from_node_id(call.id);
}
_ => {
let overloaded_call_type =
- match self.tcx()
- .method_map
- .borrow()
- .get(&MethodCall::expr(call.id)) {
- Some(ref method_callee) => {
- OverloadedCallType::from_method_origin(
- self.tcx(),
- &method_callee.origin)
- }
- None => {
- self.tcx().sess.span_bug(
- callee.span,
- format!("unexpected callee type {}",
- callee_ty.repr(self.tcx()))[])
- }
- };
+ match self.typer.node_method_origin(MethodCall::expr(call.id)) {
+ Some(method_origin) => {
+ OverloadedCallType::from_method_origin(
+ self.tcx(),
+ &method_origin)
+ }
+ None => {
+ self.tcx().sess.span_bug(
+ callee.span,
+ format!("unexpected callee type {}",
+ callee_ty.repr(self.tcx())).as_slice())
+ }
+ };
match overloaded_call_type {
FnMutOverloadedCall => {
self.borrow_expr(callee,
// "assigns", which is handled by
// `walk_pat`:
self.walk_expr(&**expr);
- let init_cmt = return_if_err!(self.mc.cat_expr(&**expr));
+ let init_cmt = self.mc.cat_expr(&**expr);
self.walk_irrefutable_pat(init_cmt, &*local.pat);
}
}
None => { return; }
};
- let with_cmt = return_if_err!(self.mc.cat_expr(&*with_expr));
+ let with_cmt = self.mc.cat_expr(&*with_expr);
// Select just those fields of the `with`
// expression that will actually be used
let with_fields = match with_cmt.ty.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
ty::struct_fields(self.tcx(), did, substs)
}
_ => {
// rvalue.
debug!("walk_adjustment(AutoAddEnv|AdjustReifyFnPointer)");
let cmt_unadjusted =
- return_if_err!(self.mc.cat_expr_unadjusted(expr));
+ self.mc.cat_expr_unadjusted(expr);
self.delegate_consume(expr.id, expr.span, cmt_unadjusted);
}
ty::AdjustDerefRef(ty::AutoDerefRef {
match self.typer.node_method_ty(deref_id) {
None => {}
Some(method_ty) => {
- let cmt = return_if_err!(self.mc.cat_expr_autoderefd(expr, i));
+ let cmt = self.mc.cat_expr_autoderefd(expr, i);
let self_ty = ty::ty_fn_args(method_ty)[0];
let (m, r) = match self_ty.sty {
ty::ty_rptr(r, ref m) => (m.mutbl, r),
};
let bk = ty::BorrowKind::from_mutbl(m);
self.delegate.borrow(expr.id, expr.span, cmt,
- r, bk, AutoRef);
+ *r, bk, AutoRef);
}
}
}
assert!(n == 1, format!("Expected exactly 1 deref with Uniq \
AutoRefs, found: {}", n));
let cmt_unadjusted =
- return_if_err!(self.mc.cat_expr_unadjusted(expr));
+ self.mc.cat_expr_unadjusted(expr);
self.delegate_consume(expr.id, expr.span, cmt_unadjusted);
return;
}
_ => {}
}
- let cmt_derefd = return_if_err!(
- self.mc.cat_expr_autoderefd(expr, n));
+ let cmt_derefd = self.mc.cat_expr_autoderefd(expr, n);
debug!("walk_adjustment: cmt_derefd={}",
cmt_derefd.repr(self.tcx()));
mode: &mut TrackMatchMode<Span>) {
debug!("determine_pat_move_mode cmt_discr={} pat={}", cmt_discr.repr(self.tcx()),
pat.repr(self.tcx()));
- return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| {
+ self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| {
let tcx = self.typer.tcx();
let def_map = &self.typer.tcx().def_map;
if pat_util::pat_is_binding(def_map, pat) {
}
}
}
- }));
+ });
}
/// The core driver for walking a pattern; `match_mode` must be
let mc = &self.mc;
let typer = self.typer;
- let tcx = typer.tcx();
let def_map = &self.typer.tcx().def_map;
let delegate = &mut self.delegate;
let param_env = &mut self.param_env;
- return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
+
+ mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
if pat_util::pat_is_binding(def_map, pat) {
let tcx = typer.tcx();
match_mode);
// pat_ty: the type of the binding being produced.
- let pat_ty = return_if_err!(typer.node_ty(pat.id));
+ let pat_ty = typer.node_ty(pat.id);
// Each match binding is effectively an assignment to the
// binding being produced.
let def = def_map.borrow()[pat.id].clone();
- match mc.cat_def(pat.id, pat.span, pat_ty, def) {
- Ok(binding_cmt) => {
- delegate.mutate(pat.id, pat.span, binding_cmt, Init);
- }
- Err(_) => { }
- }
+ let binding_cmt = mc.cat_def(pat.id, pat.span, pat_ty, def);
+ delegate.mutate(pat.id, pat.span, binding_cmt, Init);
// It is also a borrow or copy/move of the value being matched.
match pat.node {
// borrow of the elements of the vector being
// matched.
- let (slice_cmt, slice_mutbl, slice_r) = {
- match mc.cat_slice_pattern(cmt_pat, &**slice_pat) {
- Ok(v) => v,
- Err(()) => {
- tcx.sess.span_bug(slice_pat.span,
- "Err from mc")
- }
- }
- };
+ let (slice_cmt, slice_mutbl, slice_r) =
+ mc.cat_slice_pattern(cmt_pat, &**slice_pat);
// Note: We declare here that the borrow
// occurs upon entering the `[...]`
_ => { }
}
}
- }));
+ });
// Do a second pass over the pattern, calling `matched_pat` on
// the interior nodes (enum variants and structs), as opposed
// to the above loop's visit of than the bindings that form
// the leaves of the pattern tree structure.
- return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
+ mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
let def_map = def_map.borrow();
let tcx = typer.tcx();
// cases either.
}
}
- }));
+ });
}
fn walk_captures(&mut self, closure_expr: &ast::Expr) {
freevars: &[ty::Freevar]) {
for freevar in freevars.iter() {
let id_var = freevar.def.def_id().node;
- let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
- closure_expr.span,
- freevar.def));
+ let cmt_var = self.cat_captured_var(closure_expr.id,
+ closure_expr.span,
+ freevar.def);
// Lookup the kind of borrow the callee requires, as
// inferred by regionbk
let upvar_id = ty::UpvarId { var_id: id_var,
closure_expr_id: closure_expr.id };
- let upvar_borrow = self.tcx().upvar_borrow_map.borrow()[upvar_id].clone();
+ let upvar_borrow = self.typer.upvar_borrow(upvar_id);
self.delegate.borrow(closure_expr.id,
closure_expr.span,
closure_expr: &ast::Expr,
freevars: &[ty::Freevar]) {
for freevar in freevars.iter() {
- let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
- closure_expr.span,
- freevar.def));
- let mode = copy_or_move(self.tcx(),
- cmt_var.ty,
- &self.param_env,
- CaptureMove);
+ let cmt_var = self.cat_captured_var(closure_expr.id,
+ closure_expr.span,
+ freevar.def);
+ let mode = copy_or_move(self.tcx(), cmt_var.ty,
+ &self.param_env, CaptureMove);
self.delegate.consume(closure_expr.id, freevar.span, cmt_var, mode);
}
}
closure_id: ast::NodeId,
closure_span: Span,
upvar_def: def::Def)
- -> mc::McResult<mc::cmt<'tcx>> {
+ -> mc::cmt<'tcx> {
// Create the cmt for the variable being borrowed, from the
// caller's perspective
let var_id = upvar_def.def_id().node;
- let var_ty = try!(self.typer.node_ty(var_id));
+ let var_ty = self.typer.node_ty(var_id);
self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def)
}
}
Copy
}
}
-
self.unpack_actual_value(a, |a| {
match a.sty {
- ty::ty_bare_fn(Some(a_def_id), ref a_f) => {
+ ty::ty_bare_fn(Some(a_def_id), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
};
let a_borrowed = ty::mk_rptr(self.tcx(),
- r_borrow,
+ self.tcx().mk_region(r_borrow),
mt {ty: inner_ty, mutbl: mutbl_b});
try!(sub.tys(a_borrowed, b));
let coercion = Coercion(self.get_ref().trace.clone());
let r_borrow = self.get_ref().infcx.next_region_var(coercion);
let ty = ty::mk_rptr(self.tcx(),
- r_borrow,
+ self.tcx().mk_region(r_borrow),
ty::mt{ty: ty, mutbl: mt_b.mutbl});
try!(self.get_ref().infcx.try(|_| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
bounds: bounds },
ty_a)))
}
- (&ty::ty_struct(did_a, ref substs_a), &ty::ty_struct(did_b, ref substs_b))
+ (&ty::ty_struct(did_a, substs_a), &ty::ty_struct(did_b, substs_b))
if did_a == did_b => {
debug!("unsizing a struct");
// Try unsizing each type param in turn to see if we end up with ty_b.
// Check that the whole types match.
let mut new_substs = substs_a.clone();
new_substs.types.get_mut_slice(subst::TypeSpace)[i] = new_tp;
- let ty = ty::mk_struct(tcx, did_a, new_substs);
+ let ty = ty::mk_struct(tcx, did_a, tcx.mk_substs(new_substs));
if self.get_ref().infcx.try(|_| sub.tys(ty, ty_b)).is_err() {
debug!("Unsized type parameter '{}', but still \
could not match types {} and {}",
let r_a = self.get_ref().infcx.next_region_var(coercion);
self.coerce_object(a, b, b_mutbl,
- |tr| ty::mk_rptr(tcx, r_a, ty::mt{ mutbl: b_mutbl, ty: tr }),
+ |tr| ty::mk_rptr(tcx, tcx.mk_region(r_a),
+ ty::mt{ mutbl: b_mutbl, ty: tr }),
|| AutoPtr(r_a, b_mutbl, None))
}
b.repr(self.tcx()));
match a.sty {
- ty::ty_bare_fn(Some(a_def_id), ref f) => {
+ ty::ty_bare_fn(Some(a_def_id), f) => {
self.coerce_from_fn_item(a, a_def_id, f, b)
}
_ => {
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
fn_def_id_a: ast::DefId,
- fn_ty_a: &ty::BareFnTy<'tcx>,
+ fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
/*!
Ok(Some(adj))
}
ty::ty_bare_fn(None, _) => {
- let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, (*fn_ty_a).clone());
+ let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer(fn_def_id_a)))
}
if a.def_id != b.def_id {
Err(ty::terr_traits(expected_found(self, a.def_id, b.def_id)))
} else {
- let substs = try!(self.substs(a.def_id, &a.substs, &b.substs));
- Ok(ty::TraitRef { def_id: a.def_id, substs: substs })
+ let substs = try!(self.substs(a.def_id, a.substs, b.substs));
+ Ok(ty::TraitRef { def_id: a.def_id, substs: self.tcx().mk_substs(substs) })
}
}
Ok(a)
}
- (&ty::ty_enum(a_id, ref a_substs),
- &ty::ty_enum(b_id, ref b_substs))
+ (&ty::ty_enum(a_id, a_substs),
+ &ty::ty_enum(b_id, b_substs))
if a_id == b_id => {
let substs = try!(this.substs(a_id,
a_substs,
b_substs));
- Ok(ty::mk_enum(tcx, a_id, substs))
+ Ok(ty::mk_enum(tcx, a_id, tcx.mk_substs(substs)))
}
(&ty::ty_trait(ref a_),
Ok(ty::mk_trait(tcx, principal, bounds))
}
- (&ty::ty_struct(a_id, ref a_substs), &ty::ty_struct(b_id, ref b_substs))
+ (&ty::ty_struct(a_id, a_substs), &ty::ty_struct(b_id, b_substs))
if a_id == b_id => {
let substs = try!(this.substs(a_id, a_substs, b_substs));
- Ok(ty::mk_struct(tcx, a_id, substs))
+ Ok(ty::mk_struct(tcx, a_id, tcx.mk_substs(substs)))
}
- (&ty::ty_unboxed_closure(a_id, a_region, ref a_substs),
- &ty::ty_unboxed_closure(b_id, b_region, ref b_substs))
+ (&ty::ty_unboxed_closure(a_id, a_region, a_substs),
+ &ty::ty_unboxed_closure(b_id, b_region, b_substs))
if a_id == b_id => {
// All ty_unboxed_closure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
- let region = try!(this.equate().regions(a_region, b_region));
+ let region = try!(this.equate().regions(*a_region, *b_region));
let substs = try!(this.substs_variances(None, a_substs, b_substs));
- Ok(ty::mk_unboxed_closure(tcx, a_id, region, substs))
+ Ok(ty::mk_unboxed_closure(tcx, a_id, tcx.mk_region(region), tcx.mk_substs(substs)))
}
(&ty::ty_uniq(a_inner), &ty::ty_uniq(b_inner)) => {
}
(&ty::ty_rptr(a_r, ref a_mt), &ty::ty_rptr(b_r, ref b_mt)) => {
- let r = try!(this.contraregions(a_r, b_r));
+ let r = try!(this.contraregions(*a_r, *b_r));
// FIXME(14985) If we have mutable references to trait objects, we
// used to use covariant subtyping. I have preserved this behaviour,
// even though it is probably incorrect. So don't go down the usual
}
_ => try!(this.mts(a_mt, b_mt))
};
- Ok(ty::mk_rptr(tcx, r, mt))
+ Ok(ty::mk_rptr(tcx, tcx.mk_region(r), mt))
}
(&ty::ty_vec(a_t, Some(sz_a)), &ty::ty_vec(b_t, Some(sz_b))) => {
}
}
- (&ty::ty_bare_fn(a_opt_def_id, ref a_fty), &ty::ty_bare_fn(b_opt_def_id, ref b_fty))
+ (&ty::ty_bare_fn(a_opt_def_id, a_fty), &ty::ty_bare_fn(b_opt_def_id, b_fty))
if a_opt_def_id == b_opt_def_id =>
{
let fty = try!(this.bare_fn_tys(a_fty, b_fty));
- Ok(ty::mk_bare_fn(tcx, a_opt_def_id, fty))
+ Ok(ty::mk_bare_fn(tcx, a_opt_def_id, tcx.mk_bare_fn(fty)))
}
(&ty::ty_closure(ref a_fty), &ty::ty_closure(ref b_fty)) => {
self.infcx.next_region_var(MiscVariable(self.span))
}
}
-
-
struct RebuildPathInfo<'a> {
path: &'a ast::Path,
// indexes to insert lifetime on path.lifetimes
- indexes: Vec<uint>,
+ indexes: Vec<u32>,
// number of lifetimes we expect to see on the type referred by `path`
// (e.g., expected=1 for struct Foo<'a>)
- expected: uint,
- anon_nums: &'a HashSet<uint>,
+ expected: u32,
+ anon_nums: &'a HashSet<u32>,
region_names: &'a HashSet<ast::Name>
}
generics: &'a ast::Generics,
same_regions: &'a [SameRegions],
life_giver: &'a LifeGiver,
- cur_anon: Cell<uint>,
- inserted_anons: RefCell<HashSet<uint>>,
+ cur_anon: Cell<u32>,
+ inserted_anons: RefCell<HashSet<u32>>,
}
enum FreshOrKept {
}
fn extract_anon_nums_and_names(&self, same_regions: &SameRegions)
- -> (HashSet<uint>, HashSet<ast::Name>) {
+ -> (HashSet<u32>, HashSet<ast::Name>) {
let mut anon_nums = HashSet::new();
let mut region_names = HashSet::new();
for br in same_regions.regions.iter() {
all_region_names
}
- fn inc_cur_anon(&self, n: uint) {
+ fn inc_cur_anon(&self, n: u32) {
let anon = self.cur_anon.get();
self.cur_anon.set(anon+n);
}
self.cur_anon.set(anon);
}
- fn inc_and_offset_cur_anon(&self, n: uint) {
+ fn inc_and_offset_cur_anon(&self, n: u32) {
self.inc_cur_anon(n);
self.offset_cur_anon();
}
- fn track_anon(&self, anon: uint) {
+ fn track_anon(&self, anon: u32) {
self.inserted_anons.borrow_mut().insert(anon);
}
ident: ty_param.ident,
id: ty_param.id,
bounds: bounds,
- unbound: ty_param.unbound.clone(),
default: ty_param.default.clone(),
span: ty_param.span,
}
// be passing down a map.
ast::RegionTyParamBound(lt)
}
- &ast::TraitTyParamBound(ref poly_tr) => {
+ &ast::TraitTyParamBound(ref poly_tr, modifier) => {
let tr = &poly_tr.trait_ref;
let last_seg = tr.path.segments.last().unwrap();
let mut insert = Vec::new();
let lifetimes = last_seg.parameters.lifetimes();
for (i, lt) in lifetimes.iter().enumerate() {
if region_names.contains(<.name) {
- insert.push(i);
+ insert.push(i as u32);
}
}
let rebuild_info = RebuildPathInfo {
path: &tr.path,
indexes: insert,
- expected: lifetimes.len(),
+ expected: lifetimes.len() as u32,
anon_nums: &HashSet::new(),
region_names: region_names
};
path: new_path,
ref_id: tr.ref_id,
}
- })
+ }, modifier)
}
}
})
fn rebuild_expl_self(&self,
expl_self_opt: Option<ast::ExplicitSelf_>,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> Option<ast::ExplicitSelf_> {
match expl_self_opt {
fn rebuild_args_ty(&self,
inputs: &[ast::Arg],
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> Vec<ast::Arg> {
let mut new_inputs = Vec::new();
fn rebuild_output(&self, ty: &ast::FunctionRetTy,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>) -> ast::FunctionRetTy {
match *ty {
ast::Return(ref ret_ty) => ast::Return(
fn rebuild_arg_ty_or_output(&self,
ty: &ast::Ty,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> P<ast::Ty> {
let mut new_ty = P(ty.clone());
let generics = ty::lookup_item_type(self.tcx, did).generics;
let expected =
- generics.regions.len(subst::TypeSpace);
+ generics.regions.len(subst::TypeSpace) as u32;
let lifetimes =
path.segments.last().unwrap().parameters.lifetimes();
let mut insert = Vec::new();
for (i, a) in range(anon,
anon+expected).enumerate() {
if anon_nums.contains(&a) {
- insert.push(i);
+ insert.push(i as u32);
}
self.track_anon(a);
}
} else {
for (i, lt) in lifetimes.iter().enumerate() {
if region_names.contains(<.name) {
- insert.push(i);
+ insert.push(i as u32);
}
}
}
}
} else {
for (i, lt) in data.lifetimes.iter().enumerate() {
- if indexes.contains(&i) {
+ if indexes.contains(&(i as u32)) {
new_lts.push(lifetime);
} else {
new_lts.push(*lt);
pub struct TypeFreshener<'a, 'tcx:'a> {
infcx: &'a InferCtxt<'a, 'tcx>,
- freshen_count: uint,
+ freshen_count: u32,
freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
}
key: ty::InferTy,
freshener: F)
-> Ty<'tcx> where
- F: FnOnce(uint) -> ty::InferTy,
+ F: FnOnce(u32) -> ty::InferTy,
{
match opt_ty {
Some(ty) => { return ty.fold_with(self); }
use util::ppaux::Repr;
use std::cell::{Cell, RefCell};
-use std::uint;
+use std::u32;
use syntax::ast;
mod doc;
lubs: RefCell<CombineMap>,
glbs: RefCell<CombineMap>,
- skolemization_count: Cell<uint>,
- bound_count: Cell<uint>,
+ skolemization_count: Cell<u32>,
+ bound_count: Cell<u32>,
// The undo log records actions that might later be undone.
//
#[allow(missing_copy_implementations)]
pub struct RegionSnapshot {
length: uint,
- skolemization_count: uint,
+ skolemization_count: u32,
}
impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
AddVar(vid) => {
let mut var_origins = self.var_origins.borrow_mut();
var_origins.pop().unwrap();
- assert_eq!(var_origins.len(), vid.index);
+ assert_eq!(var_origins.len(), vid.index as uint);
}
AddConstraint(ref constraint) => {
self.constraints.borrow_mut().remove(constraint);
self.skolemization_count.set(snapshot.skolemization_count);
}
- pub fn num_vars(&self) -> uint {
- self.var_origins.borrow().len()
+ pub fn num_vars(&self) -> u32 {
+ let len = self.var_origins.borrow().len();
+ // enforce no overflow
+ assert!(len as u32 as uint == len);
+ len as u32
}
pub fn new_region_var(&self, origin: RegionVariableOrigin<'tcx>) -> RegionVid {
match *self.values.borrow() {
None => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[rid.index].span(),
+ (*self.var_origins.borrow())[rid.index as uint].span(),
"attempt to resolve region variable before values have \
been computed!")
}
(ReInfer(ReVar(v_id)), _) | (_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[v_id.index].span(),
+ (*self.var_origins.borrow())[v_id.index as uint].span(),
format!("lub_concrete_regions invoked with \
non-concrete regions: {}, {}",
a,
(ReInfer(ReVar(v_id)), _) |
(_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[v_id.index].span(),
+ (*self.var_origins.borrow())[v_id.index as uint].span(),
format!("glb_concrete_regions invoked with \
non-concrete regions: {}, {}",
a,
}
fn construct_var_data(&self) -> Vec<VarData> {
- Vec::from_fn(self.num_vars(), |_| {
+ Vec::from_fn(self.num_vars() as uint, |_| {
VarData {
// All nodes are initially classified as contracting; during
// the expansion phase, we will shift the classification for
.repr(self.tcx));
match *constraint {
ConstrainRegSubVar(a_region, b_vid) => {
- let b_data = &mut var_data[b_vid.index];
+ let b_data = &mut var_data[b_vid.index as uint];
self.expand_node(a_region, b_vid, b_data)
}
ConstrainVarSubVar(a_vid, b_vid) => {
- match var_data[a_vid.index].value {
+ match var_data[a_vid.index as uint].value {
NoValue | ErrorValue => false,
Value(a_region) => {
- let b_node = &mut var_data[b_vid.index];
+ let b_node = &mut var_data[b_vid.index as uint];
self.expand_node(a_region, b_vid, b_node)
}
}
false
}
ConstrainVarSubVar(a_vid, b_vid) => {
- match var_data[b_vid.index].value {
+ match var_data[b_vid.index as uint].value {
NoValue | ErrorValue => false,
Value(b_region) => {
- let a_data = &mut var_data[a_vid.index];
+ let a_data = &mut var_data[a_vid.index as uint];
self.contract_node(a_vid, a_data, b_region)
}
}
}
ConstrainVarSubReg(a_vid, b_region) => {
- let a_data = &mut var_data[a_vid.index];
+ let a_data = &mut var_data[a_vid.index as uint];
self.contract_node(a_vid, a_data, b_region)
}
}
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
- let mut dup_vec = Vec::from_elem(self.num_vars(), uint::MAX);
+ let mut dup_vec = Vec::from_elem(self.num_vars() as uint, u32::MAX);
let mut opt_graph = None;
- for idx in range(0u, self.num_vars()) {
+ for idx in range(0u, self.num_vars() as uint) {
match var_data[idx].value {
Value(_) => {
/* Inference successful */
}
let graph = opt_graph.as_ref().unwrap();
- let node_vid = RegionVid { index: idx };
+ let node_vid = RegionVid { index: idx as u32 };
match var_data[idx].classification {
Expanding => {
self.collect_error_for_expanding_node(
}
}
- Vec::from_fn(self.num_vars(), |idx| var_data[idx].value)
+ Vec::from_fn(self.num_vars() as uint, |idx| var_data[idx].value)
}
fn construct_graph(&self) -> RegionGraph {
let constraints = self.constraints.borrow();
let num_edges = constraints.len();
- let mut graph = graph::Graph::with_capacity(num_vars + 1,
+ let mut graph = graph::Graph::with_capacity(num_vars as uint + 1,
num_edges);
- for _ in range(0u, num_vars) {
+ for _ in range(0, num_vars) {
graph.add_node(());
}
let dummy_idx = graph.add_node(());
for (constraint, _) in constraints.iter() {
match *constraint {
ConstrainVarSubVar(a_id, b_id) => {
- graph.add_edge(NodeIndex(a_id.index),
- NodeIndex(b_id.index),
+ graph.add_edge(NodeIndex(a_id.index as uint),
+ NodeIndex(b_id.index as uint),
*constraint);
}
ConstrainRegSubVar(_, b_id) => {
graph.add_edge(dummy_idx,
- NodeIndex(b_id.index),
+ NodeIndex(b_id.index as uint),
*constraint);
}
ConstrainVarSubReg(a_id, _) => {
- graph.add_edge(NodeIndex(a_id.index),
+ graph.add_edge(NodeIndex(a_id.index as uint),
dummy_idx,
*constraint);
}
&self,
graph: &RegionGraph,
var_data: &[VarData],
- dup_vec: &mut [uint],
+ dup_vec: &mut [u32],
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>)
{
if !self.is_subregion_of(lower_bound.region,
upper_bound.region) {
errors.push(SubSupConflict(
- (*self.var_origins.borrow())[node_idx.index].clone(),
+ (*self.var_origins.borrow())[node_idx.index as uint].clone(),
lower_bound.origin.clone(),
lower_bound.region,
upper_bound.origin.clone(),
}
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[node_idx.index].span(),
+ (*self.var_origins.borrow())[node_idx.index as uint].span(),
format!("collect_error_for_expanding_node() could not find error \
for var {}, lower_bounds={}, upper_bounds={}",
node_idx,
&self,
graph: &RegionGraph,
var_data: &[VarData],
- dup_vec: &mut [uint],
+ dup_vec: &mut [u32],
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>)
{
Ok(_) => {}
Err(_) => {
errors.push(SupSupConflict(
- (*self.var_origins.borrow())[node_idx.index].clone(),
+ (*self.var_origins.borrow())[node_idx.index as uint].clone(),
upper_bound_1.origin.clone(),
upper_bound_1.region,
upper_bound_2.origin.clone(),
}
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[node_idx.index].span(),
+ (*self.var_origins.borrow())[node_idx.index as uint].span(),
format!("collect_error_for_contracting_node() could not find error \
for var {}, upper_bounds={}",
node_idx,
var_data: &[VarData],
orig_node_idx: RegionVid,
dir: Direction,
- dup_vec: &mut [uint])
+ dup_vec: &mut [u32])
-> (Vec<RegionAndOrigin<'tcx>>, bool) {
struct WalkState<'tcx> {
set: FnvHashSet<RegionVid>,
while !state.stack.is_empty() {
let node_idx = state.stack.pop().unwrap();
- let classification = var_data[node_idx.index].classification;
+ let classification = var_data[node_idx.index as uint].classification;
// check whether we've visited this node on some previous walk
- if dup_vec[node_idx.index] == uint::MAX {
- dup_vec[node_idx.index] = orig_node_idx.index;
- } else if dup_vec[node_idx.index] != orig_node_idx.index {
+ if dup_vec[node_idx.index as uint] == u32::MAX {
+ dup_vec[node_idx.index as uint] = orig_node_idx.index;
+ } else if dup_vec[node_idx.index as uint] != orig_node_idx.index {
state.dup_found = true;
}
dir: Direction) {
debug!("process_edges(source_vid={}, dir={})", source_vid, dir);
- let source_node_index = NodeIndex(source_vid.index);
+ let source_node_index = NodeIndex(source_vid.index as uint);
graph.each_adjacent_edge(source_node_index, dir, |_, edge| {
match edge.data {
ConstrainVarSubVar(from_vid, to_vid) => {
}
fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
- match values[rid.index] {
+ match values[rid.index as uint] {
Value(r) => r,
NoValue => ReEmpty, // No constraints, return ty::ReEmpty
ErrorValue => ReStatic, // Previously reported error.
use middle::ty::{mod, Ty};
use std::cmp::min;
use std::mem;
-use std::uint;
+use std::u32;
use util::snapshot_vec as sv;
pub struct TypeVariableTable<'tcx> {
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
- relations(self.values.get_mut(a.index))
+ relations(self.values.get_mut(a.index as uint))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
- self.values.get(vid.index).diverging
+ self.values.get(vid.index as uint).diverging
}
/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
let old_value = {
- let value_ptr = &mut self.values.get_mut(vid.index).value;
+ let value_ptr = &mut self.values.get_mut(vid.index as uint).value;
mem::replace(value_ptr, Known(ty))
};
value: Bounded(vec![]),
diverging: diverging
});
- ty::TyVid { index: index }
+ ty::TyVid { index: index as u32 }
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
- match self.values.get(vid.index).value {
+ match self.values.get(vid.index as uint).value {
Bounded(..) => None,
Known(t) => Some(t)
}
* unified `V1` with `T1`, this function would return `{T0}`.
*/
- let mut new_elem_threshold = uint::MAX;
+ let mut new_elem_threshold = u32::MAX;
let mut escaping_types = Vec::new();
let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len());
// always be the first one we see). Note that this
// action must precede those variables being
// specified.
- new_elem_threshold = min(new_elem_threshold, index);
+ new_elem_threshold = min(new_elem_threshold, index as u32);
debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
}
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
- values[vid.index].value = Bounded(relations);
+ values[vid.index as uint].value = Bounded(relations);
}
Relate(a, b) => {
- relations(&mut (*values)[a.index]).pop();
- relations(&mut (*values)[b.index]).pop();
+ relations(&mut (*values)[a.index as uint]).pop();
+ relations(&mut (*values)[b.index as uint]).pop();
}
}
}
Bounded(ref mut relations) => relations
}
}
-
// Integral type keys
impl<'tcx> UnifyKey<'tcx, Option<IntVarValue>> for ty::IntVid {
- fn index(&self) -> uint { self.index }
+ fn index(&self) -> uint { self.index as uint }
- fn from_index(i: uint) -> ty::IntVid { ty::IntVid { index: i } }
+ fn from_index(i: uint) -> ty::IntVid { ty::IntVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt)
-> &'v RefCell<UnificationTable<ty::IntVid, Option<IntVarValue>>>
// Floating point type keys
impl<'tcx> UnifyKey<'tcx, Option<ast::FloatTy>> for ty::FloatVid {
- fn index(&self) -> uint { self.index }
+ fn index(&self) -> uint { self.index as uint }
- fn from_index(i: uint) -> ty::FloatVid { ty::FloatVid { index: i } }
+ fn from_index(i: uint) -> ty::FloatVid { ty::FloatVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt)
-> &'v RefCell<UnificationTable<ty::FloatVid, Option<ast::FloatTy>>>
// No need to continue; we now know the result.
false
}
- ty::ty_enum(did, ref substs) => {
+ ty::ty_enum(did, substs) => {
for enum_variant in (*ty::enum_variants(tcx, did)).iter() {
for argument_type in enum_variant.args.iter() {
let argument_type = argument_type.subst(tcx, substs);
// Don't traverse substitutions.
false
}
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
for field in ty::struct_fields(tcx, did, substs).iter() {
result = result ||
type_size_is_affected_by_type_parameters(tcx,
SliceMutTraitLangItem, "slice_mut", slice_mut_trait;
RangeStructLangItem, "range", range_struct;
RangeFromStructLangItem, "range_from", range_from_struct;
+ RangeToStructLangItem, "range_to", range_to_struct;
FullRangeStructLangItem, "full_range", full_range_struct;
UnsafeTypeLangItem, "unsafe", unsafe_type;
NoSyncItem, "no_sync_bound", no_sync_bound;
ManagedItem, "managed_bound", managed_bound;
+ NonZeroItem, "non_zero", non_zero;
+
IteratorItem, "iterator", iterator;
StackExhaustedLangItem, "stack_exhausted", stack_exhausted;
ast::ExprBlock(..) | ast::ExprAssign(..) | ast::ExprAssignOp(..) |
ast::ExprMac(..) | ast::ExprStruct(..) | ast::ExprRepeat(..) |
ast::ExprParen(..) | ast::ExprInlineAsm(..) | ast::ExprBox(..) |
- ast::ExprSlice(..) | ast::ExprRange(..) => {
+ ast::ExprRange(..) => {
visit::walk_expr(ir, expr);
}
}
self.propagate_through_expr(&**l, r_succ)
}
- ast::ExprSlice(ref e1, ref e2, ref e3, _) => {
- let succ = e3.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- self.propagate_through_expr(&**e1, succ)
- }
-
ast::ExprRange(ref e1, ref e2) => {
let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- self.propagate_through_expr(&**e1, succ)
+ e1.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ))
}
ast::ExprBox(None, ref e) |
ast::ExprBlock(..) | ast::ExprMac(..) | ast::ExprAddrOf(..) |
ast::ExprStruct(..) | ast::ExprRepeat(..) | ast::ExprParen(..) |
ast::ExprClosure(..) | ast::ExprPath(..) | ast::ExprBox(..) |
- ast::ExprSlice(..) | ast::ExprRange(..) => {
+ ast::ExprRange(..) => {
visit::walk_expr(this, expr);
}
ast::ExprIfLet(..) => {
// different kinds of pointers:
#[deriving(Clone, Copy, PartialEq, Eq, Hash, Show)]
pub enum PointerKind {
- OwnedPtr,
+ Unique,
BorrowedPtr(ty::BorrowKind, ty::Region),
Implicit(ty::BorrowKind, ty::Region), // Implicit deref of a borrowed ptr.
UnsafePtr(ast::Mutability)
match t.sty {
ty::ty_uniq(_) |
ty::ty_closure(box ty::ClosureTy {store: ty::UniqTraitStore, ..}) => {
- Some(deref_ptr(OwnedPtr))
+ Some(deref_ptr(Unique))
}
ty::ty_rptr(r, mt) => {
let kind = ty::BorrowKind::from_mutbl(mt.mutbl);
- Some(deref_ptr(BorrowedPtr(kind, r)))
+ Some(deref_ptr(BorrowedPtr(kind, *r)))
}
ty::ty_closure(box ty::ClosureTy {
impl<'t,TYPER:'t> Copy for MemCategorizationContext<'t,TYPER> {}
-pub type McResult<T> = Result<T, ()>;
-
/// The `Typer` trait provides the interface for the mem-categorization
/// module to the results of the type check. It can be used to query
/// the type assigned to an expression node, to inquire after adjustments,
/// can be sure that only `Ok` results will occur.
pub trait Typer<'tcx> {
fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>>;
+ fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx>;
+ fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx>;
fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>>;
+ fn node_method_origin(&self, method_call: ty::MethodCall)
+ -> Option<ty::MethodOrigin<'tcx>>;
fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>>;
fn is_method_call(&self, id: ast::NodeId) -> bool;
fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<region::CodeExtent>;
pub fn from_pointer_kind(base_mutbl: MutabilityCategory,
ptr: PointerKind) -> MutabilityCategory {
match ptr {
- OwnedPtr => {
+ Unique => {
base_mutbl.inherit()
}
BorrowedPtr(borrow_kind, _) | Implicit(borrow_kind, _) => {
}
}
-macro_rules! if_ok {
- ($inp: expr) => (
- match $inp {
- Ok(v) => { v }
- Err(e) => { return Err(e); }
- }
- )
-}
-
impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
pub fn new(typer: &'t TYPER) -> MemCategorizationContext<'t,TYPER> {
MemCategorizationContext { typer: typer }
self.typer.tcx()
}
- fn expr_ty(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
+ fn expr_ty(&self, expr: &ast::Expr) -> Ty<'tcx> {
self.typer.node_ty(expr.id)
}
- fn expr_ty_adjusted(&self, expr: &ast::Expr) -> McResult<Ty<'tcx>> {
- let unadjusted_ty = if_ok!(self.expr_ty(expr));
- Ok(ty::adjust_ty(self.tcx(), expr.span, expr.id, unadjusted_ty,
- self.typer.adjustments().borrow().get(&expr.id),
- |method_call| self.typer.node_method_ty(method_call)))
+ fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ let unadjusted_ty = self.expr_ty(expr);
+ ty::adjust_ty(self.tcx(), expr.span, expr.id, unadjusted_ty,
+ self.typer.adjustments().borrow().get(&expr.id),
+ |method_call| self.typer.node_method_ty(method_call))
}
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
+ fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
self.typer.node_ty(id)
}
- fn pat_ty(&self, pat: &ast::Pat) -> McResult<Ty<'tcx>> {
+ fn pat_ty(&self, pat: &ast::Pat) -> Ty<'tcx> {
let tcx = self.typer.tcx();
let base_ty = self.typer.node_ty(pat.id);
// FIXME (Issue #18207): This code detects whether we are
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
- base_ty.map(|t| {
- ty::deref(t, false).unwrap_or_else(|| {
- panic!("encountered BindByRef with non &-type");
- }).ty
- })
+ ty::deref(base_ty, false).unwrap_or_else(|| {
+ panic!("encountered BindByRef with non &-type");
+ }).ty
}
_ => base_ty,
};
ret_ty
}
- pub fn cat_expr(&self, expr: &ast::Expr) -> McResult<cmt<'tcx>> {
+ pub fn cat_expr(&self, expr: &ast::Expr) -> cmt<'tcx> {
match self.typer.adjustments().borrow().get(&expr.id) {
None => {
// No adjustments.
expr.repr(self.tcx()));
// Convert a bare fn to a closure by adding NULL env.
// Result is an rvalue.
- let expr_ty = if_ok!(self.expr_ty_adjusted(expr));
- Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
+ let expr_ty = self.expr_ty_adjusted(expr);
+ self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)
}
ty::AdjustDerefRef(
expr.repr(self.tcx()));
// Equivalent to &*expr or something similar.
// Result is an rvalue.
- let expr_ty = if_ok!(self.expr_ty_adjusted(expr));
- Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
+ let expr_ty = self.expr_ty_adjusted(expr);
+ self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)
}
ty::AdjustDerefRef(
pub fn cat_expr_autoderefd(&self,
expr: &ast::Expr,
autoderefs: uint)
- -> McResult<cmt<'tcx>> {
- let mut cmt = if_ok!(self.cat_expr_unadjusted(expr));
+ -> cmt<'tcx> {
+ let mut cmt = self.cat_expr_unadjusted(expr);
debug!("cat_expr_autoderefd: autoderefs={}, cmt={}",
autoderefs,
cmt.repr(self.tcx()));
for deref in range(1u, autoderefs + 1) {
cmt = self.cat_deref(expr, cmt, deref, false);
}
- return Ok(cmt);
+ return cmt;
}
- pub fn cat_expr_unadjusted(&self, expr: &ast::Expr) -> McResult<cmt<'tcx>> {
+ pub fn cat_expr_unadjusted(&self, expr: &ast::Expr) -> cmt<'tcx> {
debug!("cat_expr: id={} expr={}", expr.id, expr.repr(self.tcx()));
- let expr_ty = if_ok!(self.expr_ty(expr));
+ let expr_ty = self.expr_ty(expr);
match expr.node {
ast::ExprUnary(ast::UnDeref, ref e_base) => {
- let base_cmt = if_ok!(self.cat_expr(&**e_base));
- Ok(self.cat_deref(expr, base_cmt, 0, false))
+ let base_cmt = self.cat_expr(&**e_base);
+ self.cat_deref(expr, base_cmt, 0, false)
}
ast::ExprField(ref base, f_name) => {
- let base_cmt = if_ok!(self.cat_expr(&**base));
+ let base_cmt = self.cat_expr(&**base);
debug!("cat_expr(cat_field): id={} expr={} base={}",
expr.id,
expr.repr(self.tcx()),
base_cmt.repr(self.tcx()));
- Ok(self.cat_field(expr, base_cmt, f_name.node.name, expr_ty))
+ self.cat_field(expr, base_cmt, f_name.node.name, expr_ty)
}
ast::ExprTupField(ref base, idx) => {
- let base_cmt = if_ok!(self.cat_expr(&**base));
- Ok(self.cat_tup_field(expr, base_cmt, idx.node, expr_ty))
+ let base_cmt = self.cat_expr(&**base);
+ self.cat_tup_field(expr, base_cmt, idx.node, expr_ty)
}
- ast::ExprIndex(ref base, _) => {
- let method_call = ty::MethodCall::expr(expr.id());
- match self.typer.node_method_ty(method_call) {
- Some(method_ty) => {
- // If this is an index implemented by a method call, then it will
- // include an implicit deref of the result.
- let ret_ty = ty::ty_fn_ret(method_ty).unwrap();
- Ok(self.cat_deref(expr,
- self.cat_rvalue_node(expr.id(),
- expr.span(),
- ret_ty), 1, true))
+ ast::ExprIndex(ref base, ref idx) => {
+ match idx.node {
+ ast::ExprRange(..) => {
+ // Slicing syntax special case (KILLME).
+ self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)
}
- None => {
- let base_cmt = if_ok!(self.cat_expr(&**base));
- Ok(self.cat_index(expr, base_cmt))
+ _ => {
+ let method_call = ty::MethodCall::expr(expr.id());
+ match self.typer.node_method_ty(method_call) {
+ Some(method_ty) => {
+ // If this is an index implemented by a method call, then it will
+ // include an implicit deref of the result.
+ let ret_ty = ty::ty_fn_ret(method_ty).unwrap();
+ self.cat_deref(expr,
+ self.cat_rvalue_node(expr.id(),
+ expr.span(),
+ ret_ty), 1, true)
+ }
+ None => {
+ self.cat_index(expr, self.cat_expr(&**base))
+ }
+ }
}
}
}
ast::ExprAddrOf(..) | ast::ExprCall(..) |
ast::ExprAssign(..) | ast::ExprAssignOp(..) |
ast::ExprClosure(..) | ast::ExprRet(..) |
- ast::ExprUnary(..) | ast::ExprSlice(..) | ast::ExprRange(..) |
+ ast::ExprUnary(..) | ast::ExprRange(..) |
ast::ExprMethodCall(..) | ast::ExprCast(..) |
ast::ExprVec(..) | ast::ExprTup(..) | ast::ExprIf(..) |
ast::ExprBinary(..) | ast::ExprWhile(..) |
ast::ExprAgain(..) | ast::ExprStruct(..) | ast::ExprRepeat(..) |
ast::ExprInlineAsm(..) | ast::ExprBox(..) |
ast::ExprForLoop(..) => {
- Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
+ self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)
}
ast::ExprIfLet(..) => {
span: Span,
expr_ty: Ty<'tcx>,
def: def::Def)
- -> McResult<cmt<'tcx>> {
+ -> cmt<'tcx> {
debug!("cat_def: id={} expr={} def={}",
id, expr_ty.repr(self.tcx()), def);
match def {
def::DefStruct(..) | def::DefVariant(..) | def::DefFn(..) |
def::DefStaticMethod(..) | def::DefConst(..) => {
- Ok(self.cat_rvalue_node(id, span, expr_ty))
+ self.cat_rvalue_node(id, span, expr_ty)
}
def::DefMod(_) | def::DefForeignMod(_) | def::DefUse(_) |
def::DefTrait(_) | def::DefTy(..) | def::DefPrimTy(_) |
def::DefTyParam(..) | def::DefTyParamBinder(..) | def::DefRegion(_) |
def::DefLabel(_) | def::DefSelfTy(..) | def::DefMethod(..) |
def::DefAssociatedTy(..) | def::DefAssociatedPath(..)=> {
- Ok(Rc::new(cmt_ {
+ Rc::new(cmt_ {
id:id,
span:span,
cat:cat_static_item,
mutbl: McImmutable,
ty:expr_ty,
note: NoteNone
- }))
+ })
}
def::DefStatic(_, mutbl) => {
- Ok(Rc::new(cmt_ {
+ Rc::new(cmt_ {
id:id,
span:span,
cat:cat_static_item,
mutbl: if mutbl { McDeclared } else { McImmutable},
ty:expr_ty,
note: NoteNone
- }))
+ })
}
def::DefUpvar(var_id, fn_node_id, _) => {
- let ty = if_ok!(self.node_ty(fn_node_id));
+ let ty = self.node_ty(fn_node_id);
match ty.sty {
ty::ty_closure(ref closure_ty) => {
// Translate old closure type info into unboxed
}
def::DefLocal(vid) => {
- Ok(Rc::new(cmt_ {
+ Rc::new(cmt_ {
id: id,
span: span,
cat: cat_local(vid),
mutbl: MutabilityCategory::from_local(self.tcx(), vid),
ty: expr_ty,
note: NoteNone
- }))
+ })
}
}
}
kind: ty::UnboxedClosureKind,
mode: ast::CaptureClause,
is_unboxed: bool)
- -> McResult<cmt<'tcx>> {
+ -> cmt<'tcx> {
// An upvar can have up to 3 components. The base is a
// `cat_upvar`. Next, we add a deref through the implicit
// environment pointer with an anonymous free region 'env and
// FnOnce | copied | upvar -> &'up bk
// old stack | N/A | upvar -> &'env mut -> &'up bk
// old proc/once | copied | N/A
- let var_ty = if_ok!(self.node_ty(var_id));
+ let var_ty = self.node_ty(var_id);
let upvar_id = ty::UpvarId { var_id: var_id,
closure_expr_id: fn_node_id };
});
// First, switch by capture mode
- Ok(match mode {
+ match mode {
ast::CaptureByValue => {
let mut base = cmt_ {
id: id,
note: NoteUpvarRef(upvar_id)
})
}
- })
+ }
}
pub fn cat_rvalue_node(&self,
pub fn cat_slice_pattern(&self,
vec_cmt: cmt<'tcx>,
slice_pat: &ast::Pat)
- -> McResult<(cmt<'tcx>, ast::Mutability, ty::Region)> {
- let slice_ty = if_ok!(self.node_ty(slice_pat.id));
+ -> (cmt<'tcx>, ast::Mutability, ty::Region) {
+ let slice_ty = self.node_ty(slice_pat.id);
let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(),
slice_pat,
slice_ty);
let cmt_slice = self.cat_index(slice_pat, self.deref_vec(slice_pat, vec_cmt));
- return Ok((cmt_slice, slice_mutbl, slice_r));
+ return (cmt_slice, slice_mutbl, slice_r);
/// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b,
/// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we
-> (ast::Mutability, ty::Region) {
match slice_ty.sty {
ty::ty_rptr(r, ref mt) => match mt.ty.sty {
- ty::ty_vec(_, None) => (mt.mutbl, r),
+ ty::ty_vec(_, None) => (mt.mutbl, *r),
_ => vec_slice_info(tcx, pat, mt.ty),
},
cmt: cmt<'tcx>,
pat: &ast::Pat,
op: |&MemCategorizationContext<'t, TYPER>, cmt<'tcx>, &ast::Pat|)
- -> McResult<()> {
+ {
// Here, `cmt` is the categorization for the value being
// matched and pat is the pattern it is being matched against.
//
Some(&def::DefVariant(..)) => {
// variant(x, y, z)
for (i, subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
+ let subpat_ty = self.pat_ty(&**subpat); // see (*2)
let subcmt =
self.cat_imm_interior(
pat, cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z));
}
}
Some(&def::DefStruct(..)) => {
for (i, subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
+ let subpat_ty = self.pat_ty(&**subpat); // see (*2)
let cmt_field =
self.cat_imm_interior(
pat, cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(cmt_field, &**subpat,
- |x,y,z| op(x,y,z)));
+ self.cat_pattern(cmt_field, &**subpat,
+ |x,y,z| op(x,y,z));
}
}
Some(&def::DefConst(..)) => {
for subpat in subpats.iter() {
- if_ok!(self.cat_pattern(cmt.clone(), &**subpat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(cmt.clone(), &**subpat, |x,y,z| op(x,y,z));
}
}
_ => {
}
ast::PatIdent(_, _, Some(ref subpat)) => {
- if_ok!(self.cat_pattern(cmt, &**subpat, op));
+ self.cat_pattern(cmt, &**subpat, op);
}
ast::PatIdent(_, _, None) => {
ast::PatStruct(_, ref field_pats, _) => {
// {f1: p1, ..., fN: pN}
for fp in field_pats.iter() {
- let field_ty = if_ok!(self.pat_ty(&*fp.node.pat)); // see (*2)
+ let field_ty = self.pat_ty(&*fp.node.pat); // see (*2)
let cmt_field = self.cat_field(pat, cmt.clone(), fp.node.ident.name, field_ty);
- if_ok!(self.cat_pattern(cmt_field, &*fp.node.pat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(cmt_field, &*fp.node.pat, |x,y,z| op(x,y,z));
}
}
ast::PatTup(ref subpats) => {
// (p1, ..., pN)
for (i, subpat) in subpats.iter().enumerate() {
- let subpat_ty = if_ok!(self.pat_ty(&**subpat)); // see (*2)
+ let subpat_ty = self.pat_ty(&**subpat); // see (*2)
let subcmt =
self.cat_imm_interior(
pat, cmt.clone(), subpat_ty,
InteriorField(PositionalField(i)));
- if_ok!(self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(subcmt, &**subpat, |x,y,z| op(x,y,z));
}
}
ast::PatBox(ref subpat) | ast::PatRegion(ref subpat) => {
// @p1, ~p1, ref p1
let subcmt = self.cat_deref(pat, cmt, 0, false);
- if_ok!(self.cat_pattern(subcmt, &**subpat, op));
+ self.cat_pattern(subcmt, &**subpat, op);
}
ast::PatVec(ref before, ref slice, ref after) => {
let elt_cmt = self.cat_index(pat, self.deref_vec(pat, cmt));
for before_pat in before.iter() {
- if_ok!(self.cat_pattern(elt_cmt.clone(), &**before_pat,
- |x,y,z| op(x,y,z)));
+ self.cat_pattern(elt_cmt.clone(), &**before_pat,
+ |x,y,z| op(x,y,z));
}
for slice_pat in slice.iter() {
- let slice_ty = if_ok!(self.pat_ty(&**slice_pat));
+ let slice_ty = self.pat_ty(&**slice_pat);
let slice_cmt = self.cat_rvalue_node(pat.id(), pat.span(), slice_ty);
- if_ok!(self.cat_pattern(slice_cmt, &**slice_pat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(slice_cmt, &**slice_pat, |x,y,z| op(x,y,z));
}
for after_pat in after.iter() {
- if_ok!(self.cat_pattern(elt_cmt.clone(), &**after_pat, |x,y,z| op(x,y,z)));
+ self.cat_pattern(elt_cmt.clone(), &**after_pat, |x,y,z| op(x,y,z));
}
}
self.tcx().sess.span_bug(pat.span, "unexpanded macro");
}
}
-
- Ok(())
}
pub fn cmt_to_string(&self, cmt: &cmt_<'tcx>) -> String {
Implicit(..) => {
"dereference (dereference is implicit, due to indexing)".to_string()
}
- OwnedPtr => format!("dereference of `{}`", ptr_sigil(pk)),
+ Unique => format!("dereference of `{}`", ptr_sigil(pk)),
_ => format!("dereference of `{}`-pointer", ptr_sigil(pk))
}
}
}
cat_downcast(ref b, _) |
cat_interior(ref b, _) |
- cat_deref(ref b, _, OwnedPtr) => {
+ cat_deref(ref b, _, Unique) => {
b.guarantor()
}
}
cat_deref(ref b, _, BorrowedPtr(ty::UniqueImmBorrow, _)) |
cat_deref(ref b, _, Implicit(ty::UniqueImmBorrow, _)) |
cat_downcast(ref b, _) |
- cat_deref(ref b, _, OwnedPtr) |
+ cat_deref(ref b, _, Unique) |
cat_interior(ref b, _) => {
// Aliasability depends on base cmt
b.freely_aliasable(ctxt)
pub fn ptr_sigil(ptr: PointerKind) -> &'static str {
match ptr {
- OwnedPtr => "Box",
+ Unique => "Box",
BorrowedPtr(ty::ImmBorrow, _) |
Implicit(ty::ImmBorrow, _) => "&",
BorrowedPtr(ty::MutBorrow, _) |
// method to the root. In this case, if the trait is private, then
// parent all the methods to the trait to indicate that they're
// private.
- ast::ItemTrait(_, _, _, _, ref methods) if item.vis != ast::Public => {
+ ast::ItemTrait(_, _, _, ref methods) if item.vis != ast::Public => {
for m in methods.iter() {
match *m {
ast::ProvidedMethod(ref m) => {
// Default methods on traits are all public so long as the trait
// is public
- ast::ItemTrait(_, _, _, _, ref methods) if public_first => {
+ ast::ItemTrait(_, _, _, ref methods) if public_first => {
for method in methods.iter() {
match *method {
ast::ProvidedMethod(ref m) => {
}
}
- ast::ItemTrait(_, _, _, _, ref methods) => {
+ ast::ItemTrait(_, _, _, ref methods) => {
for m in methods.iter() {
match *m {
ast::ProvidedMethod(ref m) => {
ast::ItemStruct(ref def, _) => check_struct(&**def),
- ast::ItemTrait(_, _, _, _, ref methods) => {
+ ast::ItemTrait(_, _, _, ref methods) => {
for m in methods.iter() {
match *m {
ast::RequiredMethod(..) => {}
fn check_ty_param_bound(&self,
ty_param_bound: &ast::TyParamBound) {
- if let ast::TraitTyParamBound(ref trait_ref) = *ty_param_bound {
+ if let ast::TraitTyParamBound(ref trait_ref, _) = *ty_param_bound {
if !self.tcx.sess.features.borrow().visible_private_types &&
self.path_is_private_type(trait_ref.trait_ref.ref_id) {
let span = trait_ref.trait_ref.path.span;
// namespace (the contents have their own privacies).
ast::ItemForeignMod(_) => {}
- ast::ItemTrait(_, _, _, ref bounds, _) => {
+ ast::ItemTrait(_, _, ref bounds, _) => {
if !self.trait_is_public(item.id) {
return
}
let prev_cx = visitor.cx;
visitor.cx.parent = Some(expr.id);
+
{
let region_maps = &mut visitor.region_maps;
let terminating = |id| {
pub enum DefRegion {
DefStaticRegion,
DefEarlyBoundRegion(/* space */ subst::ParamSpace,
- /* index */ uint,
+ /* index */ u32,
/* lifetime decl */ ast::NodeId),
DefLateBoundRegion(ty::DebruijnIndex,
/* lifetime decl */ ast::NodeId),
ast::ItemTy(_, ref generics) |
ast::ItemEnum(_, ref generics) |
ast::ItemStruct(_, ref generics) |
- ast::ItemTrait(_, ref generics, _, _, _) |
+ ast::ItemTrait(_, ref generics, _, _) |
ast::ItemImpl(_, ref generics, _, _, _) => {
// These kinds of items have only early bound lifetime parameters.
let lifetimes = &generics.lifetimes;
}
}
- fn visit_poly_trait_ref(&mut self, trait_ref: &ast::PolyTraitRef) {
+ fn visit_poly_trait_ref(&mut self, trait_ref:
+ &ast::PolyTraitRef,
+ _modifier: &ast::TraitBoundModifier) {
debug!("visit_poly_trait_ref trait_ref={}", trait_ref);
self.with(LateScope(&trait_ref.bound_lifetimes, self.scope), |old_scope, this| {
fn search_lifetimes<'a>(lifetimes: &'a Vec<ast::LifetimeDef>,
lifetime_ref: &ast::Lifetime)
- -> Option<(uint, &'a ast::Lifetime)> {
+ -> Option<(u32, &'a ast::Lifetime)> {
for (i, lifetime_decl) in lifetimes.iter().enumerate() {
if lifetime_decl.lifetime.name == lifetime_ref.name {
- return Some((i, &lifetime_decl.lifetime));
+ return Some((i as u32, &lifetime_decl.lifetime));
}
}
return None;
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
- *self.types.get(ty_param_def.space, ty_param_def.index)
+ *self.types.get(ty_param_def.space, ty_param_def.index as uint)
}
- pub fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
match self.regions {
ErasedRegions =>
ty_stack_depth: uint,
// Number of region binders we have passed through while doing the substitution
- region_binders_passed: uint,
+ region_binders_passed: u32,
}
impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
- match regions.opt_get(space, i) {
+ match regions.opt_get(space, i as uint) {
Some(&r) => {
self.shift_region_through_binders(r)
}
impl<'a,'tcx> SubstFolder<'a,'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
- let opt_ty = self.substs.types.opt_get(p.space, p.idx);
+ let opt_ty = self.substs.types.opt_get(p.space, p.idx as uint);
let ty = match opt_ty {
Some(t) => *t,
None => {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{FulfillmentError, FulfillmentErrorCode,
+ ObligationCauseCode, SelectionError,
+ PredicateObligation, OutputTypeParameterMismatch};
+
+use middle::infer::InferCtxt;
+use middle::ty::{mod};
+use syntax::codemap::Span;
+use util::ppaux::{Repr, UserString};
+
+pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ errors: &Vec<FulfillmentError<'tcx>>) {
+ for error in errors.iter() {
+ report_fulfillment_error(infcx, error);
+ }
+}
+
+fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ error: &FulfillmentError<'tcx>) {
+ match error.code {
+ FulfillmentErrorCode::CodeSelectionError(ref e) => {
+ report_selection_error(infcx, &error.obligation, e);
+ }
+ FulfillmentErrorCode::CodeAmbiguity => {
+ maybe_report_ambiguity(infcx, &error.obligation);
+ }
+ }
+}
+
+pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ error: &SelectionError<'tcx>)
+{
+ match *error {
+ SelectionError::Overflow => {
+ // We could track the stack here more precisely if we wanted, I imagine.
+ match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ let trait_ref =
+ infcx.resolve_type_vars_if_possible(&**trait_ref);
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "overflow evaluating the trait `{}` for the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ trait_ref.self_ty().user_string(infcx.tcx))[]);
+ }
+
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ let err = infcx.equality_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::TypeOutlives(..) |
+ ty::Predicate::RegionOutlives(..) => {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!("overflow evaluating lifetime predicate").as_slice());
+ }
+ }
+
+ let current_limit = infcx.tcx.sess.recursion_limit.get();
+ let suggested_limit = current_limit * 2;
+ infcx.tcx.sess.span_note(
+ obligation.cause.span,
+ format!(
+ "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
+ suggested_limit)[]);
+
+ note_obligation_cause(infcx, obligation);
+ }
+ SelectionError::Unimplemented => {
+ match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ let trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**trait_ref);
+ if !ty::type_is_error(trait_ref.self_ty()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` is not implemented for the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ trait_ref.self_ty().user_string(infcx.tcx)).as_slice());
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ let err = infcx.equality_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::TypeOutlives(..) |
+ ty::Predicate::RegionOutlives(..) => {
+ let predicate = infcx.resolve_type_vars_if_possible(&obligation.trait_ref);
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied",
+ predicate.user_string(infcx.tcx)).as_slice());
+ }
+ }
+ }
+ OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
+ let expected_trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**expected_trait_ref);
+ let actual_trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**actual_trait_ref);
+ if !ty::type_is_error(actual_trait_ref.self_ty()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "type mismatch: the type `{}` implements the trait `{}`, \
+ but the trait `{}` is required ({})",
+ expected_trait_ref.self_ty().user_string(infcx.tcx),
+ expected_trait_ref.user_string(infcx.tcx),
+ actual_trait_ref.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, e)).as_slice());
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+ }
+}
+
+fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>) {
+ // Unable to successfully determine, probably means
+ // insufficient type information, but could mean
+ // ambiguous impls. The latter *ought* to be a
+ // coherence violation, so we don't report it here.
+
+ let trait_ref = match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ infcx.resolve_type_vars_if_possible(&**trait_ref)
+ }
+ _ => {
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!("ambiguity from something other than a trait: {}",
+ obligation.trait_ref.repr(infcx.tcx)).as_slice());
+ }
+ };
+ let self_ty = trait_ref.self_ty();
+
+ debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
+ trait_ref.repr(infcx.tcx),
+ self_ty.repr(infcx.tcx),
+ obligation.repr(infcx.tcx));
+ let all_types = &trait_ref.substs().types;
+ if all_types.iter().any(|&t| ty::type_is_error(t)) {
+ } else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
+ // This is kind of a hack: it frequently happens that some earlier
+ // error prevents types from being fully inferred, and then we get
+ // a bunch of uninteresting errors saying something like "<generic
+ // #0> doesn't implement Sized". It may even be true that we
+ // could just skip over all checks where the self-ty is an
+ // inference variable, but I was afraid that there might be an
+ // inference variable created, registered as an obligation, and
+ // then never forced by writeback, and hence by skipping here we'd
+ // be ignoring the fact that we don't KNOW the type works
+ // out. Though even that would probably be harmless, given that
+ // we're only talking about builtin traits, which are known to be
+ // inhabited. But in any case I just threw in this check for
+ // has_errors() to be sure that compilation isn't happening
+ // anyway. In that case, why inundate the user.
+ if !infcx.tcx.sess.has_errors() {
+ if infcx.tcx.lang_items.sized_trait()
+ .map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "unable to infer enough type information about `{}`; type annotations \
+ required",
+ self_ty.user_string(infcx.tcx)).as_slice());
+ } else {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "unable to infer enough type information to \
+ locate the impl of the trait `{}` for \
+ the type `{}`; type annotations required",
+ trait_ref.user_string(infcx.tcx),
+ self_ty.user_string(infcx.tcx))[]);
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+ } else if !infcx.tcx.sess.has_errors() {
+ // Ambiguity. Coherence should have reported an error.
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!(
+ "coherence failed to report ambiguity: \
+ cannot locate the impl of the trait `{}` for \
+ the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ self_ty.user_string(infcx.tcx))[]);
+ }
+}
+
+fn note_obligation_cause<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>)
+{
+ let trait_ref = match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ infcx.resolve_type_vars_if_possible(&**trait_ref)
+ }
+ _ => {
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!("ambiguity from something other than a trait: {}",
+ obligation.trait_ref.repr(infcx.tcx)).as_slice());
+ }
+ };
+
+ note_obligation_cause_code(infcx,
+ &trait_ref,
+ obligation.cause.span,
+ &obligation.cause.code)
+}
+
+fn note_obligation_cause_code<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ cause_span: Span,
+ cause_code: &ObligationCauseCode<'tcx>)
+{
+ let tcx = infcx.tcx;
+ let trait_name = ty::item_path_str(tcx, trait_ref.def_id());
+ match *cause_code {
+ ObligationCauseCode::MiscObligation => { }
+ ObligationCauseCode::ItemObligation(item_def_id) => {
+ let item_name = ty::item_path_str(tcx, item_def_id);
+ tcx.sess.span_note(
+ cause_span,
+ format!(
+ "the trait `{}` must be implemented because it is required by `{}`",
+ trait_name,
+ item_name).as_slice());
+ }
+ ObligationCauseCode::ObjectCastObligation(object_ty) => {
+ tcx.sess.span_note(
+ cause_span,
+ format!(
+ "the trait `{}` must be implemented for the cast \
+ to the object type `{}`",
+ trait_name,
+ infcx.ty_to_string(object_ty)).as_slice());
+ }
+ ObligationCauseCode::RepeatVec => {
+ tcx.sess.span_note(
+ cause_span,
+ "the `Copy` trait is required because the \
+ repeated element will be copied");
+ }
+ ObligationCauseCode::VariableType(_) => {
+ tcx.sess.span_note(
+ cause_span,
+ "all local variables must have a statically known size");
+ }
+ ObligationCauseCode::ReturnType => {
+ tcx.sess.span_note(
+ cause_span,
+ "the return type of a function must have a \
+ statically known size");
+ }
+ ObligationCauseCode::AssignmentLhsSized => {
+ tcx.sess.span_note(
+ cause_span,
+ "the left-hand-side of an assignment must have a statically known size");
+ }
+ ObligationCauseCode::StructInitializerSized => {
+ tcx.sess.span_note(
+ cause_span,
+ "structs must have a statically known size to be initialized");
+ }
+ ObligationCauseCode::ClosureCapture(var_id, closure_span, builtin_bound) => {
+ let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
+ let trait_name = ty::item_path_str(tcx, def_id);
+ let name = ty::local_var_name_str(tcx, var_id);
+ span_note!(tcx.sess, closure_span,
+ "the closure that captures `{}` requires that all captured variables \
+ implement the trait `{}`",
+ name,
+ trait_name);
+ }
+ ObligationCauseCode::FieldSized => {
+ span_note!(tcx.sess, cause_span,
+ "only the last field of a struct or enum variant \
+ may have a dynamically sized type")
+ }
+ ObligationCauseCode::ObjectSized => {
+ span_note!(tcx.sess, cause_span,
+ "only sized types can be made into objects");
+ }
+ ObligationCauseCode::SharedStatic => {
+ span_note!(tcx.sess, cause_span,
+ "shared static variables must have a type that implements `Sync`");
+ }
+ ObligationCauseCode::BuiltinDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
+ let root_trait_ref =
+ infcx.resolve_type_vars_if_possible(&**root_trait_ref);
+ span_note!(tcx.sess, cause_span,
+ "the type `{}` must implement `{}` because it appears within the type `{}`",
+ trait_ref.self_ty().user_string(infcx.tcx),
+ trait_ref.user_string(infcx.tcx),
+ root_trait_ref.self_ty().user_string(infcx.tcx));
+ note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
+ }
+ ObligationCauseCode::ImplDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
+ let root_trait_ref =
+ infcx.resolve_type_vars_if_possible(&**root_trait_ref);
+ span_note!(tcx.sess, cause_span,
+ "the type `{}` must implement `{}` due to the requirements \
+ on the impl of `{}` for the type `{}`",
+ trait_ref.self_ty().user_string(infcx.tcx),
+ trait_ref.user_string(infcx.tcx),
+ root_trait_ref.user_string(infcx.tcx),
+ root_trait_ref.self_ty().user_string(infcx.tcx));
+ note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
+ }
+ }
+}
let tcx = selcx.tcx();
match predicate.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
- let trait_obligation = Obligation { cause: predicate.cause,
+ let trait_obligation = Obligation { cause: predicate.cause.clone(),
recursion_depth: predicate.recursion_depth,
trait_ref: trait_ref.clone() };
match selcx.select(&trait_obligation) {
CodeSelectionError(Unimplemented)));
} else {
let ty::OutlivesPredicate(t_a, r_b) = binder.0;
- register_region_obligation(tcx, t_a, r_b, predicate.cause, region_obligations);
+ register_region_obligation(tcx, t_a, r_b,
+ predicate.cause.clone(),
+ region_obligations);
}
true
}
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
+pub use self::error_reporting::report_fulfillment_errors;
pub use self::fulfill::{FulfillmentContext, RegionObligation};
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
pub use self::util::poly_trait_ref_for_builtin_bound;
mod coherence;
+mod error_reporting;
mod fulfill;
mod select;
mod util;
pub type TraitObligation<'tcx> = Obligation<'tcx, Rc<ty::PolyTraitRef<'tcx>>>;
/// Why did we incur this obligation? Used for error reporting.
-#[deriving(Copy, Clone)]
+#[deriving(Clone)]
pub struct ObligationCause<'tcx> {
pub span: Span,
pub code: ObligationCauseCode<'tcx>
}
-#[deriving(Copy, Clone)]
+#[deriving(Clone)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
MiscObligation,
/// Obligation incurred due to an object cast.
ObjectCastObligation(/* Object type */ Ty<'tcx>),
- /// To implement drop, type must be sendable.
- DropTrait,
-
/// Various cases where expressions must be sized/copy/etc:
AssignmentLhsSized, // L = X implies that L is Sized
StructInitializerSized, // S { ... } must be Sized
// Only Sized types can be made into objects
ObjectSized,
+
+ // static items must have `Sync` type
+ SharedStatic,
+
+ BuiltinDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
+
+ ImplDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
}
pub type Obligations<'tcx, O> = subst::VecPerParamSpace<Obligation<'tcx, O>>;
use self::EvaluationResult::*;
use super::{PredicateObligation, Obligation, TraitObligation, ObligationCause};
+use super::{ObligationCauseCode, BuiltinDerivedObligation};
use super::{SelectionError, Unimplemented, Overflow, OutputTypeParameterMismatch};
use super::{Selection};
use super::{SelectionResult};
let obligation =
util::predicate_for_builtin_bound(
self.tcx(),
- previous_stack.obligation.cause,
+ previous_stack.obligation.cause.clone(),
bound,
previous_stack.obligation.recursion_depth + 1,
ty);
Ok(substs) => {
let vtable_impl = self.vtable_impl(impl_def_id,
substs,
- obligation.cause,
+ obligation.cause.clone(),
obligation.recursion_depth + 1,
skol_map,
snapshot);
// behavior, ignore user-defined impls here. This will
// go away by the time 1.0 is released.
if !self.tcx().sess.features.borrow().opt_out_copy {
- try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
try!(self.assemble_builtin_bound_candidates(ty::BoundCopy,
stack,
&mut candidates));
}
+ Some(bound @ ty::BoundSend) |
+ Some(bound @ ty::BoundSync) => {
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
+
+ // No explicit impls were declared for this type, consider the fallback rules.
+ if candidates.vec.is_empty() {
+ try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ }
+ }
+
+ Some(bound @ ty::BoundSized) => {
+ // Sized and Copy are always automatically computed.
+ try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ }
None => {
// For the time being, we ignore user-defined impls for builtin-bounds, other than
// (And unboxed candidates only apply to the Fn/FnMut/etc traits.)
try!(self.assemble_unboxed_closure_candidates(obligation, &mut candidates));
try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates));
- try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
- }
-
- Some(bound) => {
- try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
}
}
// provide an impl, but only for suitable `fn` pointers
- ty::ty_bare_fn(_, ty::BareFnTy {
+ ty::ty_bare_fn(_, &ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(&mut self,
obligation: &TraitObligation<'tcx>,
- candidates: &mut CandidateSet<'tcx>)
+ candidate_vec: &mut Vec<Candidate<'tcx>>)
-> Result<(), SelectionError<'tcx>>
{
let all_impls = self.all_impls(obligation.trait_ref.def_id());
match self.match_impl(impl_def_id, obligation, snapshot,
&skol_map, Rc::new(skol_obligation_trait_ref)) {
Ok(_) => {
- candidates.vec.push(ImplCandidate(impl_def_id));
+ candidate_vec.push(ImplCandidate(impl_def_id));
}
Err(()) => { }
}
}
}
- ty::ty_ptr(ty::mt { ty: referent_ty, .. }) => { // *const T, *mut T
+ ty::ty_ptr(..) => { // *const T, *mut T
match bound {
ty::BoundCopy |
ty::BoundSized => {
ty::BoundSync |
ty::BoundSend => {
- Ok(If(vec![referent_ty]))
+ // sync and send are not implemented for *const, *mut
+ Err(Unimplemented)
}
}
}
} else {
// Recursively check all supertraits to find out if any further
// bounds are required and thus we must fulfill.
- let tmp_tr = data.principal_trait_ref_with_self_ty(ty::mk_err());
+ let tmp_tr = data.principal_trait_ref_with_self_ty(self.tcx(),
+ ty::mk_err());
for tr in util::supertraits(self.tcx(), tmp_tr) {
let td = ty::lookup_trait_def(self.tcx(), tr.def_id());
Ok(If(tys.clone()))
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
// FIXME -- This case is tricky. In the case of by-ref
// closures particularly, we need the results of
// inference to decide how to reflect the type of each
.iter()
.map(|freevar| {
let freevar_def_id = freevar.def.def_id();
- self.typer.node_ty(freevar_def_id.node)
- .unwrap_or(ty::mk_err()).subst(self.tcx(), substs)
+ self.typer.node_ty(freevar_def_id.node).subst(self.tcx(), substs)
})
.collect();
Ok(If(tys))
}
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let types: Vec<Ty> =
ty::struct_fields(self.tcx(), def_id, substs)
.iter()
nominal(self, bound, def_id, types)
}
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
let types: Vec<Ty> =
ty::substd_enum_variants(self.tcx(), def_id, substs)
.iter()
ty::BoundSync => {
if
Some(def_id) == tcx.lang_items.no_sync_bound() ||
- Some(def_id) == tcx.lang_items.managed_bound()
- {
- return Err(Unimplemented)
- } else if
+ Some(def_id) == tcx.lang_items.managed_bound() ||
Some(def_id) == tcx.lang_items.unsafe_type()
{
- // FIXME(#13231) -- we currently consider `UnsafeCell<T>`
- // to always be sync. This is allow for types like `Queue`
- // and `Mutex`, where `Queue<T> : Sync` is `T : Send`.
- return Ok(If(Vec::new()));
+ return Err(Unimplemented)
}
}
// where-clause trait-ref could be unified with the obligation
// trait-ref. Repeat that unification now without any
// transactional boundary; it should not fail.
- match self.confirm_poly_trait_refs(obligation.cause,
+ match self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
param.bound.clone()) {
Ok(()) => Ok(param),
nested: Vec<Ty<'tcx>>)
-> VtableBuiltinData<PredicateObligation<'tcx>>
{
+ let derived_cause = self.derived_cause(obligation, BuiltinDerivedObligation);
let obligations = nested.iter().map(|&t| {
util::predicate_for_builtin_bound(
self.tcx(),
- obligation.cause,
+ derived_cause.clone(),
bound,
obligation.recursion_depth + 1,
t)
// as a special case, `Send` requires `'static`
if bound == ty::BoundSend {
obligations.push(Obligation {
- cause: obligation.cause,
+ cause: obligation.cause.clone(),
recursion_depth: obligation.recursion_depth+1,
trait_ref: ty::Binder(ty::OutlivesPredicate(obligation.self_ty(),
ty::ReStatic)).as_predicate(),
let substs = self.rematch_impl(impl_def_id, obligation,
snapshot, &skol_map, Rc::new(skol_obligation_trait_ref));
debug!("confirm_impl_candidate substs={}", substs);
- Ok(self.vtable_impl(impl_def_id, substs, obligation.cause,
+ Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(),
obligation.recursion_depth + 1, skol_map, snapshot))
})
}
let self_ty = self.infcx.shallow_resolve(obligation.self_ty());
let sig = match self_ty.sty {
- ty::ty_bare_fn(_, ty::BareFnTy {
+ ty::ty_bare_fn(_, &ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
ref sig
self_ty);
let trait_ref = Rc::new(ty::Binder(ty::TraitRef {
def_id: obligation.trait_ref.def_id(),
- substs: substs,
+ substs: self.tcx().mk_substs(substs),
}));
- try!(self.confirm_poly_trait_refs(obligation.cause,
+ try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref));
-
Ok(self_ty)
}
obligation.self_ty());
let trait_ref = Rc::new(ty::Binder(ty::TraitRef {
def_id: obligation.trait_ref.def_id(),
- substs: substs,
+ substs: self.tcx().mk_substs(substs),
}));
debug!("confirm_unboxed_closure_candidate(closure_def_id={}, trait_ref={})",
closure_def_id.repr(self.tcx()),
trait_ref.repr(self.tcx()));
- self.confirm_poly_trait_refs(obligation.cause,
+ self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref)
}
/// back `Ok(T=int)`.
fn match_inherent_impl(&mut self,
impl_def_id: ast::DefId,
- obligation_cause: ObligationCause,
+ obligation_cause: &ObligationCause,
obligation_self_ty: Ty<'tcx>)
-> Result<Substs<'tcx>,()>
{
}
fn match_self_types(&mut self,
- cause: ObligationCause,
+ cause: &ObligationCause,
// The self type provided by the impl/caller-obligation:
provided_self_ty: Ty<'tcx>,
None
}
}
+
+ #[allow(unused_comparisons)]
+ fn derived_cause(&self,
+ obligation: &TraitObligation<'tcx>,
+ variant: fn(Rc<ty::Binder<ty::TraitRef<'tcx>>>,
+ Rc<ObligationCauseCode<'tcx>>)
+ -> ObligationCauseCode<'tcx>)
+ -> ObligationCause<'tcx>
+ {
+ /*!
+ * Creates a cause for obligations that are derived from
+ * `obligation` by a recursive search (e.g., for a builtin
+ * bound, or eventually an `impl Foo for ..`). If `obligation`
+ * is itself a derived obligation, this is just a clone, but
+ * otherwise we create a "derived obligation" cause so as to
+ * keep track of the original root obligation for error
+ * reporting.
+ */
+
+ // NOTE(flaper87): As of now, it keeps track of the whole error
+ // chain. Ideally, we should have a way to configure this either
+ // by using -Z verbose or just a CLI argument.
+ if obligation.recursion_depth >= 0 {
+ ObligationCause::new(obligation.cause.span,
+ obligation.trait_ref.def_id().node,
+ variant(obligation.trait_ref.clone(),
+ Rc::new(obligation.cause.code.clone())))
+ } else {
+ obligation.cause.clone()
+ }
+ }
}
impl<'tcx> Repr<'tcx> for Candidate<'tcx> {
-
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
generic_bounds.repr(tcx));
generic_bounds.predicates.map(|predicate| {
- Obligation { cause: cause,
+ Obligation { cause: cause.clone(),
recursion_depth: recursion_depth,
trait_ref: predicate.clone() }
})
Ok(def_id) => {
Ok(Rc::new(ty::Binder(ty::TraitRef {
def_id: def_id,
- substs: Substs::empty().with_self_ty(param_ty)
+ substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
})))
}
Err(e) => {
use std::ops;
use std::rc::Rc;
use collections::enum_set::{EnumSet, CLike};
-use std::collections::hash_map::HashMap;
+use std::collections::{HashMap, HashSet};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use syntax::abi;
use syntax::ast::{CrateNum, DefId, DUMMY_NODE_ID, Ident, ItemTrait, LOCAL_CRATE};
pub ty_cx: ty::ctxt<'tcx>,
pub reachable: NodeSet,
pub name: String,
+ pub glob_map: Option<GlobMap>,
}
#[deriving(Copy, PartialEq, Eq, Hash)]
},
&AutoPtr(r, m, Some(box ref autoref)) => {
match type_of_autoref(cx, autoref) {
- Some(ty) => Some(mk_rptr(cx, r, mt {mutbl: m, ty: ty})),
+ Some(ty) => Some(mk_rptr(cx, cx.mk_region(r), mt {mutbl: m, ty: ty})),
None => None
}
}
pub id: ast::NodeId,
}
+/// Internal storage
+pub struct CtxtArenas<'tcx> {
+ type_: TypedArena<TyS<'tcx>>,
+ substs: TypedArena<Substs<'tcx>>,
+ bare_fn: TypedArena<BareFnTy<'tcx>>,
+ region: TypedArena<Region>,
+}
+
+impl<'tcx> CtxtArenas<'tcx> {
+ pub fn new() -> CtxtArenas<'tcx> {
+ CtxtArenas {
+ type_: TypedArena::new(),
+ substs: TypedArena::new(),
+ bare_fn: TypedArena::new(),
+ region: TypedArena::new(),
+ }
+ }
+}
+
/// The data structure to keep track of all the information that typechecker
/// generates so that it can be reused and doesn't have to be redone
/// later on.
pub struct ctxt<'tcx> {
- /// The arena that types are allocated from.
- type_arena: &'tcx TypedArena<TyS<'tcx>>,
+ /// The arenas that types etc are allocated from.
+ arenas: &'tcx CtxtArenas<'tcx>,
/// Specifically use a speedy hash algorithm for this hash map, it's used
/// quite often.
// FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
// queried from a HashSet.
interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+ // FIXME as above, use a hashset if equivalent elements can be queried.
+ substs_interner: RefCell<FnvHashMap<&'tcx Substs<'tcx>, &'tcx Substs<'tcx>>>,
+ bare_fn_interner: RefCell<FnvHashMap<&'tcx BareFnTy<'tcx>, &'tcx BareFnTy<'tcx>>>,
+ region_interner: RefCell<FnvHashMap<&'tcx Region, &'tcx Region>>,
+
pub sess: Session,
pub def_map: DefMap,
}
}
+macro_rules! sty_debug_print {
+ ($ctxt: expr, $($variant: ident),*) => {{
+ // curious inner module to allow variant names to be used as
+ // variable names.
+ mod inner {
+ use middle::ty;
+ #[deriving(Copy)]
+ struct DebugStat {
+ total: uint,
+ region_infer: uint,
+ ty_infer: uint,
+ both_infer: uint,
+ }
+
+ pub fn go(tcx: &ty::ctxt) {
+ let mut total = DebugStat {
+ total: 0,
+ region_infer: 0, ty_infer: 0, both_infer: 0,
+ };
+ $(let mut $variant = total;)*
+
+
+ for (_, t) in tcx.interner.borrow().iter() {
+ let variant = match t.sty {
+ ty::ty_bool | ty::ty_char | ty::ty_int(..) | ty::ty_uint(..) |
+ ty::ty_float(..) | ty::ty_str => continue,
+ ty::ty_err => /* unimportant */ continue,
+ $(ty::$variant(..) => &mut $variant,)*
+ };
+ let region = t.flags.intersects(ty::HAS_RE_INFER);
+ let ty = t.flags.intersects(ty::HAS_TY_INFER);
+
+ variant.total += 1;
+ total.total += 1;
+ if region { total.region_infer += 1; variant.region_infer += 1 }
+ if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+ if region && ty { total.both_infer += 1; variant.both_infer += 1 }
+ }
+ println!("Ty interner total ty region both");
+ $(println!(" {:18}: {uses:6} {usespc:4.1}%, \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+ stringify!($variant),
+ uses = $variant.total,
+ usespc = $variant.total as f64 * 100.0 / total.total as f64,
+ ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
+ region = $variant.region_infer as f64 * 100.0 / total.total as f64,
+ both = $variant.both_infer as f64 * 100.0 / total.total as f64);
+ )*
+ println!(" total {uses:6} \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+ uses = total.total,
+ ty = total.ty_infer as f64 * 100.0 / total.total as f64,
+ region = total.region_infer as f64 * 100.0 / total.total as f64,
+ both = total.both_infer as f64 * 100.0 / total.total as f64)
+ }
+ }
+
+ inner::go($ctxt)
+ }}
+}
+
+impl<'tcx> ctxt<'tcx> {
+ pub fn print_debug_stats(&self) {
+ sty_debug_print!(
+ self,
+ ty_enum, ty_uniq, ty_vec, ty_ptr, ty_rptr, ty_bare_fn, ty_closure, ty_trait,
+ ty_struct, ty_unboxed_closure, ty_tup, ty_param, ty_open, ty_infer);
+
+ println!("Substs interner: #{}", self.substs_interner.borrow().len());
+ println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len());
+ println!("Region interner: #{}", self.region_interner.borrow().len());
+ }
+}
+
#[deriving(Show)]
pub struct TyS<'tcx> {
pub sty: sty<'tcx>,
pub flags: TypeFlags,
// the maximal depth of any bound regions appearing in this type.
- region_depth: uint,
+ region_depth: u32,
}
impl fmt::Show for TypeFlags {
type_escapes_depth(ty, 0)
}
-pub fn type_escapes_depth(ty: Ty, depth: uint) -> bool {
+pub fn type_escapes_depth(ty: Ty, depth: u32) -> bool {
ty.region_depth > depth
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash, Show)]
pub struct ParamTy {
pub space: subst::ParamSpace,
- pub idx: uint,
+ pub idx: u32,
pub def_id: DefId
}
pub struct DebruijnIndex {
// We maintain the invariant that this is never 0. So 1 indicates
// the innermost binder. To ensure this, create with `DebruijnIndex::new`.
- pub depth: uint,
+ pub depth: u32,
}
/// Representation of regions:
// parameters are substituted.
ReEarlyBound(/* param id */ ast::NodeId,
subst::ParamSpace,
- /*index*/ uint,
+ /*index*/ u32,
ast::Name),
// Region bound in a function scope, which will be substituted when the
}
}
- pub fn escapes_depth(&self, depth: uint) -> bool {
+ pub fn escapes_depth(&self, depth: u32) -> bool {
match *self {
ty::ReLateBound(debruijn, _) => debruijn.depth > depth,
_ => false,
RustcEncodable, RustcDecodable, Show, Copy)]
pub enum BoundRegion {
/// An anonymous region parameter for a given fn (&T)
- BrAnon(uint),
+ BrAnon(u32),
/// Named region parameters for functions (a in &'a T)
///
BrNamed(ast::DefId, ast::Name),
/// Fresh bound identifiers created during GLB computations.
- BrFresh(uint),
+ BrFresh(u32),
// Anonymous region for the implicit env pointer parameter
// to a closure
/// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
/// the `ast_ty_to_ty_cache`. This is probably true for `ty_struct` as
/// well.
- ty_enum(DefId, Substs<'tcx>),
+ ty_enum(DefId, &'tcx Substs<'tcx>),
ty_uniq(Ty<'tcx>),
ty_str,
ty_vec(Ty<'tcx>, Option<uint>), // Second field is length.
ty_ptr(mt<'tcx>),
- ty_rptr(Region, mt<'tcx>),
+ ty_rptr(&'tcx Region, mt<'tcx>),
// If the def-id is Some(_), then this is the type of a specific
// fn item. Otherwise, if None(_), it a fn pointer type.
- ty_bare_fn(Option<DefId>, BareFnTy<'tcx>),
+ ty_bare_fn(Option<DefId>, &'tcx BareFnTy<'tcx>),
ty_closure(Box<ClosureTy<'tcx>>),
ty_trait(Box<TyTrait<'tcx>>),
- ty_struct(DefId, Substs<'tcx>),
+ ty_struct(DefId, &'tcx Substs<'tcx>),
- ty_unboxed_closure(DefId, Region, Substs<'tcx>),
+ ty_unboxed_closure(DefId, &'tcx Region, &'tcx Substs<'tcx>),
ty_tup(Vec<Ty<'tcx>>),
/// we convert the principal trait-ref into a normal trait-ref,
/// you must give *some* self-type. A common choice is `mk_err()`
/// or some skolemized type.
- pub fn principal_trait_ref_with_self_ty(&self, self_ty: Ty<'tcx>)
+ pub fn principal_trait_ref_with_self_ty(&self,
+ tcx: &ctxt<'tcx>, self_ty: Ty<'tcx>)
-> Rc<ty::PolyTraitRef<'tcx>>
{
Rc::new(ty::Binder(ty::TraitRef {
def_id: self.principal.def_id(),
- substs: self.principal.substs().with_self_ty(self_ty),
+ substs: tcx.mk_substs(self.principal.substs().with_self_ty(self_ty)),
}))
}
}
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct TraitRef<'tcx> {
pub def_id: DefId,
- pub substs: Substs<'tcx>,
+ pub substs: &'tcx Substs<'tcx>,
}
pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;
self.0.def_id
}
- pub fn substs(&self) -> &Substs<'tcx> {
- &self.0.substs
+ pub fn substs(&self) -> &'tcx Substs<'tcx> {
+ self.0.substs
}
pub fn input_types(&self) -> &[Ty<'tcx>] {
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct TyVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct IntVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct FloatVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
pub struct RegionVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
/// A `FreshTy` is one that is generated as a replacement for an
/// unbound type variable. This is convenient for caching etc. See
/// `middle::infer::freshen` for more details.
- FreshTy(uint),
+ FreshTy(u32),
// FIXME -- once integral fallback is impl'd, we should remove
// this type. It's only needed to prevent spurious errors for
// integers whose type winds up never being constrained.
- FreshIntTy(uint),
+ FreshIntTy(u32),
}
#[deriving(Clone, RustcEncodable, RustcDecodable, Eq, Hash, Show, Copy)]
pub enum InferRegion {
ReVar(RegionVid),
- ReSkolemized(uint, BoundRegion)
+ ReSkolemized(u32, BoundRegion)
}
impl cmp::PartialEq for InferRegion {
pub name: ast::Name,
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
- pub index: uint,
+ pub index: u32,
pub associated_with: Option<ast::DefId>,
pub bounds: ParamBounds<'tcx>,
pub default: Option<Ty<'tcx>>,
pub name: ast::Name,
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
- pub index: uint,
+ pub index: u32,
pub bounds: Vec<ty::Region>,
}
}
impl<'tcx> TraitRef<'tcx> {
- pub fn new(def_id: ast::DefId, substs: Substs<'tcx>) -> TraitRef<'tcx> {
+ pub fn new(def_id: ast::DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> {
TraitRef { def_id: def_id, substs: substs }
}
}
pub fn mk_ctxt<'tcx>(s: Session,
- type_arena: &'tcx TypedArena<TyS<'tcx>>,
+ arenas: &'tcx CtxtArenas<'tcx>,
dm: DefMap,
named_region_map: resolve_lifetime::NamedRegionMap,
map: ast_map::Map<'tcx>,
lang_items: middle::lang_items::LanguageItems,
stability: stability::Index) -> ctxt<'tcx> {
ctxt {
- type_arena: type_arena,
+ arenas: arenas,
interner: RefCell::new(FnvHashMap::new()),
+ substs_interner: RefCell::new(FnvHashMap::new()),
+ bare_fn_interner: RefCell::new(FnvHashMap::new()),
+ region_interner: RefCell::new(FnvHashMap::new()),
named_region_map: named_region_map,
item_variance_map: RefCell::new(DefIdMap::new()),
variance_computed: Cell::new(false),
// Type constructors
+impl<'tcx> ctxt<'tcx> {
+ pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
+ if let Some(substs) = self.substs_interner.borrow().get(&substs) {
+ return *substs;
+ }
+
+ let substs = self.arenas.substs.alloc(substs);
+ self.substs_interner.borrow_mut().insert(substs, substs);
+ substs
+ }
+
+ pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
+ if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
+ return *bare_fn;
+ }
+
+ let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
+ self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
+ bare_fn
+ }
+
+ pub fn mk_region(&self, region: Region) -> &'tcx Region {
+ if let Some(region) = self.region_interner.borrow().get(®ion) {
+ return *region;
+ }
+
+ let region = self.arenas.region.alloc(region);
+ self.region_interner.borrow_mut().insert(region, region);
+ region
+ }
+}
+
// Interns a type/name combination, stores the resulting box in cx.interner,
// and returns the box as cast to an unsafe ptr (see comments for Ty above).
pub fn mk_t<'tcx>(cx: &ctxt<'tcx>, st: sty<'tcx>) -> Ty<'tcx> {
let flags = FlagComputation::for_sty(&st);
- let ty = cx.type_arena.alloc(TyS {
+ let ty = cx.arenas.type_.alloc(TyS {
sty: st,
flags: flags.flags,
region_depth: flags.depth,
flags: TypeFlags,
// maximum depth of any bound region that we have seen thus far
- depth: uint,
+ depth: u32,
}
impl FlagComputation {
self.flags = self.flags | flags;
}
- fn add_depth(&mut self, depth: uint) {
+ fn add_depth(&mut self, depth: u32) {
if depth > self.depth {
self.depth = depth;
}
}
}
- &ty_unboxed_closure(_, ref region, ref substs) => {
+ &ty_unboxed_closure(_, region, substs) => {
self.add_region(*region);
self.add_substs(substs);
}
self.add_flags(HAS_TY_INFER)
}
- &ty_enum(_, ref substs) | &ty_struct(_, ref substs) => {
+ &ty_enum(_, substs) | &ty_struct(_, substs) => {
self.add_substs(substs);
}
}
&ty_rptr(r, ref m) => {
- self.add_region(r);
+ self.add_region(*r);
self.add_ty(m.ty);
}
mk_t(cx, ty_str)
}
-pub fn mk_str_slice<'tcx>(cx: &ctxt<'tcx>, r: Region, m: ast::Mutability) -> Ty<'tcx> {
+pub fn mk_str_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, m: ast::Mutability) -> Ty<'tcx> {
mk_rptr(cx, r,
mt {
ty: mk_t(cx, ty_str),
})
}
-pub fn mk_enum<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: Substs<'tcx>) -> Ty<'tcx> {
+pub fn mk_enum<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// NOTE(review): substs is now an interned &'tcx reference (see mk_substs); no copy is taken here
mk_t(cx, ty_enum(did, substs))
}
pub fn mk_ptr<'tcx>(cx: &ctxt<'tcx>, tm: mt<'tcx>) -> Ty<'tcx> { mk_t(cx, ty_ptr(tm)) }
-pub fn mk_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, tm: mt<'tcx>) -> Ty<'tcx> {
+pub fn mk_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
mk_t(cx, ty_rptr(r, tm))
}
-pub fn mk_mut_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+pub fn mk_mut_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutMutable})
}
-pub fn mk_imm_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+pub fn mk_imm_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutImmutable})
}
mk_t(cx, ty_vec(ty, sz))
}
-pub fn mk_slice<'tcx>(cx: &ctxt<'tcx>, r: Region, tm: mt<'tcx>) -> Ty<'tcx> {
+pub fn mk_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r,
mt {
ty: mk_vec(cx, tm.ty, None),
pub fn mk_bare_fn<'tcx>(cx: &ctxt<'tcx>,
opt_def_id: Option<ast::DefId>,
- fty: BareFnTy<'tcx>) -> Ty<'tcx> {
+ fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
mk_t(cx, ty_bare_fn(opt_def_id, fty))
}
let input_args = input_tys.iter().map(|ty| *ty).collect();
mk_bare_fn(cx,
Some(def_id),
- BareFnTy {
+ cx.mk_bare_fn(BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(FnSig {
output: ty::FnConverging(output),
variadic: false
})
- })
+ }))
}
}
pub fn mk_struct<'tcx>(cx: &ctxt<'tcx>, struct_id: ast::DefId,
- substs: Substs<'tcx>) -> Ty<'tcx> {
+ substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// NOTE(review): substs is now an interned &'tcx reference (see mk_substs); no copy is taken here
mk_t(cx, ty_struct(struct_id, substs))
}
pub fn mk_unboxed_closure<'tcx>(cx: &ctxt<'tcx>, closure_id: ast::DefId,
- region: Region, substs: Substs<'tcx>)
+ region: &'tcx Region, substs: &'tcx Substs<'tcx>)
-> Ty<'tcx> {
mk_t(cx, ty_unboxed_closure(closure_id, region, substs))
}
}
pub fn mk_param<'tcx>(cx: &ctxt<'tcx>, space: subst::ParamSpace,
- n: uint, k: DefId) -> Ty<'tcx> {
+ n: u32, k: DefId) -> Ty<'tcx> {
mk_t(cx, ty_param(ParamTy { space: space, idx: n, def_id: k }))
}
impl ParamTy {
pub fn new(space: subst::ParamSpace,
- index: uint,
+ index: u32,
def_id: ast::DefId)
-> ParamTy {
ParamTy { space: space, idx: index, def_id: def_id }
pub fn simd_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let fields = lookup_struct_fields(cx, did);
lookup_field_type(cx, did, fields[0].id, substs)
}
ty_bool | ty_int(_) | ty_uint(_) |
ty_float(_) | ty_tup(_) | ty_ptr(_) => false,
- ty_enum(did, ref substs) =>
+ ty_enum(did, substs) =>
enum_variants(cx, did).iter().any(|v|
v.args.iter().any(|aty| {
let t = aty.subst(cx, substs);
ty_rptr(r, ref mt) => {
TC::ReachesFfiUnsafe | match mt.ty.sty {
- ty_str => borrowed_contents(r, ast::MutImmutable),
- ty_vec(..) => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(r, mt.mutbl)),
- _ => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(r, mt.mutbl)),
+ ty_str => borrowed_contents(*r, ast::MutImmutable),
+ ty_vec(..) => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r,
+ mt.mutbl)),
+ _ => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r, mt.mutbl)),
}
}
}
ty_str => TC::Nonsized,
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let flds = struct_fields(cx, did, substs);
let mut res =
TypeContents::union(flds[],
apply_lang_items(cx, did, res)
}
- ty_unboxed_closure(did, r, ref substs) => {
+ ty_unboxed_closure(did, r, substs) => {
// FIXME(#14449): `borrowed_contents` below assumes `&mut`
// unboxed closure.
let upvars = unboxed_closure_upvars(cx, did, substs);
- TypeContents::union(upvars[],
- |f| tc_ty(cx, f.ty, cache)) |
- borrowed_contents(r, MutMutable)
+ TypeContents::union(upvars.as_slice(),
+ |f| tc_ty(cx, f.ty, cache))
+ | borrowed_contents(*r, MutMutable)
}
ty_tup(ref tys) => {
|ty| tc_ty(cx, *ty, cache))
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
let variants = substd_enum_variants(cx, did, substs);
let mut res =
TypeContents::union(variants[], |variant| {
false
}
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
seen.push(did);
let fields = struct_fields(cx, did, substs);
let r = fields.iter().any(|f| type_requires(cx, seen, r_ty, f.mt.ty));
r
}
- ty_unboxed_closure(did, _, ref substs) => {
+ ty_unboxed_closure(did, _, substs) => {
let upvars = unboxed_closure_upvars(cx, did, substs);
upvars.iter().any(|f| type_requires(cx, seen, r_ty, f.ty))
}
false
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
seen.push(did);
let vs = enum_variants(cx, did);
let r = !vs.is_empty() && vs.iter().all(|variant| {
ty_vec(ty, Some(_)) => {
is_type_structurally_recursive(cx, sp, seen, ty)
}
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let fields = struct_fields(cx, did, substs);
find_nonrepresentable(cx, sp, seen, fields.iter().map(|f| f.mt.ty))
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
let vs = enum_variants(cx, did);
let iter = vs.iter()
.flat_map(|variant| { variant.args.iter() })
find_nonrepresentable(cx, sp, seen, iter)
}
- ty_unboxed_closure(did, _, ref substs) => {
+ ty_unboxed_closure(did, _, substs) => {
let upvars = unboxed_closure_upvars(cx, did, substs);
find_nonrepresentable(cx, sp, seen, upvars.iter().map(|f| f.ty))
}
pub fn unsized_part_of_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
ty_str | ty_trait(..) | ty_vec(..) => ty,
- ty_struct(def_id, ref substs) => {
+ ty_struct(def_id, substs) => {
let unsized_fields: Vec<_> = struct_fields(cx, def_id, substs).iter()
.map(|f| f.mt.ty).filter(|ty| !type_is_sized(cx, *ty)).collect();
// Exactly one of the fields must be unsized.
pub fn close_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty_open(ty) => mk_rptr(cx, ReStatic, mt {ty: ty, mutbl:ast::MutImmutable}),
+ ty_open(ty) => mk_rptr(cx, cx.mk_region(ReStatic), mt {ty: ty, mutbl:ast::MutImmutable}),
_ => cx.sess.bug(format!("Trying to close a non-open type {}",
ty_to_string(cx, ty))[])
}
(&ty_tup(ref v), None) => v.get(i).map(|&t| t),
- (&ty_struct(def_id, ref substs), None) => lookup_struct_fields(cx, def_id)
+ (&ty_struct(def_id, substs), None) => lookup_struct_fields(cx, def_id)
.get(i)
.map(|&t|lookup_item_type(cx, t.id).ty.subst(cx, substs)),
- (&ty_enum(def_id, ref substs), Some(variant_def_id)) => {
+ (&ty_enum(def_id, substs), Some(variant_def_id)) => {
let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
variant_info.args.get(i).map(|t|t.subst(cx, substs))
}
- (&ty_enum(def_id, ref substs), None) => {
+ (&ty_enum(def_id, substs), None) => {
assert!(enum_is_univariant(cx, def_id));
let enum_variants = enum_variants(cx, def_id);
let variant_info = &(*enum_variants)[0];
variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
match (&ty.sty, variant) {
- (&ty_struct(def_id, ref substs), None) => {
+ (&ty_struct(def_id, substs), None) => {
let r = lookup_struct_fields(cx, def_id);
r.iter().find(|f| f.name == n)
.map(|&f| lookup_field_type(cx, def_id, f.id, substs))
}
- (&ty_enum(def_id, ref substs), Some(variant_def_id)) => {
+ (&ty_enum(def_id, substs), Some(variant_def_id)) => {
let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
variant_info.arg_names.as_ref()
.expect("must have struct enum variant if accessing a named fields")
span: Span,
ty: Ty) -> Region {
match ty.sty {
- ty_rptr(r, _) => r,
+ ty_rptr(r, _) => *r,
ref s => {
tcx.sess.span_bug(
span,
AdjustReifyFnPointer(_) => {
match unadjusted_ty.sty {
- ty::ty_bare_fn(Some(_), ref b) => {
- ty::mk_bare_fn(cx, None, (*b).clone())
+ ty::ty_bare_fn(Some(_), b) => {
+ ty::mk_bare_fn(cx, None, b)
}
ref b => {
cx.sess.bug(
&Some(box ref a) => adjust_ty_for_autoref(cx, span, ty, Some(a)),
&None => ty
};
- mk_rptr(cx, r, mt {
+ mk_rptr(cx, cx.mk_region(r), mt {
ty: adjusted_ty,
mutbl: m
})
ty_to_string(cx, ty))[])
},
&UnsizeStruct(box ref k, tp_index) => match ty.sty {
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let ty_substs = substs.types.get_slice(subst::TypeSpace);
let new_ty = unsize_ty(cx, ty_substs[tp_index], k, span);
let mut unsized_substs = substs.clone();
unsized_substs.types.get_mut_slice(subst::TypeSpace)[tp_index] = new_ty;
- mk_struct(cx, did, unsized_substs)
+ mk_struct(cx, did, cx.mk_substs(unsized_substs))
}
_ => cx.sess.span_bug(span,
format!("UnsizeStruct with bad sty: {}",
// the index method invoked for `a[i]` always yields an `&T`
ast::ExprIndex(..) => LvalueExpr,
- // the slice method invoked for `a[..]` always yields an `&T`
- ast::ExprSlice(..) => LvalueExpr,
-
// `for` loops are statements
ast::ExprForLoop(..) => RvalueStmtExpr,
ast::ExprUnary(ast::UnDeref, _) |
ast::ExprField(..) |
ast::ExprTupField(..) |
- ast::ExprIndex(..) |
- ast::ExprSlice(..) => {
+ ast::ExprIndex(..) => {
LvalueExpr
}
match cx.map.find(id.node) {
Some(ast_map::NodeItem(item)) => {
match item.node {
- ItemTrait(_, _, _, _, ref ms) => {
+ ItemTrait(_, _, _, ref ms) => {
let (_, p) =
ast_util::split_trait_methods(ms[]);
p.iter()
-> uint {
for type_parameter_def in trait_def.generics.types.iter() {
if type_parameter_def.def_id == associated_type_id {
- return type_parameter_def.index
+ return type_parameter_def.index as uint
}
}
cx.sess.bug("couldn't find associated type parameter index")
trait_def.bounds.trait_bounds
.iter()
.map(|bound_trait_ref| {
+ let substs = tcx.mk_substs(bound_trait_ref.substs().subst(tcx, trait_ref.substs()));
ty::Binder(
ty::TraitRef::new(bound_trait_ref.def_id(),
- bound_trait_ref.substs().subst(tcx, trait_ref.substs())))
+ substs))
})
.map(|bound_trait_ref| Rc::new(bound_trait_ref))
.collect();
var_id: freevar_def_id.node,
closure_expr_id: closure_id.node
}].clone();
- freevar_ty = mk_rptr(tcx, borrow.region, ty::mt {
+ freevar_ty = mk_rptr(tcx, tcx.mk_region(borrow.region), ty::mt {
ty: freevar_ty,
mutbl: borrow.kind.to_mutbl_lossy()
});
let opt_trait_ref = opt_principal.map_or(Vec::new(), |principal| {
let substs = principal.substs().with_self_ty(open_ty);
- vec!(Rc::new(ty::Binder(ty::TraitRef::new(principal.def_id(), substs))))
+ vec!(Rc::new(ty::Binder(ty::TraitRef::new(principal.def_id(), tcx.mk_substs(substs)))))
});
let param_bounds = ty::ParamBounds {
}
ty_rptr(r, m) => {
byte!(13);
- region(state, r);
+ region(state, *r);
mt(state, m);
}
ty_bare_fn(opt_def_id, ref b) => {
ty_unboxed_closure(d, r, _) => {
byte!(24);
did(state, d);
- region(state, r);
+ region(state, *r);
}
}
true
space,
def.repr(tcx),
i);
- let ty = ty::mk_param(tcx, space, i, def.def_id);
+ let ty = ty::mk_param(tcx, space, i as u32, def.def_id);
types.push(space, ty);
}
}
self
}
- fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
- Ok(ty::node_id_to_type(self, id))
+ fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
+ ty::node_id_to_type(self, id)
+ }
+
+ fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ ty::expr_ty_adjusted(self, expr)
}
- fn node_method_ty(&self, method_call: MethodCall) -> Option<Ty<'tcx>> {
+ fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>> {
self.method_map.borrow().get(&method_call).map(|method| method.ty)
}
+ fn node_method_origin(&self, method_call: ty::MethodCall)
+ -> Option<ty::MethodOrigin<'tcx>>
+ {
+ self.method_map.borrow().get(&method_call).map(|method| method.origin.clone())
+ }
+
fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
&self.adjustments
}
walk_ty(ty, |ty| {
match ty.sty {
ty_rptr(region, _) => {
- accumulator.push(region)
+ accumulator.push(*region)
}
ty_trait(ref t) => {
accumulator.push_all(t.principal.substs().regions().as_slice());
}
- ty_enum(_, ref substs) |
- ty_struct(_, ref substs) => {
+ ty_enum(_, substs) |
+ ty_struct(_, substs) => {
accum_substs(accumulator, substs);
}
ty_closure(ref closure_ty) => {
UniqTraitStore => {}
}
}
- ty_unboxed_closure(_, ref region, ref substs) => {
+ ty_unboxed_closure(_, region, substs) => {
accumulator.push(*region);
accum_substs(accumulator, substs);
}
// Trait method resolution
pub type TraitMap = NodeMap<Vec<DefId>>;
+// Map from the NodeId of a glob import to the set of names which are
+// actually imported through it.
+pub type GlobMap = HashMap<NodeId, HashSet<Name>>;
+
pub fn with_freevars<T, F>(tcx: &ty::ctxt, fid: ast::NodeId, f: F) -> T where
F: FnOnce(&[Freevar]) -> T,
{
}
impl DebruijnIndex {
- pub fn new(depth: uint) -> DebruijnIndex {
+ pub fn new(depth: u32) -> DebruijnIndex {
assert!(depth > 0);
DebruijnIndex { depth: depth }
}
- pub fn shifted(&self, amount: uint) -> DebruijnIndex {
+ pub fn shifted(&self, amount: u32) -> DebruijnIndex {
DebruijnIndex { depth: self.depth + amount }
}
}
param_env: &ParameterEnvironment<'tcx>)
-> Result<(),CopyImplementationError> {
match self_type.sty {
- ty::ty_struct(struct_did, ref substs) => {
+ ty::ty_struct(struct_did, substs) => {
let fields = ty::struct_fields(tcx, struct_did, substs);
for field in fields.iter() {
if type_moves_by_default(tcx, field.mt.ty, param_env) {
}
}
}
- ty::ty_enum(enum_did, ref substs) => {
+ ty::ty_enum(enum_did, substs) => {
let enum_variants = ty::enum_variants(tcx, enum_did);
for variant in enum_variants.iter() {
for variant_arg_type in variant.args.iter() {
self.has_regions_escaping_depth(0)
}
- fn has_regions_escaping_depth(&self, depth: uint) -> bool;
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool;
}
impl<'tcx> RegionEscape for Ty<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
ty::type_escapes_depth(*self, depth)
}
}
impl RegionEscape for Region {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.escapes_depth(depth)
}
}
impl<'tcx> RegionEscape for TraitRef<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.substs.types.iter().any(|t| t.has_regions_escaping_depth(depth)) &&
self.substs.regions().iter().any(|t| t.has_regions_escaping_depth(depth))
}
}
impl<'tcx,T:RegionEscape> RegionEscape for Binder<T> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth + 1)
}
}
impl<'tcx> RegionEscape for EquatePredicate<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth) || self.1.has_regions_escaping_depth(depth)
}
}
impl<T:RegionEscape,U:RegionEscape> RegionEscape for OutlivesPredicate<T,U> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth) || self.1.has_regions_escaping_depth(depth)
}
}
-
{
fn fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::Obligation<'tcx, O> {
traits::Obligation {
- cause: self.cause,
+ cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
trait_ref: self.trait_ref.fold_with(folder),
}
ty::ty_open(typ.fold_with(this))
}
ty::ty_enum(tid, ref substs) => {
- ty::ty_enum(tid, substs.fold_with(this))
+ let substs = substs.fold_with(this);
+ ty::ty_enum(tid, this.tcx().mk_substs(substs))
}
ty::ty_trait(box ty::TyTrait { ref principal, bounds }) => {
ty::ty_trait(box ty::TyTrait {
ty::ty_tup(ts.fold_with(this))
}
ty::ty_bare_fn(opt_def_id, ref f) => {
- ty::ty_bare_fn(opt_def_id, f.fold_with(this))
+ let bfn = f.fold_with(this);
+ ty::ty_bare_fn(opt_def_id, this.tcx().mk_bare_fn(bfn))
}
ty::ty_closure(ref f) => {
ty::ty_closure(box f.fold_with(this))
}
ty::ty_rptr(r, ref tm) => {
- ty::ty_rptr(r.fold_with(this), tm.fold_with(this))
+ let r = r.fold_with(this);
+ ty::ty_rptr(this.tcx().mk_region(r), tm.fold_with(this))
}
ty::ty_struct(did, ref substs) => {
- ty::ty_struct(did, substs.fold_with(this))
+ let substs = substs.fold_with(this);
+ ty::ty_struct(did, this.tcx().mk_substs(substs))
}
ty::ty_unboxed_closure(did, ref region, ref substs) => {
- ty::ty_unboxed_closure(did, region.fold_with(this), substs.fold_with(this))
+ let r = region.fold_with(this);
+ let s = substs.fold_with(this);
+ ty::ty_unboxed_closure(did, this.tcx().mk_region(r), this.tcx().mk_substs(s))
}
ty::ty_bool | ty::ty_char | ty::ty_str |
ty::ty_int(_) | ty::ty_uint(_) | ty::ty_float(_) |
t: &ty::TraitRef<'tcx>)
-> ty::TraitRef<'tcx>
{
+ let substs = t.substs.fold_with(this);
ty::TraitRef {
def_id: t.def_id,
- substs: t.substs.fold_with(this),
+ substs: this.tcx().mk_substs(substs),
}
}
pub struct RegionFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
- current_depth: uint,
- fld_r: &'a mut (FnMut(ty::Region, uint) -> ty::Region + 'a),
+ current_depth: u32,
+ fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
}
impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
pub fn new<F>(tcx: &'a ty::ctxt<'tcx>, fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
- where F : FnMut(ty::Region, uint) -> ty::Region
+ where F : FnMut(ty::Region, u32) -> ty::Region
{
RegionFolder {
tcx: tcx,
value: &T,
mut f: F)
-> T
- where F : FnMut(ty::Region, uint) -> ty::Region,
+ where F : FnMut(ty::Region, u32) -> ty::Region,
T : TypeFoldable<'tcx>,
{
value.fold_with(&mut RegionFolder::new(tcx, &mut f))
// regions. See comment on `shift_regions_through_binders` method in
// `subst.rs` for more details.
-pub fn shift_region(region: ty::Region, amount: uint) -> ty::Region {
+pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region {
match region {
ty::ReLateBound(debruijn, br) => {
ty::ReLateBound(debruijn.shifted(amount), br)
}
pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>+Repr<'tcx>>(tcx: &ty::ctxt<'tcx>,
- amount: uint, value: &T) -> T {
+ amount: u32, value: &T) -> T {
debug!("shift_regions(value={}, amount={})",
value.repr(tcx), amount);
shift_region(region, amount)
}))
}
-
PARSE_ONLY,
NO_TRANS,
NO_ANALYSIS,
- UNSTABLE_OPTIONS
+ UNSTABLE_OPTIONS,
+ PRINT_ENUM_SIZES
]
0
}
("no-analysis", "Parse and expand the source, but run no analysis and",
NO_TRANS),
("unstable-options", "Adds unstable command line options to rustc interface",
- UNSTABLE_OPTIONS)]
+ UNSTABLE_OPTIONS),
+ ("print-enum-sizes", "Print the size of enums and their variants", PRINT_ENUM_SIZES),
+ ]
}
#[deriving(Clone)]
pub fn show_span(&self) -> bool {
self.debugging_opt(config::SHOW_SPAN)
}
+ pub fn print_enum_sizes(&self) -> bool {
+ self.debugging_opt(config::PRINT_ENUM_SIZES)
+ }
pub fn sysroot<'a>(&'a self) -> &'a Path {
match self.opts.maybe_sysroot {
Some (ref sysroot) => sysroot,
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
-
}, ty_to_string(cx, tm.ty))
}
ty_rptr(r, ref tm) => {
- let mut buf = region_ptr_to_string(cx, r);
+ let mut buf = region_ptr_to_string(cx, *r);
buf.push_str(mt_to_string(cx, tm)[]);
buf
}
param_ty.user_string(cx)
}
}
- ty_enum(did, ref substs) | ty_struct(did, ref substs) => {
+ ty_enum(did, substs) | ty_struct(did, substs) => {
let base = ty::item_path_str(cx, did);
let generics = ty::lookup_item_type(cx, did).generics;
parameterized(cx, base.as_slice(), substs, &generics, did)
bound_str)
}
ty_str => "str".to_string(),
- ty_unboxed_closure(ref did, _, ref substs) => {
+ ty_unboxed_closure(ref did, _, substs) => {
let unboxed_closures = cx.unboxed_closures.borrow();
unboxed_closures.get(did).map(|cl| {
closure_to_string(cx, &cl.closure_type.subst(cx, substs))
let trait_def = ty::lookup_trait_def(tcx, self.def_id);
format!("TraitRef({}, {})",
self.substs.self_ty().repr(tcx),
- parameterized(tcx, base.as_slice(), &self.substs, &trait_def.generics, self.def_id))
+ parameterized(tcx, base.as_slice(), self.substs, &trait_def.generics, self.def_id))
}
}
fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
let path_str = ty::item_path_str(tcx, self.def_id);
let trait_def = ty::lookup_trait_def(tcx, self.def_id);
- parameterized(tcx, path_str.as_slice(), &self.substs,
+ parameterized(tcx, path_str.as_slice(), self.substs,
&trait_def.generics, self.def_id)
}
}
}
#[test]
- #[cfg(target_os = "freebsd")]
- fn test_rpath_relative() {
- let config = &mut RPathConfig {
- used_crates: Vec::new(),
- has_rpath: true,
- is_like_osx: false,
- out_filename: Path::new("bin/rustc"),
- get_install_prefix_lib_path: || panic!(),
- realpath: |p| Ok(p.clone())
- };
- let res = get_rpath_relative_to_output(config, &Path::new("lib/libstd.so"));
- assert_eq!(res, "$ORIGIN/../lib");
- }
-
- #[test]
- #[cfg(target_os = "dragonfly")]
+ #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
fn test_rpath_relative() {
let config = &mut RPathConfig {
used_crates: Vec::new(),
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
- self.buffer[mut self.buffer_idx..size],
+ self.buffer.slice_mut(self.buffer_idx, size),
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
- self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
+ self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
// be empty.
let input_remaining = input.len() - i;
copy_memory(
- self.buffer[mut ..input_remaining],
+ self.buffer.slice_to_mut(input_remaining),
input[i..]);
self.buffer_idx += input_remaining;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
- self.buffer[mut self.buffer_idx..idx].set_memory(0);
+ self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
- return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
+ return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
)
);
- read_u32v_be(w[mut 0..16], data);
+ read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
- write_u32_be(out[mut 0..4], self.engine.state.h0);
- write_u32_be(out[mut 4..8], self.engine.state.h1);
- write_u32_be(out[mut 8..12], self.engine.state.h2);
- write_u32_be(out[mut 12..16], self.engine.state.h3);
- write_u32_be(out[mut 16..20], self.engine.state.h4);
- write_u32_be(out[mut 20..24], self.engine.state.h5);
- write_u32_be(out[mut 24..28], self.engine.state.h6);
- write_u32_be(out[mut 28..32], self.engine.state.h7);
+ write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
+ write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
+ write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
+ write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
+ write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
+ write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
+ write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
+ write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
SawExprAssign,
SawExprAssignOp(ast::BinOp),
SawExprIndex,
- SawExprSlice,
SawExprRange,
SawExprPath,
SawExprAddrOf(ast::Mutability),
ExprField(_, id) => SawExprField(content(id.node)),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
- ExprSlice(..) => SawExprSlice,
ExprRange(..) => SawExprRange,
ExprPath(..) => SawExprPath,
ExprAddrOf(m, _) => SawExprAddrOf(m),
dynamic_linking: true,
executables: true,
morestack: true,
+ linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec!(
"-L/usr/local/lib".to_string(),
- "-L/usr/local/lib/gcc47".to_string(),
- "-L/usr/local/lib/gcc44".to_string(),
+ "-L/usr/lib/gcc47".to_string(),
+ // GNU-style linkers will use this to omit linking to libraries
+ // which don't actually fulfill any relocations, but only for
+ // libraries which follow this flag. Thus, use it before
+ // specifying libraries to link to.
+ "-Wl,--as-needed".to_string(),
),
-
+ position_independent_executables: true,
.. Default::default()
}
}
use target::Target;
pub fn target() -> Target {
+ let mut base = super::dragonfly_base::opts();
+ base.pre_link_args.push("-m64".to_string());
+
Target {
- data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
+ data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
+ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
+ s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-dragonfly".to_string(),
target_endian: "little".to_string(),
- target_word_size: "32".to_string(),
+ target_word_size: "64".to_string(),
arch: "x86_64".to_string(),
target_os: "dragonfly".to_string(),
- options: super::dragonfly_base::opts()
+ options: base,
}
}
use self::UseError::*;
use borrowck::*;
-use borrowck::LoanPathElem::*;
-use borrowck::LoanPathKind::*;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
use rustc::middle::region;
// FIXME (#16118): These functions are intended to allow the borrow checker to
// be less precise in its handling of Box while still allowing moves out of a
-// Box. They should be removed when OwnedPtr is removed from LoanPath.
+// Box. They should be removed when Unique is removed from LoanPath.
fn owned_ptr_base_path<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> &'a LoanPath<'tcx> {
- //! Returns the base of the leftmost dereference of an OwnedPtr in
- //! `loan_path`. If there is no dereference of an OwnedPtr in `loan_path`,
+ //! Returns the base of the leftmost dereference of a `Unique` in
+ //! `loan_path`. If there is no dereference of a `Unique` in `loan_path`,
//! then it just returns `loan_path` itself.
return match helper(loan_path) {
fn helper<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> Option<&'a LoanPath<'tcx>> {
match loan_path.kind {
LpVar(_) | LpUpvar(_) => None,
- LpExtend(ref lp_base, _, LpDeref(mc::OwnedPtr)) => {
+ LpExtend(ref lp_base, _, LpDeref(mc::Unique)) => {
match helper(&**lp_base) {
v @ Some(_) => v,
None => Some(&**lp_base)
fn helper<'tcx>(loan_path: &Rc<LoanPath<'tcx>>) -> Option<Rc<LoanPath<'tcx>>> {
match loan_path.kind {
LpVar(_) | LpUpvar(_) => None,
- LpExtend(ref lp_base, _, LpDeref(mc::OwnedPtr)) => {
+ LpExtend(ref lp_base, _, LpDeref(mc::Unique)) => {
match helper(lp_base) {
v @ Some(_) => v,
None => Some(lp_base.clone())
}
}
- mc::cat_deref(b, _, mc::OwnedPtr) => {
+ mc::cat_deref(b, _, mc::Unique) => {
assert_eq!(cmt.mutbl, mc::McInherited);
cmt = b;
}
add_fragment_siblings(this, tcx, gathered_fragments, loan_parent.clone(), origin_id);
}
- // *LV for OwnedPtr consumes the contents of the box (at
+ // *LV for Unique consumes the contents of the box (at
// least when it is non-copy...), so propagate inward.
- LpExtend(ref loan_parent, _, LpDeref(mc::OwnedPtr)) => {
+ LpExtend(ref loan_parent, _, LpDeref(mc::Unique)) => {
add_fragment_siblings(this, tcx, gathered_fragments, loan_parent.clone(), origin_id);
}
}
}
- (&ty::ty_enum(enum_def_id, ref substs), ref enum_variant_info) => {
+ (&ty::ty_enum(enum_def_id, substs), ref enum_variant_info) => {
let variant_info = {
let mut variants = ty::substd_enum_variants(tcx, enum_def_id, substs);
match *enum_variant_info {
//! Computes moves.
use borrowck::*;
-use borrowck::LoanPathKind::*;
use borrowck::gather_loans::move_error::MoveSpanAndPath;
use borrowck::gather_loans::move_error::{MoveError, MoveErrorCollector};
use borrowck::move_data::*;
}
}
- mc::cat_deref(ref b, _, mc::OwnedPtr) => {
+ mc::cat_deref(ref b, _, mc::Unique) => {
check_and_get_illegal_move_origin(bccx, b)
}
}
}
mc::cat_downcast(ref base, _) |
- mc::cat_deref(ref base, _, mc::OwnedPtr) | // L-Deref-Send
+ mc::cat_deref(ref base, _, mc::Unique) | // L-Deref-Send
mc::cat_interior(ref base, _) => { // L-Field
self.check(base, discr_scope)
}
r
}
mc::cat_downcast(ref cmt, _) |
- mc::cat_deref(ref cmt, _, mc::OwnedPtr) |
+ mc::cat_deref(ref cmt, _, mc::Unique) |
mc::cat_interior(ref cmt, _) => {
self.scope(cmt)
}
// sure that all of these loans are honored.
use borrowck::*;
-use borrowck::LoanPathKind::*;
use borrowck::move_data::MoveData;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
pub use self::RestrictionResult::*;
use borrowck::*;
-use borrowck::LoanPathElem::*;
-use borrowck::LoanPathKind::*;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
use rustc::middle::ty;
mc::cat_deref(cmt_base, _, pk) => {
match pk {
- mc::OwnedPtr => {
+ mc::Unique => {
// R-Deref-Send-Pointer
//
// When we borrow the interior of an owned pointer, we
}
pub fn cat_expr(&self, expr: &ast::Expr) -> mc::cmt<'tcx> {
- match self.mc().cat_expr(expr) {
- Ok(c) => c,
- Err(()) => {
- self.tcx.sess.span_bug(expr.span, "error in mem categorization");
- }
- }
+ self.mc().cat_expr(expr)
}
pub fn report(&self, err: BckError<'tcx>) {
use std::io;
use std::io::fs;
use std::os;
-use arena::TypedArena;
use syntax::ast;
use syntax::ast_map;
use syntax::attr;
if stop_after_phase_2(&sess) { return; }
- let type_arena = TypedArena::new();
- let analysis = phase_3_run_analysis_passes(sess, ast_map, &type_arena, id);
+ let arenas = ty::CtxtArenas::new();
+ let analysis = phase_3_run_analysis_passes(sess, ast_map, &arenas, id);
phase_save_analysis(&analysis.ty_cx.sess, analysis.ty_cx.map.krate(), &analysis, outdir);
+
+ if log_enabled!(::log::INFO) {
+ println!("Pre-trans");
+ analysis.ty_cx.print_debug_stats();
+ }
+
if stop_after_phase_3(&analysis.ty_cx.sess) { return; }
let (tcx, trans) = phase_4_translate_to_llvm(analysis);
+ if log_enabled!(::log::INFO) {
+ println!("Post-trans");
+ tcx.print_debug_stats();
+ }
+
// Discard interned strings as they are no longer required.
token::get_ident_interner().clear();
/// structures carrying the results of the analysis.
pub fn phase_3_run_analysis_passes<'tcx>(sess: Session,
ast_map: ast_map::Map<'tcx>,
- type_arena: &'tcx TypedArena<ty::TyS<'tcx>>,
+ arenas: &'tcx ty::CtxtArenas<'tcx>,
name: String) -> ty::CrateAnalysis<'tcx> {
let time_passes = sess.time_passes();
let krate = ast_map.krate();
let lang_items = time(time_passes, "language item collection", (), |_|
middle::lang_items::collect_language_items(krate, &sess));
+ let make_glob_map = if save_analysis(&sess) {
+ resolve::MakeGlobMap::Yes
+ } else {
+ resolve::MakeGlobMap::No
+ };
let resolve::CrateMap {
def_map,
freevars,
export_map,
trait_map,
external_exports,
- last_private_map
+ last_private_map,
+ glob_map,
} =
time(time_passes, "resolution", (),
- |_| resolve::resolve_crate(&sess, &lang_items, krate));
+ |_| resolve::resolve_crate(&sess,
+ &ast_map,
+ &lang_items,
+ krate,
+ make_glob_map));
// Discard MTWT tables that aren't required past resolution.
syntax::ext::mtwt::clear_tables();
middle::check_static_recursion::check_crate(&sess, krate, &def_map, &ast_map));
let ty_cx = ty::mk_ctxt(sess,
- type_arena,
+ arenas,
def_map,
named_region_map,
ast_map,
public_items: public_items,
reachable: reachable_map,
name: name,
+ glob_map: glob_map,
}
}
+fn save_analysis(sess: &Session) -> bool {
+ (sess.opts.debugging_opts & config::SAVE_ANALYSIS) != 0
+}
+
pub fn phase_save_analysis(sess: &Session,
krate: &ast::Crate,
analysis: &ty::CrateAnalysis,
odir: &Option<Path>) {
- if (sess.opts.debugging_opts & config::SAVE_ANALYSIS) == 0 {
+ if !save_analysis(sess) {
return;
}
time(sess.time_passes(), "save analysis", krate, |krate|
use std::io::{mod, MemReader};
use std::option;
use std::str::FromStr;
-use arena::TypedArena;
#[deriving(Copy, PartialEq, Show)]
pub enum PpSourceMode {
fn call_with_pp_support<'tcx, A, B, F>(&self,
sess: Session,
ast_map: Option<ast_map::Map<'tcx>>,
- type_arena: &'tcx TypedArena<ty::TyS<'tcx>>,
+ arenas: &'tcx ty::CtxtArenas<'tcx>,
id: String,
payload: B,
f: F) -> A where
}
PpmTyped => {
let ast_map = ast_map.expect("--pretty=typed missing ast_map");
- let analysis = driver::phase_3_run_analysis_passes(sess, ast_map,
- type_arena, id);
+ let analysis = driver::phase_3_run_analysis_passes(sess, ast_map, arenas, id);
let annotation = TypedAnnotation { analysis: analysis };
f(&annotation, payload)
}
};
let mut forest = ast_map::Forest::new(krate);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let (krate, ast_map) = if compute_ast_map {
let map = driver::assign_node_ids_and_map(&sess, &mut forest);
match (ppm, opt_uii) {
(PpmSource(s), None) =>
s.call_with_pp_support(
- sess, ast_map, &type_arena, id, out, |annotation, out| {
+ sess, ast_map, &arenas, id, out, |annotation, out| {
debug!("pretty printing source code {}", s);
let sess = annotation.sess();
pprust::print_crate(sess.codemap(),
(PpmSource(s), Some(uii)) =>
s.call_with_pp_support(
- sess, ast_map, &type_arena, id, (out,uii), |annotation, (out,uii)| {
+ sess, ast_map, &arenas, id, (out,uii), |annotation, (out,uii)| {
debug!("pretty printing source code {}", s);
let sess = annotation.sess();
let ast_map = annotation.ast_map()
match code {
Some(code) => {
let variants = gather_flowgraph_variants(&sess);
- let analysis = driver::phase_3_run_analysis_passes(sess, ast_map,
- &type_arena, id);
+ let analysis = driver::phase_3_run_analysis_passes(sess, ast_map, &arenas, id);
print_flowgraph(variants, analysis, code, out)
}
None => {
// run just enough stuff to build a tcx:
let lang_items = lang_items::collect_language_items(krate, &sess);
let resolve::CrateMap { def_map, freevars, capture_mode_map, .. } =
- resolve::resolve_crate(&sess, &lang_items, krate);
+ resolve::resolve_crate(&sess, &ast_map, &lang_items, krate, resolve::MakeGlobMap::No);
let named_region_map = resolve_lifetime::krate(&sess, krate, &def_map);
let region_map = region::resolve_crate(&sess, krate);
let stability_index = stability::Index::build(krate);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let tcx = ty::mk_ctxt(sess,
- &type_arena,
+ &arenas,
def_map,
named_region_map,
ast_map,
let input_args = input_tys.iter().map(|ty| *ty).collect();
ty::mk_bare_fn(self.infcx.tcx,
None,
- ty::BareFnTy {
+ self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(output_ty),
variadic: false
})
- })
+ }))
}
pub fn t_nil(&self) -> Ty<'tcx> {
})
}
- pub fn t_param(&self, space: subst::ParamSpace, index: uint) -> Ty<'tcx> {
+ pub fn t_param(&self, space: subst::ParamSpace, index: u32) -> Ty<'tcx> {
ty::mk_param(self.infcx.tcx, space, index, ast_util::local_def(ast::DUMMY_NODE_ID))
}
pub fn re_early_bound(&self,
space: subst::ParamSpace,
- index: uint,
+ index: u32,
name: &'static str)
-> ty::Region
{
ty::ReEarlyBound(ast::DUMMY_NODE_ID, space, index, name)
}
- pub fn re_late_bound_with_debruijn(&self, id: uint, debruijn: ty::DebruijnIndex) -> ty::Region {
+ pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) -> ty::Region {
ty::ReLateBound(debruijn, ty::BrAnon(id))
}
pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, r, ty::mk_int())
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
- pub fn t_rptr_late_bound(&self, id: uint) -> Ty<'tcx> {
+ pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> {
+ let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1));
ty::mk_imm_rptr(self.infcx.tcx,
- self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1)),
+ self.infcx.tcx.mk_region(r),
ty::mk_int())
}
pub fn t_rptr_late_bound_with_debruijn(&self,
- id: uint,
+ id: u32,
debruijn: ty::DebruijnIndex)
-> Ty<'tcx> {
+ let r = self.re_late_bound_with_debruijn(id, debruijn);
ty::mk_imm_rptr(self.infcx.tcx,
- self.re_late_bound_with_debruijn(id, debruijn),
+ self.infcx.tcx.mk_region(r),
ty::mk_int())
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, ty::ReScope(CodeExtent::from_node_id(id)), ty::mk_int())
+ let r = ty::ReScope(CodeExtent::from_node_id(id));
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
- pub fn re_free(&self, nid: ast::NodeId, id: uint) -> ty::Region {
+ pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region {
ty::ReFree(ty::FreeRegion { scope: CodeExtent::from_node_id(nid),
bound_region: ty::BrAnon(id)})
}
- pub fn t_rptr_free(&self, nid: ast::NodeId, id: uint) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, self.re_free(nid, id), ty::mk_int())
+ pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> {
+ let r = self.re_free(nid, id);
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, ty::ReStatic, ty::mk_int())
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(ty::ReStatic), ty::mk_int())
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace<'tcx> {
assert_eq!(t_substituted, t_expected);
})
}
-
use syntax::codemap::{Span, DUMMY_SP};
use syntax::visit::{mod, Visitor};
-struct UnusedImportCheckVisitor<'a, 'b:'a> {
- resolver: &'a mut Resolver<'b>
+struct UnusedImportCheckVisitor<'a, 'b:'a, 'tcx:'b> {
+ resolver: &'a mut Resolver<'b, 'tcx>
}
// Deref and DerefMut impls allow treating UnusedImportCheckVisitor as Resolver.
-impl<'a, 'b> Deref<Resolver<'b>> for UnusedImportCheckVisitor<'a, 'b> {
- fn deref<'c>(&'c self) -> &'c Resolver<'b> {
+impl<'a, 'b, 'tcx:'b> Deref<Resolver<'b, 'tcx>> for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
+ fn deref<'c>(&'c self) -> &'c Resolver<'b, 'tcx> {
&*self.resolver
}
}
-impl<'a, 'b> DerefMut<Resolver<'b>> for UnusedImportCheckVisitor<'a, 'b> {
- fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b> {
+impl<'a, 'b, 'tcx:'b> DerefMut<Resolver<'b, 'tcx>> for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
+ fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b, 'tcx> {
&mut *self.resolver
}
}
-impl<'a, 'b> UnusedImportCheckVisitor<'a, 'b> {
+impl<'a, 'b, 'tcx> UnusedImportCheckVisitor<'a, 'b, 'tcx> {
// We have information about whether `use` (import) directives are actually used now.
// If an import is not used at all, we signal a lint error. If an import is only used
// for a single namespace, we remove the other namespace from the recorded privacy
}
}
-impl<'a, 'b, 'v> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b> {
+impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> {
fn visit_view_item(&mut self, vi: &ViewItem) {
// Ignore is_public import statements because there's no way to be sure
// whether they're used or not. Also ignore imports with a dummy span
use rustc::middle::pat_util::pat_bindings;
use rustc::middle::privacy::*;
use rustc::middle::subst::{ParamSpace, FnSpace, TypeSpace};
-use rustc::middle::ty::{CaptureModeMap, Freevar, FreevarMap, TraitMap};
+use rustc::middle::ty::{CaptureModeMap, Freevar, FreevarMap, TraitMap, GlobMap};
use rustc::util::nodemap::{NodeMap, NodeSet, DefIdSet, FnvHashMap};
use rustc::util::lev_distance::lev_distance;
use syntax::ast::{ForeignItem, ForeignItemFn, ForeignItemStatic, Generics};
use syntax::ast::{Ident, ImplItem, Item, ItemConst, ItemEnum, ItemFn};
use syntax::ast::{ItemForeignMod, ItemImpl, ItemMac, ItemMod, ItemStatic};
-use syntax::ast::{ItemStruct, ItemTrait, ItemTy, Local};
+use syntax::ast::{ItemStruct, ItemTrait, ItemTy, Local, LOCAL_CRATE};
use syntax::ast::{MethodImplItem, Mod, Name, NamedField, NodeId};
use syntax::ast::{Pat, PatEnum, PatIdent, PatLit};
use syntax::ast::{PatRange, PatStruct, Path, PathListIdent, PathListMod};
use syntax::ast::{ViewItemUse, ViewPathGlob, ViewPathList, ViewPathSimple};
use syntax::ast::{Visibility};
use syntax::ast;
+use syntax::ast_map;
use syntax::ast_util::{mod, PostExpansionMethod, local_def, walk_pat};
use syntax::attr::AttrMetaMethods;
use syntax::ext::mtwt;
ImportNameDefinition(Def, LastPrivate) //< The name identifies an import.
}
-impl<'a, 'v> Visitor<'v> for Resolver<'a> {
+impl<'a, 'v, 'tcx> Visitor<'v> for Resolver<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
self.resolve_item(item);
}
}
}
+/// Whether an import can be shadowed by another import.
+#[deriving(Show,PartialEq,Clone,Copy)]
+enum Shadowable {
+ Always,
+ /// Means that the recorded import obeys the glob shadowing rules, i.e., can
+ /// only be shadowed by another glob import.
+ Glob,
+ Never
+}
+
/// One import directive.
struct ImportDirective {
module_path: Vec<Name>,
span: Span,
id: NodeId,
is_public: bool, // see note in ImportResolution about how to use this
- shadowable: bool,
+ shadowable: Shadowable,
}
impl ImportDirective {
span: Span,
id: NodeId,
is_public: bool,
- shadowable: bool)
+ shadowable: Shadowable)
-> ImportDirective {
ImportDirective {
module_path: module_path,
struct Target {
target_module: Rc<Module>,
bindings: Rc<NameBindings>,
- shadowable: bool,
+ shadowable: Shadowable,
}
impl Target {
fn new(target_module: Rc<Module>,
bindings: Rc<NameBindings>,
- shadowable: bool)
+ shadowable: Shadowable)
-> Target {
Target {
target_module: target_module,
ValueNS => self.value_id,
}
}
+
+ fn shadowable(&self, namespace: Namespace) -> Shadowable {
+ let target = self.target_for_namespace(namespace);
+ if target.is_none() {
+ return Shadowable::Always;
+ }
+
+ target.unwrap().shadowable
+ }
}
/// The link from a module up to its nearest parent node.
}
/// The main resolver class.
-struct Resolver<'a> {
+struct Resolver<'a, 'tcx:'a> {
session: &'a Session,
+ ast_map: &'a ast_map::Map<'tcx>,
+
graph_root: NameBindings,
trait_item_map: FnvHashMap<(Name, DefId), TraitItemKind>,
// so as to avoid printing duplicate errors
emit_errors: bool,
+ make_glob_map: bool,
+ // Maps imports to the names of items actually imported (this actually maps
+ // all imports, but only glob imports are actually interesting).
+ glob_map: GlobMap,
+
used_imports: HashSet<(NodeId, Namespace)>,
used_crates: HashSet<CrateNum>,
}
-struct BuildReducedGraphVisitor<'a, 'b:'a> {
- resolver: &'a mut Resolver<'b>,
+struct BuildReducedGraphVisitor<'a, 'b:'a, 'tcx:'b> {
+ resolver: &'a mut Resolver<'b, 'tcx>,
parent: ReducedGraphParent
}
-impl<'a, 'b, 'v> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b> {
+impl<'a, 'b, 'v, 'tcx> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b, 'tcx> {
fn visit_item(&mut self, item: &Item) {
let p = self.resolver.build_reduced_graph_for_item(item, self.parent.clone());
}
-impl<'a> Resolver<'a> {
- fn new(session: &'a Session, crate_span: Span) -> Resolver<'a> {
+impl<'a, 'tcx> Resolver<'a, 'tcx> {
+ fn new(session: &'a Session,
+ ast_map: &'a ast_map::Map<'tcx>,
+ crate_span: Span,
+ make_glob_map: MakeGlobMap) -> Resolver<'a, 'tcx> {
let graph_root = NameBindings::new();
graph_root.define_module(NoParentLink,
Resolver {
session: session,
+ ast_map: ast_map,
+
// The outermost module has def ID 0; this is not reflected in the
// AST.
last_private: NodeMap::new(),
emit_errors: true,
+ make_glob_map: make_glob_map == MakeGlobMap::Yes,
+ glob_map: HashMap::new(),
}
}
ItemImpl(_, _, Some(_), _, _) => parent,
- ItemTrait(_, _, _, _, ref items) => {
+ ItemTrait(_, _, _, ref items) => {
let name_bindings =
self.add_child(name,
parent.clone(),
attr.name() == token::get_name(
special_idents::prelude_import.name)
});
+ let shadowable = if shadowable {
+ Shadowable::Always
+ } else {
+ Shadowable::Never
+ };
match view_path.node {
ViewPathSimple(binding, ref full_path, id) => {
view_path.span,
id,
is_public,
- shadowable);
+ if shadowable == Shadowable::Never {
+ Shadowable::Glob
+ } else {
+ shadowable
+ });
}
}
}
span: Span,
id: NodeId,
is_public: bool,
- shadowable: bool) {
+ shadowable: Shadowable) {
module_.imports.borrow_mut().push(ImportDirective::new(module_path,
subclass,
span,
}
}
+ #[inline]
+ fn record_import_use(&mut self, import_id: NodeId, name: Name) {
+ if !self.make_glob_map {
+ return;
+ }
+ if self.glob_map.contains_key(&import_id) {
+ self.glob_map[import_id].insert(name);
+ return;
+ }
+
+ let mut new_set = HashSet::new();
+ new_set.insert(name);
+ self.glob_map.insert(import_id, new_set);
+ }
+
+ fn get_trait_name(&self, did: DefId) -> Name {
+ if did.krate == LOCAL_CRATE {
+ self.ast_map.expect_item(did.node).ident.name
+ } else {
+ csearch::get_trait_name(&self.session.cstore, did)
+ }
+ }
+
/// Attempts to resolve the given import. The return value indicates
/// failure if we're certain the name does not exist, indeterminate if we
/// don't know whether the name exists at the moment due to other
let mut resolution_result = Failed(None);
let module_path = &import_directive.module_path;
- debug!("(resolving import for module) resolving import `{}::...` in \
- `{}`",
+ debug!("(resolving import for module) resolving import `{}::...` in `{}`",
self.names_to_string(module_path[]),
self.module_to_string(&*module_));
fn get_binding(this: &mut Resolver,
import_resolution: &ImportResolution,
- namespace: Namespace)
+ namespace: Namespace,
+ source: &Name)
-> NamespaceResult {
// Import resolutions must be declared with "pub"
let id = import_resolution.id(namespace);
// track used imports and extern crates as well
this.used_imports.insert((id, namespace));
+ this.record_import_use(id, *source);
match target_module.def_id.get() {
Some(DefId{krate: kid, ..}) => {
this.used_crates.insert(kid);
// The name is an import which has been fully
// resolved. We can, therefore, just follow it.
if value_result.is_unknown() {
- value_result = get_binding(self, import_resolution,
- ValueNS);
+ value_result = get_binding(self,
+ import_resolution,
+ ValueNS,
+ &source);
value_used_reexport = import_resolution.is_public;
}
if type_result.is_unknown() {
- type_result = get_binding(self, import_resolution,
- TypeNS);
+ type_result = get_binding(self,
+ import_resolution,
+ TypeNS,
+ &source);
type_used_reexport = import_resolution.is_public;
}
return Success(());
}
- // Resolves a glob import. Note that this function cannot panic; it either
+ // Resolves a glob import. Note that this function cannot fail; it either
// succeeds or bails out (as importing * from an empty module or a module
// that exports nothing is valid).
fn resolve_glob_import(&mut self,
let mut import_resolutions = module_.import_resolutions.borrow_mut();
let dest_import_resolution = match import_resolutions.entry(name) {
- Occupied(entry) => entry.into_mut(),
+ Occupied(entry) => {
+ entry.into_mut()
+ }
Vacant(entry) => {
// Create a new import resolution from this child.
entry.set(ImportResolution::new(id, is_public))
// Merge the child item into the import resolution.
if name_bindings.defined_in_namespace_with(ValueNS, IMPORTABLE | PUBLIC) {
debug!("(resolving glob import) ... for value target");
- dest_import_resolution.value_target =
- Some(Target::new(containing_module.clone(),
- name_bindings.clone(),
- import_directive.shadowable));
- dest_import_resolution.value_id = id;
+ if dest_import_resolution.shadowable(ValueNS) == Shadowable::Never {
+ let msg = format!("a value named `{}` has already been imported \
+ in this module",
+ token::get_name(name).get());
+ self.session.span_err(import_directive.span, msg.as_slice());
+ } else {
+ dest_import_resolution.value_target =
+ Some(Target::new(containing_module.clone(),
+ name_bindings.clone(),
+ import_directive.shadowable));
+ dest_import_resolution.value_id = id;
+ }
}
if name_bindings.defined_in_namespace_with(TypeNS, IMPORTABLE | PUBLIC) {
debug!("(resolving glob import) ... for type target");
- dest_import_resolution.type_target =
- Some(Target::new(containing_module,
- name_bindings.clone(),
- import_directive.shadowable));
- dest_import_resolution.type_id = id;
+ if dest_import_resolution.shadowable(TypeNS) == Shadowable::Never {
+ let msg = format!("a type named `{}` has already been imported \
+ in this module",
+ token::get_name(name).get());
+ self.session.span_err(import_directive.span, msg.as_slice());
+ } else {
+ dest_import_resolution.type_target =
+ Some(Target::new(containing_module,
+ name_bindings.clone(),
+ import_directive.shadowable));
+ dest_import_resolution.type_id = id;
+ }
}
dest_import_resolution.is_public = is_public;
}
match *target {
- Some(ref target) if !target.shadowable => {
+ Some(ref target) if target.shadowable != Shadowable::Always => {
let msg = format!("a {} named `{}` has already been imported \
in this module",
match namespace {
.borrow()
.contains_key(&name) {
match import_resolution.type_target {
- Some(ref target) if !target.shadowable => {
+ Some(ref target) if target.shadowable != Shadowable::Always => {
let msg = format!("import `{0}` conflicts with imported \
crate in this module \
(maybe you meant `use {0}::*`?)",
};
match import_resolution.value_target {
- Some(ref target) if !target.shadowable => {
+ Some(ref target) if target.shadowable != Shadowable::Always => {
if let Some(ref value) = *name_bindings.value_def.borrow() {
let msg = format!("import `{}` conflicts with value \
in this module",
}
match import_resolution.type_target {
- Some(ref target) if !target.shadowable => {
+ Some(ref target) if target.shadowable != Shadowable::Always => {
if let Some(ref ty) = *name_bindings.type_def.borrow() {
match ty.module_def {
None => {
debug!("top name bindings succeeded");
return Success((Target::new(module_.clone(),
name_bindings.clone(),
- false),
+ Shadowable::Never),
false));
}
Some(_) | None => { /* Not found; continue. */ }
debug!("(resolving item in lexical scope) using \
import resolution");
// track used imports and extern crates as well
- self.used_imports.insert((import_resolution.id(namespace), namespace));
+ let id = import_resolution.id(namespace);
+ self.used_imports.insert((id, namespace));
+ self.record_import_use(id, name);
if let Some(DefId{krate: kid, ..}) = target.target_module.def_id.get() {
- self.used_crates.insert(kid);
+ self.used_crates.insert(kid);
}
return Success((target, false));
}
let name_bindings =
Rc::new(Resolver::create_name_bindings_from_module(module));
debug!("lower name bindings succeeded");
- return Success((Target::new(module_, name_bindings, false),
+ return Success((Target::new(module_,
+ name_bindings,
+ Shadowable::Never),
false));
}
}
debug!("(resolving name in module) found node as child");
return Success((Target::new(module_.clone(),
name_bindings.clone(),
- false),
+ Shadowable::Never),
false));
}
Some(_) | None => {
debug!("(resolving name in module) resolved to \
import");
// track used imports and extern crates as well
- self.used_imports.insert((import_resolution.id(namespace), namespace));
+ let id = import_resolution.id(namespace);
+ self.used_imports.insert((id, namespace));
+ self.record_import_use(id, name);
if let Some(DefId{krate: kid, ..}) = target.target_module.def_id.get() {
self.used_crates.insert(kid);
}
if let Some(module) = module_.external_module_children.borrow().get(&name).cloned() {
let name_bindings =
Rc::new(Resolver::create_name_bindings_from_module(module));
- return Success((Target::new(module_, name_bindings, false),
+ return Success((Target::new(module_,
+ name_bindings,
+ Shadowable::Never),
false));
}
}
impl_items[]);
}
- ItemTrait(_, ref generics, ref unbound, ref bounds, ref trait_items) => {
+ ItemTrait(_, ref generics, ref bounds, ref trait_items) => {
// Create a new rib for the self type.
let mut self_type_rib = Rib::new(ItemRibKind);
this.resolve_type_parameter_bounds(item.id, bounds,
TraitDerivation);
- match *unbound {
- Some(ref tpb) => {
- this.resolve_trait_reference(item.id, tpb, TraitDerivation);
- }
- None => {}
- }
-
for trait_item in (*trait_items).iter() {
// Create a new rib for the trait_item-specific type
// parameters.
let def_like = DlDef(DefTyParam(space,
local_def(type_parameter.id),
- index));
+ index as u32));
// Associate this type parameter with
// the item that bound it
self.record_def(type_parameter.id,
self.resolve_type_parameter_bound(type_parameter.id, bound,
TraitBoundingTypeParameter);
}
- match &type_parameter.unbound {
- &Some(ref unbound) =>
- self.resolve_trait_reference(
- type_parameter.id, unbound, TraitBoundingTypeParameter),
- &None => {}
- }
match type_parameter.default {
Some(ref ty) => self.resolve_type(&**ty),
None => {}
type_parameter_bound: &TyParamBound,
reference_type: TraitReferenceType) {
match *type_parameter_bound {
- TraitTyParamBound(ref tref) => {
+ TraitTyParamBound(ref tref, _) => {
self.resolve_poly_trait_reference(id, tref, reference_type)
}
RegionTyParamBound(..) => {}
let id = import_resolution.id(namespace);
// track imports and extern crates as well
self.used_imports.insert((id, namespace));
+ self.record_import_use(id, name);
match target.target_module.def_id.get() {
Some(DefId{krate: kid, ..}) => {
self.used_crates.insert(kid);
};
if self.trait_item_map.contains_key(&(name, did)) {
add_trait_info(&mut found_traits, did, name);
- self.used_imports.insert((import.type_id, TypeNS));
+ let id = import.type_id;
+ self.used_imports.insert((id, TypeNS));
+ let trait_name = self.get_trait_name(did);
+ self.record_import_use(id, trait_name);
if let Some(DefId{krate: kid, ..}) = target.target_module.def_id.get() {
self.used_crates.insert(kid);
}
pub trait_map: TraitMap,
pub external_exports: ExternalExports,
pub last_private_map: LastPrivateMap,
+ pub glob_map: Option<GlobMap>
+}
+
+#[deriving(PartialEq,Copy)]
+pub enum MakeGlobMap {
+ Yes,
+ No
}
/// Entry point to crate resolution.
-pub fn resolve_crate(session: &Session,
- _: &LanguageItems,
- krate: &Crate)
- -> CrateMap {
- let mut resolver = Resolver::new(session, krate.span);
+pub fn resolve_crate<'a, 'tcx>(session: &'a Session,
+ ast_map: &'a ast_map::Map<'tcx>,
+ _: &LanguageItems,
+ krate: &Crate,
+ make_glob_map: MakeGlobMap)
+ -> CrateMap {
+ let mut resolver = Resolver::new(session, ast_map, krate.span, make_glob_map);
resolver.build_reduced_graph(krate);
session.abort_if_errors();
trait_map: resolver.trait_map,
external_exports: resolver.external_exports,
last_private_map: resolver.last_private,
+ glob_map: if resolver.make_glob_map {
+ Some(resolver.glob_map)
+ } else {
+ None
+ },
}
}
use std::rc::Rc;
-struct ExportRecorder<'a, 'b:'a> {
- resolver: &'a mut Resolver<'b>
+struct ExportRecorder<'a, 'b:'a, 'tcx:'b> {
+ resolver: &'a mut Resolver<'b, 'tcx>
}
// Deref and DerefMut impls allow treating ExportRecorder as Resolver.
-impl<'a, 'b> Deref<Resolver<'b>> for ExportRecorder<'a, 'b> {
- fn deref<'c>(&'c self) -> &'c Resolver<'b> {
+impl<'a, 'b, 'tcx:'b> Deref<Resolver<'b, 'tcx>> for ExportRecorder<'a, 'b, 'tcx> {
+ fn deref<'c>(&'c self) -> &'c Resolver<'b, 'tcx> {
&*self.resolver
}
}
-impl<'a, 'b> DerefMut<Resolver<'b>> for ExportRecorder<'a, 'b> {
- fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b> {
+impl<'a, 'b, 'tcx:'b> DerefMut<Resolver<'b, 'tcx>> for ExportRecorder<'a, 'b, 'tcx> {
+ fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b, 'tcx> {
&mut *self.resolver
}
}
-impl<'a, 'b> ExportRecorder<'a, 'b> {
+impl<'a, 'b, 'tcx> ExportRecorder<'a, 'b, 'tcx> {
fn record_exports_for_module_subtree(&mut self,
module_: Rc<Module>) {
// If this isn't a local krate, then bail out. We don't need to record
}
fn dump(&mut self, handler: &Handler) {
- let mut buffer = self.buffer.lock();
+ let mut buffer = self.buffer.lock().unwrap();
for diag in buffer.iter() {
match diag.code {
Some(ref code) => {
msg: &str, code: Option<&str>, lvl: Level) {
assert!(cmsp.is_none(), "SharedEmitter doesn't support spans");
- self.buffer.lock().push(Diagnostic {
+ self.buffer.lock().unwrap().push(Diagnostic {
msg: msg.to_string(),
code: code.map(|s| s.to_string()),
lvl: lvl,
time_passes: bool,
}
+unsafe impl Send for ModuleConfig { }
+
impl ModuleConfig {
fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
ModuleConfig {
loop {
// Avoid holding the lock for the entire duration of the match.
- let maybe_work = work_items_arc.lock().pop();
+ let maybe_work = work_items_arc.lock().unwrap().pop();
match maybe_work {
Some(work) => {
execute_work_item(&cgcx, work);
// super-traits
for super_bound in trait_refs.iter() {
let trait_ref = match *super_bound {
- ast::TraitTyParamBound(ref trait_ref) => {
+ ast::TraitTyParamBound(ref trait_ref, _) => {
trait_ref
}
ast::RegionTyParamBound(..) => {
&**typ,
impl_items)
}
- ast::ItemTrait(_, ref generics, _, ref trait_refs, ref methods) =>
+ ast::ItemTrait(_, ref generics, ref trait_refs, ref methods) =>
self.process_trait(item, generics, trait_refs, methods),
ast::ItemMod(ref m) => self.process_mod(item, m),
ast::ItemTy(ref ty, ref ty_params) => {
fn visit_generics(&mut self, generics: &ast::Generics) {
for param in generics.ty_params.iter() {
for bound in param.bounds.iter() {
- if let ast::TraitTyParamBound(ref trait_ref) = *bound {
+ if let ast::TraitTyParamBound(ref trait_ref, _) = *bound {
self.process_trait_ref(&trait_ref.trait_ref, None);
}
}
self.cur_scope);
self.write_sub_paths_truncated(path);
}
- ast::ViewPathGlob(ref path, _) => {
+ ast::ViewPathGlob(ref path, id) => {
+ // Make a comma-separated list of names of imported modules.
+ let mut name_string = String::new();
+ let glob_map = &self.analysis.glob_map;
+ let glob_map = glob_map.as_ref().unwrap();
+ if glob_map.contains_key(&id) {
+ let names = glob_map.index(&id);
+ for n in names.iter() {
+ if name_string.len() > 0 {
+ name_string.push_str(", ");
+ }
+ name_string.push_str(n.as_str());
+ }
+ }
+
+ let sub_span = self.span.sub_span_of_token(path.span,
+ token::BinOp(token::Star));
+ self.fmt.use_glob_str(path.span,
+ sub_span,
+ id,
+ name_string.as_slice(),
+ self.cur_scope);
self.write_sub_paths(path);
}
ast::ViewPathList(ref path, ref list, _) => {
return;
}
+ assert!(analysis.glob_map.is_some());
let cratename = match attr::find_crate_name(krate.attrs[]) {
Some(name) => name.get().to_string(),
None => {
Impl,
Module,
UseAlias,
+ UseGlob,
ExternCrate,
Inheritance,
MethodCall,
UseAlias => ("use_alias",
vec!("id","refid","refidcrate","name","scopeid"),
true, true),
+ UseGlob => ("use_glob", vec!("id","value","scopeid"), true, true),
ExternCrate => ("extern_crate",
vec!("id","name","location","crate","scopeid"),
true, true),
svec!(id, mod_node, mod_crate, name, parent));
}
+ pub fn use_glob_str(&mut self,
+ span: Span,
+ sub_span: Option<Span>,
+ id: NodeId,
+ values: &str,
+ parent: NodeId) {
+ self.check_and_record(UseGlob,
+ span,
+ sub_span,
+ svec!(id, values, parent));
+ }
+
pub fn extern_crate_str(&mut self,
span: Span,
sub_span: Option<Span>,
}
}
+ pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option<Span> {
+ let mut toks = self.retokenise_span(span);
+ loop {
+ let next = toks.real_token();
+ if next.tok == token::Eof {
+ return None;
+ }
+ if next.tok == tok {
+ return self.make_sub_span(span, Some(next.sp));
+ }
+ }
+ }
+
pub fn sub_span_after_keyword(&self,
span: Span,
keyword: keywords::Keyword) -> Option<Span> {
let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
let slice_len = Sub(bcx, len, slice_len_offset);
let slice_ty = ty::mk_slice(bcx.tcx(),
- ty::ReStatic,
+ bcx.tcx().mk_region(ty::ReStatic),
ty::mt {ty: vt.unit_ty, mutbl: ast::MutImmutable});
let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
Store(bcx, slice_begin,
ty::ty_uint(ast::TyU8) => {
// NOTE: cast &[u8] to &str and abuse the str_eq lang item,
// which calls memcmp().
- let t = ty::mk_str_slice(cx.tcx(), ty::ReStatic, ast::MutImmutable);
+ let t = ty::mk_str_slice(cx.tcx(),
+ cx.tcx().mk_region(ty::ReStatic),
+ ast::MutImmutable);
let lhs = BitCast(cx, lhs, type_of::type_of(cx.ccx(), t).ptr_to());
let rhs = BitCast(cx, rhs, type_of::type_of(cx.ccx(), t).ptr_to());
compare_str(cx, lhs, rhs, rhs_t)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::store_for_loop_binding");
- if simple_identifier(&*pat).is_some() {
+ if simple_identifier(&*pat).is_some() &&
+ bcx.sess().opts.debuginfo != FullDebugInfo {
// Generate nicer LLVM for the common case of a `for` loop pattern
// like `for x in blahblah { ... }`.
let binding_type = node_id_type(bcx, pat.id);
#![allow(unsigned_negation)]
-pub use self::PointerField::*;
pub use self::Repr::*;
use std::num::Int;
use std::rc::Rc;
use llvm::{ValueRef, True, IntEQ, IntNE};
-use back::abi;
+use back::abi::FAT_PTR_ADDR;
use middle::subst;
use middle::subst::Subst;
use trans::_match;
type Hint = attr::ReprAttr;
-
/// Representations.
#[deriving(Eq, PartialEq, Show)]
pub enum Repr<'tcx> {
nullfields: Vec<Ty<'tcx>>
},
/// Two cases distinguished by a nullable pointer: the case with discriminant
- /// `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th
+ /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
/// field is known to be nonnull due to its type; if that field is null, then
/// it represents the other case, which is inhabited by at most one value
/// (and all other fields are undefined/unused).
StructWrappedNullablePointer {
nonnull: Struct<'tcx>,
nndiscr: Disr,
- ptrfield: PointerField,
+ discrfield: DiscrField,
nullfields: Vec<Ty<'tcx>>,
}
}
ty::ty_tup(ref elems) => {
Univariant(mk_struct(cx, elems[], false, t), false)
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
let mut ftys = fields.iter().map(|field| {
ty::lookup_field_type(cx.tcx(), def_id, field.id, substs)
Univariant(mk_struct(cx, ftys[], packed, t), dtor)
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
Univariant(mk_struct(cx, upvar_types[], false, t), false)
}
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
let cases = get_cases(cx.tcx(), def_id, substs);
let hint = *ty::lookup_repr_hints(cx.tcx(), def_id)[].get(0)
.unwrap_or(&attr::ReprAny);
let st = mk_struct(cx, cases[discr].tys[],
false, t);
match cases[discr].find_ptr(cx) {
- Some(ThinPointer(_)) if st.fields.len() == 1 => {
+ Some(ref df) if df.len() == 1 && st.fields.len() == 1 => {
return RawNullablePointer {
nndiscr: discr as Disr,
nnty: st.fields[0],
nullfields: cases[1 - discr].tys.clone()
};
}
- Some(ptrfield) => {
+ Some(mut discrfield) => {
+ discrfield.push(0);
+ discrfield.reverse();
return StructWrappedNullablePointer {
nndiscr: discr as Disr,
nonnull: st,
- ptrfield: ptrfield,
+ discrfield: discrfield,
nullfields: cases[1 - discr].tys.clone()
};
}
- None => { }
+ None => {}
}
}
discr += 1;
tys: Vec<Ty<'tcx>>
}
+/// This represents the (GEP) indices to follow to get to the discriminant field
+pub type DiscrField = Vec<uint>;
+
+fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>,
+ ty: Ty<'tcx>,
+ mut path: DiscrField) -> Option<DiscrField> {
+ match ty.sty {
+ // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
+ ty::ty_rptr(_, ty::mt { ty, .. }) | ty::ty_uniq(ty) if !ty::type_is_sized(tcx, ty) => {
+ path.push(FAT_PTR_ADDR);
+ Some(path)
+ },
-#[deriving(Copy, Eq, PartialEq, Show)]
-pub enum PointerField {
- ThinPointer(uint),
- FatPointer(uint)
-}
+ // Regular thin pointer: &T/&mut T/Box<T>
+ ty::ty_rptr(..) | ty::ty_uniq(..) => Some(path),
-impl<'tcx> Case<'tcx> {
- fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>)
- -> bool {
- mk_struct(cx, self.tys[], false, scapegoat).size == 0
- }
+ // Functions are just pointers
+ ty::ty_bare_fn(..) => Some(path),
- fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<PointerField> {
- for (i, &ty) in self.tys.iter().enumerate() {
- match ty.sty {
- // &T/&mut T/Box<T> could either be a thin or fat pointer depending on T
- ty::ty_rptr(_, ty::mt { ty, .. }) | ty::ty_uniq(ty) => match ty.sty {
- // &[T] and &str are a pointer and length pair
- ty::ty_vec(_, None) | ty::ty_str => return Some(FatPointer(i)),
+ // Closures are a pair of pointers: the code and environment
+ ty::ty_closure(..) => {
+ path.push(FAT_PTR_ADDR);
+ Some(path)
+ },
+
+ // Is this the NonZero lang item wrapping a pointer or integer type?
+ ty::ty_struct(did, substs) if Some(did) == tcx.lang_items.non_zero() => {
+ let nonzero_fields = ty::lookup_struct_fields(tcx, did);
+ assert_eq!(nonzero_fields.len(), 1);
+ let nonzero_field = ty::lookup_field_type(tcx, did, nonzero_fields[0].id, substs);
+ match nonzero_field.sty {
+ ty::ty_ptr(..) | ty::ty_int(..) | ty::ty_uint(..) => {
+ path.push(0);
+ Some(path)
+ },
+ _ => None
+ }
+ },
- // &Trait is a pair of pointers: the actual object and a vtable
- ty::ty_trait(..) => return Some(FatPointer(i)),
+ // Perhaps one of the fields of this struct is non-zero
+ // let's recurse and find out
+ ty::ty_struct(def_id, substs) => {
+ let fields = ty::lookup_struct_fields(tcx, def_id);
+ for (j, field) in fields.iter().enumerate() {
+ let field_ty = ty::lookup_field_type(tcx, def_id, field.id, substs);
+ if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
+ fpath.push(j);
+ return Some(fpath);
+ }
+ }
+ None
+ },
- ty::ty_struct(..) if !ty::type_is_sized(cx.tcx(), ty) => {
- return Some(FatPointer(i))
- }
+ // Can we use one of the fields in this tuple?
+ ty::ty_tup(ref tys) => {
+ for (j, &ty) in tys.iter().enumerate() {
+ if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
+ fpath.push(j);
+ return Some(fpath);
+ }
+ }
+ None
+ },
- // Any other &T is just a pointer
- _ => return Some(ThinPointer(i))
- },
+ // Is this a fixed-size array of something non-zero
+ // with at least one element?
+ ty::ty_vec(ety, Some(d)) if d > 0 => {
+ if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) {
+ vpath.push(0);
+ Some(vpath)
+ } else {
+ None
+ }
+ },
- // Functions are just pointers
- ty::ty_bare_fn(..) => return Some(ThinPointer(i)),
+ // Anything else is not a pointer
+ _ => None
+ }
+}
- // Closures are a pair of pointers: the code and environment
- ty::ty_closure(..) => return Some(FatPointer(i)),
+impl<'tcx> Case<'tcx> {
+ fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
+ mk_struct(cx, self.tys[], false, scapegoat).size == 0
+ }
- // Anything else is not a pointer
- _ => continue
+ fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> {
+ for (i, &ty) in self.tys.iter().enumerate() {
+ if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) {
+ path.push(i);
+ return Some(path);
}
}
-
None
}
}
val = ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty));
signed = false;
}
- StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
- val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, ptrfield, scrutinee);
+ StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
+ val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee);
signed = false;
}
}
}
}
-fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField,
+fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
scrutinee: ValueRef) -> ValueRef {
- let llptrptr = match ptrfield {
- ThinPointer(field) => GEPi(bcx, scrutinee, &[0, field]),
- FatPointer(field) => GEPi(bcx, scrutinee, &[0, field, abi::FAT_PTR_ADDR])
- };
+ let llptrptr = GEPi(bcx, scrutinee, discrfield[]);
let llptr = Load(bcx, llptrptr);
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)))
Store(bcx, C_null(llptrty), val)
}
}
- StructWrappedNullablePointer { ref nonnull, nndiscr, ptrfield, .. } => {
+ StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
if discr != nndiscr {
- let (llptrptr, llptrty) = match ptrfield {
- ThinPointer(field) =>
- (GEPi(bcx, val, &[0, field]),
- type_of::type_of(bcx.ccx(), nonnull.fields[field])),
- FatPointer(field) => {
- let v = GEPi(bcx, val, &[0, field, abi::FAT_PTR_ADDR]);
- (v, val_ty(v).element_type())
- }
- };
+ let llptrptr = GEPi(bcx, val, discrfield[]);
+ let llptrty = val_ty(llptrptr).element_type();
Store(bcx, C_null(llptrty), llptrptr)
}
}
false)
} else {
let vals = nonnull.fields.iter().map(|&ty| {
- // Always use null even if it's not the `ptrfield`th
+ // Always use null even if it's not the `discrfield`th
// field; see #8506.
C_null(type_of::sizing_type_of(ccx, ty))
}).collect::<Vec<ValueRef>>();
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
-/// Get the discriminant of a constant value. (Not currently used.)
-pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)
- -> Disr {
+/// Get the discriminant of a constant value.
+pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr {
match *r {
CEnum(ity, _, _) => {
match ity {
}
}
Univariant(..) => 0,
- RawNullablePointer { nndiscr, .. } => {
- if is_null(val) {
- /* subtraction as uint is ok because nndiscr is either 0 or 1 */
- (1 - nndiscr) as Disr
- } else {
- nndiscr
- }
- }
- StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
- let (idx, sub_idx) = match ptrfield {
- ThinPointer(field) => (field, None),
- FatPointer(field) => (field, Some(abi::FAT_PTR_ADDR))
- };
- if is_null(const_struct_field(ccx, val, idx, sub_idx)) {
- /* subtraction as uint is ok because nndiscr is either 0 or 1 */
- (1 - nndiscr) as Disr
- } else {
- nndiscr
- }
+ RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
+ ccx.sess().bug("const discrim access of non c-like enum")
}
}
}
_discr: Disr, ix: uint) -> ValueRef {
match *r {
CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
- Univariant(..) => const_struct_field(ccx, val, ix, None),
- General(..) => const_struct_field(ccx, val, ix + 1, None),
+ Univariant(..) => const_struct_field(ccx, val, ix),
+ General(..) => const_struct_field(ccx, val, ix + 1),
RawNullablePointer { .. } => {
assert_eq!(ix, 0);
val
- }
- StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix, None)
+ },
+ StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix)
}
}
/// Extract field of struct-like const, skipping our alignment padding.
-fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint, sub_idx: Option<uint>)
- -> ValueRef {
+fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint) -> ValueRef {
// Get the ix-th non-undef element of the struct.
let mut real_ix = 0; // actual position in the struct
let mut ix = ix; // logical index relative to real_ix
let mut field;
loop {
loop {
- field = match sub_idx {
- Some(si) => const_get_elt(ccx, val, &[real_ix, si as u32]),
- None => const_get_elt(ccx, val, &[real_ix])
- };
+ field = const_get_elt(ccx, val, &[real_ix]);
if !is_undef(field) {
break;
}
let unboxed_closure = &(*unboxed_closures)[closure_id];
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
+ ty::mk_imm_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
+ ty::mk_mut_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnOnceUnboxedClosureKind => fn_ty
}
ty::ty_closure(ref f) => {
(f.sig.0.inputs.clone(), f.sig.0.output, f.abi, Some(Type::i8p(ccx)))
}
- ty::ty_unboxed_closure(closure_did, _, ref substs) => {
+ ty::ty_unboxed_closure(closure_did, _, substs) => {
let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = &(*unboxed_closures)[closure_did];
let function_type = unboxed_closure.closure_type.clone();
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
- let ref substs = subst::Substs::erased(substs.types.clone());
+ let substs = subst::Substs::erased(substs.types.clone());
- let (val, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
+ let (val, _) = monomorphize::monomorphic_fn(ccx, did, &substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
}
})
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
let repr = adt::represent_type(cx.ccx(), t);
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
for (i, upvar) in upvars.iter().enumerate() {
cx = f(cx, llfld_a, *arg);
}
}
- ty::ty_enum(tid, ref substs) => {
+ ty::ty_enum(tid, substs) => {
let fcx = cx.fcx;
let ccx = fcx.ccx;
llfn: ValueRef,
llargs: &[ValueRef],
fn_ty: Ty<'tcx>,
- call_info: Option<NodeInfo>,
- // FIXME(15064) is_lang_item is a horrible hack, please remove it
- // at the soonest opportunity.
- is_lang_item: bool)
+ call_info: Option<NodeInfo>)
-> (ValueRef, Block<'blk, 'tcx>) {
let _icx = push_ctxt("invoke_");
if bcx.unreachable.get() {
return (C_null(Type::i8(bcx.ccx())), bcx);
}
- // FIXME(15064) Lang item methods may (in the reflect case) not have proper
- // types, so doing an attribute lookup will fail.
- let attributes = if is_lang_item {
- llvm::AttrBuilder::new()
- } else {
- get_fn_llvm_attributes(bcx.ccx(), fn_ty)
- };
+ let attributes = get_fn_llvm_attributes(bcx.ccx(), fn_ty);
match bcx.opt_node_id {
None => {
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
+ let print_info = ccx.sess().print_enum_sizes();
+
let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
- let lvlsrc = match levels.get(&(id, lint_id)) {
- None | Some(&(lint::Allow, _)) => return,
- Some(&lvlsrc) => lvlsrc,
- };
+ let lvlsrc = levels.get(&(id, lint_id));
+ let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
- let avar = adt::represent_type(ccx, ty::node_id_to_type(ccx.tcx(), id));
+ if is_allow && !print_info {
+ // we're not interested in anything here
+ return
+ }
+
+ let ty = ty::node_id_to_type(ccx.tcx(), id);
+ let avar = adt::represent_type(ccx, ty);
match *avar {
adt::General(_, ref variants, _) => {
for var in variants.iter() {
}
);
+ if print_info {
+ let llty = type_of::sizing_type_of(ccx, ty);
+
+ let sess = &ccx.tcx().sess;
+ sess.span_note(sp, &*format!("total size: {} bytes", llsize_of_real(ccx, llty)));
+ match *avar {
+ adt::General(..) => {
+ for (i, var) in enum_def.variants.iter().enumerate() {
+ ccx.tcx().sess.span_note(var.span,
+ &*format!("variant data: {} bytes", sizes[i]));
+ }
+ }
+ _ => {}
+ }
+ }
+
// we only warn if the largest variant is at least thrice as large as
// the second-largest.
- if largest > slargest * 3 && slargest > 0 {
+ if !is_allow && largest > slargest * 3 && slargest > 0 {
// Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
// pass for the latter already ran.
lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
- lvlsrc, Some(sp),
+ *lvlsrc.unwrap(), Some(sp),
format!("enum variant is more than three times larger \
({} bytes) than the next largest (ignoring padding)",
largest)[]);
ast::ItemMod(ref m) => {
trans_mod(&ccx.rotate(), m);
}
- ast::ItemEnum(ref enum_definition, _) => {
- enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+ ast::ItemEnum(ref enum_definition, ref gens) => {
+ if gens.ty_params.is_empty() {
+ // sizes only make sense for non-generic types
+
+ enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+ }
}
ast::ItemConst(_, ref expr) => {
// Recurse on the expression to catch items in blocks
let (fn_sig, abi, has_env) = match fn_ty.sty {
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(_, ref f) => (f.sig.clone(), f.abi, false),
- ty::ty_unboxed_closure(closure_did, _, ref substs) => {
+ ty::ty_unboxed_closure(closure_did, _, substs) => {
let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = (*unboxed_closures)[closure_did]
.closure_type;
attrs.arg(idx, llvm::ReadOnlyAttribute);
}
- if let ReLateBound(_, BrAnon(_)) = b {
+ if let ReLateBound(_, BrAnon(_)) = *b {
attrs.arg(idx, llvm::NoCaptureAttribute);
}
}
// When a reference in an argument has no named lifetime, it's impossible for that
// reference to escape this function (returned or stored beyond the call by a closure).
- ty::ty_rptr(ReLateBound(_, BrAnon(_)), mt) => {
+ ty::ty_rptr(&ReLateBound(_, BrAnon(_)), mt) => {
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
attrs.arg(idx, llvm::NoCaptureAttribute)
.arg(idx, llvm::DereferenceableAttribute(llsz));
debug!("Store {} -> {}",
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
- assert!(self.llbuilder.is_not_null());
+ assert!(!self.llbuilder.is_null());
self.count_insn("store");
unsafe {
llvm::LLVMBuildStore(self.llbuilder, val, ptr);
debug!("Store {} -> {}",
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
- assert!(self.llbuilder.is_not_null());
+ assert!(!self.llbuilder.is_null());
self.count_insn("store.volatile");
unsafe {
let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
bare_fn_ty.repr(tcx));
// This is an impl of `Fn` trait, so receiver is `&self`.
- let bare_fn_ty_ref = ty::mk_imm_rptr(tcx, ty::ReStatic, bare_fn_ty);
+ let bare_fn_ty_ref = ty::mk_imm_rptr(tcx, tcx.mk_region(ty::ReStatic), bare_fn_ty);
// Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`,
// which is the fn pointer, and `args`, which is the arguments tuple.
let (opt_def_id, input_tys, output_ty) =
match bare_fn_ty.sty {
ty::ty_bare_fn(opt_def_id,
- ty::BareFnTy { unsafety: ast::Unsafety::Normal,
+ &ty::BareFnTy { unsafety: ast::Unsafety::Normal,
abi: synabi::Rust,
sig: ty::Binder(ty::FnSig { inputs: ref input_tys,
output: output_ty,
let tuple_input_ty = ty::mk_tup(tcx, input_tys.to_vec());
let tuple_fn_ty = ty::mk_bare_fn(tcx,
opt_def_id,
- ty::BareFnTy { unsafety: ast::Unsafety::Normal,
- abi: synabi::RustCall,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![bare_fn_ty_ref,
- tuple_input_ty],
- output: output_ty,
- variadic: false
- })});
+ tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: synabi::RustCall,
+ sig: ty::Binder(ty::FnSig {
+ inputs: vec![bare_fn_ty_ref,
+ tuple_input_ty],
+ output: output_ty,
+ variadic: false
+ })}));
debug!("tuple_fn_ty: {}", tuple_fn_ty.repr(tcx));
//
llfn,
llargs[],
callee_ty,
- call_info,
- dest.is_none());
+ call_info);
bcx = b;
llresult = llret;
use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
use trans::debuginfo;
use trans::glue;
-use middle::region;
+// Temporary due to slicing syntax hacks (KILLME)
+//use middle::region;
use trans::type_::Type;
use middle::ty::{mod, Ty};
use std::fmt;
// excluding id's that correspond to closure bodies only). For
// now we just say that if there is already an AST scope on the stack,
// this new AST scope had better be its immediate child.
- let top_scope = self.top_ast_scope();
+ // Temporarily removed due to slicing syntax hacks (KILLME).
+ /*let top_scope = self.top_ast_scope();
if top_scope.is_some() {
assert_eq!(self.ccx
.tcx()
.opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
.map(|s|s.node_id()),
top_scope);
- }
+ }*/
self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
Some(debug_loc)));
fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let fields = ty::struct_fields(ccx.tcx(), def_id, substs);
fields.len() == 1 &&
fields[0].name ==
self.tcx()
}
- fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
- Ok(node_id_type(self, id))
+ fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
+ node_id_type(self, id)
+ }
+
+ fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ expr_ty_adjusted(self, expr)
}
fn node_method_ty(&self, method_call: ty::MethodCall) -> Option<Ty<'tcx>> {
.map(|method| monomorphize_type(self, method.ty))
}
+ fn node_method_origin(&self, method_call: ty::MethodCall)
+ -> Option<ty::MethodOrigin<'tcx>>
+ {
+ self.tcx()
+ .method_map
+ .borrow()
+ .get(&method_call)
+ .map(|method| method.origin.clone())
+ }
+
fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
&self.tcx().adjustments
}
}
}
+#[allow(dead_code)] // potentially useful
pub fn is_null(val: ValueRef) -> bool {
unsafe {
llvm::LLVMIsNull(val) != False
monomorphize_type(bcx, t)
}
-pub fn expr_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> {
+pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> {
node_id_type(bcx, ex.id)
}
-pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> {
+pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &ast::Expr) -> Ty<'tcx> {
monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
}
debug!("iterator type is {}, datum type is {}",
ppaux::ty_to_string(bcx.tcx(), iterator_type),
ppaux::ty_to_string(bcx.tcx(), iterator_datum.ty));
+
let lliterator = load_ty(bcx, iterator_datum.val, iterator_datum.ty);
// Create our basic blocks and set up our loop cleanups.
llpayload,
binding_cleanup_scope_id);
+ debuginfo::create_for_loop_var_metadata(body_bcx_in, pat);
+
// Codegen the body.
body_bcx_out = trans_block(body_bcx_out, body, expr::Ignore);
body_bcx_out =
//! comparatively expensive to construct, though, `ty::type_id()` is still used
//! additionally as an optimization for cases where the exact same type has been
//! seen before (which is most of the time).
-use self::FunctionDebugContextRepr::*;
use self::VariableAccess::*;
use self::VariableKind::*;
use self::MemberOffset::*;
use syntax::ast_util::PostExpansionMethod;
use syntax::parse::token::{mod, special_idents};
-static DW_LANG_RUST: c_uint = 0x9000;
+const DW_LANG_RUST: c_uint = 0x9000;
#[allow(non_upper_case_globals)]
-static DW_TAG_auto_variable: c_uint = 0x100;
+const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
-static DW_TAG_arg_variable: c_uint = 0x101;
+const DW_TAG_arg_variable: c_uint = 0x101;
#[allow(non_upper_case_globals)]
-static DW_ATE_boolean: c_uint = 0x02;
+const DW_ATE_boolean: c_uint = 0x02;
#[allow(non_upper_case_globals)]
-static DW_ATE_float: c_uint = 0x04;
+const DW_ATE_float: c_uint = 0x04;
#[allow(non_upper_case_globals)]
-static DW_ATE_signed: c_uint = 0x05;
+const DW_ATE_signed: c_uint = 0x05;
#[allow(non_upper_case_globals)]
-static DW_ATE_unsigned: c_uint = 0x07;
+const DW_ATE_unsigned: c_uint = 0x07;
#[allow(non_upper_case_globals)]
-static DW_ATE_unsigned_char: c_uint = 0x08;
+const DW_ATE_unsigned_char: c_uint = 0x08;
-static UNKNOWN_LINE_NUMBER: c_uint = 0;
-static UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+const UNKNOWN_LINE_NUMBER: c_uint = 0;
+const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
// ptr::null() doesn't work :(
-static UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
-static UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
+const UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
+const UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
-static FLAGS_NONE: c_uint = 0;
+const FLAGS_NONE: c_uint = 0;
//=-----------------------------------------------------------------------------
// Public Interface of debuginfo module
ty::ty_float(_) => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
unique_type_id.push_str("enum ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
unique_type_id.push_str("struct ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
trait_data.principal.substs(),
&mut unique_type_id);
},
- ty::ty_bare_fn(_, ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+ ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
unique_type_id.push_str("unsafe ");
}
closure_ty.clone(),
&mut unique_type_id);
},
- ty::ty_unboxed_closure(ref def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(ref def_id, _, substs) => {
let closure_ty = cx.tcx().unboxed_closures.borrow()
.get(def_id).unwrap().closure_type.subst(cx.tcx(), substs);
self.get_unique_type_id_of_closure_type(cx,
}
}
-pub struct FunctionDebugContext {
- repr: FunctionDebugContextRepr,
-}
-
-enum FunctionDebugContextRepr {
- DebugInfo(Box<FunctionDebugContextData>),
+pub enum FunctionDebugContext {
+ RegularContext(Box<FunctionDebugContextData>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
cx: &CrateContext,
span: Span)
-> &'a FunctionDebugContextData {
- match self.repr {
- DebugInfo(box ref data) => data,
- DebugInfoDisabled => {
+ match *self {
+ FunctionDebugContext::RegularContext(box ref data) => data,
+ FunctionDebugContext::DebugInfoDisabled => {
cx.sess().span_bug(span,
FunctionDebugContext::debuginfo_disabled_message());
}
- FunctionWithoutDebugInfo => {
+ FunctionDebugContext::FunctionWithoutDebugInfo => {
cx.sess().span_bug(span,
FunctionDebugContext::should_be_ignored_message());
}
/// Creates debug information for the given local variable.
///
+/// This function assumes that there's a datum for each pattern component of the
+/// local in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) {
if fn_should_be_ignored(bcx.fcx) {
let cx = bcx.ccx();
let def_map = &cx.tcx().def_map;
+ let locals = bcx.fcx.lllocals.borrow();
- pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path1| {
- let var_ident = path1.node;
-
- let datum = match bcx.fcx.lllocals.borrow().get(&node_id).cloned() {
+ pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, var_ident| {
+ let datum = match locals.get(&node_id) {
Some(datum) => datum,
None => {
bcx.sess().span_bug(span,
}
};
+ if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
+ cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \
+ Referenced variable location is not an alloca!");
+ }
+
let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
declare_local(bcx,
- var_ident,
+ var_ident.node,
datum.ty,
scope_metadata,
DirectVariable { alloca: datum.val },
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
// actually have `T**`. So to get the actual variable we need to dereference once
// more. For ByCopy we just use the stack slot we created for the binding.
- let var_type = match binding.trmode {
+ let var_access = match binding.trmode {
TrByCopy(llbinding) => DirectVariable {
alloca: llbinding
},
variable_ident,
binding.ty,
scope_metadata,
- var_type,
+ var_access,
LocalVariable,
binding.span);
}
/// Creates debug information for the given function argument.
///
+/// This function assumes that there's a datum for each pattern component of the
+/// argument in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) {
if fn_should_be_ignored(bcx.fcx) {
return;
}
- let fcx = bcx.fcx;
- let cx = fcx.ccx;
+ let def_map = &bcx.tcx().def_map;
+ let scope_metadata = bcx
+ .fcx
+ .debug_context
+ .get_ref(bcx.ccx(), arg.pat.span)
+ .fn_metadata;
+ let locals = bcx.fcx.lllocals.borrow();
- let def_map = &cx.tcx().def_map;
- let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
-
- pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
- let llarg = match bcx.fcx.lllocals.borrow().get(&node_id).cloned() {
+ pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, var_ident| {
+ let datum = match locals.get(&node_id) {
Some(v) => v,
None => {
bcx.sess().span_bug(span,
}
};
- if unsafe { llvm::LLVMIsAAllocaInst(llarg.val) } == ptr::null_mut() {
- cx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \
- Referenced variable location is not an alloca!");
+ if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
+ bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \
+ Referenced variable location is not an alloca!");
}
let argument_index = {
- let counter = &fcx.debug_context.get_ref(cx, span).argument_counter;
+ let counter = &bcx
+ .fcx
+ .debug_context
+ .get_ref(bcx.ccx(), span)
+ .argument_counter;
let argument_index = counter.get();
counter.set(argument_index + 1);
argument_index
};
declare_local(bcx,
- path1.node,
- llarg.ty,
+ var_ident.node,
+ datum.ty,
scope_metadata,
- DirectVariable { alloca: llarg.val },
+ DirectVariable { alloca: datum.val },
ArgumentVariable(argument_index),
span);
})
}
+/// Creates debug information for the given for-loop variable.
+///
+/// This function assumes that there's a datum for each pattern component of the
+/// loop variable in `bcx.fcx.lllocals`.
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_for_loop_var_metadata(bcx: Block, pat: &ast::Pat) {
+ if fn_should_be_ignored(bcx.fcx) {
+ return;
+ }
+
+ let def_map = &bcx.tcx().def_map;
+ let locals = bcx.fcx.lllocals.borrow();
+
+ pat_util::pat_bindings(def_map, pat, |_, node_id, span, var_ident| {
+ let datum = match locals.get(&node_id) {
+ Some(datum) => datum,
+ None => {
+ bcx.sess().span_bug(span,
+ format!("no entry in lllocals table for {}",
+ node_id).as_slice());
+ }
+ };
+
+ if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
+ bcx.sess().span_bug(span, "debuginfo::create_for_loop_var_metadata() - \
+ Referenced variable location is not an alloca!");
+ }
+
+ let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
+
+ declare_local(bcx,
+ var_ident.node,
+ datum.ty,
+ scope_metadata,
+ DirectVariable { alloca: datum.val },
+ LocalVariable,
+ span);
+ })
+}
+
pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
node_id: ast::NodeId,
node_span: Span,
pub fn set_source_location(fcx: &FunctionContext,
node_id: ast::NodeId,
span: Span) {
- match fcx.debug_context.repr {
- DebugInfoDisabled => return,
- FunctionWithoutDebugInfo => {
+ match fcx.debug_context {
+ FunctionDebugContext::DebugInfoDisabled => return,
+ FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, UnknownLocation);
return;
}
- DebugInfo(box ref function_debug_context) => {
+ FunctionDebugContext::RegularContext(box ref function_debug_context) => {
let cx = fcx.ccx;
debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
- match fcx.debug_context.repr {
- DebugInfo(box ref data) => {
+ match fcx.debug_context {
+ FunctionDebugContext::RegularContext(box ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
param_substs: &Substs<'tcx>,
llfn: ValueRef) -> FunctionDebugContext {
if cx.sess().opts.debuginfo == NoDebugInfo {
- return FunctionDebugContext { repr: DebugInfoDisabled };
+ return FunctionDebugContext::DebugInfoDisabled;
}
// Clear the debug location so we don't assign them in the function prelude.
if fn_ast_id == ast::DUMMY_NODE_ID {
// This is a function not linked to any source location, so don't
// generate debuginfo for it.
- return FunctionDebugContext { repr: FunctionWithoutDebugInfo };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
let empty_generics = ast_util::empty_generics();
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
if contains_nodebug_attribute(item.attrs.as_slice()) {
- return FunctionDebugContext { repr: FunctionWithoutDebugInfo };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
match item.node {
match **item {
ast::MethodImplItem(ref method) => {
if contains_nodebug_attribute(method.attrs.as_slice()) {
- return FunctionDebugContext {
- repr: FunctionWithoutDebugInfo
- };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
(method.pe_ident(),
match **trait_method {
ast::ProvidedMethod(ref method) => {
if contains_nodebug_attribute(method.attrs.as_slice()) {
- return FunctionDebugContext {
- repr: FunctionWithoutDebugInfo
- };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
(method.pe_ident(),
ast_map::NodeForeignItem(..) |
ast_map::NodeVariant(..) |
ast_map::NodeStructCtor(..) => {
- return FunctionDebugContext { repr: FunctionWithoutDebugInfo };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
_ => cx.sess().bug(format!("create_function_debug_context: \
unexpected sort of node: {}",
// This can be the case for functions inlined from another crate
if span == codemap::DUMMY_SP {
- return FunctionDebugContext { repr: FunctionWithoutDebugInfo };
+ return FunctionDebugContext::FunctionWithoutDebugInfo;
}
let loc = span_start(cx, span);
})
});
+ let scope_map = create_scope_map(cx,
+ fn_decl.inputs.as_slice(),
+ &*top_level_block,
+ fn_metadata,
+ fn_ast_id);
+
// Initialize fn debug context (including scope map and namespace map)
let fn_debug_context = box FunctionDebugContextData {
- scope_map: RefCell::new(NodeMap::new()),
+ scope_map: RefCell::new(scope_map),
fn_metadata: fn_metadata,
argument_counter: Cell::new(1),
source_locations_enabled: Cell::new(false),
};
- populate_scope_map(cx,
- fn_decl.inputs.as_slice(),
- &*top_level_block,
- fn_metadata,
- fn_ast_id,
- &mut *fn_debug_context.scope_map.borrow_mut());
- return FunctionDebugContext { repr: DebugInfo(fn_debug_context) };
+
+ return FunctionDebugContext::RegularContext(fn_debug_context);
fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
fn_ast_id: ast::NodeId,
},
adt::StructWrappedNullablePointer { nonnull: ref struct_def,
nndiscr,
- ptrfield, ..} => {
+ ref discrfield, ..} => {
// Create a description of the non-null variant
let (variant_type_metadata, variant_llvm_type, member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[nndiscr as uint],
- OptimizedDiscriminant(ptrfield),
+ OptimizedDiscriminant,
self.containing_scope,
self.span);
// member's name.
let null_variant_index = (1 - nndiscr) as uint;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
- let discrfield = match ptrfield {
- adt::ThinPointer(field) => format!("{}", field),
- adt::FatPointer(field) => format!("{}", field)
- };
+ let discrfield = discrfield.iter()
+ .skip(1)
+ .map(|x| x.to_string())
+ .collect::<Vec<_>>().connect("$");
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
discrfield,
null_variant_name);
#[deriving(Copy)]
enum EnumDiscriminantInfo {
RegularDiscriminant(DIType),
- OptimizedDiscriminant(adt::PointerField),
+ OptimizedDiscriminant,
NoDiscriminant
}
ty::ty_closure(ref closurety) => {
subroutine_type_metadata(cx, unique_type_id, &closurety.sig, usage_site_span)
}
- ty::ty_unboxed_closure(ref def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(ref def_id, _, substs) => {
let sig = cx.tcx().unboxed_closures.borrow()
.get(def_id).unwrap().closure_type.sig.subst(cx.tcx(), substs);
subroutine_type_metadata(cx, unique_type_id, &sig, usage_site_span)
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
prepare_struct_metadata(cx,
t,
def_id,
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
- match fcx.debug_context.repr {
- DebugInfo(_) => false,
+ match fcx.debug_context {
+ FunctionDebugContext::RegularContext(_) => false,
_ => true
}
}
// what belongs to which scope, creating DIScope DIEs along the way, and
// introducing *artificial* lexical scope descriptors where necessary. These
// artificial scopes allow GDB to correctly handle name shadowing.
-fn populate_scope_map(cx: &CrateContext,
- args: &[ast::Arg],
- fn_entry_block: &ast::Block,
- fn_metadata: DISubprogram,
- fn_ast_id: ast::NodeId,
- scope_map: &mut NodeMap<DIScope>) {
+fn create_scope_map(cx: &CrateContext,
+ args: &[ast::Arg],
+ fn_entry_block: &ast::Block,
+ fn_metadata: DISubprogram,
+ fn_ast_id: ast::NodeId)
+ -> NodeMap<DIScope> {
+ let mut scope_map = NodeMap::new();
+
let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
with_new_scope(cx,
fn_entry_block.span,
&mut scope_stack,
- scope_map,
+ &mut scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, fn_entry_block, scope_stack, scope_map);
});
+ return scope_map;
+
+
// local helper functions for walking the AST.
fn with_new_scope<F>(cx: &CrateContext,
scope_span: Span,
}
ast::PatMac(_) => {
- cx.sess().span_bug(pat.span, "debuginfo::populate_scope_map() - \
+ cx.sess().span_bug(pat.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
}
}
ast::ExprAssignOp(_, ref lhs, ref rhs) |
- ast::ExprIndex(ref lhs, ref rhs) |
+ ast::ExprIndex(ref lhs, ref rhs) |
ast::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &**lhs, scope_stack, scope_map);
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- walk_expr(cx, &**base, scope_stack, scope_map);
- start.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
- end.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
- }
-
ast::ExprRange(ref start, ref end) => {
- walk_expr(cx, &**start, scope_stack, scope_map);
+ start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
}
}
ast::ExprIfLet(..) => {
- cx.sess().span_bug(exp.span, "debuginfo::populate_scope_map() - \
+ cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded if-let.");
}
}
ast::ExprWhileLet(..) => {
- cx.sess().span_bug(exp.span, "debuginfo::populate_scope_map() - \
+ cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded while-let.");
}
}
ast::ExprMac(_) => {
- cx.sess().span_bug(exp.span, "debuginfo::populate_scope_map() - \
+ cx.sess().span_bug(exp.span, "debuginfo::create_scope_map() - \
Found unexpanded macro.");
}
ty::ty_uint(ast::TyU64) => output.push_str("u64"),
ty::ty_float(ast::TyF32) => output.push_str("f32"),
ty::ty_float(ast::TyF64) => output.push_str("f64"),
- ty::ty_struct(def_id, ref substs) |
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) |
+ ty::ty_enum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
push_item_name(cx, trait_data.principal.def_id(), false, output);
push_type_params(cx, trait_data.principal.substs(), output);
},
- ty::ty_bare_fn(_, ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+ ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
let substs = principal.substs().with_self_ty(unadjusted_ty).erase_regions();
let trait_ref =
Rc::new(ty::Binder(ty::TraitRef { def_id: principal.def_id(),
- substs: substs }));
+ substs: bcx.tcx().mk_substs(substs) }));
let trait_ref = trait_ref.subst(bcx.tcx(), bcx.fcx.param_substs);
let box_ty = mk_ty(unadjusted_ty);
PointerCast(bcx,
expr.id,
datum_ty,
|t| ty::mk_rptr(tcx,
- ty::ReStatic,
+ tcx.mk_region(ty::ReStatic),
ty::mt{
ty: t,
mutbl: ast::MutImmutable
trans_rec_tup_field(bcx, &**base, idx.node)
}
ast::ExprIndex(ref base, ref idx) => {
- trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
- }
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- let _icx = push_ctxt("trans_slice");
- let ccx = bcx.ccx();
-
- let method_call = MethodCall::expr(expr.id);
- let method_ty = ccx.tcx()
- .method_map
- .borrow()
- .get(&method_call)
- .map(|method| method.ty);
- let base_datum = unpack_datum!(bcx, trans(bcx, &**base));
-
- let mut args = vec![];
- start.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
- end.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
-
- let result_ty = ty::ty_fn_ret(monomorphize_type(bcx, method_ty.unwrap())).unwrap();
- let scratch = rvalue_scratch_datum(bcx, result_ty, "trans_slice");
-
- unpack_result!(bcx,
- trans_overloaded_op(bcx,
- expr,
- method_call,
- base_datum,
- args,
- Some(SaveIn(scratch.val)),
- true));
- DatumBlock::new(bcx, scratch.to_expr_datum())
+ match idx.node {
+ ast::ExprRange(ref start, ref end) => {
+ // Special case for slicing syntax (KILLME).
+ let _icx = push_ctxt("trans_slice");
+ let ccx = bcx.ccx();
+
+ let method_call = MethodCall::expr(expr.id);
+ let method_ty = ccx.tcx()
+ .method_map
+ .borrow()
+ .get(&method_call)
+ .map(|method| method.ty);
+ let base_datum = unpack_datum!(bcx, trans(bcx, &**base));
+
+ let mut args = vec![];
+ start.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+ end.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+
+ let result_ty = ty::ty_fn_ret(monomorphize_type(bcx,
+ method_ty.unwrap())).unwrap();
+ let scratch = rvalue_scratch_datum(bcx, result_ty, "trans_slice");
+
+ unpack_result!(bcx,
+ trans_overloaded_op(bcx,
+ expr,
+ method_call,
+ base_datum,
+ args,
+ Some(SaveIn(scratch.val)),
+ true));
+ DatumBlock::new(bcx, scratch.to_expr_datum())
+ }
+ _ => trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
+ }
}
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>`
}
// A range just desugars into a struct.
- let (did, fields) = match end {
- &Some(ref end) => {
+ // Note that the type of the start and end may not be the same, but
+ // they should only differ in their lifetime, which should not matter
+ // in trans.
+ let (did, fields, ty_params) = match (start, end) {
+ (&Some(ref start), &Some(ref end)) => {
// Desugar to Range
- let fields = vec!(make_field("start", start.clone()),
- make_field("end", end.clone()));
- (tcx.lang_items.range_struct(), fields)
+ let fields = vec![make_field("start", start.clone()),
+ make_field("end", end.clone())];
+ (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
}
- &None => {
+ (&Some(ref start), &None) => {
// Desugar to RangeFrom
- let fields = vec!(make_field("start", start.clone()));
- (tcx.lang_items.range_from_struct(), fields)
+ let fields = vec![make_field("start", start.clone())];
+ (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
+ }
+ (&None, &Some(ref end)) => {
+ // Desugar to RangeTo
+ let fields = vec![make_field("end", end.clone())];
+ (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
+ }
+ _ => {
+ // Desugar to FullRange
+ (tcx.lang_items.full_range_struct(), vec![], vec![])
}
};
if let Some(did) = did {
- let substs = Substs::new_type(vec![node_id_type(bcx, start.id)], vec![]);
+ let substs = Substs::new_type(ty_params, vec![]);
trans_struct(bcx,
fields.as_slice(),
None,
expr.span,
expr.id,
- ty::mk_struct(tcx, did, substs),
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs)),
dest)
} else {
tcx.sess.span_bug(expr.span,
F: FnOnce(ty::Disr, &[ty::field<'tcx>]) -> R,
{
match ty.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
op(0, struct_fields(tcx, did, substs)[])
}
op(0, tup_fields(v[])[])
}
- ty::ty_enum(_, ref substs) => {
+ ty::ty_enum(_, substs) => {
// We want the *variant* ID here, not the enum ID.
match node_id_opt {
None => {
// Compute final type. Note that we are loose with the region and
// mutability, since those things don't matter in trans.
let referent_ty = lv_datum.ty;
- let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), ty::ReStatic, referent_ty);
+ let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), referent_ty);
// Get the pointer.
let llref = lv_datum.to_llref();
class_did,
&[get_drop_glue_type(bcx.ccx(), t)],
ty::mk_nil(bcx.tcx()));
- let (_, variant_cx) = invoke(variant_cx, dtor_addr, args[], dtor_ty, None, false);
+ let (_, variant_cx) = invoke(variant_cx, dtor_addr, args[], dtor_ty, None);
variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope);
variant_cx
return (size, align);
}
match t.sty {
- ty::ty_struct(id, ref substs) => {
+ ty::ty_struct(id, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
}
}
}
- ty::ty_struct(did, ref substs) | ty::ty_enum(did, ref substs) => {
+ ty::ty_struct(did, substs) | ty::ty_enum(did, substs) => {
let tcx = bcx.tcx();
match ty::ty_dtor(tcx, did) {
ty::TraitDtor(dtor, true) => {
Vec::new()));
debug!("trait_substs={}", trait_substs.repr(bcx.tcx()));
let trait_ref = Rc::new(ty::Binder(ty::TraitRef { def_id: trait_id,
- substs: trait_substs }));
+ substs: bcx.tcx().mk_substs(trait_substs) }));
let vtbl = fulfill_obligation(bcx.ccx(),
DUMMY_SP,
trait_ref);
m.repr(tcx),
substs.repr(tcx));
if m.generics.has_type_params(subst::FnSpace) ||
- ty::type_has_self(ty::mk_bare_fn(tcx, None, m.fty.clone()))
+ ty::type_has_self(ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(m.fty.clone())))
{
debug!("(making impl vtable) method has self or type \
params: {}",
pub llmod: ModuleRef,
}
+unsafe impl Send for ModuleTranslation { }
+unsafe impl Sync for ModuleTranslation { }
+
pub struct CrateTranslation {
pub modules: Vec<ModuleTranslation>,
pub metadata_module: ModuleTranslation,
pub crate_formats: dependency_format::Dependencies,
pub no_builtins: bool,
}
-
macro_rules! opt_val { ($e:expr) => (
unsafe {
match $e {
- p if p.is_not_null() => Some(Value(p)),
+ p if !p.is_null() => Some(Value(p)),
_ => None
}
}
pub fn get_parent(self) -> Option<BasicBlock> {
unsafe {
match llvm::LLVMGetInstructionParent(self.get()) {
- p if p.is_not_null() => Some(BasicBlock(p)),
+ p if !p.is_null() => Some(BasicBlock(p)),
_ => None
}
}
pub fn get_first_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetFirstUse(self.get()) {
- u if u.is_not_null() => Some(Use(u)),
+ u if !u.is_null() => Some(Use(u)),
_ => None
}
}
/// Tests if this value is a terminator instruction
pub fn is_a_terminator_inst(self) -> bool {
unsafe {
- llvm::LLVMIsATerminatorInst(self.get()).is_not_null()
+ !llvm::LLVMIsATerminatorInst(self.get()).is_null()
}
}
}
pub fn get_next_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetNextUse(self.get()) {
- u if u.is_not_null() => Some(Use(u)),
+ u if !u.is_null() => Some(Use(u)),
_ => None
}
}
regions,
assoc_bindings);
- ty::TraitRef::new(trait_def_id, substs)
+ ty::TraitRef::new(trait_def_id, this.tcx().mk_substs(substs))
}
pub fn ast_path_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region);
debug!("ty_rptr r={}", r.repr(this.tcx()));
let t = ast_ty_to_ty(this, rscope, &*mt.ty);
- ty::mk_rptr(tcx, r, ty::mt {ty: t, mutbl: mt.mutbl})
+ ty::mk_rptr(tcx, tcx.mk_region(r), ty::mt {ty: t, mutbl: mt.mutbl})
}
ast::TyTup(ref fields) => {
let flds = fields.iter()
tcx.sess.span_err(ast_ty.span,
"variadic function must have C calling convention");
}
- ty::mk_bare_fn(tcx, None, ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl))
+ let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl);
+ ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(bare_fn))
}
ast::TyClosure(ref f) => {
// Use corresponding trait store to figure out default bounds
}
ty::ByReferenceExplicitSelfCategory(region, mutability) => {
(Some(ty::mk_rptr(this.tcx(),
- region,
+ this.tcx().mk_region(region),
ty::mt {
ty: self_info.untransformed_self_ty,
mutbl: mutability
ty::ByValueExplicitSelfCategory
} else {
match explicit_type.sty {
- ty::ty_rptr(r, mt) => ty::ByReferenceExplicitSelfCategory(r, mt.mutbl),
+ ty::ty_rptr(r, mt) => ty::ByReferenceExplicitSelfCategory(*r, mt.mutbl),
ty::ty_uniq(_) => ty::ByBoxExplicitSelfCategory,
_ => ty::ByValueExplicitSelfCategory,
}
let mut trait_def_ids = DefIdMap::new();
for ast_bound in ast_bounds.iter() {
match *ast_bound {
- ast::TraitTyParamBound(ref b) => {
+ ast::TraitTyParamBound(ref b, ast::TraitBoundModifier::None) => {
match ::lookup_def_tcx(tcx, b.trait_ref.path.span, b.trait_ref.ref_id) {
def::DefTrait(trait_did) => {
match trait_def_ids.get(&trait_did) {
}
trait_bounds.push(b);
}
+ ast::TraitTyParamBound(_, ast::TraitBoundModifier::Maybe) => {}
ast::RegionTyParamBound(ref l) => {
region_bounds.push(l);
}
// and T is the expected type
let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mt = ty::mt { ty: expected, mutbl: mutbl };
- let region_ty = ty::mk_rptr(tcx, region_var, mt);
+ let region_ty = ty::mk_rptr(tcx, tcx.mk_region(region_var), mt);
demand::eqtype(fcx, pat.span, region_ty, typ);
}
// otherwise the type of x is the expected type T
let mt = ty::mt { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let rptr_ty = ty::mk_rptr(tcx, region, mt);
+ let rptr_ty = ty::mk_rptr(tcx, tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
demand::suptype(fcx, pat.span, expected, rptr_ty);
})),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- ty::mk_slice(tcx, region, ty::mt {
+ ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
ty: inner_ty,
mutbl: ty::deref(expected_ty, true)
.map_or(ast::MutImmutable, |mt| mt.mutbl)
let mutbl = ty::deref(expected_ty, true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
- let slice_ty = ty::mk_slice(tcx, region, ty::mt {
+ let slice_ty = ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
ty: inner_ty,
mutbl: mutbl
});
let real_path_ty = fcx.node_ty(pat.id);
let (arg_tys, kind_name) = match real_path_ty.sty {
- ty::ty_enum(enum_def_id, ref expected_substs)
+ ty::ty_enum(enum_def_id, expected_substs)
if def == def::DefVariant(enum_def_id, def.def_id(), false) => {
let variant = ty::enum_variant_with_id(tcx, enum_def_id, def.def_id());
(variant.args.iter().map(|t| t.subst(tcx, expected_substs)).collect::<Vec<_>>(),
"variant")
}
- ty::ty_struct(struct_def_id, ref expected_substs) => {
+ ty::ty_struct(struct_def_id, expected_substs) => {
let struct_fields = ty::struct_fields(tcx, struct_def_id, expected_substs);
(struct_fields.iter().map(|field| field.mt.ty).collect::<Vec<_>>(),
"struct")
use rscope::RegionScope;
use syntax::abi;
use syntax::ast;
+use syntax::ast::CaptureClause::*;
use syntax::ast_util;
use util::ppaux::Repr;
pub fn check_expr_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>,
expr: &ast::Expr,
+ capture: ast::CaptureClause,
opt_kind: Option<ast::UnboxedClosureKind>,
decl: &ast::FnDecl,
body: &ast::Block,
fcx.infcx(),
expr.span,
&None);
+
check_boxed_closure(fcx,
expr,
ty::RegionTraitStore(region, ast::MutMutable),
decl,
body,
expected);
+
+ match capture {
+ CaptureByValue => {
+ fcx.ccx.tcx.sess.span_err(
+ expr.span,
+ "boxed closures can't capture by value, \
+ if you want to use an unboxed closure, \
+ explicitly annotate its kind: e.g. `move |:|`");
+ },
+ CaptureByRef => {}
+ }
}
Some((sig, kind)) => {
check_unboxed_closure(fcx, expr, kind, decl, body, Some(sig));
let closure_type = ty::mk_unboxed_closure(fcx.ccx.tcx,
expr_def_id,
- region,
- fcx.inh.param_env.free_substs.clone());
+ fcx.ccx.tcx.mk_region(region),
+ fcx.ccx.tcx.mk_substs(
+ fcx.inh.param_env.free_substs.clone()));
fcx.write_ty(expr.id, closure_type);
self.add_obligations(&pick, &method_bounds_substs, &method_bounds);
// Create the final `MethodCallee`.
- let fty = ty::mk_bare_fn(self.tcx(), None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(self.tcx(), None, self.tcx().mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(method_sig),
unsafety: pick.method_ty.fty.unsafety,
abi: pick.method_ty.fty.abi.clone(),
- });
+ }));
let callee = MethodCallee {
origin: method_origin,
ty: fty,
// been ruled out when we deemed the trait to be
// "object safe".
let original_poly_trait_ref =
- data.principal_trait_ref_with_self_ty(object_ty);
+ data.principal_trait_ref_with_self_ty(this.tcx(), object_ty);
let upcast_poly_trait_ref =
this.upcast(original_poly_trait_ref.clone(), trait_def_id);
let upcast_trait_ref =
self.infcx().next_ty_var());
let trait_ref =
- Rc::new(ty::TraitRef::new(trait_def_id, substs.clone()));
+ Rc::new(ty::TraitRef::new(trait_def_id, self.tcx().mk_substs(substs.clone())));
let origin = MethodTypeParam(MethodParam { trait_ref: trait_ref,
method_num: method_num });
(substs, origin)
ast::ExprParen(ref expr) |
ast::ExprField(ref expr, _) |
ast::ExprTupField(ref expr, _) |
- ast::ExprSlice(ref expr, _, _, _) |
ast::ExprIndex(ref expr, _) |
ast::ExprUnary(ast::UnDeref, ref expr) => exprs.push(&**expr),
_ => break,
// Construct a trait-reference `self_ty : Trait<input_tys>`
let substs = subst::Substs::new_trait(input_types, Vec::new(), assoc_types, self_ty);
- let trait_ref = Rc::new(ty::TraitRef::new(trait_def_id, substs));
+ let trait_ref = Rc::new(ty::TraitRef::new(trait_def_id, fcx.tcx().mk_substs(substs)));
// Construct an obligation
let poly_trait_ref = Rc::new(ty::Binder((*trait_ref).clone()));
// Substitute the trait parameters into the method type and
// instantiate late-bound regions to get the actual method type.
let ref bare_fn_ty = method_ty.fty;
- let fn_sig = bare_fn_ty.sig.subst(tcx, &trait_ref.substs);
+ let fn_sig = bare_fn_ty.sig.subst(tcx, trait_ref.substs);
let fn_sig = fcx.infcx().replace_late_bound_regions_with_fresh_var(span,
infer::FnCall,
&fn_sig).0;
let transformed_self_ty = fn_sig.inputs[0];
- let fty = ty::mk_bare_fn(tcx, None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(fn_sig),
unsafety: bare_fn_ty.unsafety,
abi: bare_fn_ty.abi.clone(),
- });
+ }));
debug!("lookup_in_trait_adjusted: matched method fty={} obligation={}",
fty.repr(fcx.tcx()),
//
// Note that as the method comes from a trait, it should not have
// any late-bound regions appearing in its bounds.
- let method_bounds = method_ty.generics.to_bounds(fcx.tcx(), &trait_ref.substs);
+ let method_bounds = method_ty.generics.to_bounds(fcx.tcx(), trait_ref.substs);
assert!(!method_bounds.has_escaping_regions());
fcx.add_obligations_for_parameters(
traits::ObligationCause::misc(span, fcx.body_id),
span,
ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: autoderefs,
- autoref: Some(ty::AutoPtr(region, mutbl, autoref))
+ autoref: Some(ty::AutoPtr(*region, mutbl, autoref))
}));
}
.find(|m| m.name() == method_name)
.and_then(|item| item.as_opt_method())
}
-
// it ride, although it's really not great, and in fact could I
// think cause spurious errors. Really though this part should
// take place in the `fcx.infcx().probe` below.
- let steps = create_steps(fcx, span, self_ty);
+ let steps = match create_steps(fcx, span, self_ty) {
+ Some(steps) => steps,
+ None => return Err(NoMatch(Vec::new())),
+ };
// Create a list of simplified self types, if we can.
let mut simplified_steps = Vec::new();
fn create_steps<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
self_ty: Ty<'tcx>)
- -> Vec<CandidateStep<'tcx>> {
+ -> Option<Vec<CandidateStep<'tcx>>> {
let mut steps = Vec::new();
let (fully_dereferenced_ty, dereferences, _) =
adjustment: AutoUnsizeLength(dereferences, len),
});
}
- _ => {
- }
+ ty::ty_err => return None,
+ _ => (),
}
- return steps;
+ Some(steps)
}
impl<'a,'tcx> ProbeContext<'a,'tcx> {
// a substitution that replaces `Self` with the object type
// itself. Hence, a `&self` method will wind up with an
// argument type like `&Trait`.
- let trait_ref = data.principal_trait_ref_with_self_ty(self_ty);
+ let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx(), self_ty);
self.elaborate_bounds(&[trait_ref.clone()], false, |this, new_trait_ref, m, method_num| {
let vtable_index =
get_method_index(tcx, &*new_trait_ref, trait_ref.clone(), method_num);
// Determine the receiver type that the method itself expects.
let xform_self_ty =
- self.xform_self_ty(&method, &impl_trait_ref.substs);
+ self.xform_self_ty(&method, impl_trait_ref.substs);
debug!("xform_self_ty={}", xform_self_ty.repr(self.tcx()));
let tcx = self.tcx();
self.search_mutabilities(
|m| AutoRef(m, box step.adjustment.clone()),
- |m,r| ty::mk_rptr(tcx, r, ty::mt {ty:step.self_ty, mutbl:m}))
+ |m,r| ty::mk_rptr(tcx, tcx.mk_region(r), ty::mt {ty:step.self_ty, mutbl:m}))
}
fn search_mutabilities<F, G>(&mut self,
use middle::{const_eval, def};
use middle::infer;
use middle::lang_items::IteratorItem;
-use middle::mem_categorization::{mod, McResult};
+use middle::mem_categorization as mc;
use middle::pat_util::{mod, pat_id_map};
use middle::region::CodeExtent;
use middle::subst::{mod, Subst, Substs, VecPerParamSpace, ParamSpace};
ccx: &'a CrateCtxt<'a, 'tcx>,
}
-impl<'a, 'tcx> mem_categorization::Typer<'tcx> for FnCtxt<'a, 'tcx> {
+impl<'a, 'tcx> mc::Typer<'tcx> for FnCtxt<'a, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.ccx.tcx
}
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
- Ok(self.node_ty(id))
+ fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> {
+ let ty = self.node_ty(id);
+ self.infcx().resolve_type_vars_if_possible(&ty)
+ }
+ fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
+ let ty = self.expr_ty_adjusted(expr);
+ self.infcx().resolve_type_vars_if_possible(&ty)
}
fn node_method_ty(&self, method_call: ty::MethodCall)
-> Option<Ty<'tcx>> {
- self.inh.method_map.borrow().get(&method_call).map(|m| m.ty)
+ self.inh.method_map.borrow()
+ .get(&method_call)
+ .map(|method| method.ty)
+ .map(|ty| self.infcx().resolve_type_vars_if_possible(&ty))
+ }
+ fn node_method_origin(&self, method_call: ty::MethodCall)
+ -> Option<ty::MethodOrigin<'tcx>>
+ {
+ self.inh.method_map.borrow()
+ .get(&method_call)
+ .map(|method| method.origin.clone())
}
fn adjustments(&self) -> &RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
&self.inh.adjustments
}
}
- ast::ItemTrait(_, _, _, _, ref trait_methods) => {
+ ast::ItemTrait(_, _, _, ref trait_methods) => {
let trait_def = ty::lookup_trait_def(ccx.tcx, local_def(it.id));
for trait_method in trait_methods.iter() {
match *trait_method {
}
// Compute skolemized form of impl and trait method tys.
- let impl_fty = ty::mk_bare_fn(tcx, None, impl_m.fty.clone());
+ let impl_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(impl_m.fty.clone()));
let impl_fty = impl_fty.subst(tcx, &impl_to_skol_substs);
- let trait_fty = ty::mk_bare_fn(tcx, None, trait_m.fty.clone());
+ let trait_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(trait_m.fty.clone()));
let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
// Check the impl method type IM is a subtype of the trait method
}
}
-#[deriving(Copy, Show)]
+#[deriving(Copy, Show,PartialEq,Eq)]
pub enum LvaluePreference {
PreferMutLvalue,
NoPreference
}
}
-/// Autoderefs `base_expr`, looking for a `Slice` impl. If it finds one, installs the relevant
-/// method info and returns the result type (else None).
-fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- method_call: MethodCall,
- expr: &ast::Expr,
- base_expr: &ast::Expr,
- base_ty: Ty<'tcx>,
- start_expr: &Option<P<ast::Expr>>,
- end_expr: &Option<P<ast::Expr>>,
- mutbl: ast::Mutability)
- -> Option<Ty<'tcx>> // return type is result of slice
-{
- let lvalue_pref = match mutbl {
- ast::MutMutable => PreferMutLvalue,
- ast::MutImmutable => NoPreference
- };
-
- let opt_method_ty =
- autoderef_for_index(fcx, base_expr, base_ty, lvalue_pref, |adjusted_ty, autoderefref| {
- try_overloaded_slice_step(fcx, method_call, expr, base_expr,
- adjusted_ty, autoderefref, mutbl,
- start_expr, end_expr)
- });
-
- // Regardless of whether the lookup succeeds, check the method arguments
- // so that we have *some* type for each argument.
- let method_ty_or_err = opt_method_ty.unwrap_or(ty::mk_err());
-
- let mut args = vec![];
- start_expr.as_ref().map(|x| args.push(x));
- end_expr.as_ref().map(|x| args.push(x));
-
- check_method_argument_types(fcx,
- expr.span,
- method_ty_or_err,
- expr,
- args.as_slice(),
- AutorefArgs::Yes,
- DontTupleArguments);
-
- opt_method_ty.map(|method_ty| {
- let result_ty = ty::ty_fn_ret(method_ty);
- match result_ty {
- ty::FnConverging(result_ty) => result_ty,
- ty::FnDiverging => {
- fcx.tcx().sess.span_bug(expr.span,
- "slice trait does not define a `!` return")
- }
- }
- })
-}
/// Checks for a `Slice` (or `SliceMut`) impl at the relevant level of autoderef. If it finds one,
/// installs method info and returns type of method (else None).
base_expr: &ast::Expr,
base_ty: Ty<'tcx>, // autoderef'd type
autoderefref: ty::AutoDerefRef<'tcx>,
- mutbl: ast::Mutability,
+ lvalue_pref: LvaluePreference,
start_expr: &Option<P<ast::Expr>>,
end_expr: &Option<P<ast::Expr>>)
- // result type is type of method being called
- -> Option<Ty<'tcx>>
+ -> Option<(Ty<'tcx>, /* index type */
+ Ty<'tcx>)> /* return type */
{
- let method = if mutbl == ast::MutMutable {
- // Try `SliceMut` first, if preferred.
- match fcx.tcx().lang_items.slice_mut_trait() {
- Some(trait_did) => {
- let method_name = match (start_expr, end_expr) {
- (&Some(_), &Some(_)) => "slice_or_fail_mut",
- (&Some(_), &None) => "slice_from_or_fail_mut",
- (&None, &Some(_)) => "slice_to_or_fail_mut",
- (&None, &None) => "as_mut_slice_",
- };
+ let input_ty = fcx.infcx().next_ty_var();
+ let return_ty = fcx.infcx().next_ty_var();
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&*base_expr),
- token::intern(method_name),
- trait_did,
- autoderefref,
- base_ty,
- None)
+ let method = match lvalue_pref {
+ PreferMutLvalue => {
+ // Try `SliceMut` first, if preferred.
+ match fcx.tcx().lang_items.slice_mut_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_or_fail_mut",
+ (&Some(_), &None) => "slice_from_or_fail_mut",
+ (&None, &Some(_)) => "slice_to_or_fail_mut",
+ (&None, &None) => "as_mut_slice_",
+ };
+
+ method::lookup_in_trait_adjusted(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ autoderefref,
+ base_ty,
+ Some(vec![input_ty, return_ty]))
+ }
+ _ => None,
}
- _ => None,
}
- } else {
- // Otherwise, fall back to `Slice`.
- // FIXME(#17293) this will not coerce base_expr, so we miss the Slice
- // trait for `&mut [T]`.
- match fcx.tcx().lang_items.slice_trait() {
- Some(trait_did) => {
- let method_name = match (start_expr, end_expr) {
- (&Some(_), &Some(_)) => "slice_or_fail",
- (&Some(_), &None) => "slice_from_or_fail",
- (&None, &Some(_)) => "slice_to_or_fail",
- (&None, &None) => "as_slice_",
- };
+ NoPreference => {
+ // Otherwise, fall back to `Slice`.
+ match fcx.tcx().lang_items.slice_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_or_fail",
+ (&Some(_), &None) => "slice_from_or_fail",
+ (&None, &Some(_)) => "slice_to_or_fail",
+ (&None, &None) => "as_slice_",
+ };
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&*base_expr),
- token::intern(method_name),
- trait_did,
- autoderefref,
- base_ty,
- None)
+ method::lookup_in_trait_adjusted(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ autoderefref,
+ base_ty,
+ Some(vec![input_ty, return_ty]))
+ }
+ _ => None,
}
- _ => None,
}
};
// If some lookup succeeded, install method in table
method.map(|method| {
- let ty = method.ty;
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
- ty
+ let method_ty = method.ty;
+ make_overloaded_lvalue_return_type(fcx, Some(method_call), Some(method));
+
+ let result_ty = ty::ty_fn_ret(method_ty);
+ let result_ty = match result_ty {
+ ty::FnConverging(result_ty) => result_ty,
+ ty::FnDiverging => {
+ fcx.tcx().sess.span_bug(expr.span,
+ "slice trait does not define a `!` return")
+ }
+ };
+
+ (input_ty, result_ty)
})
}
let tcx = fcx.ccx.tcx;
match lit.node {
- ast::LitStr(..) => ty::mk_str_slice(tcx, ty::ReStatic, ast::MutImmutable),
+ ast::LitStr(..) => ty::mk_str_slice(tcx, tcx.mk_region(ty::ReStatic), ast::MutImmutable),
ast::LitBinary(..) => {
- ty::mk_slice(tcx, ty::ReStatic, ty::mt{ ty: ty::mk_u8(), mutbl: ast::MutImmutable })
+ ty::mk_slice(tcx, tcx.mk_region(ty::ReStatic),
+ ty::mt{ ty: ty::mk_u8(), mutbl: ast::MutImmutable })
}
ast::LitByte(_) => ty::mk_u8(),
ast::LitChar(_) => ty::mk_char(),
});
let fn_sig = match fn_ty.sty {
- ty::ty_bare_fn(_, ty::BareFnTy {ref sig, ..}) |
+ ty::ty_bare_fn(_, &ty::BareFnTy {ref sig, ..}) |
ty::ty_closure(box ty::ClosureTy {ref sig, ..}) => sig,
_ => {
fcx.type_error_message(call_expr.span, |actual| {
let (adj_ty, adjustment) = match lhs_ty.sty {
ty::ty_rptr(r_in, mt) => {
let r_adj = fcx.infcx().next_region_var(infer::Autoref(lhs.span));
- fcx.mk_subr(infer::Reborrow(lhs.span), r_adj, r_in);
- let adjusted_ty = ty::mk_rptr(fcx.tcx(), r_adj, mt);
+ fcx.mk_subr(infer::Reborrow(lhs.span), r_adj, *r_in);
+ let adjusted_ty = ty::mk_rptr(fcx.tcx(), fcx.tcx().mk_region(r_adj), mt);
let autoptr = ty::AutoPtr(r_adj, mt.mutbl, None);
let adjustment = ty::AutoDerefRef { autoderefs: 1, autoref: Some(autoptr) };
(adjusted_ty, adjustment)
let (_, autoderefs, field_ty) =
autoderef(fcx, expr.span, expr_t, Some(base.id), lvalue_pref, |base_t, _| {
match base_t.sty {
- ty::ty_struct(base_id, ref substs) => {
+ ty::ty_struct(base_id, substs) => {
debug!("struct named {}", ppaux::ty_to_string(tcx, base_t));
let fields = ty::lookup_struct_fields(tcx, base_id);
lookup_field_ty(tcx, base_id, fields[],
let (_, autoderefs, field_ty) =
autoderef(fcx, expr.span, expr_t, Some(base.id), lvalue_pref, |base_t, _| {
match base_t.sty {
- ty::ty_struct(base_id, ref substs) => {
+ ty::ty_struct(base_id, substs) => {
tuple_like = ty::is_tuple_struct(tcx, base_id);
if tuple_like {
debug!("tuple struct named {}", ppaux::ty_to_string(tcx, base_t));
span: Span,
class_id: ast::DefId,
node_id: ast::NodeId,
- substitutions: subst::Substs<'tcx>,
+ substitutions: &'tcx subst::Substs<'tcx>,
field_types: &[ty::field_ty],
ast_fields: &[ast::Field],
check_completeness: bool,
Some((field_id, false)) => {
expected_field_type =
ty::lookup_field_type(
- tcx, class_id, field_id, &substitutions);
+ tcx, class_id, field_id, substitutions);
class_field_map.insert(
field.ident.node.name, (field_id, true));
fields_found += 1;
span,
class_id,
id,
- struct_substs,
+ fcx.ccx.tcx.mk_substs(struct_substs),
class_fields[],
fields,
base_expr.is_none(),
span,
variant_id,
id,
- substitutions,
+ fcx.ccx.tcx.mk_substs(substitutions),
variant_fields[],
fields,
true,
Some(mt) => mt.ty,
None => {
let is_newtype = match oprnd_t.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
let fields = ty::struct_fields(fcx.tcx(), did, substs);
fields.len() == 1
&& fields[0].name ==
// `'static`!
let region = fcx.infcx().next_region_var(
infer::AddrOfSlice(expr.span));
- ty::mk_rptr(tcx, region, tm)
+ ty::mk_rptr(tcx, tcx.mk_region(region), tm)
}
_ => {
let region = fcx.infcx().next_region_var(infer::AddrOfRegion(expr.span));
- ty::mk_rptr(tcx, region, tm)
+ ty::mk_rptr(tcx, tcx.mk_region(region), tm)
}
}
};
ast::ExprMatch(ref discrim, ref arms, match_src) => {
_match::check_match(fcx, expr, &**discrim, arms.as_slice(), expected, match_src);
}
- ast::ExprClosure(_, opt_kind, ref decl, ref body) => {
- closure::check_expr_closure(fcx, expr, opt_kind, &**decl, &**body, expected);
+ ast::ExprClosure(capture, opt_kind, ref decl, ref body) => {
+ closure::check_expr_closure(fcx, expr, capture, opt_kind, &**decl, &**body, expected);
}
ast::ExprBlock(ref b) => {
check_block_with_expected(fcx, &**b, expected);
}
ast::ExprIndex(ref base, ref idx) => {
check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
- check_expr(fcx, &**idx);
let base_t = fcx.expr_ty(&**base);
- let idx_t = fcx.expr_ty(&**idx);
if ty::type_is_error(base_t) {
fcx.write_ty(id, base_t);
- } else if ty::type_is_error(idx_t) {
- fcx.write_ty(id, idx_t);
} else {
- let base_t = structurally_resolved_type(fcx, expr.span, base_t);
-
- let result =
- autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
- try_index_step(fcx,
- MethodCall::expr(expr.id),
- expr,
- &**base,
- adj_ty,
- adj,
- lvalue_pref)
- });
-
- match result {
- Some((index_ty, element_ty)) => {
- check_expr_has_type(fcx, &**idx, index_ty);
- fcx.write_ty(id, element_ty);
- }
- _ => {
- check_expr_has_type(fcx, &**idx, ty::mk_err());
- fcx.type_error_message(
- expr.span,
- |actual| {
- format!("cannot index a value of type `{}`",
- actual)
- },
- base_t,
- None);
- fcx.write_ty(id, ty::mk_err())
- }
- }
- }
- }
- ast::ExprSlice(ref base, ref start, ref end, mutbl) => {
- check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
- let raw_base_t = fcx.expr_ty(&**base);
-
- let mut some_err = false;
- if ty::type_is_error(raw_base_t) {
- fcx.write_ty(id, raw_base_t);
- some_err = true;
- }
-
- {
- let check_slice_idx = |e: &ast::Expr| {
- check_expr(fcx, e);
- let e_t = fcx.expr_ty(e);
- if ty::type_is_error(e_t) {
- fcx.write_ty(e.id, e_t);
- some_err = true;
+ match idx.node {
+ ast::ExprRange(ref start, ref end) => {
+ // A slice, rather than an index. Special cased for now (KILLME).
+ let base_t = structurally_resolved_type(fcx, expr.span, base_t);
+
+ let result =
+ autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
+ try_overloaded_slice_step(fcx,
+ MethodCall::expr(expr.id),
+ expr,
+ &**base,
+ adj_ty,
+ adj,
+ lvalue_pref,
+ start,
+ end)
+ });
+
+ let mut args = vec![];
+ start.as_ref().map(|x| args.push(x));
+ end.as_ref().map(|x| args.push(x));
+
+ match result {
+ Some((index_ty, element_ty)) => {
+ for a in args.iter() {
+ check_expr_has_type(fcx, &***a, index_ty);
+ }
+ fcx.write_ty(idx.id, element_ty);
+ fcx.write_ty(id, element_ty)
+ }
+ _ => {
+ for a in args.iter() {
+ check_expr(fcx, &***a);
+ }
+ fcx.type_error_message(expr.span,
+ |actual| {
+ format!("cannot take a slice of a value with type `{}`",
+ actual)
+ },
+ base_t,
+ None);
+ fcx.write_ty(idx.id, ty::mk_err());
+ fcx.write_ty(id, ty::mk_err())
+ }
}
- };
- start.as_ref().map(|e| check_slice_idx(&**e));
- end.as_ref().map(|e| check_slice_idx(&**e));
- }
-
- if !some_err {
- let base_t = structurally_resolved_type(fcx,
- expr.span,
- raw_base_t);
- let method_call = MethodCall::expr(expr.id);
- match try_overloaded_slice(fcx,
- method_call,
- expr,
- &**base,
- base_t,
- start,
- end,
- mutbl) {
- Some(ty) => fcx.write_ty(id, ty),
- None => {
- fcx.type_error_message(expr.span,
- |actual| {
- format!("cannot take a {}slice of a value with type `{}`",
- if mutbl == ast::MutMutable {
- "mutable "
- } else {
- ""
- },
- actual)
- },
- base_t,
- None);
- fcx.write_ty(id, ty::mk_err())
+ }
+ _ => {
+ check_expr(fcx, &**idx);
+ let idx_t = fcx.expr_ty(&**idx);
+ if ty::type_is_error(idx_t) {
+ fcx.write_ty(id, idx_t);
+ } else {
+ let base_t = structurally_resolved_type(fcx, expr.span, base_t);
+
+ let result =
+ autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
+ try_index_step(fcx,
+ MethodCall::expr(expr.id),
+ expr,
+ &**base,
+ adj_ty,
+ adj,
+ lvalue_pref)
+ });
+
+ match result {
+ Some((index_ty, element_ty)) => {
+ check_expr_has_type(fcx, &**idx, index_ty);
+ fcx.write_ty(id, element_ty);
+ }
+ _ => {
+ check_expr_has_type(fcx, &**idx, ty::mk_err());
+ fcx.type_error_message(
+ expr.span,
+ |actual| {
+ format!("cannot index a value of type `{}`",
+ actual)
+ },
+ base_t,
+ None);
+ fcx.write_ty(id, ty::mk_err())
+ }
+ }
}
+ }
}
}
}
ast::ExprRange(ref start, ref end) => {
- check_expr(fcx, &**start);
- let t_start = fcx.expr_ty(&**start);
-
- let idx_type = if let &Some(ref e) = end {
+ let t_start = start.as_ref().map(|e| {
check_expr(fcx, &**e);
- let t_end = fcx.expr_ty(&**e);
- if ty::type_is_error(t_end) {
- ty::mk_err()
- } else if t_start == ty::mk_err() {
- ty::mk_err()
- } else {
- infer::common_supertype(fcx.infcx(),
- infer::RangeExpression(expr.span),
- true,
- t_start,
- t_end)
+ fcx.expr_ty(&**e)
+ });
+ let t_end = end.as_ref().map(|e| {
+ check_expr(fcx, &**e);
+ fcx.expr_ty(&**e)
+ });
+
+ let idx_type = match (t_start, t_end) {
+ (Some(ty), None) | (None, Some(ty)) => Some(ty),
+ (Some(t_start), Some(t_end))
+ if ty::type_is_error(t_start) || ty::type_is_error(t_end) => {
+ Some(ty::mk_err())
}
- } else {
- t_start
+ (Some(t_start), Some(t_end)) => {
+ Some(infer::common_supertype(fcx.infcx(),
+ infer::RangeExpression(expr.span),
+ true,
+ t_start,
+ t_end))
+ }
+ _ => None
};
// Note that we don't check the type of start/end satisfy any
// bounds because right the range structs do not have any. If we add
// some bounds, then we'll need to check `t_start` against them here.
- let range_type = if idx_type == ty::mk_err() {
- ty::mk_err()
- } else {
- // Find the did from the appropriate lang item.
- let did = if end.is_some() {
- // Range
- tcx.lang_items.range_struct()
- } else {
- // RangeFrom
- tcx.lang_items.range_from_struct()
- };
-
- if let Some(did) = did {
- let polytype = ty::lookup_item_type(tcx, did);
- let substs = Substs::new_type(vec![idx_type], vec![]);
- let bounds = polytype.generics.to_bounds(tcx, &substs);
- fcx.add_obligations_for_parameters(
- traits::ObligationCause::new(expr.span,
- fcx.body_id,
- traits::ItemObligation(did)),
- &bounds);
-
- ty::mk_struct(tcx, did, substs)
- } else {
+ let range_type = match idx_type {
+ Some(idx_type) if ty::type_is_error(idx_type) => {
ty::mk_err()
}
+ Some(idx_type) => {
+ // Find the did from the appropriate lang item.
+ let did = match (start, end) {
+ (&Some(_), &Some(_)) => tcx.lang_items.range_struct(),
+ (&Some(_), &None) => tcx.lang_items.range_from_struct(),
+ (&None, &Some(_)) => tcx.lang_items.range_to_struct(),
+ (&None, &None) => {
+ tcx.sess.span_bug(expr.span, "full range should be dealt with above")
+ }
+ };
+
+ if let Some(did) = did {
+ let polytype = ty::lookup_item_type(tcx, did);
+ let substs = Substs::new_type(vec![idx_type], vec![]);
+ let bounds = polytype.generics.to_bounds(tcx, &substs);
+ fcx.add_obligations_for_parameters(
+ traits::ObligationCause::new(expr.span,
+ fcx.body_id,
+ traits::ItemObligation(did)),
+ &bounds);
+
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ } else {
+ tcx.sess.span_err(expr.span, "No lang item for range syntax");
+ ty::mk_err()
+ }
+ }
+ None => {
+ // Neither start nor end => FullRange
+ if let Some(did) = tcx.lang_items.full_range_struct() {
+ let substs = Substs::new_type(vec![], vec![]);
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ } else {
+ tcx.sess.span_err(expr.span, "No lang item for range syntax");
+ ty::mk_err()
+ }
+ }
};
+
fcx.write_ty(id, range_type);
}
return;
}
match t.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
let fields = ty::lookup_struct_fields(tcx, did);
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
match t.sty {
ty::ty_param(ParamTy {idx, ..}) => {
debug!("Found use of ty param num {}", idx);
- tps_used[idx] = true;
+ tps_used[idx as uint] = true;
}
_ => ()
}
}
pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
- fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: uint) -> Ty<'tcx> {
+ fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: u32) -> Ty<'tcx> {
ty::mk_param(ccx.tcx, subst::FnSpace, n, local_def(0))
}
"breakpoint" => (0, Vec::new(), ty::mk_nil(tcx)),
"size_of" |
"pref_align_of" | "min_align_of" => (1u, Vec::new(), ty::mk_uint()),
- "init" => (1u, Vec::new(), param(ccx, 0u)),
- "uninit" => (1u, Vec::new(), param(ccx, 0u)),
+ "init" => (1u, Vec::new(), param(ccx, 0)),
+ "uninit" => (1u, Vec::new(), param(ccx, 0)),
"forget" => (1u, vec!( param(ccx, 0) ), ty::mk_nil(tcx)),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),
"move_val_init" => {
(1u,
vec!(
- ty::mk_mut_rptr(tcx, ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(0)),
+ ty::mk_mut_rptr(tcx,
+ tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
+ ty::BrAnon(0))),
param(ccx, 0)),
- param(ccx, 0u)
+ param(ccx, 0)
),
ty::mk_nil(tcx))
}
Ok(did) => (1u,
Vec::new(),
ty::mk_struct(ccx.tcx, did,
- subst::Substs::empty())),
+ ccx.tcx.mk_substs(subst::Substs::empty()))),
Err(msg) => {
tcx.sess.span_fatal(it.span, msg[]);
}
};
(n_tps, inputs, ty::FnConverging(output))
};
- let fty = ty::mk_bare_fn(tcx, None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Unsafe,
abi: abi::RustIntrinsic,
sig: ty::Binder(FnSig {
output: output,
variadic: false,
}),
- });
+ }));
let i_ty = ty::lookup_item_type(ccx.tcx, local_def(it.id));
let i_n_tps = i_ty.generics.types.len(subst::FnSpace);
if i_n_tps != n_tps {
});
}
}
-
use middle::ty::{mod, Ty, MethodCall};
use middle::infer;
use middle::pat_util;
-use util::nodemap::{DefIdMap, NodeMap, FnvHashMap};
+use util::nodemap::{FnvHashMap};
use util::ppaux::{ty_to_string, Repr};
use syntax::{ast, ast_util};
///////////////////////////////////////////////////////////////////////////
// INTERNALS
-// If mem categorization results in an error, it's because the type
-// check failed (or will fail, when the error is uncovered and
-// reported during writeback). In this case, we just ignore this part
-// of the code and don't try to add any more region constraints.
-macro_rules! ignore_err {
- ($inp: expr) => (
- match $inp {
- Ok(v) => v,
- Err(()) => return
- }
- )
-}
-
// Stores parameters for a potential call to link_region()
// to perform if an upvar reference is marked unique/mutable after
// it has already been processed before.
}
}
-impl<'fcx, 'tcx> mc::Typer<'tcx> for Rcx<'fcx, 'tcx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
- self.fcx.ccx.tcx
- }
-
- fn node_ty(&self, id: ast::NodeId) -> mc::McResult<Ty<'tcx>> {
- let t = self.resolve_node_type(id);
- if ty::type_is_error(t) {Err(())} else {Ok(t)}
- }
-
- fn node_method_ty(&self, method_call: MethodCall) -> Option<Ty<'tcx>> {
- self.resolve_method_type(method_call)
- }
-
- fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment<'tcx>>> {
- &self.fcx.inh.adjustments
- }
-
- fn is_method_call(&self, id: ast::NodeId) -> bool {
- self.fcx.inh.method_map.borrow().contains_key(&MethodCall::expr(id))
- }
-
- fn temporary_scope(&self, id: ast::NodeId) -> Option<CodeExtent> {
- self.tcx().region_maps.temporary_scope(id)
- }
-
- fn upvar_borrow(&self, id: ty::UpvarId) -> ty::UpvarBorrow {
- self.fcx.inh.upvar_borrow_map.borrow()[id].clone()
- }
-
- fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> ast::CaptureClause {
- self.tcx().capture_modes.borrow()[closure_expr_id].clone()
- }
-
- fn unboxed_closures<'a>(&'a self)
- -> &'a RefCell<DefIdMap<ty::UnboxedClosure<'tcx>>> {
- &self.fcx.inh.unboxed_closures
- }
-}
-
impl<'a, 'tcx, 'v> Visitor<'v> for Rcx<'a, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
};
if let ty::ty_rptr(r_ptr, _) = base_ty.sty {
mk_subregion_due_to_dereference(
- rcx, expr.span, ty::ReScope(CodeExtent::from_node_id(expr.id)), r_ptr);
+ rcx, expr.span, ty::ReScope(CodeExtent::from_node_id(expr.id)), *r_ptr);
}
visit::walk_expr(rcx, expr);
constrain_bindings_in_pat(&**pat, rcx);
{
- let mc = mc::MemCategorizationContext::new(rcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
let pat_ty = rcx.resolve_node_type(pat.id);
let pat_cmt = mc.cat_rvalue(pat.id,
pat.span,
/*From:*/ (&ty::ty_rptr(from_r, ref from_mt),
/*To: */ &ty::ty_rptr(to_r, ref to_mt)) => {
// Target cannot outlive source, naturally.
- rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), to_r, from_r);
+ rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), *to_r, *from_r);
walk_cast(rcx, cast_expr, from_mt.ty, to_mt.ty);
}
// Variables being referenced must be constrained and registered
// in the upvar borrow map
constrain_free_variables_in_by_ref_closure(
- rcx, region, expr, freevars);
+ rcx, *region, expr, freevars);
}
})
}
}
ty::ty_unboxed_closure(_, region, _) => {
ty::with_freevars(tcx, expr.id, |freevars| {
- let bounds = ty::region_existential_bound(region);
+ let bounds = ty::region_existential_bound(*region);
ensure_free_variable_types_outlive_closure_bound(rcx, bounds, expr, freevars);
})
}
let var_ty = match rcx.fcx.inh.upvar_borrow_map.borrow().get(&upvar_id) {
Some(upvar_borrow) => {
ty::mk_rptr(rcx.tcx(),
- upvar_borrow.region,
+ rcx.tcx().mk_region(upvar_borrow.region),
ty::mt { mutbl: upvar_borrow.kind.to_mutbl_lossy(),
ty: raw_var_ty })
}
ty::RegionTraitStore(r, _) => {
// While we're here, link the closure's region with a unique
// immutable borrow (gathered later in borrowck)
- let mc = mc::MemCategorizationContext::new(rcx);
- let expr_cmt = ignore_err!(mc.cat_expr(callee_expr));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let expr_cmt = mc.cat_expr(callee_expr);
link_region(rcx, callee_expr.span, call_region,
ty::UniqueImmBorrow, expr_cmt);
r
method.ty.repr(rcx.tcx()))[])
};
{
- let mc = mc::MemCategorizationContext::new(rcx);
- let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i));
- link_region(rcx, deref_expr.span, r,
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let self_cmt = mc.cat_expr_autoderefd(deref_expr, i);
+ link_region(rcx, deref_expr.span, *r,
ty::BorrowKind::from_mutbl(m), self_cmt);
}
if let ty::ty_rptr(r_ptr, _) = derefd_ty.sty {
mk_subregion_due_to_dereference(rcx, deref_expr.span,
- r_deref_expr, r_ptr);
+ r_deref_expr, *r_ptr);
}
match ty::deref(derefd_ty, true) {
match mt.ty.sty {
ty::ty_vec(_, None) | ty::ty_str => {
rcx.fcx.mk_subr(infer::IndexSlice(index_expr.span),
- r_index_expr, r_ptr);
+ r_index_expr, *r_ptr);
}
_ => {}
}
debug!("link_addr_of(base=?)");
let cmt = {
- let mc = mc::MemCategorizationContext::new(rcx);
- ignore_err!(mc.cat_expr(base))
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ mc.cat_expr(base)
};
link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt);
}
None => { return; }
Some(ref expr) => &**expr,
};
- let mc = mc::MemCategorizationContext::new(rcx);
- let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let discr_cmt = mc.cat_expr(init_expr);
link_pattern(rcx, mc, discr_cmt, &*local.pat);
}
/// linked to the lifetime of its guarantor (if any).
fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
debug!("regionck::for_match()");
- let mc = mc::MemCategorizationContext::new(rcx);
- let discr_cmt = ignore_err!(mc.cat_expr(discr));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let discr_cmt = mc.cat_expr(discr);
debug!("discr_cmt={}", discr_cmt.repr(rcx.tcx()));
for arm in arms.iter() {
for root_pat in arm.pats.iter() {
/// linked to the lifetime of its guarantor (if any).
fn link_fn_args(rcx: &Rcx, body_scope: CodeExtent, args: &[ast::Arg]) {
debug!("regionck::link_fn_args(body_scope={})", body_scope);
- let mc = mc::MemCategorizationContext::new(rcx);
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
for arg in args.iter() {
let arg_ty = rcx.fcx.node_ty(arg.id);
let re_scope = ty::ReScope(body_scope);
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
/// needed.
fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
- mc: mc::MemCategorizationContext<Rcx<'a, 'tcx>>,
+ mc: mc::MemCategorizationContext<FnCtxt<'a, 'tcx>>,
discr_cmt: mc::cmt<'tcx>,
root_pat: &ast::Pat) {
debug!("link_pattern(discr_cmt={}, root_pat={})",
// `[_, ..slice, _]` pattern
ast::PatVec(_, Some(ref slice_pat), _) => {
- match mc.cat_slice_pattern(sub_cmt, &**slice_pat) {
- Ok((slice_cmt, slice_mutbl, slice_r)) => {
- link_region(rcx, sub_pat.span, slice_r,
- ty::BorrowKind::from_mutbl(slice_mutbl),
- slice_cmt);
- }
- Err(()) => {}
- }
+ let (slice_cmt, slice_mutbl, slice_r) =
+ mc.cat_slice_pattern(sub_cmt, &**slice_pat);
+ link_region(rcx, sub_pat.span, slice_r,
+ ty::BorrowKind::from_mutbl(slice_mutbl),
+ slice_cmt);
}
_ => {}
}
autoref: &ty::AutoRef) {
debug!("link_autoref(autoref={})", autoref);
- let mc = mc::MemCategorizationContext::new(rcx);
- let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let expr_cmt = mc.cat_expr_autoderefd(expr, autoderefs);
debug!("expr_cmt={}", expr_cmt.repr(rcx.tcx()));
match *autoref {
let tcx = rcx.tcx();
debug!("link_by_ref(expr={}, callee_scope={})",
expr.repr(tcx), callee_scope);
- let mc = mc::MemCategorizationContext::new(rcx);
- let expr_cmt = ignore_err!(mc.cat_expr(expr));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let expr_cmt = mc.cat_expr(expr);
let borrow_region = ty::ReScope(callee_scope);
link_region(rcx, expr.span, borrow_region, ty::ImmBorrow, expr_cmt);
}
}
mc::cat_downcast(cmt_base, _) |
- mc::cat_deref(cmt_base, _, mc::OwnedPtr) |
+ mc::cat_deref(cmt_base, _, mc::Unique) |
mc::cat_interior(cmt_base, _) => {
// Borrowing interior or owned data requires the base
// to be valid and borrowable in the same fashion.
/// assignment expression.
fn adjust_borrow_kind_for_assignment_lhs(rcx: &Rcx,
lhs: &ast::Expr) {
- let mc = mc::MemCategorizationContext::new(rcx);
- let cmt = ignore_err!(mc.cat_expr(lhs));
+ let mc = mc::MemCategorizationContext::new(rcx.fcx);
+ let cmt = mc.cat_expr(lhs);
adjust_upvar_borrow_kind_for_mut(rcx, cmt);
}
cmt.repr(rcx.tcx()));
match cmt.cat.clone() {
- mc::cat_deref(base, _, mc::OwnedPtr) |
+ mc::cat_deref(base, _, mc::Unique) |
mc::cat_interior(base, _) |
mc::cat_downcast(base, _) => {
// Interior or owned data is mutable if base is
cmt.repr(rcx.tcx()));
match cmt.cat.clone() {
- mc::cat_deref(base, _, mc::OwnedPtr) |
+ mc::cat_deref(base, _, mc::Unique) |
mc::cat_interior(base, _) |
mc::cat_downcast(base, _) => {
// Interior or owned data is unique if base is
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
- self.push_region_constraint_from_top(region);
+ self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
- ty::ty_enum(def_id, ref substs) |
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) |
+ ty::ty_struct(def_id, substs) => {
self.accumulate_from_adt(ty, def_id, substs)
}
}
ty::ty_rptr(r_b, mt) => {
- self.accumulate_from_rptr(ty, r_b, mt.ty);
+ self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
use check::{FnCtxt, structurally_resolved_type};
use middle::subst::{FnSpace};
use middle::traits;
-use middle::traits::{SelectionError, OutputTypeParameterMismatch, Overflow, Unimplemented};
use middle::traits::{Obligation, ObligationCause};
-use middle::traits::{FulfillmentError, CodeSelectionError, CodeAmbiguity};
-use middle::traits::{PredicateObligation};
+use middle::traits::report_fulfillment_errors;
use middle::ty::{mod, Ty};
use middle::infer;
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::Span;
-use util::ppaux::{UserString, Repr, ty_to_string};
+use util::ppaux::{Repr, ty_to_string};
pub fn check_object_cast<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
cast_expr: &ast::Expr,
// Ensure that if &'a T is cast to &'b Trait, then 'b <= 'a
infer::mk_subr(fcx.infcx(),
infer::RelateObjectBound(source_expr.span),
- target_region,
- referent_region);
+ *target_region,
+ *referent_region);
check_object_safety(fcx.tcx(), object_trait, source_expr.span);
}
object_trait: &ty::TyTrait<'tcx>,
span: Span)
{
- let object_trait_ref = object_trait.principal_trait_ref_with_self_ty(ty::mk_err());
+ let object_trait_ref = object_trait.principal_trait_ref_with_self_ty(tcx, ty::mk_err());
for tr in traits::supertraits(tcx, object_trait_ref) {
check_object_safety_inner(tcx, &*tr, span);
}
// Create the obligation for casting from T to Trait.
let object_trait_ref =
- object_trait.principal_trait_ref_with_self_ty(referent_ty);
+ object_trait.principal_trait_ref_with_self_ty(fcx.tcx(), referent_ty);
let object_obligation =
Obligation::new(
ObligationCause::new(span,
fcx);
match r {
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
- }
-}
-
-pub fn report_fulfillment_errors<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- errors: &Vec<FulfillmentError<'tcx>>) {
- for error in errors.iter() {
- report_fulfillment_error(fcx, error);
- }
-}
-
-pub fn report_fulfillment_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- error: &FulfillmentError<'tcx>) {
- match error.code {
- CodeSelectionError(ref e) => {
- report_selection_error(fcx, &error.obligation, e);
- }
- CodeAmbiguity => {
- maybe_report_ambiguity(fcx, &error.obligation);
- }
- }
-}
-
-pub fn report_selection_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>,
- error: &SelectionError<'tcx>)
-{
- match *error {
- Overflow => {
- // We could track the stack here more precisely if we wanted, I imagine.
- let predicate =
- fcx.infcx().resolve_type_vars_if_possible(&obligation.trait_ref);
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "overflow evaluating the requirement `{}`",
- predicate.user_string(fcx.tcx())).as_slice());
-
- let current_limit = fcx.tcx().sess.recursion_limit.get();
- let suggested_limit = current_limit * 2;
- fcx.tcx().sess.span_note(
- obligation.cause.span,
- format!(
- "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
- suggested_limit)[]);
-
- note_obligation_cause(fcx, obligation);
- }
- Unimplemented => {
- match obligation.trait_ref {
- ty::Predicate::Trait(ref trait_ref) => {
- let trait_ref = fcx.infcx().resolve_type_vars_if_possible(&**trait_ref);
- if !ty::type_is_error(trait_ref.self_ty()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the trait `{}` is not implemented for the type `{}`",
- trait_ref.user_string(fcx.tcx()),
- trait_ref.self_ty().user_string(fcx.tcx())).as_slice());
- }
- }
-
- ty::Predicate::Equate(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- let err = fcx.infcx().equality_predicate(obligation.cause.span,
- &predicate).unwrap_err();
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
- }
-
- ty::Predicate::RegionOutlives(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- let err = fcx.infcx().region_outlives_predicate(obligation.cause.span,
- &predicate).unwrap_err();
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
- }
-
- ty::Predicate::TypeOutlives(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied",
- predicate.user_string(fcx.tcx())).as_slice());
- }
- }
-
- note_obligation_cause(fcx, obligation);
- }
- OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
- let expected_trait_ref =
- fcx.infcx().resolve_type_vars_if_possible(
- &**expected_trait_ref);
- let actual_trait_ref =
- fcx.infcx().resolve_type_vars_if_possible(
- &**actual_trait_ref);
- if !ty::type_is_error(actual_trait_ref.self_ty()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "type mismatch: the type `{}` implements the trait `{}`, \
- but the trait `{}` is required ({})",
- expected_trait_ref.self_ty().user_string(fcx.tcx()),
- expected_trait_ref.user_string(fcx.tcx()),
- actual_trait_ref.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), e)).as_slice());
- note_obligation_cause(fcx, obligation);
- }
- }
- }
-}
-
-pub fn maybe_report_ambiguity<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>) {
- // Unable to successfully determine, probably means
- // insufficient type information, but could mean
- // ambiguous impls. The latter *ought* to be a
- // coherence violation, so we don't report it here.
-
- let trait_ref = match obligation.trait_ref {
- ty::Predicate::Trait(ref trait_ref) => {
- fcx.infcx().resolve_type_vars_if_possible(&**trait_ref)
- }
- _ => {
- fcx.tcx().sess.span_bug(
- obligation.cause.span,
- format!("ambiguity from something other than a trait: {}",
- obligation.trait_ref.repr(fcx.tcx())).as_slice());
- }
- };
- let self_ty = trait_ref.self_ty();
-
- debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
- trait_ref.repr(fcx.tcx()),
- self_ty.repr(fcx.tcx()),
- obligation.repr(fcx.tcx()));
- let all_types = &trait_ref.substs().types;
- if all_types.iter().any(|&t| ty::type_is_error(t)) {
- } else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
- // This is kind of a hack: it frequently happens that some earlier
- // error prevents types from being fully inferred, and then we get
- // a bunch of uninteresting errors saying something like "<generic
- // #0> doesn't implement Sized". It may even be true that we
- // could just skip over all checks where the self-ty is an
- // inference variable, but I was afraid that there might be an
- // inference variable created, registered as an obligation, and
- // then never forced by writeback, and hence by skipping here we'd
- // be ignoring the fact that we don't KNOW the type works
- // out. Though even that would probably be harmless, given that
- // we're only talking about builtin traits, which are known to be
- // inhabited. But in any case I just threw in this check for
- // has_errors() to be sure that compilation isn't happening
- // anyway. In that case, why inundate the user.
- if !fcx.tcx().sess.has_errors() {
- if fcx.ccx.tcx.lang_items.sized_trait()
- .map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "unable to infer enough type information about `{}`; type annotations \
- required",
- self_ty.user_string(fcx.tcx()))[]);
- } else {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "unable to infer enough type information to \
- locate the impl of the trait `{}` for \
- the type `{}`; type annotations required",
- trait_ref.user_string(fcx.tcx()),
- self_ty.user_string(fcx.tcx()))[]);
- note_obligation_cause(fcx, obligation);
- }
- }
- } else if !fcx.tcx().sess.has_errors() {
- // Ambiguity. Coherence should have reported an error.
- fcx.tcx().sess.span_bug(
- obligation.cause.span,
- format!(
- "coherence failed to report ambiguity: \
- cannot locate the impl of the trait `{}` for \
- the type `{}`",
- trait_ref.user_string(fcx.tcx()),
- self_ty.user_string(fcx.tcx()))[]);
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
.select_where_possible(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
.select_new_obligations(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
- }
-}
-
-fn note_obligation_cause<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>) {
- let tcx = fcx.tcx();
- match obligation.cause.code {
- traits::MiscObligation => { }
- traits::ItemObligation(item_def_id) => {
- let item_name = ty::item_path_str(tcx, item_def_id);
- tcx.sess.span_note(
- obligation.cause.span,
- format!(
- "required by `{}`",
- item_name).as_slice());
- }
- traits::ObjectCastObligation(object_ty) => {
- tcx.sess.span_note(
- obligation.cause.span,
- format!(
- "required for the cast to the object type `{}`",
- fcx.infcx().ty_to_string(object_ty)).as_slice());
- }
- traits::RepeatVec => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the `Copy` trait is required because the \
- repeated element will be copied");
- }
- traits::VariableType(_) => {
- tcx.sess.span_note(
- obligation.cause.span,
- "all local variables must have a statically known size");
- }
- traits::ReturnType => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the return type of a function must have a \
- statically known size");
- }
- traits::AssignmentLhsSized => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the left-hand-side of an assignment must have a statically known size");
- }
- traits::StructInitializerSized => {
- tcx.sess.span_note(
- obligation.cause.span,
- "structs must have a statically known size to be initialized");
- }
- traits::DropTrait => {
- span_note!(tcx.sess, obligation.cause.span,
- "cannot implement a destructor on a \
- structure or enumeration that does not satisfy Send");
- span_help!(tcx.sess, obligation.cause.span,
- "use \"#[unsafe_destructor]\" on the implementation \
- to force the compiler to allow this");
- }
- traits::ClosureCapture(var_id, closure_span, builtin_bound) => {
- let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
- let trait_name = ty::item_path_str(tcx, def_id);
- let name = ty::local_var_name_str(tcx, var_id);
- span_note!(tcx.sess, closure_span,
- "the closure that captures `{}` requires that all captured variables \"
- implement the trait `{}`",
- name,
- trait_name);
- }
- traits::FieldSized => {
- span_note!(tcx.sess, obligation.cause.span,
- "only the last field of a struct or enum variant \
- may have a dynamically sized type")
- }
- traits::ObjectSized => {
- span_note!(tcx.sess, obligation.cause.span,
- "only sized types can be made into objects");
- }
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
match self_ty.sty {
ty::ty_struct(def_id, _) |
ty::ty_enum(def_id, _) => {
- check_struct_safe_for_destructor(fcx, item.span, self_ty, def_id);
+ check_struct_safe_for_destructor(fcx, item.span, def_id);
}
_ => {
// Coherence already reports an error in this case.
let poly_trait_ref = ty::Binder(trait_ref);
let predicates = ty::predicates_for_trait_ref(fcx.tcx(), &poly_trait_ref);
for predicate in predicates.into_iter() {
- fcx.register_predicate(traits::Obligation::new(cause, predicate));
+ fcx.register_predicate(traits::Obligation::new(cause.clone(), predicate));
}
});
}
pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
let trait_def = ty::lookup_trait_def(self.fcx.tcx(), trait_ref.def_id);
- let bounds = trait_def.generics.to_bounds(self.tcx(), &trait_ref.substs);
+ let bounds = trait_def.generics.to_bounds(self.tcx(), trait_ref.substs);
self.fcx.add_obligations_for_parameters(
traits::ObligationCause::new(
self.span,
}
match t.sty{
- ty::ty_struct(type_id, ref substs) |
- ty::ty_enum(type_id, ref substs) => {
+ ty::ty_struct(type_id, substs) |
+ ty::ty_enum(type_id, substs) => {
let polytype = ty::lookup_item_type(self.fcx.tcx(), type_id);
if self.binding_count == 0 {
self.fold_substs(substs);
}
- ty::ty_bare_fn(_, ty::BareFnTy{sig: ref fn_sig, ..}) |
+ ty::ty_bare_fn(_, &ty::BareFnTy{sig: ref fn_sig, ..}) |
ty::ty_closure(box ty::ClosureTy{sig: ref fn_sig, ..}) => {
self.binding_count += 1;
fn check_struct_safe_for_destructor<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
- self_ty: Ty<'tcx>,
struct_did: ast::DefId) {
let struct_tpt = ty::lookup_item_type(fcx.tcx(), struct_did);
- if !struct_tpt.generics.has_type_params(subst::TypeSpace)
- && !struct_tpt.generics.has_region_params(subst::TypeSpace)
+ if struct_tpt.generics.has_type_params(subst::TypeSpace)
+ || struct_tpt.generics.has_region_params(subst::TypeSpace)
{
- let cause = traits::ObligationCause::new(span, fcx.body_id, traits::DropTrait);
- fcx.register_builtin_bound(self_ty, ty::BoundSend, cause);
- } else {
span_err!(fcx.tcx().sess, span, E0141,
"cannot implement a destructor on a structure \
- with type parameters");
- span_note!(fcx.tcx().sess, span,
- "use \"#[unsafe_destructor]\" on the implementation \
- to force the compiler to allow this");
+ with type parameters");
+ span_note!(fcx.tcx().sess, span,
+ "use \"#[unsafe_destructor]\" on the implementation \
+ to force the compiler to allow this");
}
}
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
- ty: ty::mk_bare_fn(tcx, Some(new_did), new_method_ty.fty.clone())
+ ty: ty::mk_bare_fn(tcx, Some(new_did),
+ tcx.mk_bare_fn(new_method_ty.fty.clone()))
};
debug!("new_polytype={}", new_polytype.repr(tcx));
trait_def: &ty::TraitDef<'tcx>) {
let tcx = ccx.tcx;
if let ast_map::NodeItem(item) = tcx.map.get(trait_id) {
- if let ast::ItemTrait(_, _, _, _, ref trait_items) = item.node {
+ if let ast::ItemTrait(_, _, _, ref trait_items) = item.node {
// For each method, construct a suitable ty::Method and
// store it into the `tcx.impl_or_trait_items` table:
for trait_item in trait_items.iter() {
m.def_id,
Polytype {
generics: m.generics.clone(),
- ty: ty::mk_bare_fn(ccx.tcx, Some(m.def_id), m.fty.clone()) });
+ ty: ty::mk_bare_fn(ccx.tcx, Some(m.def_id), ccx.tcx.mk_bare_fn(m.fty.clone())) });
}
fn ty_method_of_trait_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
untransformed_rcvr_ty,
rcvr_ty_generics,
rcvr_visibility));
- let fty = ty::mk_bare_fn(tcx, Some(m_def_id), mty.fty.clone());
+ let fty = ty::mk_bare_fn(tcx, Some(m_def_id), tcx.mk_bare_fn(mty.fty.clone()));
debug!("method {} (id {}) has type {}",
m.pe_ident().repr(tcx),
m.id,
ast::RegionTyParamBound(..) => { }
}
}
-
- match ty_param.unbound {
- Some(_) => { warn = true; }
- None => { }
- }
}
if warn {
generics: &ty::Generics)
-> bool {
if let ty::ty_param(param_ty) = ty.sty {
- let type_parameter = generics.types.get(param_ty.space, param_ty.idx);
+ let type_parameter = generics.types.get(param_ty.space, param_ty.idx as uint);
for trait_bound in type_parameter.bounds.trait_bounds.iter() {
if trait_bound.def_id() == trait_id {
return true
AllowEqConstraints::DontAllow);
}
},
- ast::ItemTrait(_, _, _, _, ref trait_methods) => {
+ ast::ItemTrait(_, _, _, ref trait_methods) => {
let trait_def = trait_def_of_item(ccx, it);
debug!("trait_def: ident={} trait_def={}",
tcx.struct_fields.borrow_mut().insert(local_def(id), Rc::new(field_tys));
let substs = mk_item_substs(ccx, &pty.generics);
- let selfty = ty::mk_struct(tcx, local_def(id), substs);
+ let selfty = ty::mk_struct(tcx, local_def(id), tcx.mk_substs(substs));
// If this struct is enum-like or tuple-like, create the type of its
// constructor.
return def.clone();
}
- let (unsafety, generics, unbound, bounds, items) = match it.node {
+ let (unsafety, generics, bounds, items) = match it.node {
ast::ItemTrait(unsafety,
ref generics,
- ref unbound,
ref supertraits,
ref items) => {
- (unsafety, generics, unbound, supertraits, items.as_slice())
+ (unsafety, generics, supertraits, items.as_slice())
}
ref s => {
tcx.sess.span_bug(
}
};
- let substs = mk_trait_substs(ccx, it.id, generics, items);
+ let substs = ccx.tcx.mk_substs(mk_trait_substs(ccx, it.id, generics, items));
let ty_generics = ty_generics_for_trait(ccx,
it.id,
- &substs,
+ substs,
generics,
items);
token::SELF_KEYWORD_NAME,
self_param_ty,
bounds.as_slice(),
- unbound,
it.span);
let substs = mk_item_substs(ccx, &ty_generics);
bounds: bounds,
trait_ref: Rc::new(ty::TraitRef {
def_id: def_id,
- substs: substs
+ substs: ccx.tcx.mk_substs(substs)
})
});
tcx.trait_defs.borrow_mut().insert(def_id, trait_def.clone());
.enumerate()
.map(|(i, def)| ty::ReEarlyBound(def.lifetime.id,
subst::TypeSpace,
- i,
+ i as u32,
def.lifetime.name))
.collect();
.iter()
.enumerate()
.map(|(i, def)| ty::mk_param(ccx.tcx, subst::TypeSpace,
- i, local_def(def.id)))
+ i as u32, local_def(def.id)))
.collect();
// ...and also create generics synthesized from the associated types.
};
let pty = Polytype {
generics: ty_generics,
- ty: ty::mk_bare_fn(ccx.tcx, Some(local_def(it.id)), tofd)
+ ty: ty::mk_bare_fn(ccx.tcx, Some(local_def(it.id)), ccx.tcx.mk_bare_fn(tofd))
};
debug!("type of {} (id {}) is {}",
token::get_ident(it.ident),
generics,
DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_enum(tcx, local_def(it.id), substs);
+ let t = ty::mk_enum(tcx, local_def(it.id), tcx.mk_substs(substs));
let pty = Polytype {
generics: ty_generics,
ty: t
generics,
DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_struct(tcx, local_def(it.id), substs);
+ let t = ty::mk_struct(tcx, local_def(it.id), tcx.mk_substs(substs));
let pty = Polytype {
generics: ty_generics,
ty: t
fn ty_generics_for_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
trait_id: ast::NodeId,
- substs: &subst::Substs<'tcx>,
+ substs: &'tcx subst::Substs<'tcx>,
ast_generics: &ast::Generics,
items: &[ast::TraitItem])
-> ty::Generics<'tcx>
ccx,
subst::AssocSpace,
&associated_type.ty_param,
- generics.types.len(subst::AssocSpace),
+ generics.types.len(subst::AssocSpace) as u32,
Some(local_def(trait_id)));
ccx.tcx.ty_param_defs.borrow_mut().insert(associated_type.ty_param.id,
def.clone());
let self_trait_ref =
Rc::new(ty::Binder(ty::TraitRef { def_id: local_def(trait_id),
- substs: (*substs).clone() }));
+ substs: substs }));
let def = ty::TypeParameterDef {
space: subst::SelfSpace,
create_type_parameters_for_associated_types)
}
-// Add the Sized bound, unless the type parameter is marked as `Sized?`.
+// Add the Sized bound, unless the type parameter is marked as `?Sized`.
fn add_unsized_bound<'tcx,AC>(this: &AC,
- unbound: &Option<ast::TraitRef>,
bounds: &mut ty::BuiltinBounds,
- desc: &str,
+ ast_bounds: &[ast::TyParamBound],
span: Span)
where AC: AstConv<'tcx> {
+ // Try to find an unbound in bounds.
+ let mut unbound = None;
+ for ab in ast_bounds.iter() {
+ if let &ast::TraitTyParamBound(ref ptr, ast::TraitBoundModifier::Maybe) = ab {
+ if unbound.is_none() {
+ assert!(ptr.bound_lifetimes.is_empty());
+ unbound = Some(ptr.trait_ref.clone());
+ } else {
+ this.tcx().sess.span_err(span, "type parameter has more than one relaxed default \
+ bound, only one is supported");
+ }
+ }
+ }
+
let kind_id = this.tcx().lang_items.require(SizedTraitLangItem);
match unbound {
- &Some(ref tpb) => {
+ Some(ref tpb) => {
// FIXME(#8559) currently requires the unbound to be built-in.
let trait_def_id = ty::trait_ref_to_def_id(this.tcx(), tpb);
match kind_id {
Ok(kind_id) if trait_def_id != kind_id => {
this.tcx().sess.span_warn(span,
- format!("default bound relaxed \
- for a {}, but this \
- does nothing because \
- the given bound is not \
- a default. \
- Only `Sized?` is \
- supported",
- desc)[]);
+ "default bound relaxed for a type parameter, but \
+ this does nothing because the given bound is not \
+ a default. Only `?Sized` is supported");
ty::try_add_builtin_trait(this.tcx(),
kind_id,
bounds);
ty::try_add_builtin_trait(this.tcx(), kind_id.unwrap(), bounds);
}
// No lang item for Sized, so we can't add it as a bound.
- &None => {}
+ None => {}
}
}
.collect();
let def = ty::RegionParameterDef { name: l.lifetime.name,
space: space,
- index: i,
+ index: i as u32,
def_id: local_def(l.lifetime.id),
bounds: bounds };
debug!("ty_generics: def for region param: {}", def);
let def = get_or_create_type_parameter_def(&gcx,
space,
param,
- i,
+ i as u32,
None);
debug!("ty_generics: def for type param: {}, {}",
def.repr(this.tcx()),
.get_slice(space)
.iter() {
assert!(result.types.get_slice(space).len() ==
- associated_type_param.index);
+ associated_type_param.index as uint);
debug!("ty_generics: def for associated type: {}, {}",
associated_type_param.repr(this.tcx()),
space);
for bound in bound_pred.bounds.iter() {
match bound {
- &ast::TyParamBound::TraitTyParamBound(ref poly_trait_ref) => {
+ &ast::TyParamBound::TraitTyParamBound(ref poly_trait_ref, _) => {
let trait_ref = astconv::instantiate_poly_trait_ref(
this,
&ExplicitRscope,
for bound in param.bounds.iter() {
// In the above example, `ast_trait_ref` is `Iterator`.
let ast_trait_ref = match *bound {
- ast::TraitTyParamBound(ref r) => r,
+ ast::TraitTyParamBound(ref r, _) => r,
ast::RegionTyParamBound(..) => { continue; }
};
name: associated_type_def.name,
def_id: associated_type_def.def_id,
space: space,
- index: types.len() + index,
+ index: types.len() as u32 + index,
bounds: ty::ParamBounds {
builtin_bounds: associated_type_def.bounds.builtin_bounds,
fn get_or_create_type_parameter_def<'tcx,AC>(this: &AC,
space: subst::ParamSpace,
param: &ast::TyParam,
- index: uint,
+ index: u32,
associated_with: Option<ast::DefId>)
-> ty::TypeParameterDef<'tcx>
where AC: AstConv<'tcx>
param.ident.name,
param_ty,
param.bounds[],
- ¶m.unbound,
param.span);
let default = match param.default {
None => None,
name_of_bounded_thing: ast::Name,
param_ty: ty::ParamTy,
ast_bounds: &[ast::TyParamBound],
- unbound: &Option<ast::TraitRef>,
span: Span)
-> ty::ParamBounds<'tcx>
where AC: AstConv<'tcx> {
param_ty,
ast_bounds);
-
add_unsized_bound(this,
- unbound,
&mut param_bounds.builtin_bounds,
- "type parameter",
+ ast_bounds,
span);
check_bounds_compatible(this.tcx(),
let t_fn = ty::mk_bare_fn(
ccx.tcx,
None,
- ty::BareFnTy {
+ ccx.tcx.mk_bare_fn(ty::BareFnTy {
abi: abi,
unsafety: ast::Unsafety::Unsafe,
sig: ty::Binder(ty::FnSig {inputs: input_tys,
- output: output,
- variadic: decl.variadic}),
- });
+ output: output,
+ variadic: decl.variadic})
+ }));
let pty = Polytype {
generics: ty_generics_for_fn_or_method,
ty: t_fn
}
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), ty::BareFnTy {
+ let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(ty::mk_nil(tcx)),
variadic: false
})
- });
+ }));
require_same_types(tcx, None, false, main_span, main_t, se_ty,
|| {
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), ty::BareFnTy {
+ let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(ty::mk_int()),
variadic: false
}),
- });
+ }));
require_same_types(tcx, None, false, start_span, start_t, se_ty,
|| {
/// A scope in which we generate anonymous, late-bound regions for
/// omitted regions. This occurs in function signatures.
pub struct BindingRscope {
- anon_bindings: Cell<uint>,
+ anon_bindings: Cell<u32>,
}
impl BindingRscope {
match item.node {
ast::ItemEnum(_, ref generics) |
ast::ItemStruct(_, ref generics) |
- ast::ItemTrait(_, ref generics, _, _, _) => {
+ ast::ItemTrait(_, ref generics, _, _) => {
for (i, p) in generics.lifetimes.iter().enumerate() {
let id = p.lifetime.id;
self.add_inferred(item.id, RegionParam, TypeSpace, i, id);
ty::ty_rptr(region, ref mt) => {
let contra = self.contravariant(variance);
- self.add_constraints_from_region(region, contra);
+ self.add_constraints_from_region(*region, contra);
self.add_constraints_from_mt(mt, variance);
}
}
}
- ty::ty_enum(def_id, ref substs) |
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) |
+ ty::ty_struct(def_id, substs) => {
let item_type = ty::lookup_item_type(self.tcx(), def_id);
let generics = &item_type.generics;
}
}
- ty::ty_bare_fn(_, ty::BareFnTy { ref sig, .. }) |
+ ty::ty_bare_fn(_, &ty::BareFnTy { ref sig, .. }) |
ty::ty_closure(box ty::ClosureTy {
ref sig,
store: ty::UniqTraitStore,
for p in type_param_defs.iter() {
let variance_decl =
self.declared_variance(p.def_id, def_id, TypeParam,
- p.space, p.index);
+ p.space, p.index as uint);
let variance_i = self.xform(variance, variance_decl);
- let substs_ty = *substs.types.get(p.space, p.index);
+ let substs_ty = *substs.types.get(p.space, p.index as uint);
self.add_constraints_from_ty(substs_ty, variance_i);
}
for p in region_param_defs.iter() {
let variance_decl =
self.declared_variance(p.def_id, def_id,
- RegionParam, p.space, p.index);
+ RegionParam, p.space, p.index as uint);
let variance_i = self.xform(variance, variance_decl);
- let substs_r = *substs.regions().get(p.space, p.index);
+ let substs_r = *substs.regions().get(p.space, p.index as uint);
self.add_constraints_from_region(substs_r, variance_i);
}
}
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
- let (bounds, default_unbound) = trait_def.bounds.clean(cx);
+ let bounds = trait_def.bounds.clean(cx);
clean::Trait {
unsafety: def.unsafety,
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
- default_unbound: default_unbound
}
}
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
- clean::TraitBound(ty) => ty,
+ clean::TraitBound(polyt, _) => polyt.trait_,
clean::RegionBound(..) => unreachable!(),
}
}),
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
-use rustc::middle::subst;
-use rustc::middle::subst::VecPerParamSpace;
+use rustc::middle::subst::{mod, ParamSpace, VecPerParamSpace};
use rustc::middle::ty;
use rustc::middle::stability;
use rustc::session::config;
pub did: ast::DefId,
pub bounds: Vec<TyParamBound>,
pub default: Option<Type>,
- /// An optional default bound on the parameter which is unbound, like `Sized?`
- pub default_unbound: Option<Type>
}
impl Clean<TyParam> for ast::TyParam {
did: ast::DefId { krate: ast::LOCAL_CRATE, node: self.id },
bounds: self.bounds.clean(cx),
default: self.default.clean(cx),
- default_unbound: self.unbound.clean(cx)
}
}
}
fn clean(&self, cx: &DocContext) -> TyParam {
cx.external_typarams.borrow_mut().as_mut().unwrap()
.insert(self.def_id, self.name.clean(cx));
- let (bounds, default_unbound) = self.bounds.clean(cx);
+ let bounds = self.bounds.clean(cx);
TyParam {
name: self.name.clean(cx),
did: self.def_id,
bounds: bounds,
default: self.default.clean(cx),
- default_unbound: default_unbound
}
}
}
#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
pub enum TyParamBound {
RegionBound(Lifetime),
- TraitBound(Type)
+ TraitBound(PolyTrait, ast::TraitBoundModifier)
}
impl Clean<TyParamBound> for ast::TyParamBound {
fn clean(&self, cx: &DocContext) -> TyParamBound {
match *self {
ast::RegionTyParamBound(lt) => RegionBound(lt.clean(cx)),
- ast::TraitTyParamBound(ref t) => TraitBound(t.clean(cx)),
+ ast::TraitTyParamBound(ref t, modifier) => TraitBound(t.clean(cx), modifier),
}
}
}
}
}
-fn external_path(cx: &DocContext, name: &str, substs: &subst::Substs) -> Path {
+fn external_path_params(cx: &DocContext, trait_did: Option<ast::DefId>,
+ substs: &subst::Substs) -> PathParameters {
+ use rustc::middle::ty::sty;
let lifetimes = substs.regions().get_slice(subst::TypeSpace)
.iter()
.filter_map(|v| v.clean(cx))
.collect();
let types = substs.types.get_slice(subst::TypeSpace).to_vec();
- let types = types.clean(cx);
+
+ match (trait_did, cx.tcx_opt()) {
+ // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C
+ (Some(did), Some(ref tcx)) if tcx.lang_items.fn_trait_kind(did).is_some() => {
+ assert_eq!(types.len(), 2);
+ let inputs = match types[0].sty {
+ sty::ty_tup(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
+ _ => {
+ return PathParameters::AngleBracketed {
+ lifetimes: lifetimes,
+ types: types.clean(cx)
+ }
+ }
+ };
+ let output = match types[1].sty {
+ sty::ty_tup(ref v) if v.is_empty() => None, // -> ()
+ _ => Some(types[1].clean(cx))
+ };
+ PathParameters::Parenthesized {
+ inputs: inputs,
+ output: output
+ }
+ },
+ (_, _) => {
+ PathParameters::AngleBracketed {
+ lifetimes: lifetimes,
+ types: types.clean(cx),
+ }
+ }
+ }
+}
+
+// trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar
+// from Fn<(A, B,), C> to Fn(A, B) -> C
+fn external_path(cx: &DocContext, name: &str, trait_did: Option<ast::DefId>,
+ substs: &subst::Substs) -> Path {
Path {
global: false,
segments: vec![PathSegment {
name: name.to_string(),
- lifetimes: lifetimes,
- types: types,
+ params: external_path_params(cx, trait_did, substs)
}],
}
}
let (did, path) = match *self {
ty::BoundSend =>
(tcx.lang_items.send_trait().unwrap(),
- external_path(cx, "Send", &empty)),
+ external_path(cx, "Send", None, &empty)),
ty::BoundSized =>
(tcx.lang_items.sized_trait().unwrap(),
- external_path(cx, "Sized", &empty)),
+ external_path(cx, "Sized", None, &empty)),
ty::BoundCopy =>
(tcx.lang_items.copy_trait().unwrap(),
- external_path(cx, "Copy", &empty)),
+ external_path(cx, "Copy", None, &empty)),
ty::BoundSync =>
(tcx.lang_items.sync_trait().unwrap(),
- external_path(cx, "Sync", &empty)),
+ external_path(cx, "Sync", None, &empty)),
};
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did,
(fqn, TypeTrait));
- TraitBound(ResolvedPath {
- path: path,
- typarams: None,
- did: did,
- })
+ TraitBound(PolyTrait {
+ trait_: ResolvedPath {
+ path: path,
+ typarams: None,
+ did: did,
+ },
+ lifetimes: vec![]
+ }, ast::TraitBoundModifier::None)
}
}
let fqn = fqn.into_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
let path = external_path(cx, fqn.last().unwrap().as_slice(),
- &self.substs);
+ Some(self.def_id), self.substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(self.def_id,
(fqn, TypeTrait));
- TraitBound(ResolvedPath {
- path: path,
- typarams: None,
- did: self.def_id,
- })
+
+ debug!("ty::TraitRef\n substs.types(TypeSpace): {}\n",
+ self.substs.types.get_slice(ParamSpace::TypeSpace));
+
+ // collect any late bound regions
+ let mut late_bounds = vec![];
+ for &ty_s in self.substs.types.get_slice(ParamSpace::TypeSpace).iter() {
+ use rustc::middle::ty::{Region, sty};
+ if let sty::ty_tup(ref ts) = ty_s.sty {
+ for &ty_s in ts.iter() {
+ if let sty::ty_rptr(ref reg, _) = ty_s.sty {
+ if let &Region::ReLateBound(_, _) = *reg {
+ debug!(" hit an ReLateBound {}", reg);
+ if let Some(lt) = reg.clean(cx) {
+ late_bounds.push(lt)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ TraitBound(PolyTrait {
+ trait_: ResolvedPath { path: path, typarams: None, did: self.def_id, },
+ lifetimes: late_bounds
+ }, ast::TraitBoundModifier::None)
}
}
-// Returns (bounds, default_unbound)
-impl<'tcx> Clean<(Vec<TyParamBound>, Option<Type>)> for ty::ParamBounds<'tcx> {
- fn clean(&self, cx: &DocContext) -> (Vec<TyParamBound>, Option<Type>) {
+impl<'tcx> Clean<Vec<TyParamBound>> for ty::ParamBounds<'tcx> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut v = Vec::new();
- let mut has_sized_bound = false;
- for b in self.builtin_bounds.iter() {
- if b != ty::BoundSized {
- v.push(b.clean(cx));
- } else {
- has_sized_bound = true;
- }
- }
for t in self.trait_bounds.iter() {
v.push(t.clean(cx));
}
for r in self.region_bounds.iter().filter_map(|r| r.clean(cx)) {
v.push(RegionBound(r));
}
- if has_sized_bound {
- (v, None)
- } else {
- let ty = match ty::BoundSized.clean(cx) {
- TraitBound(ty) => ty,
- _ => unreachable!()
- };
- (v, Some(ty))
- }
+ v
}
}
fn clean(&self, cx: &DocContext) -> Option<Vec<TyParamBound>> {
let mut v = Vec::new();
v.extend(self.regions().iter().filter_map(|r| r.clean(cx)).map(RegionBound));
- v.extend(self.types.iter().map(|t| TraitBound(t.clean(cx))));
+ v.extend(self.types.iter().map(|t| TraitBound(PolyTrait {
+ trait_: t.clean(cx),
+ lifetimes: vec![]
+ }, ast::TraitBoundModifier::None)));
if v.len() > 0 {Some(v)} else {None}
}
}
}
#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
-pub struct WherePredicate {
- pub ty: Type,
- pub bounds: Vec<TyParamBound>
+pub enum WherePredicate {
+ BoundPredicate { ty: Type, bounds: Vec<TyParamBound> },
+ RegionPredicate { lifetime: Lifetime, bounds: Vec<Lifetime>},
+ // FIXME (#20041)
+ EqPredicate
}
impl Clean<WherePredicate> for ast::WherePredicate {
fn clean(&self, cx: &DocContext) -> WherePredicate {
match *self {
ast::WherePredicate::BoundPredicate(ref wbp) => {
- WherePredicate {
+ WherePredicate::BoundPredicate {
ty: wbp.bounded_ty.clean(cx),
bounds: wbp.bounds.clean(cx)
}
}
- // FIXME(#20048)
- _ => {
- unimplemented!();
+
+ ast::WherePredicate::RegionPredicate(ref wrp) => {
+ WherePredicate::RegionPredicate {
+ lifetime: wrp.lifetime.clean(cx),
+ bounds: wrp.bounds.clean(cx)
+ }
+ }
+
+ ast::WherePredicate::EqPredicate(_) => {
+ WherePredicate::EqPredicate
}
}
}
pub items: Vec<TraitMethod>,
pub generics: Generics,
pub bounds: Vec<TyParamBound>,
- /// An optional default bound not required for `Self`, like `Sized?`
- pub default_unbound: Option<Type>
}
impl Clean<Item> for doctree::Trait {
items: self.items.clean(cx),
generics: self.generics.clean(cx),
bounds: self.bounds.clean(cx),
- default_unbound: self.default_unbound.clean(cx)
}),
}
}
}
}
-impl Clean<Type> for ast::PolyTraitRef {
- fn clean(&self, cx: &DocContext) -> Type {
- self.trait_ref.clean(cx)
+impl Clean<PolyTrait> for ast::PolyTraitRef {
+ fn clean(&self, cx: &DocContext) -> PolyTrait {
+ PolyTrait {
+ trait_: self.trait_ref.clean(cx),
+ lifetimes: self.bound_lifetimes.clean(cx)
+ }
}
}
}
}
+/// A trait reference, which may have higher ranked lifetimes.
+#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
+pub struct PolyTrait {
+ pub trait_: Type,
+ pub lifetimes: Vec<Lifetime>
+}
+
/// A representation of a Type suitable for hyperlinking purposes. Ideally one can get the original
/// type out of the AST/ty::ctxt given one of these, if more information is needed. Most importantly
/// it does not preserve mutability or boxes.
ty::RegionTraitStore(..) => Closure(decl),
}
}
- ty::ty_struct(did, ref substs) |
- ty::ty_enum(did, ref substs) |
+ ty::ty_struct(did, substs) |
+ ty::ty_enum(did, substs) |
ty::ty_trait(box ty::TyTrait {
- principal: ty::Binder(ty::TraitRef { def_id: did, ref substs }),
+ principal: ty::Binder(ty::TraitRef { def_id: did, substs }),
.. }) =>
{
let fqn = csearch::get_item_path(cx.tcx(), did);
_ => TypeEnum,
};
let path = external_path(cx, fqn.last().unwrap().to_string().as_slice(),
- substs);
+ None, substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
ResolvedPath {
path: path,
}
#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
-pub struct PathSegment {
- pub name: String,
- pub lifetimes: Vec<Lifetime>,
- pub types: Vec<Type>,
+pub enum PathParameters {
+ AngleBracketed {
+ lifetimes: Vec<Lifetime>,
+ types: Vec<Type>,
+ },
+ Parenthesized {
+ inputs: Vec<Type>,
+ output: Option<Type>
+ }
}
-impl Clean<PathSegment> for ast::PathSegment {
- fn clean(&self, cx: &DocContext) -> PathSegment {
- let (lifetimes, types) = match self.parameters {
+impl Clean<PathParameters> for ast::PathParameters {
+ fn clean(&self, cx: &DocContext) -> PathParameters {
+ match *self {
ast::AngleBracketedParameters(ref data) => {
- (data.lifetimes.clean(cx), data.types.clean(cx))
+ PathParameters::AngleBracketed {
+ lifetimes: data.lifetimes.clean(cx),
+ types: data.types.clean(cx)
+ }
}
ast::ParenthesizedParameters(ref data) => {
- // FIXME -- rustdoc should be taught about Foo() notation
- let inputs = Tuple(data.inputs.clean(cx));
- let output = data.output.as_ref().map(|t| t.clean(cx)).unwrap_or(Tuple(Vec::new()));
- (Vec::new(), vec![inputs, output])
+ PathParameters::Parenthesized {
+ inputs: data.inputs.clean(cx),
+ output: data.output.clean(cx)
+ }
}
- };
+ }
+ }
+}
+#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
+pub struct PathSegment {
+ pub name: String,
+ pub params: PathParameters
+}
+
+impl Clean<PathSegment> for ast::PathSegment {
+ fn clean(&self, cx: &DocContext) -> PathSegment {
PathSegment {
name: self.identifier.clean(cx),
- lifetimes: lifetimes,
- types: types,
+ params: self.parameters.clean(cx)
}
}
}
},
bounds: vec![],
default: None,
- default_unbound: None
}),
visibility: None,
def_id: self.def_id,
global: false,
segments: vec![PathSegment {
name: name.to_string(),
- lifetimes: vec![],
- types: vec![t.clean(cx)],
+ params: PathParameters::AngleBracketed {
+ lifetimes: vec![],
+ types: vec![t.clean(cx)],
+ }
}],
},
}
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
-use arena::TypedArena;
use visit_ast::RustdocVisitor;
use clean;
let mut forest = ast_map::Forest::new(krate);
let ast_map = driver::assign_node_ids_and_map(&sess, &mut forest);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let ty::CrateAnalysis {
exported_items, public_items, ty_cx, ..
- } = driver::phase_3_run_analysis_passes(sess, ast_map, &type_arena, name);
+ } = driver::phase_3_run_analysis_passes(sess, ast_map, &arenas, name);
let ctxt = DocContext {
krate: ty_cx.map.krate(),
pub whence: Span,
pub vis: ast::Visibility,
pub stab: Option<attr::Stability>,
- pub default_unbound: Option<ast::TraitRef> // FIXME(tomjakubowski)
}
pub struct Impl {
if i > 0 {
try!(f.write(", ".as_bytes()))
}
- if let Some(ref unbound) = tp.default_unbound {
- try!(write!(f, "{}? ", unbound));
- };
try!(f.write(tp.name.as_bytes()));
if tp.bounds.len() > 0 {
if i > 0 {
try!(f.write(", ".as_bytes()));
}
- let bounds = pred.bounds.as_slice();
- try!(write!(f, "{}: {}", pred.ty, TyParamBounds(bounds)));
+ match pred {
+ &clean::WherePredicate::BoundPredicate { ref ty, ref bounds } => {
+ let bounds = bounds.as_slice();
+ try!(write!(f, "{}: {}", ty, TyParamBounds(bounds)));
+ }
+ &clean::WherePredicate::RegionPredicate { ref lifetime,
+ ref bounds } => {
+ try!(write!(f, "{}: ", lifetime));
+ for (i, lifetime) in bounds.iter().enumerate() {
+ if i > 0 {
+ try!(f.write(" + ".as_bytes()));
+ }
+
+ try!(write!(f, "{}", lifetime));
+ }
+ }
+ &clean::WherePredicate::EqPredicate => {
+ unimplemented!()
+ }
+ }
}
Ok(())
}
}
}
+impl fmt::Show for clean::PolyTrait {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if self.lifetimes.len() > 0 {
+ try!(f.write("for<".as_bytes()));
+ for (i, lt) in self.lifetimes.iter().enumerate() {
+ if i > 0 {
+ try!(f.write(", ".as_bytes()));
+ }
+ try!(write!(f, "{}", lt));
+ }
+ try!(f.write("> ".as_bytes()));
+ }
+ write!(f, "{}", self.trait_)
+ }
+}
+
impl fmt::Show for clean::TyParamBound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
clean::RegionBound(ref lt) => {
write!(f, "{}", *lt)
}
- clean::TraitBound(ref ty) => {
- write!(f, "{}", *ty)
+ clean::TraitBound(ref ty, modifier) => {
+ let modifier_str = match modifier {
+ ast::TraitBoundModifier::None => "",
+ ast::TraitBoundModifier::Maybe => "?",
+ };
+ write!(f, "{}{}", modifier_str, *ty)
}
}
}
}
-impl fmt::Show for clean::Path {
+impl fmt::Show for clean::PathParameters {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- if self.global {
- try!(f.write("::".as_bytes()))
- }
-
- for (i, seg) in self.segments.iter().enumerate() {
- if i > 0 {
- try!(f.write("::".as_bytes()))
- }
- try!(f.write(seg.name.as_bytes()));
-
- if seg.lifetimes.len() > 0 || seg.types.len() > 0 {
- try!(f.write("<".as_bytes()));
- let mut comma = false;
- for lifetime in seg.lifetimes.iter() {
- if comma {
- try!(f.write(", ".as_bytes()));
+ match *self {
+ clean::PathParameters::AngleBracketed { ref lifetimes, ref types } => {
+ if lifetimes.len() > 0 || types.len() > 0 {
+ try!(f.write("<".as_bytes()));
+ let mut comma = false;
+ for lifetime in lifetimes.iter() {
+ if comma {
+ try!(f.write(", ".as_bytes()));
+ }
+ comma = true;
+ try!(write!(f, "{}", *lifetime));
}
- comma = true;
- try!(write!(f, "{}", *lifetime));
+ for ty in types.iter() {
+ if comma {
+ try!(f.write(", ".as_bytes()));
+ }
+ comma = true;
+ try!(write!(f, "{}", *ty));
+ }
+ try!(f.write(">".as_bytes()));
}
- for ty in seg.types.iter() {
+ }
+ clean::PathParameters::Parenthesized { ref inputs, ref output } => {
+ try!(f.write("(".as_bytes()));
+ let mut comma = false;
+ for ty in inputs.iter() {
if comma {
try!(f.write(", ".as_bytes()));
}
comma = true;
try!(write!(f, "{}", *ty));
}
- try!(f.write(">".as_bytes()));
+ try!(f.write(")".as_bytes()));
+ if let Some(ref ty) = *output {
+ try!(f.write(" -> ".as_bytes()));
+ try!(write!(f, "{}", ty));
+ }
+ }
+ }
+ Ok(())
+ }
+}
+
+impl fmt::Show for clean::PathSegment {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(f.write(self.name.as_bytes()));
+ write!(f, "{}", self.params)
+ }
+}
+
+impl fmt::Show for clean::Path {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if self.global {
+ try!(f.write("::".as_bytes()))
+ }
+
+ for (i, seg) in self.segments.iter().enumerate() {
+ if i > 0 {
+ try!(f.write("::".as_bytes()))
}
+ try!(write!(f, "{}", seg));
}
Ok(())
}
G: FnOnce(&render::Cache) -> Option<(Vec<String>, ItemType)>,
{
// The generics will get written to both the title and link
- let mut generics = String::new();
let last = path.segments.last().unwrap();
- if last.lifetimes.len() > 0 || last.types.len() > 0 {
- let mut counter = 0u;
- generics.push_str("<");
- for lifetime in last.lifetimes.iter() {
- if counter > 0 { generics.push_str(", "); }
- counter += 1;
- generics.push_str(format!("{}", *lifetime).as_slice());
- }
- for ty in last.types.iter() {
- if counter > 0 { generics.push_str(", "); }
- counter += 1;
- generics.push_str(format!("{}", *ty).as_slice());
- }
- generics.push_str(">");
- }
+ let generics = format!("{}", last.params);
let loc = CURRENT_LOCATION_KEY.with(|l| l.borrow().clone());
let cache = cache();
try!(resolved_path(f, did, path, false));
tybounds(f, typarams)
}
- clean::PolyTraitRef(ref bounds) => {
- for (i, bound) in bounds.iter().enumerate() {
- if i != 0 {
- try!(write!(f, " + "));
- }
- try!(write!(f, "{}", *bound));
- }
- Ok(())
- }
clean::Infer => write!(f, "_"),
clean::Self(..) => f.write("Self".as_bytes()),
clean::Primitive(prim) => primitive_link(f, prim, prim.to_string()),
lifetimes = if decl.lifetimes.len() == 0 {
"".to_string()
} else {
- format!("<{:#}>", decl.lifetimes)
+ format!("for <{:#}>", decl.lifetimes)
},
args = decl.decl.inputs,
arrow = decl.decl.output,
for bound in decl.bounds.iter() {
match *bound {
clean::RegionBound(..) => {}
- clean::TraitBound(ref t) => {
+ clean::TraitBound(ref t, modifier) => {
if ret.len() == 0 {
ret.push_str(": ");
} else {
ret.push_str(" + ");
}
+ if modifier == ast::TraitBoundModifier::Maybe {
+ ret.push_str("?");
+ }
ret.push_str(format!("{}",
*t).as_slice());
}
lifetimes = if decl.lifetimes.len() == 0 {
"".to_string()
} else {
- format!("<{:#}>", decl.lifetimes)
+ format!("for <{:#}>", decl.lifetimes)
},
args = decl.decl.inputs,
bounds = if decl.bounds.len() == 0 {
}
}
}
+ clean::PolyTraitRef(ref bounds) => {
+ for (i, bound) in bounds.iter().enumerate() {
+ if i != 0 {
+ try!(write!(f, " + "));
+ }
+ try!(write!(f, "{}", *bound));
+ }
+ Ok(())
+ }
clean::QPath { ref name, ref self_type, ref trait_ } => {
write!(f, "<{} as {}>::{}", self_type, trait_, name)
}
global: false,
segments: vec!(clean::PathSegment {
name: self.name.clone(),
- lifetimes: Vec::new(),
- types: Vec::new(),
+ params: clean::PathParameters::AngleBracketed {
+ lifetimes: Vec::new(),
+ types: Vec::new(),
+ }
})
};
resolved_path(f, did, &path, false)
};
// Transform the contents of the header into a hyphenated string
- let id = s.words().map(|s| s.to_ascii_lower())
+ let id = s.words().map(|s| s.to_ascii_lowercase())
.collect::<Vec<String>>().connect("-");
// This is a terrible hack working around how hoedown gives us rendered
should_fail: false,
no_run: false,
ignore: false,
- rust: false,
+ rust: true, // NB This used to be `notrust = false`
test_harness: false,
}
}
}
}
- data.rust |= !seen_other_tags || seen_rust_tags;
+ data.rust &= !seen_other_tags || seen_rust_tags;
data
}
})
}
- t("", false,false,false,true,false);
- t("rust", false,false,false,true,false);
- t("sh", false,false,false,false,false);
- t("ignore", false,false,true,true,false);
- t("should_fail", true,false,false,true,false);
- t("no_run", false,true,false,true,false);
- t("test_harness", false,false,false,true,true);
- t("{.no_run .example}", false,true,false,true,false);
- t("{.sh .should_fail}", true,false,false,true,false);
- t("{.example .rust}", false,false,false,true,false);
- t("{.test_harness .rust}", false,false,false,true,true);
+ // marker | should_fail | no_run | ignore | rust | test_harness
+ t("", false, false, false, true, false);
+ t("rust", false, false, false, true, false);
+ t("sh", false, false, false, false, false);
+ t("ignore", false, false, true, true, false);
+ t("should_fail", true, false, false, true, false);
+ t("no_run", false, true, false, true, false);
+ t("test_harness", false, false, false, true, true);
+ t("{.no_run .example}", false, true, false, true, false);
+ t("{.sh .should_fail}", true, false, false, true, false);
+ t("{.example .rust}", false, false, false, true, false);
+ t("{.test_harness .rust}", false, false, false, true, true);
}
#[test]
// Add all the static files. These may already exist, but we just
// overwrite them anyway to make sure that they're fresh and up-to-date.
try!(write(cx.dst.join("jquery.js"),
- include_bin!("static/jquery-2.1.0.min.js")));
- try!(write(cx.dst.join("main.js"), include_bin!("static/main.js")));
- try!(write(cx.dst.join("playpen.js"), include_bin!("static/playpen.js")));
- try!(write(cx.dst.join("main.css"), include_bin!("static/main.css")));
+ include_bytes!("static/jquery-2.1.0.min.js")));
+ try!(write(cx.dst.join("main.js"), include_bytes!("static/main.js")));
+ try!(write(cx.dst.join("playpen.js"), include_bytes!("static/playpen.js")));
+ try!(write(cx.dst.join("main.css"), include_bytes!("static/main.css")));
try!(write(cx.dst.join("normalize.css"),
- include_bin!("static/normalize.css")));
+ include_bytes!("static/normalize.css")));
try!(write(cx.dst.join("FiraSans-Regular.woff"),
- include_bin!("static/FiraSans-Regular.woff")));
+ include_bytes!("static/FiraSans-Regular.woff")));
try!(write(cx.dst.join("FiraSans-Medium.woff"),
- include_bin!("static/FiraSans-Medium.woff")));
+ include_bytes!("static/FiraSans-Medium.woff")));
try!(write(cx.dst.join("Heuristica-Italic.woff"),
- include_bin!("static/Heuristica-Italic.woff")));
+ include_bytes!("static/Heuristica-Italic.woff")));
try!(write(cx.dst.join("SourceSerifPro-Regular.woff"),
- include_bin!("static/SourceSerifPro-Regular.woff")));
+ include_bytes!("static/SourceSerifPro-Regular.woff")));
try!(write(cx.dst.join("SourceSerifPro-Bold.woff"),
- include_bin!("static/SourceSerifPro-Bold.woff")));
+ include_bytes!("static/SourceSerifPro-Bold.woff")));
try!(write(cx.dst.join("SourceCodePro-Regular.woff"),
- include_bin!("static/SourceCodePro-Regular.woff")));
+ include_bytes!("static/SourceCodePro-Regular.woff")));
try!(write(cx.dst.join("SourceCodePro-Semibold.woff"),
- include_bin!("static/SourceCodePro-Semibold.woff")));
+ include_bytes!("static/SourceCodePro-Semibold.woff")));
fn collect(path: &Path, krate: &str,
key: &str) -> io::IoResult<Vec<String>> {
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
t: &clean::Trait) -> fmt::Result {
let mut bounds = String::new();
- if let Some(ref ty) = t.default_unbound {
- bounds.push_str(format!(" for {}?", ty).as_slice());
- }
if t.bounds.len() > 0 {
if bounds.len() > 0 {
bounds.push(' ');
};
om.constants.push(s);
},
- ast::ItemTrait(unsafety, ref gen, ref def_ub, ref b, ref items) => {
+ ast::ItemTrait(unsafety, ref gen, ref b, ref items) => {
let t = Trait {
unsafety: unsafety,
name: name,
whence: item.span,
vis: item.vis,
stab: self.stability(item.id),
- default_unbound: def_ub.clone()
};
om.traits.push(t);
},
//! * `String`: equivalent to rust's `String`
//! * `Array`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the
//! same array
-//! * `Object`: equivalent to rust's `Treemap<String, json::Json>`
+//! * `Object`: equivalent to rust's `BTreeMap<String, json::Json>`
//! * `Null`
//!
//! An object is a series of string keys mapping to values, in `"key": value` format.
#![allow(deprecated)]
use core::kinds::Sized;
-use fmt;
use iter::IteratorExt;
-use mem;
use ops::FnMut;
-use option::Option;
-use option::Option::{Some, None};
-use slice::{SliceExt, AsSlice};
-use str::{Str, StrExt};
-use string::{String, IntoString};
+use slice::SliceExt;
+use str::StrExt;
+use string::String;
use vec::Vec;
-/// Datatype to hold one ascii character. It wraps a `u8`, with the highest bit always zero.
-#[deriving(Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Hash)]
-pub struct Ascii { chr: u8 }
-
-impl Ascii {
- /// Converts an ascii character into a `u8`.
- #[inline]
- #[unstable = "recently renamed"]
- pub fn as_byte(&self) -> u8 {
- self.chr
- }
-
- /// Deprecated: use `as_byte` instead.
- #[deprecated = "use as_byte"]
- pub fn to_byte(self) -> u8 {
- self.as_byte()
- }
-
- /// Converts an ascii character into a `char`.
- #[inline]
- #[unstable = "recently renamed"]
- pub fn as_char(&self) -> char {
- self.chr as char
- }
-
- /// Deprecated: use `as_char` instead.
- #[deprecated = "use as_char"]
- pub fn to_char(self) -> char {
- self.as_char()
- }
-
- /// Convert to lowercase.
- #[inline]
- #[stable]
- pub fn to_lowercase(&self) -> Ascii {
- Ascii{chr: ASCII_LOWER_MAP[self.chr as uint]}
- }
-
- /// Convert to uppercase.
- #[inline]
- #[stable]
- pub fn to_uppercase(&self) -> Ascii {
- Ascii{chr: ASCII_UPPER_MAP[self.chr as uint]}
- }
-
- /// Compares two ascii characters of equality, ignoring case.
- #[inline]
- #[deprecated = "normalize with to_lowercase"]
- pub fn eq_ignore_case(self, other: Ascii) -> bool {
- ASCII_LOWER_MAP[self.chr as uint] == ASCII_LOWER_MAP[other.chr as uint]
- }
-
- // the following methods are like ctype, and the implementation is inspired by musl
-
- /// Check if the character is a letter (a-z, A-Z)
- #[inline]
- #[stable]
- pub fn is_alphabetic(&self) -> bool {
- (self.chr >= 0x41 && self.chr <= 0x5A) || (self.chr >= 0x61 && self.chr <= 0x7A)
- }
-
- /// Check if the character is a number (0-9)
- #[inline]
- #[unstable = "may be renamed"]
- pub fn is_digit(&self) -> bool {
- self.chr >= 0x30 && self.chr <= 0x39
- }
-
- /// Check if the character is a letter or number
- #[inline]
- #[stable]
- pub fn is_alphanumeric(&self) -> bool {
- self.is_alphabetic() || self.is_digit()
- }
-
- /// Check if the character is a space or horizontal tab
- #[inline]
- #[experimental = "likely to be removed"]
- pub fn is_blank(&self) -> bool {
- self.chr == b' ' || self.chr == b'\t'
- }
-
- /// Check if the character is a control character
- #[inline]
- #[stable]
- pub fn is_control(&self) -> bool {
- self.chr < 0x20 || self.chr == 0x7F
- }
-
- /// Checks if the character is printable (except space)
- #[inline]
- #[experimental = "unsure about naming, or whether this is needed"]
- pub fn is_graph(&self) -> bool {
- (self.chr - 0x21) < 0x5E
- }
-
- /// Checks if the character is printable (including space)
- #[inline]
- #[unstable = "unsure about naming"]
- pub fn is_print(&self) -> bool {
- (self.chr - 0x20) < 0x5F
- }
-
- /// Checks if the character is alphabetic and lowercase
- #[inline]
- #[stable]
- pub fn is_lowercase(&self) -> bool {
- (self.chr - b'a') < 26
- }
-
- /// Checks if the character is alphabetic and uppercase
- #[inline]
- #[stable]
- pub fn is_uppercase(&self) -> bool {
- (self.chr - b'A') < 26
- }
-
- /// Checks if the character is punctuation
- #[inline]
- #[stable]
- pub fn is_punctuation(&self) -> bool {
- self.is_graph() && !self.is_alphanumeric()
- }
-
- /// Checks if the character is a valid hex digit
- #[inline]
- #[stable]
- pub fn is_hex(&self) -> bool {
- self.is_digit() || ((self.chr | 32u8) - b'a') < 6
- }
-}
+/// Extension methods for ASCII-subset only operations on owned strings
+#[experimental = "would prefer to do this in a more general way"]
+pub trait OwnedAsciiExt {
+ /// Convert the string to ASCII upper case:
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ fn into_ascii_uppercase(self) -> Self;
-impl<'a> fmt::Show for Ascii {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- (self.chr as char).fmt(f)
- }
+ /// Convert the string to ASCII lower case:
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ fn into_ascii_lowercase(self) -> Self;
}
-/// Trait for converting into an ascii type.
-#[experimental = "may be replaced by generic conversion traits"]
-pub trait AsciiCast<T> for Sized? {
- /// Convert to an ascii type, panic on non-ASCII input.
- #[inline]
- fn to_ascii(&self) -> T {
- assert!(self.is_ascii());
- unsafe {self.to_ascii_nocheck()}
- }
-
- /// Convert to an ascii type, return None on non-ASCII input.
- #[inline]
- fn to_ascii_opt(&self) -> Option<T> {
- if self.is_ascii() {
- Some(unsafe { self.to_ascii_nocheck() })
- } else {
- None
- }
- }
-
- /// Convert to an ascii type, not doing any range asserts
- unsafe fn to_ascii_nocheck(&self) -> T;
-
- /// Check if convertible to ascii
+/// Extension methods for ASCII-subset only operations on string slices
+#[experimental = "would prefer to do this in a more general way"]
+pub trait AsciiExt<T = Self> for Sized? {
+ /// Check if within the ASCII range.
fn is_ascii(&self) -> bool;
-}
-#[experimental = "may be replaced by generic conversion traits"]
-impl<'a> AsciiCast<&'a[Ascii]> for [u8] {
- #[inline]
- unsafe fn to_ascii_nocheck(&self) -> &'a[Ascii] {
- mem::transmute(self)
- }
+ /// Makes a copy of the string in ASCII upper case:
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ fn to_ascii_uppercase(&self) -> T;
- #[inline]
- fn is_ascii(&self) -> bool {
- for b in self.iter() {
- if !b.is_ascii() { return false; }
- }
- true
- }
-}
+ /// Makes a copy of the string in ASCII lower case:
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ fn to_ascii_lowercase(&self) -> T;
-#[experimental = "may be replaced by generic conversion traits"]
-impl<'a> AsciiCast<&'a [Ascii]> for str {
- #[inline]
- unsafe fn to_ascii_nocheck(&self) -> &'a [Ascii] {
- mem::transmute(self)
- }
+ /// Check that two strings are an ASCII case-insensitive match.
+    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporary strings.
+ fn eq_ignore_ascii_case(&self, other: &Self) -> bool;
+}
+#[experimental = "would prefer to do this in a more general way"]
+impl AsciiExt<String> for str {
#[inline]
fn is_ascii(&self) -> bool {
self.bytes().all(|b| b.is_ascii())
}
-}
-#[experimental = "may be replaced by generic conversion traits"]
-impl AsciiCast<Ascii> for u8 {
#[inline]
- unsafe fn to_ascii_nocheck(&self) -> Ascii {
- Ascii{ chr: *self }
+ fn to_ascii_uppercase(&self) -> String {
+ // Vec<u8>::to_ascii_uppercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(self.as_bytes().to_ascii_uppercase()) }
}
#[inline]
- fn is_ascii(&self) -> bool {
- *self & 128 == 0u8
- }
-}
-
-#[experimental = "may be replaced by generic conversion traits"]
-impl AsciiCast<Ascii> for char {
- #[inline]
- unsafe fn to_ascii_nocheck(&self) -> Ascii {
- Ascii{ chr: *self as u8 }
+ fn to_ascii_lowercase(&self) -> String {
+ // Vec<u8>::to_ascii_lowercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(self.as_bytes().to_ascii_lowercase()) }
}
#[inline]
- fn is_ascii(&self) -> bool {
- *self as u32 - ('\x7F' as u32 & *self as u32) == 0
+ fn eq_ignore_ascii_case(&self, other: &str) -> bool {
+ self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
}
}
-/// Trait for copyless casting to an ascii vector.
-#[experimental = "may be replaced by generic conversion traits"]
-pub trait OwnedAsciiCast {
- /// Check if convertible to ascii
- fn is_ascii(&self) -> bool;
-
- /// Take ownership and cast to an ascii vector.
- /// # Panics
- ///
- /// Panic on non-ASCII input.
+#[experimental = "would prefer to do this in a more general way"]
+impl OwnedAsciiExt for String {
#[inline]
- fn into_ascii(self) -> Vec<Ascii> {
- assert!(self.is_ascii());
- unsafe {self.into_ascii_nocheck()}
+ fn into_ascii_uppercase(self) -> String {
+ // Vec<u8>::into_ascii_uppercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(self.into_bytes().into_ascii_uppercase()) }
}
- /// Take ownership and cast to an ascii vector. Return None on non-ASCII input.
#[inline]
- fn into_ascii_opt(self) -> Option<Vec<Ascii>> {
- if self.is_ascii() {
- Some(unsafe { self.into_ascii_nocheck() })
- } else {
- None
- }
+ fn into_ascii_lowercase(self) -> String {
+ // Vec<u8>::into_ascii_lowercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(self.into_bytes().into_ascii_lowercase()) }
}
-
- /// Take ownership and cast to an ascii vector.
- /// Does not perform validation checks.
- unsafe fn into_ascii_nocheck(self) -> Vec<Ascii>;
}
-#[experimental = "may be replaced by generic conversion traits"]
-impl OwnedAsciiCast for String {
+#[experimental = "would prefer to do this in a more general way"]
+impl AsciiExt<Vec<u8>> for [u8] {
#[inline]
fn is_ascii(&self) -> bool {
- self.as_slice().is_ascii()
+ self.iter().all(|b| b.is_ascii())
}
#[inline]
- unsafe fn into_ascii_nocheck(self) -> Vec<Ascii> {
- self.into_bytes().into_ascii_nocheck()
+ fn to_ascii_uppercase(&self) -> Vec<u8> {
+ self.iter().map(|b| b.to_ascii_uppercase()).collect()
}
-}
-#[experimental = "may be replaced by generic conversion traits"]
-impl OwnedAsciiCast for Vec<u8> {
#[inline]
- fn is_ascii(&self) -> bool {
- self.as_slice().is_ascii()
+ fn to_ascii_lowercase(&self) -> Vec<u8> {
+ self.iter().map(|b| b.to_ascii_lowercase()).collect()
}
#[inline]
- unsafe fn into_ascii_nocheck(self) -> Vec<Ascii> {
- let v = Vec::from_raw_parts(self.as_ptr() as *mut Ascii,
- self.len(),
- self.capacity());
-
- // We forget `self` to avoid freeing it at the end of the scope
- // Otherwise, the returned `Vec` would point to freed memory
- mem::forget(self);
- v
+ fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
+ self.len() == other.len() &&
+ self.iter().zip(other.iter()).all(|(a, b)| {
+ a.eq_ignore_ascii_case(b)
+ })
}
}
-/// Trait for converting an ascii type to a string. Needed to convert
-/// `&[Ascii]` to `&str`.
-#[experimental = "may be replaced by generic conversion traits"]
-pub trait AsciiStr for Sized? {
- /// Convert to a string.
- fn as_str_ascii<'a>(&'a self) -> &'a str;
-
- /// Deprecated: use `to_lowercase`
- #[deprecated="renamed `to_lowercase`"]
- fn to_lower(&self) -> Vec<Ascii>;
-
- /// Convert to vector representing a lower cased ascii string.
- #[deprecated = "use iterators instead"]
- fn to_lowercase(&self) -> Vec<Ascii>;
-
- /// Deprecated: use `to_uppercase`
- #[deprecated="renamed `to_uppercase`"]
- fn to_upper(&self) -> Vec<Ascii>;
-
- /// Convert to vector representing a upper cased ascii string.
- #[deprecated = "use iterators instead"]
- fn to_uppercase(&self) -> Vec<Ascii>;
-
- /// Compares two Ascii strings ignoring case.
- #[deprecated = "use iterators instead"]
- fn eq_ignore_case(&self, other: &[Ascii]) -> bool;
-}
-
-#[experimental = "may be replaced by generic conversion traits"]
-impl AsciiStr for [Ascii] {
- #[inline]
- fn as_str_ascii<'a>(&'a self) -> &'a str {
- unsafe { mem::transmute(self) }
- }
-
- #[inline]
- fn to_lower(&self) -> Vec<Ascii> {
- self.to_lowercase()
- }
-
- #[inline]
- fn to_lowercase(&self) -> Vec<Ascii> {
- self.iter().map(|a| a.to_lowercase()).collect()
- }
-
- #[inline]
- fn to_upper(&self) -> Vec<Ascii> {
- self.to_uppercase()
- }
-
- #[inline]
- fn to_uppercase(&self) -> Vec<Ascii> {
- self.iter().map(|a| a.to_uppercase()).collect()
- }
-
+#[experimental = "would prefer to do this in a more general way"]
+impl OwnedAsciiExt for Vec<u8> {
#[inline]
- fn eq_ignore_case(&self, other: &[Ascii]) -> bool {
- self.iter().zip(other.iter()).all(|(&a, &b)| a.eq_ignore_case(b))
+ fn into_ascii_uppercase(mut self) -> Vec<u8> {
+ for byte in self.iter_mut() {
+ *byte = byte.to_ascii_uppercase();
+ }
+ self
}
-}
-impl IntoString for Vec<Ascii> {
#[inline]
- fn into_string(self) -> String {
- unsafe { String::from_utf8_unchecked(self.into_bytes()) }
- }
-}
-
-/// Trait to convert to an owned byte vector by consuming self
-#[experimental = "may be replaced by generic conversion traits"]
-pub trait IntoBytes {
- /// Converts to an owned byte vector by consuming self
- fn into_bytes(self) -> Vec<u8>;
-}
-
-#[experimental = "may be replaced by generic conversion traits"]
-impl IntoBytes for Vec<Ascii> {
- fn into_bytes(self) -> Vec<u8> {
- unsafe {
- let v = Vec::from_raw_parts(self.as_ptr() as *mut u8,
- self.len(),
- self.capacity());
-
- // We forget `self` to avoid freeing it at the end of the scope
- // Otherwise, the returned `Vec` would point to freed memory
- mem::forget(self);
- v
+ fn into_ascii_lowercase(mut self) -> Vec<u8> {
+ for byte in self.iter_mut() {
+ *byte = byte.to_ascii_lowercase();
}
+ self
}
}
-
-/// Extension methods for ASCII-subset only operations on owned strings
-#[experimental = "would prefer to do this in a more general way"]
-pub trait OwnedAsciiExt {
- /// Convert the string to ASCII upper case:
- /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
- /// but non-ASCII letters are unchanged.
- fn into_ascii_upper(self) -> Self;
-
- /// Convert the string to ASCII lower case:
- /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
- /// but non-ASCII letters are unchanged.
- fn into_ascii_lower(self) -> Self;
-}
-
-/// Extension methods for ASCII-subset only operations on string slices
#[experimental = "would prefer to do this in a more general way"]
-pub trait AsciiExt<T> for Sized? {
- /// Makes a copy of the string in ASCII upper case:
- /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
- /// but non-ASCII letters are unchanged.
- fn to_ascii_upper(&self) -> T;
-
- /// Makes a copy of the string in ASCII lower case:
- /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
- /// but non-ASCII letters are unchanged.
- fn to_ascii_lower(&self) -> T;
-
- /// Check that two strings are an ASCII case-insensitive match.
- /// Same as `to_ascii_lower(a) == to_ascii_lower(b)`,
- /// but without allocating and copying temporary strings.
- fn eq_ignore_ascii_case(&self, other: &Self) -> bool;
-}
-
-#[experimental = "would prefer to do this in a more general way"]
-impl AsciiExt<String> for str {
- #[inline]
- fn to_ascii_upper(&self) -> String {
- // Vec<u8>::to_ascii_upper() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(self.as_bytes().to_ascii_upper()) }
- }
-
+impl AsciiExt for u8 {
#[inline]
- fn to_ascii_lower(&self) -> String {
- // Vec<u8>::to_ascii_lower() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(self.as_bytes().to_ascii_lower()) }
+ fn is_ascii(&self) -> bool {
+ *self & 128 == 0u8
}
#[inline]
- fn eq_ignore_ascii_case(&self, other: &str) -> bool {
- self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
+ fn to_ascii_uppercase(&self) -> u8 {
+ ASCII_UPPERCASE_MAP[*self as uint]
}
-}
-#[experimental = "would prefer to do this in a more general way"]
-impl OwnedAsciiExt for String {
#[inline]
- fn into_ascii_upper(self) -> String {
- // Vec<u8>::into_ascii_upper() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(self.into_bytes().into_ascii_upper()) }
+ fn to_ascii_lowercase(&self) -> u8 {
+ ASCII_LOWERCASE_MAP[*self as uint]
}
#[inline]
- fn into_ascii_lower(self) -> String {
- // Vec<u8>::into_ascii_lower() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(self.into_bytes().into_ascii_lower()) }
+ fn eq_ignore_ascii_case(&self, other: &u8) -> bool {
+ self.to_ascii_lowercase() == other.to_ascii_lowercase()
}
}
#[experimental = "would prefer to do this in a more general way"]
-impl AsciiExt<Vec<u8>> for [u8] {
+impl AsciiExt for char {
#[inline]
- fn to_ascii_upper(&self) -> Vec<u8> {
- self.iter().map(|&byte| ASCII_UPPER_MAP[byte as uint]).collect()
+ fn is_ascii(&self) -> bool {
+ *self as u32 <= 0x7F
}
#[inline]
- fn to_ascii_lower(&self) -> Vec<u8> {
- self.iter().map(|&byte| ASCII_LOWER_MAP[byte as uint]).collect()
+ fn to_ascii_uppercase(&self) -> char {
+ if self.is_ascii() {
+ (*self as u8).to_ascii_uppercase() as char
+ } else {
+ *self
+ }
}
#[inline]
- fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
- self.len() == other.len() &&
- self.iter().zip(other.iter()).all(
- |(byte_self, byte_other)| {
- ASCII_LOWER_MAP[*byte_self as uint] ==
- ASCII_LOWER_MAP[*byte_other as uint]
- })
- }
-}
-
-#[experimental = "would prefer to do this in a more general way"]
-impl OwnedAsciiExt for Vec<u8> {
- #[inline]
- fn into_ascii_upper(mut self) -> Vec<u8> {
- for byte in self.iter_mut() {
- *byte = ASCII_UPPER_MAP[*byte as uint];
+ fn to_ascii_lowercase(&self) -> char {
+ if self.is_ascii() {
+ (*self as u8).to_ascii_lowercase() as char
+ } else {
+ *self
}
- self
}
#[inline]
- fn into_ascii_lower(mut self) -> Vec<u8> {
- for byte in self.iter_mut() {
- *byte = ASCII_LOWER_MAP[*byte as uint];
- }
- self
+ fn eq_ignore_ascii_case(&self, other: &char) -> bool {
+ self.to_ascii_lowercase() == other.to_ascii_lowercase()
}
}
}
}
-static ASCII_LOWER_MAP: [u8, ..256] = [
+static ASCII_LOWERCASE_MAP: [u8, ..256] = [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
];
-static ASCII_UPPER_MAP: [u8, ..256] = [
+static ASCII_UPPERCASE_MAP: [u8, ..256] = [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
#[cfg(test)]
mod tests {
- use prelude::*;
use super::*;
+ use prelude::*;
use char::from_u32;
- macro_rules! v2ascii {
- ( [$($e:expr),*]) => (&[$(Ascii{chr:$e}),*]);
- (&[$($e:expr),*]) => (&[$(Ascii{chr:$e}),*]);
- }
-
- macro_rules! vec2ascii {
- ($($e:expr),*) => ([$(Ascii{chr:$e}),*].to_vec());
- }
-
#[test]
fn test_ascii() {
- assert_eq!(65u8.to_ascii().to_byte(), 65u8);
- assert_eq!(65u8.to_ascii().to_char(), 'A');
- assert_eq!('A'.to_ascii().to_char(), 'A');
- assert_eq!('A'.to_ascii().to_byte(), 65u8);
-
- assert_eq!('A'.to_ascii().to_lowercase().to_char(), 'a');
- assert_eq!('Z'.to_ascii().to_lowercase().to_char(), 'z');
- assert_eq!('a'.to_ascii().to_uppercase().to_char(), 'A');
- assert_eq!('z'.to_ascii().to_uppercase().to_char(), 'Z');
-
- assert_eq!('@'.to_ascii().to_lowercase().to_char(), '@');
- assert_eq!('['.to_ascii().to_lowercase().to_char(), '[');
- assert_eq!('`'.to_ascii().to_uppercase().to_char(), '`');
- assert_eq!('{'.to_ascii().to_uppercase().to_char(), '{');
-
- assert!('0'.to_ascii().is_digit());
- assert!('9'.to_ascii().is_digit());
- assert!(!'/'.to_ascii().is_digit());
- assert!(!':'.to_ascii().is_digit());
-
- assert!((0x1fu8).to_ascii().is_control());
- assert!(!' '.to_ascii().is_control());
- assert!((0x7fu8).to_ascii().is_control());
-
assert!("banana".chars().all(|c| c.is_ascii()));
assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii()));
}
#[test]
fn test_ascii_vec() {
- let test = &[40u8, 32u8, 59u8];
- let b: &[_] = v2ascii!([40, 32, 59]);
- assert_eq!(test.to_ascii(), b);
- assert_eq!("( ;".to_ascii(), b);
- let v = vec![40u8, 32u8, 59u8];
- assert_eq!(v.to_ascii(), b);
- assert_eq!("( ;".to_string().to_ascii(), b);
-
- assert_eq!("abCDef&?#".to_ascii().to_lowercase().into_string(), "abcdef&?#");
- assert_eq!("abCDef&?#".to_ascii().to_uppercase().into_string(), "ABCDEF&?#");
-
- assert_eq!("".to_ascii().to_lowercase().into_string(), "");
- assert_eq!("YMCA".to_ascii().to_lowercase().into_string(), "ymca");
- let mixed = "abcDEFxyz:.;".to_ascii();
- assert_eq!(mixed.to_uppercase().into_string(), "ABCDEFXYZ:.;");
-
- assert!("aBcDeF&?#".to_ascii().eq_ignore_case("AbCdEf&?#".to_ascii()));
-
assert!("".is_ascii());
assert!("a".is_ascii());
assert!(!"\u{2009}".is_ascii());
}
#[test]
- fn test_ascii_vec_ng() {
- assert_eq!("abCDef&?#".to_ascii().to_lowercase().into_string(), "abcdef&?#");
- assert_eq!("abCDef&?#".to_ascii().to_uppercase().into_string(), "ABCDEF&?#");
- assert_eq!("".to_ascii().to_lowercase().into_string(), "");
- assert_eq!("YMCA".to_ascii().to_lowercase().into_string(), "ymca");
- let mixed = "abcDEFxyz:.;".to_ascii();
- assert_eq!(mixed.to_uppercase().into_string(), "ABCDEFXYZ:.;");
- }
-
- #[test]
- fn test_owned_ascii_vec() {
- assert_eq!(("( ;".to_string()).into_ascii(), vec2ascii![40, 32, 59]);
- assert_eq!((vec![40u8, 32u8, 59u8]).into_ascii(), vec2ascii![40, 32, 59]);
- }
-
- #[test]
- fn test_ascii_as_str() {
- let v = v2ascii!([40, 32, 59]);
- assert_eq!(v.as_str_ascii(), "( ;");
- }
-
- #[test]
- fn test_ascii_into_string() {
- assert_eq!(vec2ascii![40, 32, 59].into_string(), "( ;");
- assert_eq!(vec2ascii!(40, 32, 59).into_string(), "( ;");
- }
-
- #[test]
- fn test_ascii_to_bytes() {
- assert_eq!(vec2ascii![40, 32, 59].into_bytes(), vec![40u8, 32u8, 59u8]);
- }
-
- #[test] #[should_fail]
- fn test_ascii_vec_panic_u8_slice() { (&[127u8, 128u8, 255u8]).to_ascii(); }
-
- #[test] #[should_fail]
- fn test_ascii_vec_panic_str_slice() { "zoä华".to_ascii(); }
-
- #[test] #[should_fail]
- fn test_ascii_panic_u8_slice() { 255u8.to_ascii(); }
-
- #[test] #[should_fail]
- fn test_ascii_panic_char_slice() { 'λ'.to_ascii(); }
-
- #[test]
- fn test_opt() {
- assert_eq!(65u8.to_ascii_opt(), Some(Ascii { chr: 65u8 }));
- assert_eq!(255u8.to_ascii_opt(), None);
-
- assert_eq!('A'.to_ascii_opt(), Some(Ascii { chr: 65u8 }));
- assert_eq!('λ'.to_ascii_opt(), None);
-
- assert_eq!("zoä华".to_ascii_opt(), None);
-
- let test1 = &[127u8, 128u8, 255u8];
- assert_eq!((test1).to_ascii_opt(), None);
-
- let v = [40u8, 32u8, 59u8];
- let v2: &[_] = v2ascii!(&[40, 32, 59]);
- assert_eq!(v.to_ascii_opt(), Some(v2));
- let v = [127u8, 128u8, 255u8];
- assert_eq!(v.to_ascii_opt(), None);
-
- let v = "( ;";
- assert_eq!(v.to_ascii_opt(), Some(v2));
- assert_eq!("zoä华".to_ascii_opt(), None);
-
- assert_eq!((vec![40u8, 32u8, 59u8]).into_ascii_opt(), Some(vec2ascii![40, 32, 59]));
- assert_eq!((vec![127u8, 128u8, 255u8]).into_ascii_opt(), None);
-
- assert_eq!(("( ;".to_string()).into_ascii_opt(), Some(vec2ascii![40, 32, 59]));
- assert_eq!(("zoä华".to_string()).into_ascii_opt(), None);
- }
-
- #[test]
- fn test_to_ascii_upper() {
- assert_eq!("url()URL()uRl()ürl".to_ascii_upper(), "URL()URL()URL()üRL");
- assert_eq!("hıKß".to_ascii_upper(), "HıKß");
+ fn test_to_ascii_uppercase() {
+ assert_eq!("url()URL()uRl()ürl".to_ascii_uppercase(), "URL()URL()URL()üRL");
+ assert_eq!("hıKß".to_ascii_uppercase(), "HıKß");
let mut i = 0;
while i <= 500 {
let upper = if 'a' as u32 <= i && i <= 'z' as u32 { i + 'A' as u32 - 'a' as u32 }
else { i };
- assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_upper(),
+ assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_uppercase(),
(from_u32(upper).unwrap()).to_string());
i += 1;
}
}
#[test]
- fn test_to_ascii_lower() {
- assert_eq!("url()URL()uRl()Ürl".to_ascii_lower(), "url()url()url()Ürl");
+ fn test_to_ascii_lowercase() {
+ assert_eq!("url()URL()uRl()Ürl".to_ascii_lowercase(), "url()url()url()Ürl");
// Dotted capital I, Kelvin sign, Sharp S.
- assert_eq!("HİKß".to_ascii_lower(), "hİKß");
+ assert_eq!("HİKß".to_ascii_lowercase(), "hİKß");
let mut i = 0;
while i <= 500 {
let lower = if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 }
else { i };
- assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_lower(),
+ assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_lowercase(),
(from_u32(lower).unwrap()).to_string());
i += 1;
}
}
#[test]
- fn test_into_ascii_upper() {
- assert_eq!(("url()URL()uRl()ürl".to_string()).into_ascii_upper(),
+ fn test_into_ascii_uppercase() {
+ assert_eq!(("url()URL()uRl()ürl".to_string()).into_ascii_uppercase(),
"URL()URL()URL()üRL".to_string());
- assert_eq!(("hıKß".to_string()).into_ascii_upper(), "HıKß");
+ assert_eq!(("hıKß".to_string()).into_ascii_uppercase(), "HıKß");
let mut i = 0;
while i <= 500 {
let upper = if 'a' as u32 <= i && i <= 'z' as u32 { i + 'A' as u32 - 'a' as u32 }
else { i };
- assert_eq!((from_u32(i).unwrap()).to_string().into_ascii_upper(),
+ assert_eq!((from_u32(i).unwrap()).to_string().into_ascii_uppercase(),
(from_u32(upper).unwrap()).to_string());
i += 1;
}
}
#[test]
- fn test_into_ascii_lower() {
- assert_eq!(("url()URL()uRl()Ürl".to_string()).into_ascii_lower(),
+ fn test_into_ascii_lowercase() {
+ assert_eq!(("url()URL()uRl()Ürl".to_string()).into_ascii_lowercase(),
"url()url()url()Ürl");
// Dotted capital I, Kelvin sign, Sharp S.
- assert_eq!(("HİKß".to_string()).into_ascii_lower(), "hİKß");
+ assert_eq!(("HİKß".to_string()).into_ascii_lowercase(), "hİKß");
let mut i = 0;
while i <= 500 {
let lower = if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 }
else { i };
- assert_eq!((from_u32(i).unwrap()).to_string().into_ascii_lower(),
+ assert_eq!((from_u32(i).unwrap()).to_string().into_ascii_lowercase(),
(from_u32(lower).unwrap()).to_string());
i += 1;
}
i += 1;
}
}
-
- #[test]
- fn test_to_string() {
- let s = Ascii{ chr: b't' }.to_string();
- assert_eq!(s, "t");
- }
-
- #[test]
- fn test_show() {
- let c = Ascii { chr: b't' };
- assert_eq!(format!("{}", c), "t");
- }
}
use fmt;
use hash;
-use kinds::marker;
use mem;
use ptr;
use slice::{mod, ImmutableIntSlice};
use str;
use string::String;
-
+use core::kinds::marker;
/// The representation of a C String.
///
owns_buffer_: bool,
}
+unsafe impl Send for CString { }
+unsafe impl Sync for CString { }
+
impl Clone for CString {
/// Clone this CString into a new, uniquely owned CString. For safety
/// reasons, this is always a deep clone with the memory allocated
#[cfg(test)]
mod tests {
+ use super::*;
use prelude::*;
use ptr;
use thread::Thread;
use libc;
- use super::*;
-
#[test]
fn test_str_multistring_parsing() {
unsafe {
use ops::{Drop, FnOnce};
use option::Option;
use option::Option::{Some, None};
-use ptr::RawPtr;
+use ptr::PtrExt;
use ptr;
use raw;
use slice::AsSlice;
fn malloc(n: uint) -> CVec<u8> {
unsafe {
- let mem = libc::malloc(n as libc::size_t);
- if mem.is_null() { ::alloc::oom() }
+ let mem = ptr::Unique(libc::malloc(n as libc::size_t));
+ if mem.0.is_null() { ::alloc::oom() }
- CVec::new_with_dtor(mem as *mut u8,
+ CVec::new_with_dtor(mem.0 as *mut u8,
n,
- move|| { libc::free(mem as *mut libc::c_void); })
+ move|| { libc::free(mem.0 as *mut libc::c_void); })
}
}
/// }
/// ```
///
-/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
+/// The easiest way to use `HashMap` with a custom type as key is to derive `Eq` and `Hash`.
/// We must also derive `PartialEq`.
///
/// ```
/// use std::collections::HashMap;
///
/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
+/// struct Viking {
+/// name: String,
+/// country: String,
/// }
///
+/// impl Viking {
+/// /// Create a new Viking.
+/// pub fn new(name: &str, country: &str) -> Viking {
+/// Viking { name: name.to_string(), country: country.to_string() }
+/// }
+/// }
+///
+/// // Use a HashMap to store the vikings' health points.
/// let mut vikings = HashMap::new();
///
-/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
-/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
-/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
+/// vikings.insert(Viking::new("Einar", "Norway"), 25u);
+/// vikings.insert(Viking::new("Olaf", "Denmark"), 24u);
+/// vikings.insert(Viking::new("Harald", "Iceland"), 12u);
///
-/// // Use derived implementation to print the vikings.
-/// for (land, viking) in vikings.iter() {
-/// println!("{} at {}", viking, land);
+/// // Use derived implementation to print the status of the vikings.
+/// for (viking, health) in vikings.iter() {
+/// println!("{} has {} hp", viking, health);
/// }
/// ```
#[deriving(Clone)]
/// }
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
- pub fn iter(&self) -> Entries<K, V> {
- Entries { inner: self.table.iter() }
+ pub fn iter(&self) -> Iter<K, V> {
+ Iter { inner: self.table.iter() }
}
/// An iterator visiting all key-value pairs in arbitrary order,
}
/// HashMap iterator
-pub struct Entries<'a, K: 'a, V: 'a> {
- inner: table::Entries<'a, K, V>
+pub struct Iter<'a, K: 'a, V: 'a> {
+ inner: table::Iter<'a, K, V>
}
/// HashMap mutable values iterator
/// HashMap keys iterator
pub struct Keys<'a, K: 'a, V: 'a> {
- inner: Map<(&'a K, &'a V), &'a K, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
+ inner: Map<(&'a K, &'a V), &'a K, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
}
/// HashMap values iterator
pub struct Values<'a, K: 'a, V: 'a> {
- inner: Map<(&'a K, &'a V), &'a V, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
+ inner: Map<(&'a K, &'a V), &'a V, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
}
/// HashMap drain iterator
NoElem(EmptyBucket<K, V, M>),
}
-impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Iter<'a, K, V> {
#[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
#[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
}
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
-use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr::{Unique, PtrExt, copy_nonoverlapping_memory, zero_memory};
use ptr;
use rt::heap::{allocate, deallocate};
pub struct RawTable<K, V> {
capacity: uint,
size: uint,
- hashes: *mut u64,
+ hashes: Unique<u64>,
// Because K/V do not appear directly in any of the types in the struct,
// inform rustc that in fact instances of K and V are reachable from here.
marker: marker::CovariantType<(K,V)>,
return RawTable {
size: 0,
capacity: 0,
- hashes: 0 as *mut u64,
+ hashes: Unique::null(),
marker: marker::CovariantType,
};
}
RawTable {
capacity: capacity,
size: 0,
- hashes: hashes,
+ hashes: Unique(hashes),
marker: marker::CovariantType,
}
}
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
- let buffer = self.hashes as *mut u8;
+ let buffer = self.hashes.0 as *mut u8;
let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
keys_size, min_align_of::<K>(),
min_align_of::<V>());
unsafe {
RawBucket {
- hash: self.hashes,
+ hash: self.hashes.0,
key: buffer.offset(keys_offset as int) as *mut K,
val: buffer.offset(vals_offset as int) as *mut V
}
pub fn new(capacity: uint) -> RawTable<K, V> {
unsafe {
let ret = RawTable::new_uninitialized(capacity);
- zero_memory(ret.hashes, capacity);
+ zero_memory(ret.hashes.0, capacity);
ret
}
}
RawBuckets {
raw: self.first_bucket_raw(),
hashes_end: unsafe {
- self.hashes.offset(self.capacity as int)
+ self.hashes.0.offset(self.capacity as int)
},
marker: marker::ContravariantLifetime,
}
}
- pub fn iter(&self) -> Entries<K, V> {
- Entries {
+ pub fn iter(&self) -> Iter<K, V> {
+ Iter {
iter: self.raw_buckets(),
elems_left: self.size(),
}
}
/// Iterator over shared references to entries in a table.
-pub struct Entries<'a, K: 'a, V: 'a> {
+pub struct Iter<'a, K: 'a, V: 'a> {
iter: RawBuckets<'a, K, V>,
elems_left: uint,
}
iter: RawBuckets<'static, K, V>,
}
-impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Iter<'a, K, V> {
fn next(&mut self) -> Option<(&'a K, &'a V)> {
self.iter.next().map(|bucket| {
self.elems_left -= 1;
#[unsafe_destructor]
impl<K, V> Drop for RawTable<K, V> {
fn drop(&mut self) {
- if self.hashes.is_null() {
+ if self.hashes.0.is_null() {
return;
}
// This is done in reverse because we've likely partially taken
vals_size, min_align_of::<V>());
unsafe {
- deallocate(self.hashes as *mut u8, size, align);
+ deallocate(self.hashes.0 as *mut u8, size, align);
// Remember how everything was allocated out of one buffer
// during initialization? We only need one call to free here.
}
use thread::Thread;
use sync::atomic::{AtomicBool, INIT_ATOMIC_BOOL, Ordering};
use sync::Arc;
+use kinds::{Sync, Send};
use kinds::marker::{NoSend, NoSync};
use mem;
use clone::Clone;
woken: AtomicBool,
}
+unsafe impl Send for Inner {}
+unsafe impl Sync for Inner {}
+
#[deriving(Clone)]
pub struct SignalToken {
inner: Arc<Inner>,
use self::Flavor::*;
use alloc::arc::Arc;
+use core::kinds;
use core::kinds::marker;
use core::mem;
use core::cell::UnsafeCell;
mod $name {
#![allow(unused_imports)]
- use prelude::*;
- use rt;
-
- use comm::*;
use super::*;
+ use comm::*;
use thread::Thread;
+ use prelude::*;
$(#[$a])* #[test] fn f() { $b }
}
#[unstable]
pub struct Receiver<T> {
inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
}
+// The receiver port can be sent from place to place, so long as it
+// is not used to receive non-sendable things.
+unsafe impl<T:Send> Send for Receiver<T> { }
+
/// An iterator over messages on a receiver, this iterator will block
/// whenever `next` is called, waiting for a new message, and `None` will be
/// returned when the corresponding channel has hung up.
#[unstable]
pub struct Sender<T> {
inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
}
+// The send port can be sent from place to place, so long as it
+// is not used to send non-sendable things.
+unsafe impl<T:Send> Send for Sender<T> { }
+
/// The sending-half of Rust's synchronous channel type. This half can only be
/// owned by one task, but it can be cloned to send to other tasks.
#[unstable = "this type may be renamed, but it will always exist"]
pub struct SyncSender<T> {
- inner: Arc<UnsafeCell<sync::Packet<T>>>,
+ inner: Arc<RacyCell<sync::Packet<T>>>,
// can't share in an arc
_marker: marker::NoSync,
}
}
enum Flavor<T> {
- Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
- Stream(Arc<UnsafeCell<stream::Packet<T>>>),
- Shared(Arc<UnsafeCell<shared::Packet<T>>>),
- Sync(Arc<UnsafeCell<sync::Packet<T>>>),
+ Oneshot(Arc<RacyCell<oneshot::Packet<T>>>),
+ Stream(Arc<RacyCell<stream::Packet<T>>>),
+ Shared(Arc<RacyCell<shared::Packet<T>>>),
+ Sync(Arc<RacyCell<sync::Packet<T>>>),
}
#[doc(hidden)]
/// ```
#[unstable]
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
+ let a = Arc::new(RacyCell::new(oneshot::Packet::new()));
(Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
}
#[unstable = "this function may be renamed to more accurately reflect the type \
of channel that is is creating"]
pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
+ let a = Arc::new(RacyCell::new(sync::Packet::new(bound)));
(SyncSender::new(a.clone()), Receiver::new(Sync(a)))
}
fn new(inner: Flavor<T>) -> Sender<T> {
Sender {
inner: UnsafeCell::new(inner),
- _marker: marker::NoSync,
}
}
if !(*p).sent() {
return (*p).send(t);
} else {
- let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
+ let a =
+ Arc::new(RacyCell::new(stream::Packet::new()));
match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
oneshot::UpSuccess => {
let ret = (*a.get()).send(t);
fn clone(&self) -> Sender<T> {
let (packet, sleeper, guard) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
}
}
Stream(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
////////////////////////////////////////////////////////////////////////////////
impl<T: Send> SyncSender<T> {
- fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
+ fn new(inner: Arc<RacyCell<sync::Packet<T>>>) -> SyncSender<T> {
SyncSender { inner: inner, _marker: marker::NoSync }
}
impl<T: Send> Receiver<T> {
fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
+ Receiver { inner: UnsafeCell::new(inner) }
}
/// Blocks waiting for a value on this receiver
}
}
+/// A version of `UnsafeCell` intended for use in concurrent data
+/// structures (for example, you might put it in an `Arc`).
+struct RacyCell<T>(pub UnsafeCell<T>);
+
+impl<T> RacyCell<T> {
+
+ fn new(value: T) -> RacyCell<T> {
+ RacyCell(UnsafeCell { value: value })
+ }
+
+ unsafe fn get(&self) -> *mut T {
+ self.0.get()
+ }
+
+}
+
+unsafe impl<T:Send> Send for RacyCell<T> { }
+
+unsafe impl<T> kinds::Sync for RacyCell<T> { } // Oh dear
+
+
#[cfg(test)]
mod test {
+ use super::*;
use prelude::*;
-
use os;
- use super::*;
pub fn stress_factor() -> uint {
match os::getenv("RUST_TEST_STRESS") {
tail: UnsafeCell<*mut Node<T>>,
}
+unsafe impl<T:Send> Send for Queue<T> { }
+unsafe impl<T:Send> Sync for Queue<T> { }
+
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(box Node {
// and that could cause problems on platforms where it is
// represented by opaque data structure
pub fn postinit_lock(&self) -> MutexGuard<()> {
- self.select_lock.lock()
+ self.select_lock.lock().unwrap()
}
// This function is used at the creation of a shared packet to inherit a
// about looking at and dealing with to_wake. Once we have acquired the
// lock, we are guaranteed that inherit_blocker is done.
{
- let _guard = self.select_lock.lock();
+ let _guard = self.select_lock.lock().unwrap();
}
// Like the stream implementation, we want to make sure that the count
cache_subtractions: AtomicUint,
}
+unsafe impl<T: Send> Send for Queue<T> { }
+
+unsafe impl<T: Send> Sync for Queue<T> { }
+
impl<T: Send> Node<T> {
fn new() -> *mut Node<T> {
unsafe {
lock: Mutex<State<T>>,
}
+unsafe impl<T:Send> Send for Packet<T> { }
+
+unsafe impl<T:Send> Sync for Packet<T> { }
+
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
canceled: Option<&'static mut bool>,
}
+unsafe impl<T: Send> Send for State<T> {}
+
/// Possible flavors of threads who can be blocked on this channel.
enum Blocker {
BlockedSender(SignalToken),
next: *mut Node,
}
+unsafe impl Send for Node {}
+
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
NoneBlocked => {}
_ => unreachable!(),
}
- drop(guard); // unlock
- wait_token.wait(); // block
- lock.lock() // relock
+ drop(guard); // unlock
+ wait_token.wait(); // block
+ lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: 0 as *mut Node };
loop {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::RecvDisconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
}
pub fn try_recv(&self) -> Result<T, Failure> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
}
// Not much to do other than wake up a receiver if one's there
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
}
pub fn drop_port(&self) {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If Ok, the value is whether this port has data, if Err, then the upgraded
// port needs to be checked instead of this one.
pub fn can_recv(&self) -> bool {
- let guard = self.lock.lock();
+ let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
impl<T: Send> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(atomic::SeqCst), 0);
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings and streams.
//!
-//! The functions contained in this module should not normally be used in
-//! everyday use cases of `format!`. The assumptions made by these functions are
-//! unsafe for all inputs, and the compiler performs a large amount of
-//! validation on the arguments to `format!` in order to ensure safety at
-//! runtime. While it is possible to call these functions directly, it is not
-//! recommended to do so in the general case.
-//!
//! ## Usage
//!
//! The `format!` macro is intended to be familiar to those coming from C's
//!
//! # #[allow(unused_must_use)]
//! # fn main() {
-//! format_args!(fmt::format, "this returns {}", "String");
+//! fmt::format(format_args!("this returns {}", "String"));
//!
//! let some_writer: &mut io::Writer = &mut io::stdout();
-//! format_args!(|args| { write!(some_writer, "{}", args) },
-//! "print with a {}", "closure");
+//! write!(some_writer, "{}", format_args!("print with a {}", "macro"));
//!
-//! fn my_fmt_fn(args: &fmt::Arguments) {
+//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
-//! format_args!(my_fmt_fn, "or a {} too", "function");
+//! my_fmt_fn(format_args!("or a {} too", "function"));
//! # }
//! ```
//!
-//! The first argument of the `format_args!` macro is a function (or closure)
-//! which takes one argument of type `&fmt::Arguments`. This structure can then
-//! be passed to the `write` and `format` functions inside this module in order
-//! to process the format string. The goal of this macro is to even further
-//! prevent intermediate allocations when dealing formatting strings.
+//! The result of the `format_args!` macro is a value of type `fmt::Arguments`.
+//! This structure can then be passed to the `write` and `format` functions
+//! inside this module in order to process the format string.
+//! The goal of this macro is to even further prevent intermediate allocations
+//! when dealing formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
-//! It is unsafe to programmatically create an instance of `fmt::Arguments`
-//! because the operations performed when executing a format string require the
-//! compile-time checks provided by the compiler. The `format_args!` macro is
-//! the only method of safely creating these structures, but they can be
-//! unsafely created with the constructor provided.
-//!
//! ## Syntax
//!
//! The syntax for the formatting language used is drawn from other languages,
#[doc(hidden)]
pub use core::fmt::{argument, argumentuint};
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// The format function takes a precompiled format string and a list of
+/// arguments, to return the resulting formatted string.
+///
+/// # Arguments
+///
+/// * args - a structure of arguments generated via the `format_args!` macro.
+///
+/// # Example
+///
+/// ```rust
+/// use std::fmt;
+///
+/// let s = fmt::format(format_args!("Hello, {}!", "world"));
+/// assert_eq!(s, "Hello, world!".to_string());
+/// ```
+#[experimental = "this is an implementation detail of format! and should not \
+ be called directly"]
+pub fn format(args: Arguments) -> string::String {
+ let mut output = Vec::new();
+ let _ = write!(&mut output as &mut Writer, "{}", args);
+ string::String::from_utf8(output).unwrap()
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
/// The format function takes a precompiled format string and a list of
/// arguments, to return the resulting formatted string.
///
/// # Arguments
///
/// * args - a structure of arguments generated via the `format_args!` macro.
-/// Because this structure can only be safely generated at
-/// compile-time, this function is safe.
///
/// # Example
///
use slice::{SliceExt};
use slice;
use vec::Vec;
+use kinds::{Send,Sync};
/// Wraps a Reader and buffers input from it
///
cap: uint,
}
+
+unsafe impl<R: Send> Send for BufferedReader<R> {}
+unsafe impl<R: Send+Sync> Sync for BufferedReader<R> {}
+
+
impl<R: Reader> BufferedReader<R> {
/// Creates a new `BufferedReader` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
impl<R: Reader> Buffer for BufferedReader<R> {
fn fill_buf<'a>(&'a mut self) -> IoResult<&'a [u8]> {
if self.pos == self.cap {
- self.cap = try!(self.inner.read(self.buf[mut]));
+ self.cap = try!(self.inner.read(self.buf.as_mut_slice()));
self.pos = 0;
}
Ok(self.buf[self.pos..self.cap])
if buf.len() > self.buf.len() {
self.inner.as_mut().unwrap().write(buf)
} else {
- let dst = self.buf[mut self.pos..];
+ let dst = self.buf.slice_from_mut(self.pos);
slice::bytes::copy_memory(dst, buf);
self.pos += buf.len();
Ok(())
loop {
let count = match self.fill_buf().ok() {
Some(src) => {
- let dst = buf[mut num_read..];
+ let dst = buf.slice_from_mut(num_read);
let count = cmp::min(src.len(), dst.len());
bytes::copy_memory(dst, src[..count]);
count
use ops::FnOnce;
use option::Option;
use option::Option::{Some, None};
-use ptr::RawPtr;
+use ptr::PtrExt;
use result::Result::{Ok, Err};
use slice::{SliceExt, AsSlice};
{
let mut read_stream = File::open_mode(filename, Open, Read);
{
- let read_buf = read_mem[mut 0..4];
+ let read_buf = read_mem.slice_mut(0, 4);
check!(read_stream.read(read_buf));
}
{
- let read_buf = read_mem[mut 4..8];
+ let read_buf = read_mem.slice_mut(4, 8);
check!(read_stream.read(read_buf));
}
}
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf[self.pos.. self.pos + write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
let write_len = min(buf.len(), self.len());
{
let input = self[..write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
slice::bytes::copy_memory(output, input);
}
impl<'a> Writer for BufWriter<'a> {
#[inline]
fn write(&mut self, src: &[u8]) -> IoResult<()> {
- let dst = self.buf[mut self.pos..];
+ let dst = self.buf.slice_from_mut(self.pos);
let dst_len = dst.len();
if dst_len == 0 {
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf[self.pos.. self.pos + write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
#[cfg(test)]
mod test {
extern crate "test" as test_crate;
- use prelude::*;
use super::*;
use io::*;
+ use prelude::*;
use io;
use self::test_crate::Bencher;
assert!(r.read_at_least(buf.len(), &mut buf).is_ok());
let b: &[_] = &[1, 2, 3];
assert_eq!(buf, b);
- assert!(r.read_at_least(0, buf[mut ..0]).is_ok());
+ assert!(r.read_at_least(0, buf.slice_to_mut(0)).is_ok());
assert_eq!(buf, b);
assert!(r.read_at_least(buf.len(), &mut buf).is_ok());
let b: &[_] = &[4, 5, 6];
while read < min {
let mut zeroes = 0;
loop {
- match self.read(buf[mut read..]) {
+ match self.read(buf.slice_from_mut(read)) {
Ok(0) => {
zeroes += 1;
if zeroes >= NO_PROGRESS_LIMIT {
// API yet. If so, it should be a method on Vec.
unsafe fn slice_vec_capacity<'a, T>(v: &'a mut Vec<T>, start: uint, end: uint) -> &'a mut [T] {
use raw::Slice;
- use ptr::RawPtr;
+ use ptr::PtrExt;
assert!(start <= end);
assert!(end <= v.capacity());
/// decide whether their stream needs to be buffered or not.
fn flush(&mut self) -> IoResult<()> { Ok(()) }
+ // NOTE(stage0): Remove cfg after a snapshot
+ #[cfg(not(stage0))]
+ /// Writes a formatted string into this writer, returning any error
+ /// encountered.
+ ///
+ /// This method is primarily used to interface with the `format_args!`
+ /// macro, but it is rare that this should explicitly be called. The
+ /// `write!` macro should be favored to invoke this method instead.
+ ///
+ /// # Errors
+ ///
+ /// This function will return any I/O error reported while formatting.
+ fn write_fmt(&mut self, fmt: fmt::Arguments) -> IoResult<()> {
+ // Create a shim which translates a Writer to a FormatWriter and saves
+ // off I/O errors. instead of discarding them
+ struct Adaptor<'a, T:'a> {
+ inner: &'a mut T,
+ error: IoResult<()>,
+ }
+
+ impl<'a, T: Writer> fmt::FormatWriter for Adaptor<'a, T> {
+ fn write(&mut self, bytes: &[u8]) -> fmt::Result {
+ match self.inner.write(bytes) {
+ Ok(()) => Ok(()),
+ Err(e) => {
+ self.error = Err(e);
+ Err(fmt::Error)
+ }
+ }
+ }
+ }
+
+ let mut output = Adaptor { inner: self, error: Ok(()) };
+ match fmt::write(&mut output, fmt) {
+ Ok(()) => Ok(()),
+ Err(..) => output.error
+ }
+ }
+
+
+ // NOTE(stage0): Remove method after a snapshot
+ #[cfg(stage0)]
/// Writes a formatted string into this writer, returning any error
/// encountered.
///
#[inline]
fn write_char(&mut self, c: char) -> IoResult<()> {
let mut buf = [0u8, ..4];
- let n = c.encode_utf8(buf[mut]).unwrap_or(0);
+ let n = c.encode_utf8(buf.as_mut_slice()).unwrap_or(0);
self.write(buf[..n])
}
{
let mut start = 1;
while start < width {
- match try!(self.read(buf[mut start..width])) {
+ match try!(self.read(buf.slice_mut(start, width))) {
n if n == width - start => break,
n if n < width - start => { start += n; }
_ => return Err(standard_error(InvalidInput)),
assert!(head.len() + tail.len() <= 8);
let mut gs = [0u16, ..8];
gs.clone_from_slice(head);
- gs[mut 8 - tail.len() .. 8].clone_from_slice(tail);
+ gs.slice_mut(8 - tail.len(), 8).clone_from_slice(tail);
Ipv6Addr(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7])
}
#[cfg(test)]
#[allow(experimental)]
mod tests {
- use prelude::*;
use super::*;
use io::*;
use io::test::*;
+ use prelude::*;
use io::fs::PathExtensions;
use time::Duration;
/// match socket.recv_from(&mut buf) {
/// Ok((amt, src)) => {
/// // Send a reply to the socket we received data from
-/// let buf = buf[mut ..amt];
+/// let buf = buf.slice_to_mut(amt);
/// buf.reverse();
/// socket.send_to(buf, src);
/// }
#[allow(experimental)]
mod test {
use super::*;
- use prelude::*;
- use io::*;
use io::net::ip::*;
+ use io::*;
use io::test::*;
+ use prelude::*;
// FIXME #11530 this fails on android because tests are run as root
#[cfg_attr(any(windows, target_os = "android"), ignore)]
#![allow(unused_imports)]
use super::*;
- use prelude::*;
use io::timer::*;
use io::*;
+ use prelude::*;
use io::fs::PathExtensions;
use time::Duration;
use str;
use fmt;
use io::{Reader, Writer, IoResult, IoError, OtherIoError, Buffer,
standard_error, EndOfFile, LineBufferedWriter, BufferedReader};
-use kinds::Send;
+use kinds::{Sync, Send};
use libc;
use mem;
use option::Option;
}
}
+struct RaceBox(BufferedReader<StdReader>);
+
+unsafe impl Send for RaceBox {}
+unsafe impl Sync for RaceBox {}
+
/// A synchronized wrapper around a buffered reader from stdin
#[deriving(Clone)]
pub struct StdinReader {
- inner: Arc<Mutex<BufferedReader<StdReader>>>,
+ inner: Arc<Mutex<RaceBox>>,
}
+unsafe impl Send for StdinReader {}
+unsafe impl Sync for StdinReader {}
+
/// A guard for exclusive access to `StdinReader`'s internal `BufferedReader`.
pub struct StdinReaderGuard<'a> {
- inner: MutexGuard<'a, BufferedReader<StdReader>>,
+ inner: MutexGuard<'a, RaceBox>,
}
impl<'a> Deref<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref(&self) -> &BufferedReader<StdReader> {
- &*self.inner
+ &self.inner.0
}
}
impl<'a> DerefMut<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref_mut(&mut self) -> &mut BufferedReader<StdReader> {
- &mut *self.inner
+ &mut self.inner.0
}
}
/// ```
pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> {
StdinReaderGuard {
- inner: self.inner.lock()
+ inner: self.inner.lock().unwrap()
}
}
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_line(&mut self) -> IoResult<String> {
- self.inner.lock().read_line()
+ self.inner.lock().unwrap().0.read_line()
}
/// Like `Buffer::read_until`.
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> {
- self.inner.lock().read_until(byte)
+ self.inner.lock().unwrap().0.read_until(byte)
}
/// Like `Buffer::read_char`.
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_char(&mut self) -> IoResult<char> {
- self.inner.lock().read_char()
+ self.inner.lock().unwrap().0.read_char()
}
}
impl Reader for StdinReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
- self.inner.lock().read(buf)
+ self.inner.lock().unwrap().0.read(buf)
}
// We have to manually delegate all of these because the default impls call
// incur the costs of repeated locking).
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> {
- self.inner.lock().read_at_least(min, buf)
+ self.inner.lock().unwrap().0.read_at_least(min, buf)
}
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> {
- self.inner.lock().push_at_least(min, len, buf)
+ self.inner.lock().unwrap().0.push_at_least(min, len, buf)
}
fn read_to_end(&mut self) -> IoResult<Vec<u8>> {
- self.inner.lock().read_to_end()
+ self.inner.lock().unwrap().0.read_to_end()
}
fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
- self.inner.lock().read_le_uint_n(nbytes)
+ self.inner.lock().unwrap().0.read_le_uint_n(nbytes)
}
fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
- self.inner.lock().read_be_uint_n(nbytes)
+ self.inner.lock().unwrap().0.read_be_uint_n(nbytes)
}
}
BufferedReader::new(stdin_raw())
};
let stdin = StdinReader {
- inner: Arc::new(Mutex::new(stdin))
+ inner: Arc::new(Mutex::new(RaceBox(stdin)))
};
STDIN = mem::transmute(box stdin);
})
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
+/// with the `format_args!` macro.
+pub fn print_args(fmt: fmt::Arguments) {
+ with_task_stdout(|io| write!(io, "{}", fmt))
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
/// with the `format_args!` macro.
pub fn print_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| write!(io, "{}", fmt))
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Similar to `println`, but takes a `fmt::Arguments` structure to be
+/// compatible with the `format_args!` macro.
+pub fn println_args(fmt: fmt::Arguments) {
+ with_task_stdout(|io| writeln!(io, "{}", fmt))
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
/// Similar to `println`, but takes a `fmt::Arguments` structure to be
/// compatible with the `format_args!` macro.
pub fn println_args(fmt: &fmt::Arguments) {
inner: StdSource
}
+unsafe impl Send for StdWriter {}
+unsafe impl Sync for StdWriter {}
+
impl StdWriter {
/// Gets the size of this output window, if possible. This is typically used
/// when the writer is attached to something like a terminal, this is used
}
let len = cmp::min(self.limit, buf.len());
- let res = self.inner.read(buf[mut ..len]);
+ let res = self.inner.read(buf.slice_to_mut(len));
match res {
Ok(len) => self.limit -= len,
_ => {}
#![experimental]
#![macro_escape]
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// The entry point for panic of Rust tasks.
+///
+/// This macro is used to inject panic into a Rust task, causing the task to
+/// unwind and panic entirely. Each task's panic can be reaped as the
+/// `Box<Any>` type, and the single-argument form of the `panic!` macro will be
+/// the value which is transmitted.
+///
+/// The multi-argument form of this macro panics with a string and has the
+/// `format!` syntax for building a string.
+///
+/// # Example
+///
+/// ```should_fail
+/// # #![allow(unreachable_code)]
+/// panic!();
+/// panic!("this is a terrible mistake!");
+/// panic!(4i); // panic with the value of 4 to be collected elsewhere
+/// panic!("this is a {} {message}", "fancy", message = "message");
+/// ```
+#[macro_export]
+macro_rules! panic {
+ () => ({
+ panic!("explicit panic")
+ });
+ ($msg:expr) => ({
+ // static requires less code at runtime, more constant data
+ static _FILE_LINE: (&'static str, uint) = (file!(), line!());
+ ::std::rt::begin_unwind($msg, &_FILE_LINE)
+ });
+ ($fmt:expr, $($arg:tt)*) => ({
+ // The leading _'s are to avoid dead code warnings if this is
+ // used inside a dead function. Just `#[allow(dead_code)]` is
+ // insufficient, since the user may have
+ // `#[forbid(dead_code)]` and which cannot be overridden.
+ static _FILE_LINE: (&'static str, uint) = (file!(), line!());
+ ::std::rt::begin_unwind_fmt(format_args!($fmt, $($arg)*), &_FILE_LINE)
+
+ });
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// The entry point for panic of Rust tasks.
///
/// This macro is used to inject panic into a Rust task, causing the task to
() => (panic!("not yet implemented"))
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Use the syntax described in `std::fmt` to create a value of type `String`.
+/// See `std::fmt` for more information.
+///
+/// # Example
+///
+/// ```
+/// format!("test");
+/// format!("hello {}", "world!");
+/// format!("x = {}, y = {y}", 10i, y = 30i);
+/// ```
+#[macro_export]
+#[stable]
+macro_rules! format {
+ ($($arg:tt)*) => (::std::fmt::format(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Use the syntax described in `std::fmt` to create a value of type `String`.
/// See `std::fmt` for more information.
///
)
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Use the `format!` syntax to write data into a buffer of type `&mut Writer`.
+/// See `std::fmt` for more information.
+///
+/// # Example
+///
+/// ```
+/// # #![allow(unused_must_use)]
+///
+/// let mut w = Vec::new();
+/// write!(&mut w, "test");
+/// write!(&mut w, "formatted {}", "arguments");
+/// ```
+#[macro_export]
+#[stable]
+macro_rules! write {
+ ($dst:expr, $($arg:tt)*) => ((&mut *$dst).write_fmt(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Use the `format!` syntax to write data into a buffer of type `&mut Writer`.
/// See `std::fmt` for more information.
///
)
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Equivalent to the `println!` macro except that a newline is not printed at
+/// the end of the message.
+#[macro_export]
+#[stable]
+macro_rules! print {
+ ($($arg:tt)*) => (::std::io::stdio::print_args(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Equivalent to the `println!` macro except that a newline is not printed at
/// the end of the message.
#[macro_export]
($($arg:tt)*) => (format_args!(::std::io::stdio::print_args, $($arg)*))
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// Macro for printing to a task's stdout handle.
+///
+/// Each task can override its stdout handle via `std::io::stdio::set_stdout`.
+/// The syntax of this macro is the same as that used for `format!`. For more
+/// information, see `std::fmt` and `std::io::stdio`.
+///
+/// # Example
+///
+/// ```
+/// println!("hello there!");
+/// println!("format {} arguments", "some");
+/// ```
+#[macro_export]
+#[stable]
+macro_rules! println {
+ ($($arg:tt)*) => (::std::io::stdio::println_args(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
/// Macro for printing to a task's stdout handle.
///
/// Each task can override its stdout handle via `std::io::stdio::set_stdout`.
pub mod builtin {
/// The core macro for formatted string creation & output.
///
- /// This macro takes as its first argument a callable expression which will
- /// receive as its first argument a value of type `&fmt::Arguments`. This
- /// value can be passed to the functions in `std::fmt` for performing useful
- /// functions. All other formatting macros (`format!`, `write!`,
- /// `println!`, etc) are proxied through this one.
+ /// This macro produces a value of type `fmt::Arguments`. This value can be
+ /// passed to the functions in `std::fmt` for performing useful functions.
+ /// All other formatting macros (`format!`, `write!`, `println!`, etc) are
+ /// proxied through this one.
///
/// For more information, see the documentation in `std::fmt`.
///
/// ```rust
/// use std::fmt;
///
- /// let s = format_args!(fmt::format, "hello {}", "world");
+ /// let s = fmt::format(format_args!("hello {}", "world"));
/// assert_eq!(s, format!("hello {}", "world"));
///
- /// format_args!(|args| {
- /// // pass `args` to another function, etc.
- /// }, "hello {}", "world");
/// ```
#[macro_export]
- macro_rules! format_args { ($closure:expr, $fmt:expr $($args:tt)*) => ({
+ macro_rules! format_args { ($fmt:expr $($args:tt)*) => ({
/* compiler built-in */
}) }
/// # Example
///
/// ```rust,ignore
- /// let secret_key = include_bin!("secret-key.bin");
+ /// let secret_key = include_bytes!("secret-key.bin");
/// ```
#[macro_export]
- macro_rules! include_bin { ($file:expr) => ({ /* compiler built-in */ }) }
+ macro_rules! include_bytes { ($file:expr) => ({ /* compiler built-in */ }) }
+
+ /// Deprecated alias for `include_bytes!()`.
+ #[macro_export]
+ macro_rules! include_bin { ($file:expr) => ({ /* compiler built-in */}) }
/// Expands to a string that represents the current module path.
///
use option::Option::{Some, None};
use path::{Path, GenericPath, BytesContainer};
use sys;
-use ptr::RawPtr;
+use sys::os as os_imp;
+use ptr::PtrExt;
use ptr;
use result::Result;
use result::Result::{Err, Ok};
#[cfg(test)]
mod tests {
- use prelude::*;
use super::*;
+ use prelude::Option::{mod, Some, None};
+ use prelude::{Vec, Clone, AsSlice, SliceExt, CloneSliceExt, IteratorExt};
+ use prelude::{DoubleEndedIteratorExt, Str, StrExt, ToString, GenericPath};
use str;
macro_rules! t {
use self::PathPrefix::*;
-use ascii::AsciiCast;
+use ascii::AsciiExt;
use c_str::{CString, ToCStr};
use clone::Clone;
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use option::Option;
use option::Option::{Some, None};
use slice::SliceExt;
-use str::{CharSplits, FromStr, StrVector, StrExt};
+use str::{SplitTerminator, FromStr, StrVector, StrExt};
use string::{String, ToString};
use unicode::char::UnicodeChar;
use vec::Vec;
/// Each component is yielded as Option<&str> for compatibility with PosixPath, but
/// every component in WindowsPath is guaranteed to be Some.
pub type StrComponents<'a> =
- Map<&'a str, Option<&'a str>, CharSplits<'a, char>, fn(&'a str) -> Option<&'a str>>;
+ Map<&'a str, Option<&'a str>, SplitTerminator<'a, char>, fn(&'a str) -> Option<&'a str>>;
/// Iterator that yields successive components of a Path as &[u8]
pub type Components<'a> =
let repr = me.repr[];
match me.prefix {
Some(DiskPrefix) => {
- repr.as_bytes()[0] == path.as_bytes()[0].to_ascii().to_uppercase().as_byte()
+ repr.as_bytes()[0] == path.as_bytes()[0].to_ascii_uppercase()
}
Some(VerbatimDiskPrefix) => {
- repr.as_bytes()[4] == path.as_bytes()[0].to_ascii().to_uppercase().as_byte()
+ repr.as_bytes()[4] == path.as_bytes()[0].to_ascii_uppercase()
}
_ => false
}
match (self.prefix, other.prefix) {
(Some(DiskPrefix), Some(VerbatimDiskPrefix)) => {
self.is_absolute() &&
- s_repr.as_bytes()[0].to_ascii().to_lowercase() ==
- o_repr.as_bytes()[4].to_ascii().to_lowercase()
+ s_repr.as_bytes()[0].to_ascii_lowercase() ==
+ o_repr.as_bytes()[4].to_ascii_lowercase()
}
(Some(VerbatimDiskPrefix), Some(DiskPrefix)) => {
other.is_absolute() &&
- s_repr.as_bytes()[4].to_ascii().to_lowercase() ==
- o_repr.as_bytes()[0].to_ascii().to_lowercase()
+ s_repr.as_bytes()[4].to_ascii_lowercase() ==
+ o_repr.as_bytes()[0].to_ascii_lowercase()
}
(Some(VerbatimDiskPrefix), Some(VerbatimDiskPrefix)) => {
- s_repr.as_bytes()[4].to_ascii().to_lowercase() ==
- o_repr.as_bytes()[4].to_ascii().to_lowercase()
+ s_repr.as_bytes()[4].to_ascii_lowercase() ==
+ o_repr.as_bytes()[4].to_ascii_lowercase()
}
(Some(UNCPrefix(_,_)), Some(VerbatimUNCPrefix(_,_))) => {
s_repr[2..self.prefix_len()] == o_repr[8..other.prefix_len()]
let mut s = String::from_str(s[0..len]);
unsafe {
let v = s.as_mut_vec();
- v[0] = (*v)[0].to_ascii().to_uppercase().as_byte();
+ v[0] = (*v)[0].to_ascii_uppercase();
}
if is_abs {
// normalize C:/ to C:\
let mut s = String::from_str(s[0..len]);
unsafe {
let v = s.as_mut_vec();
- v[4] = (*v)[4].to_ascii().to_uppercase().as_byte();
+ v[4] = (*v)[4].to_ascii_uppercase();
}
Some(s)
}
let mut s = String::with_capacity(n);
match prefix {
Some(DiskPrefix) => {
- s.push(prefix_.as_bytes()[0].to_ascii()
- .to_uppercase().as_char());
+ s.push(prefix_.as_bytes()[0].to_ascii_uppercase() as char);
s.push(':');
}
Some(VerbatimDiskPrefix) => {
s.push_str(prefix_[0..4]);
- s.push(prefix_.as_bytes()[4].to_ascii()
- .to_uppercase().as_char());
+ s.push(prefix_.as_bytes()[4].to_ascii_uppercase() as char);
s.push_str(prefix_[5..]);
}
Some(UNCPrefix(a,b)) => {
#[cfg(test)]
mod tests {
- use prelude::*;
use super::*;
+ use prelude::Option::{mod, Some, None};
+ use prelude::{Vec, Clone, AsSlice, SliceExt, CloneSliceExt, IteratorExt};
+ use prelude::{DoubleEndedIteratorExt, Str, ToString, GenericPath};
use super::PathPrefix::*;
use super::parse_prefix;
// Reexported types and traits
-#[doc(no_inline)] pub use ascii::{Ascii, AsciiCast, OwnedAsciiCast, AsciiStr};
-#[doc(no_inline)] pub use ascii::IntoBytes;
#[doc(no_inline)] pub use borrow::IntoCow;
#[doc(no_inline)] pub use c_str::ToCStr;
#[doc(no_inline)] pub use char::{Char, UnicodeChar};
#[doc(no_inline)] pub use option::Option;
#[doc(no_inline)] pub use option::Option::{Some, None};
#[doc(no_inline)] pub use path::{GenericPath, Path, PosixPath, WindowsPath};
-#[doc(no_inline)] pub use ptr::{RawPtr, RawMutPtr};
+#[doc(no_inline)] pub use ptr::{PtrExt, MutPtrExt};
#[doc(no_inline)] pub use result::Result;
#[doc(no_inline)] pub use result::Result::{Ok, Err};
#[doc(no_inline)] pub use io::{Buffer, Writer, Reader, Seek, BufferPrelude};
let mut read = 0;
let len = v.len();
while read < len {
- let result = getrandom(v[mut read..]);
+ let result = getrandom(v.slice_from_mut(read));
if result == -1 {
let err = errno() as libc::c_int;
if err == libc::EINTR {
data: UnsafeCell<T>,
}
+unsafe impl<T:Send> Send for Exclusive<T> { }
+
+unsafe impl<T:Send> Sync for Exclusive<T> { }
+
/// An RAII guard returned via `lock`
pub struct ExclusiveGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
#![macro_escape]
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+macro_rules! rterrln {
+ ($fmt:expr $($arg:tt)*) => ( {
+ ::rt::util::dumb_print(format_args!(concat!($fmt, "\n") $($arg)*))
+ } )
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
macro_rules! rterrln {
($fmt:expr $($arg:tt)*) => ( {
format_args!(::rt::util::dumb_print, concat!($fmt, "\n") $($arg)*)
} )
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+macro_rules! rtabort {
+ ($($arg:tt)*) => (::rt::util::abort(format_args!($($arg)*)))
+}
+
+// NOTE(stage0): Remove macro after a snapshot
+#[cfg(stage0)]
macro_rules! rtabort {
($($arg:tt)*) => (format_args!(::rt::util::abort, $($arg)*))
}
}
}
-// Entry point of panic from the libcore crate
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
#[cfg(not(test))]
+/// Entry point of panic from the libcore crate.
+#[lang = "panic_fmt"]
+pub extern fn rust_begin_unwind(msg: fmt::Arguments,
+ file: &'static str, line: uint) -> ! {
+ begin_unwind_fmt(msg, &(file, line))
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
+#[cfg(not(test))]
+/// Entry point of panic from the libcore crate.
#[lang = "panic_fmt"]
pub extern fn rust_begin_unwind(msg: &fmt::Arguments,
file: &'static str, line: uint) -> ! {
begin_unwind_fmt(msg, &(file, line))
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+/// The entry point for unwinding with a formatted message.
+///
+/// This is designed to reduce the amount of code required at the call
+/// site as much as possible (so that `panic!()` has as low an impact
+/// on (e.g.) the inlining of other functions as possible), by moving
+/// the actual formatting into this shared place.
+#[inline(never)] #[cold]
+pub fn begin_unwind_fmt(msg: fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
+ use fmt::FormatWriter;
+
+ // We do two allocations here, unfortunately. But (a) they're
+ // required with the current scheme, and (b) we don't handle
+ // panic + OOM properly anyway (see comment in begin_unwind
+ // below).
+
+ struct VecWriter<'a> { v: &'a mut Vec<u8> }
+
+ impl<'a> fmt::FormatWriter for VecWriter<'a> {
+ fn write(&mut self, buf: &[u8]) -> fmt::Result {
+ self.v.push_all(buf);
+ Ok(())
+ }
+ }
+
+ let mut v = Vec::new();
+ let _ = write!(&mut VecWriter { v: &mut v }, "{}", msg);
+
+ let msg = box String::from_utf8_lossy(v.as_slice()).into_owned();
+ begin_unwind_inner(msg, file_line)
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
/// The entry point for unwinding with a formatted message.
///
/// This is designed to reduce the amount of code required at the call
}
}
+// NOTE(stage0): Remove cfg after a snapshot
+#[cfg(not(stage0))]
+pub fn dumb_print(args: fmt::Arguments) {
+ let _ = Stderr.write_fmt(args);
+}
+
+// NOTE(stage0): Remove function after a snapshot
+#[cfg(stage0)]
pub fn dumb_print(args: &fmt::Arguments) {
let mut w = Stderr;
let _ = write!(&mut w, "{}", args);
}
-pub fn abort(args: &fmt::Arguments) -> ! {
+// NOTE(stage0): Remove wrappers after a snapshot
+#[cfg(not(stage0))] pub fn abort(args: fmt::Arguments) -> ! { abort_(&args) }
+#[cfg(stage0)] pub fn abort(args: &fmt::Arguments) -> ! { abort_(args) }
+
+// NOTE(stage0): Change to `pub fn abort(args: fmt::Arguments) -> !` after a snapshot
+fn abort_(args: &fmt::Arguments) -> ! {
use fmt::FormatWriter;
struct BufWriter<'a> {
}
impl<'a> FormatWriter for BufWriter<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- let left = self.buf[mut self.pos..];
+ let left = self.buf.slice_from_mut(self.pos);
let to_write = bytes[..cmp::min(bytes.len(), left.len())];
slice::bytes::copy_memory(left, to_write);
self.pos += to_write.len();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use kinds::{Send, Sync};
use sync::{Mutex, Condvar};
/// A barrier enables multiple tasks to synchronize the beginning
num_threads: uint,
}
+unsafe impl Send for Barrier {}
+unsafe impl Sync for Barrier {}
+
// The inner state of a double barrier
struct BarrierState {
count: uint,
generation_id: uint,
}
+unsafe impl Send for BarrierState {}
+unsafe impl Sync for BarrierState {}
+
impl Barrier {
/// Create a new barrier that can block a given number of threads.
///
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
pub fn wait(&self) {
- let mut lock = self.lock.lock();
+ let mut lock = self.lock.lock().unwrap();
let local_gen = lock.generation_id;
lock.count += 1;
if lock.count < self.num_threads {
// http://en.wikipedia.org/wiki/Spurious_wakeup
while local_gen == lock.generation_id &&
lock.count < self.num_threads {
- self.cvar.wait(&lock);
+ lock = self.cvar.wait(lock).unwrap();
}
} else {
lock.count = 0;
use prelude::*;
use sync::atomic::{mod, AtomicUint};
-use sync::{mutex, StaticMutexGuard};
+use sync::poison::{mod, LockResult};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
use time::Duration;
+use sync::{mutex, MutexGuard};
/// A Condition Variable
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start
/// Thread::spawn(move|| {
/// let &(ref lock, ref cvar) = &*pair2;
-/// let mut started = lock.lock();
+/// let mut started = lock.lock().unwrap();
/// *started = true;
/// cvar.notify_one();
/// }).detach();
///
/// // wait for the thread to start up
/// let &(ref lock, ref cvar) = &*pair;
-/// let started = lock.lock();
+/// let mut started = lock.lock().unwrap();
/// while !*started {
-/// cvar.wait(&started);
+/// started = cvar.wait(started).unwrap();
/// }
/// ```
+#[stable]
pub struct Condvar { inner: Box<StaticCondvar> }
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
/// Statically allocated condition variables.
///
/// This structure is identical to `Condvar` except that it is suitable for use
///
/// static CVAR: StaticCondvar = CONDVAR_INIT;
/// ```
+#[unstable = "may be merged with Condvar in the future"]
pub struct StaticCondvar {
inner: sys::Condvar,
mutex: AtomicUint,
}
+unsafe impl Send for StaticCondvar {}
+unsafe impl Sync for StaticCondvar {}
+
/// Constant initializer for a statically allocated condition variable.
+#[unstable = "may be merged with Condvar in the future"]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
mutex: atomic::INIT_ATOMIC_UINT,
};
-/// A trait for vaules which can be passed to the waiting methods of condition
-/// variables. This is implemented by the mutex guards in this module.
-///
-/// Note that this trait should likely not be implemented manually unless you
-/// really know what you're doing.
-pub trait AsMutexGuard {
- #[allow(missing_docs)]
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard;
-}
-
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
/// notified.
+ #[stable]
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
/// notification.
///
/// This function will atomically unlock the mutex specified (represented by
- /// `guard`) and block the current thread. This means that any calls to
- /// `notify_*()` which happen logically after the mutex is unlocked are
+    /// `guard`) and block the current thread. This means that any calls
+    /// to `notify_*()` which happen logically after the mutex is unlocked are
/// candidates to wake this thread up. When this function call returns, the
/// lock specified will have been re-acquired.
///
/// the predicate must always be checked each time this function returns to
/// protect against spurious wakeups.
///
+ /// # Failure
+ ///
+ /// This function will return an error if the mutex being waited on is
+ /// poisoned when this thread re-acquires the lock. For more information,
+ /// see information about poisoning on the Mutex type.
+ ///
/// # Panics
///
/// This function will `panic!()` if it is used with more than one mutex
/// over time. Each condition variable is dynamically bound to exactly one
/// mutex to ensure defined behavior across platforms. If this functionality
/// is not desired, then unsafe primitives in `sys` are provided.
- pub fn wait<T: AsMutexGuard>(&self, mutex_guard: &T) {
+ #[stable]
+ pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>)
+ -> LockResult<MutexGuard<'a, T>> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
- me.inner.wait(mutex_guard)
+ me.inner.wait(guard)
}
}
// provide. There are also additional concerns about the unix-specific
// implementation which may need to be addressed.
#[allow(dead_code)]
- fn wait_timeout<T: AsMutexGuard>(&self, mutex_guard: &T,
- dur: Duration) -> bool {
+ fn wait_timeout<'a, T>(&self, guard: MutexGuard<'a, T>, dur: Duration)
+ -> LockResult<(MutexGuard<'a, T>, bool)> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
- me.inner.wait_timeout(mutex_guard, dur)
+ me.inner.wait_timeout(guard, dur)
}
}
/// `notify_one` are not buffered in any way.
///
/// To wake up all threads, see `notify_one()`.
+ #[stable]
pub fn notify_one(&self) { unsafe { self.inner.inner.notify_one() } }
/// Wake up all blocked threads on this condvar.
/// way.
///
/// To wake up only one thread, see `notify_one()`.
+ #[stable]
pub fn notify_all(&self) { unsafe { self.inner.inner.notify_all() } }
}
/// notification.
///
/// See `Condvar::wait`.
- pub fn wait<T: AsMutexGuard>(&'static self, mutex_guard: &T) {
- unsafe {
- let lock = mutex_guard.as_mutex_guard();
- let sys = mutex::guard_lock(lock);
- self.verify(sys);
- self.inner.wait(sys);
- (*mutex::guard_poison(lock)).check("mutex");
+ #[unstable = "may be merged with Condvar in the future"]
+ pub fn wait<'a, T>(&'static self, guard: MutexGuard<'a, T>)
+ -> LockResult<MutexGuard<'a, T>> {
+ let poisoned = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ self.verify(lock);
+ self.inner.wait(lock);
+ mutex::guard_poison(&guard).get()
+ };
+ if poisoned {
+ Err(poison::new_poison_error(guard))
+ } else {
+ Ok(guard)
}
}
///
/// See `Condvar::wait_timeout`.
#[allow(dead_code)] // may want to stabilize this later, see wait_timeout above
- fn wait_timeout<T: AsMutexGuard>(&'static self, mutex_guard: &T,
- dur: Duration) -> bool {
- unsafe {
- let lock = mutex_guard.as_mutex_guard();
- let sys = mutex::guard_lock(lock);
- self.verify(sys);
- let ret = self.inner.wait_timeout(sys, dur);
- (*mutex::guard_poison(lock)).check("mutex");
- return ret;
+ fn wait_timeout<'a, T>(&'static self, guard: MutexGuard<'a, T>, dur: Duration)
+ -> LockResult<(MutexGuard<'a, T>, bool)> {
+ let (poisoned, success) = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ self.verify(lock);
+ let success = self.inner.wait_timeout(lock, dur);
+ (mutex::guard_poison(&guard).get(), success)
+ };
+ if poisoned {
+ Err(poison::new_poison_error((guard, success)))
+ } else {
+ Ok((guard, success))
}
}
/// Wake up one blocked thread on this condvar.
///
/// See `Condvar::notify_one`.
+ #[unstable = "may be merged with Condvar in the future"]
pub fn notify_one(&'static self) { unsafe { self.inner.notify_one() } }
/// Wake up all blocked threads on this condvar.
///
/// See `Condvar::notify_all`.
+ #[unstable = "may be merged with Condvar in the future"]
pub fn notify_all(&'static self) { unsafe { self.inner.notify_all() } }
/// Deallocate all resources associated with this static condvar.
/// active users of the condvar, and this also doesn't prevent any future
/// users of the condvar. This method is required to be called to not leak
/// memory on all platforms.
+ #[unstable = "may be merged with Condvar in the future"]
pub unsafe fn destroy(&'static self) {
self.inner.destroy()
}
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
- let g = M.lock();
+ let g = M.lock().unwrap();
spawn(move|| {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
C.notify_one();
});
- C.wait(&g);
+ let g = C.wait(g).unwrap();
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
let tx = tx.clone();
spawn(move|| {
let &(ref lock, ref cond) = &*data;
- let mut cnt = lock.lock();
+ let mut cnt = lock.lock().unwrap();
*cnt += 1;
if *cnt == N {
tx.send(());
}
while *cnt != 0 {
- cond.wait(&cnt);
+ cnt = cond.wait(cnt).unwrap();
}
tx.send(());
});
let &(ref lock, ref cond) = &*data;
rx.recv();
- let mut cnt = lock.lock();
+ let mut cnt = lock.lock().unwrap();
*cnt = 0;
cond.notify_all();
drop(cnt);
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
- let g = M.lock();
- assert!(!C.wait_timeout(&g, Duration::nanoseconds(1000)));
+ let g = M.lock().unwrap();
+ let (g, success) = C.wait_timeout(g, Duration::nanoseconds(1000)).unwrap();
+ assert!(!success);
spawn(move|| {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
C.notify_one();
});
- assert!(C.wait_timeout(&g, Duration::days(1)));
+ let (g, success) = C.wait_timeout(g, Duration::days(1)).unwrap();
+ assert!(success);
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
static M2: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = CONDVAR_INIT;
- let g = M1.lock();
+ let mut g = M1.lock().unwrap();
spawn(move|| {
- let _g = M1.lock();
+ let _g = M1.lock().unwrap();
C.notify_one();
});
- C.wait(&g);
+ g = C.wait(g).unwrap();
drop(g);
- C.wait(&M2.lock());
+ C.wait(M2.lock().unwrap()).unwrap();
}
}
pub use alloc::arc::{Arc, Weak};
-pub use self::mutex::{Mutex, MutexGuard, StaticMutex, StaticMutexGuard, MUTEX_INIT};
+pub use self::mutex::{Mutex, MutexGuard, StaticMutex};
+pub use self::mutex::MUTEX_INIT;
pub use self::rwlock::{RWLock, StaticRWLock, RWLOCK_INIT};
pub use self::rwlock::{RWLockReadGuard, RWLockWriteGuard};
-pub use self::rwlock::{StaticRWLockReadGuard, StaticRWLockWriteGuard};
-pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT, AsMutexGuard};
+pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
pub use self::once::{Once, ONCE_INIT};
pub use self::semaphore::{Semaphore, SemaphoreGuard};
pub use self::barrier::Barrier;
+pub use self::poison::{PoisonError, TryLockError, TryLockResult, LockResult};
pub use self::future::Future;
pub use self::task_pool::TaskPool;
use cell::UnsafeCell;
use kinds::marker;
-use sync::{poison, AsMutexGuard};
+use sync::poison::{mod, TryLockError, TryLockResult, LockResult};
use sys_common::mutex as sys;
/// A mutual exclusion primitive useful for protecting shared data
///
/// # Poisoning
///
-/// In order to prevent access to otherwise invalid data, each mutex will
-/// propagate any panics which occur while the lock is held. Once a thread has
-/// panicked while holding the lock, then all other threads will immediately
-/// panic as well once they hold the lock.
+/// The mutexes in this module implement a strategy called "poisoning" where a
+/// mutex is considered poisoned whenever a thread panics while holding the
+/// lock. Once a mutex is poisoned, all other tasks are unable to access the
+/// data by default as it is likely tainted (some invariant is not being
+/// upheld).
///
-/// # Example
+/// For a mutex, this means that the `lock` and `try_lock` methods return a
+/// `Result` which indicates whether a mutex has been poisoned or not. Most
+/// usage of a mutex will simply `unwrap()` these results, propagating panics
+/// among threads to ensure that a possibly invalid invariant is not witnessed.
+///
+/// A poisoned mutex, however, does not prevent all access to the underlying
+/// data. The `PoisonError` type has an `into_guard` method which will return
+/// the guard that would have otherwise been returned on a successful lock. This
+/// allows access to the data, despite the lock being poisoned.
+///
+/// # Examples
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// let (tx, rx) = channel();
/// for _ in range(0u, 10) {
/// let (data, tx) = (data.clone(), tx.clone());
-/// Thread::spawn(move|| {
+/// Thread::spawn(move || {
/// // The shared static can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
-/// let mut data = data.lock();
+/// //
+/// // We unwrap() the return value to assert that we are not expecting
+/// // tasks to ever fail while holding the lock.
+/// let mut data = data.lock().unwrap();
/// *data += 1;
/// if *data == N {
/// tx.send(());
///
/// rx.recv();
/// ```
+///
+/// To recover from a poisoned mutex:
+///
+/// ```rust
+/// use std::sync::{Arc, Mutex};
+/// use std::thread::Thread;
+///
+/// let lock = Arc::new(Mutex::new(0u));
+/// let lock2 = lock.clone();
+///
+/// let _ = Thread::spawn(move || -> () {
+/// // This thread will acquire the mutex first, unwrapping the result of
+/// // `lock` because the lock has not been poisoned.
+/// let _lock = lock2.lock().unwrap();
+///
+///     // This panic while holding the lock (`_lock` is in scope) will poison
+///     // the mutex.
+/// panic!();
+/// }).join();
+///
+/// // The lock is poisoned by this point, but the returned result can be
+/// // pattern matched on to return the underlying guard on both branches.
+/// let mut guard = match lock.lock() {
+/// Ok(guard) => guard,
+/// Err(poisoned) => poisoned.into_guard(),
+/// };
+///
+/// *guard += 1;
+/// ```
+#[stable]
pub struct Mutex<T> {
// Note that this static mutex is in a *box*, not inlined into the struct
// itself. Once a native mutex has been used once, its address can never
data: UnsafeCell<T>,
}
+unsafe impl<T:Send> Send for Mutex<T> { }
+
+unsafe impl<T:Send> Sync for Mutex<T> { }
+
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
/// static LOCK: StaticMutex = MUTEX_INIT;
///
/// {
-/// let _g = LOCK.lock();
+/// let _g = LOCK.lock().unwrap();
/// // do some productive work
/// }
/// // lock is unlocked here.
/// ```
+#[unstable = "may be merged with Mutex in the future"]
pub struct StaticMutex {
lock: sys::Mutex,
- poison: UnsafeCell<poison::Flag>,
+ poison: poison::Flag,
}
+unsafe impl Sync for StaticMutex {}
+
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be access through this guard via its
/// Deref and DerefMut implementations
#[must_use]
+#[stable]
pub struct MutexGuard<'a, T: 'a> {
// funny underscores due to how Deref/DerefMut currently work (they
// disregard field privacy).
- __lock: &'a Mutex<T>,
- __guard: StaticMutexGuard,
-}
-
-/// An RAII implementation of a "scoped lock" of a static mutex. When this
-/// structure is dropped (falls out of scope), the lock will be unlocked.
-#[must_use]
-pub struct StaticMutexGuard {
- lock: &'static sys::Mutex,
- marker: marker::NoSend,
- poison: poison::Guard<'static>,
+ __lock: &'a StaticMutex,
+ __data: &'a UnsafeCell<T>,
+ __poison: poison::Guard,
+ __marker: marker::NoSend,
}
/// Static initialization of a mutex. This constant can be used to initialize
/// other mutex constants.
+#[unstable = "may be merged with Mutex in the future"]
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
- poison: UnsafeCell { value: poison::Flag { failed: false } },
+ poison: poison::FLAG_INIT,
};
impl<T: Send> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
+ #[stable]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
/// the guard goes out of scope, the mutex will be unlocked.
///
- /// # Panics
+ /// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
- /// this call will immediately panic once the mutex is acquired.
- pub fn lock(&self) -> MutexGuard<T> {
- unsafe {
- let lock: &'static StaticMutex = &*(&*self.inner as *const _);
- MutexGuard::new(self, lock.lock())
- }
+ /// this call will return an error once the mutex is acquired.
+ #[stable]
+ pub fn lock(&self) -> LockResult<MutexGuard<T>> {
+ unsafe { self.inner.lock.lock() }
+ MutexGuard::new(&*self.inner, &self.data)
}
/// Attempts to acquire this lock.
///
/// This function does not block.
///
- /// # Panics
+ /// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
- /// this call will immediately panic if the mutex would otherwise be
+ /// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(&self) -> Option<MutexGuard<T>> {
- unsafe {
- let lock: &'static StaticMutex = &*(&*self.inner as *const _);
- lock.try_lock().map(|guard| {
- MutexGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
+ if unsafe { self.inner.lock.try_lock() } {
+ Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
}
}
}
+static DUMMY: UnsafeCell<()> = UnsafeCell { value: () };
+
impl StaticMutex {
/// Acquires this lock, see `Mutex::lock`
- pub fn lock(&'static self) -> StaticMutexGuard {
+ #[inline]
+ #[unstable = "may be merged with Mutex in the future"]
+ pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
unsafe { self.lock.lock() }
- StaticMutexGuard::new(self)
+ MutexGuard::new(self, &DUMMY)
}
/// Attempts to grab this lock, see `Mutex::try_lock`
- pub fn try_lock(&'static self) -> Option<StaticMutexGuard> {
+ #[inline]
+ #[unstable = "may be merged with Mutex in the future"]
+ pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
if unsafe { self.lock.try_lock() } {
- Some(StaticMutexGuard::new(self))
+ Ok(try!(MutexGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
/// *all* platforms. It may be the case that some platforms do not leak
/// memory if this method is not called, but this is not guaranteed to be
/// true on all platforms.
+ #[unstable = "may be merged with Mutex in the future"]
pub unsafe fn destroy(&'static self) {
self.lock.destroy()
}
}
impl<'mutex, T> MutexGuard<'mutex, T> {
- fn new(lock: &Mutex<T>, guard: StaticMutexGuard) -> MutexGuard<T> {
- MutexGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
+ -> LockResult<MutexGuard<'mutex, T>> {
+ poison::map_result(lock.poison.borrow(), |guard| {
+ MutexGuard {
+ __lock: lock,
+ __data: data,
+ __poison: guard,
+ __marker: marker::NoSend,
+ }
+ })
}
}
-impl<'mutex, T> AsMutexGuard for MutexGuard<'mutex, T> {
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { &self.__guard }
-}
-
impl<'mutex, T> Deref<T> for MutexGuard<'mutex, T> {
- fn deref<'a>(&'a self) -> &'a T { unsafe { &*self.__lock.data.get() } }
+ fn deref<'a>(&'a self) -> &'a T {
+ unsafe { &*self.__data.get() }
+ }
}
impl<'mutex, T> DerefMut<T> for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
- unsafe { &mut *self.__lock.data.get() }
+ unsafe { &mut *self.__data.get() }
}
}
-impl StaticMutexGuard {
- fn new(lock: &'static StaticMutex) -> StaticMutexGuard {
+#[unsafe_destructor]
+impl<'a, T> Drop for MutexGuard<'a, T> {
+ #[inline]
+ fn drop(&mut self) {
unsafe {
- let guard = StaticMutexGuard {
- lock: &lock.lock,
- marker: marker::NoSend,
- poison: (*lock.poison.get()).borrow(),
- };
- guard.poison.check("mutex");
- return guard;
+ self.__lock.poison.done(&self.__poison);
+ self.__lock.lock.unlock();
}
}
}
-pub fn guard_lock(guard: &StaticMutexGuard) -> &sys::Mutex { guard.lock }
-pub fn guard_poison(guard: &StaticMutexGuard) -> &poison::Guard {
- &guard.poison
-}
-
-impl AsMutexGuard for StaticMutexGuard {
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { self }
+pub fn guard_lock<'a, T>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
+ &guard.__lock.lock
}
-#[unsafe_destructor]
-impl Drop for StaticMutexGuard {
- fn drop(&mut self) {
- unsafe {
- self.poison.done();
- self.lock.unlock();
- }
- }
+pub fn guard_poison<'a, T>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
+ &guard.__lock.poison
}
#[cfg(test)]
use thread::Thread;
use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar};
+ struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
+ unsafe impl<T:'static+Send> Send for Packet<T> {}
+ unsafe impl<T> Sync for Packet<T> {}
+
#[test]
fn smoke() {
let m = Mutex::new(());
- drop(m.lock());
- drop(m.lock());
+ drop(m.lock().unwrap());
+ drop(m.lock().unwrap());
}
#[test]
fn smoke_static() {
static M: StaticMutex = MUTEX_INIT;
unsafe {
- drop(M.lock());
- drop(M.lock());
+ drop(M.lock().unwrap());
+ drop(M.lock().unwrap());
M.destroy();
}
}
fn inc() {
for _ in range(0, J) {
unsafe {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
CNT += 1;
}
}
#[test]
fn try_lock() {
let m = Mutex::new(());
- assert!(m.try_lock().is_some());
+ *m.try_lock().unwrap() = ();
}
#[test]
fn test_mutex_arc_condvar() {
- let arc = Arc::new((Mutex::new(false), Condvar::new()));
- let arc2 = arc.clone();
+ let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
+ let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
spawn(move|| {
// wait until parent gets in
rx.recv();
- let &(ref lock, ref cvar) = &*arc2;
- let mut lock = lock.lock();
+ let &(ref lock, ref cvar) = &*packet2.0;
+ let mut lock = lock.lock().unwrap();
*lock = true;
cvar.notify_one();
});
- let &(ref lock, ref cvar) = &*arc;
- let lock = lock.lock();
+ let &(ref lock, ref cvar) = &*packet.0;
+ let mut lock = lock.lock().unwrap();
tx.send(());
assert!(!*lock);
while !*lock {
- cvar.wait(&lock);
+ lock = cvar.wait(lock).unwrap();
}
}
#[test]
- #[should_fail]
fn test_arc_condvar_poison() {
- let arc = Arc::new((Mutex::new(1i), Condvar::new()));
- let arc2 = arc.clone();
+ let packet = Packet(Arc::new((Mutex::new(1i), Condvar::new())));
+ let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
spawn(move|| {
rx.recv();
- let &(ref lock, ref cvar) = &*arc2;
- let _g = lock.lock();
+ let &(ref lock, ref cvar) = &*packet2.0;
+ let _g = lock.lock().unwrap();
cvar.notify_one();
// Parent should fail when it wakes up.
panic!();
});
- let &(ref lock, ref cvar) = &*arc;
- let lock = lock.lock();
+ let &(ref lock, ref cvar) = &*packet.0;
+ let mut lock = lock.lock().unwrap();
tx.send(());
while *lock == 1 {
- cvar.wait(&lock);
+ match cvar.wait(lock) {
+ Ok(l) => {
+ lock = l;
+ assert_eq!(*lock, 1);
+ }
+ Err(..) => break,
+ }
}
}
#[test]
- #[should_fail]
fn test_mutex_arc_poison() {
let arc = Arc::new(Mutex::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.lock();
+ Thread::spawn(move|| {
+ let lock = arc2.lock().unwrap();
assert_eq!(*lock, 2);
}).join();
- let lock = arc.lock();
- assert_eq!(*lock, 1);
+ assert!(arc.lock().is_err());
}
#[test]
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
spawn(move|| {
- let lock = arc2.lock();
- let lock2 = lock.deref().lock();
+ let lock = arc2.lock().unwrap();
+ let lock2 = lock.deref().lock().unwrap();
assert_eq!(*lock2, 1);
tx.send(());
});
}
impl Drop for Unwinder {
fn drop(&mut self) {
- *self.i.lock() += 1;
+ *self.i.lock().unwrap() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
- let lock = arc.lock();
+ let lock = arc.lock().unwrap();
assert_eq!(*lock, 2);
}
}
//! example use case would be for initializing an FFI library.
use int;
+use kinds::Sync;
use mem::drop;
use ops::FnOnce;
use sync::atomic;
lock_cnt: atomic::AtomicInt,
}
+unsafe impl Sync for Once {}
+
/// Initialization value for static `Once` values.
pub const ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use prelude::*;
+
+use cell::UnsafeCell;
+use error::FromError;
+use fmt;
use thread::Thread;
-pub struct Flag { pub failed: bool }
+pub struct Flag { failed: UnsafeCell<bool> }
+pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
impl Flag {
- pub fn borrow(&mut self) -> Guard {
- Guard { flag: &mut self.failed, panicking: Thread::panicking() }
+ #[inline]
+ pub fn borrow(&self) -> LockResult<Guard> {
+ let ret = Guard { panicking: Thread::panicking() };
+ if unsafe { *self.failed.get() } {
+ Err(new_poison_error(ret))
+ } else {
+ Ok(ret)
+ }
+ }
+
+ #[inline]
+ pub fn done(&self, guard: &Guard) {
+ if !guard.panicking && Thread::panicking() {
+ unsafe { *self.failed.get() = true; }
+ }
+ }
+
+ #[inline]
+ pub fn get(&self) -> bool {
+ unsafe { *self.failed.get() }
}
}
-pub struct Guard<'a> {
- flag: &'a mut bool,
+#[allow(missing_copy_implementations)]
+pub struct Guard {
panicking: bool,
}
-impl<'a> Guard<'a> {
- pub fn check(&self, name: &str) {
- if *self.flag {
- panic!("poisoned {} - another task failed inside", name);
- }
+/// A type of error which can be returned whenever a lock is acquired.
+///
+/// Both Mutexes and RWLocks are poisoned whenever a task fails while the lock
+/// is held. The precise semantics for when a lock is poisoned is documented on
+/// each lock, but once a lock is poisoned then all future acquisitions will
+/// return this error.
+#[stable]
+pub struct PoisonError<T> {
+ guard: T,
+}
+
+/// An enumeration of possible errors which can occur while calling the
+/// `try_lock` method.
+#[stable]
+pub enum TryLockError<T> {
+ /// The lock could not be acquired because another task failed while holding
+ /// the lock.
+ #[stable]
+ Poisoned(PoisonError<T>),
+ /// The lock could not be acquired at this time because the operation would
+ /// otherwise block.
+ #[stable]
+ WouldBlock,
+}
+
+/// A type alias for the result of a lock method which can be poisoned.
+///
+/// The `Ok` variant of this result indicates that the primitive was not
+/// poisoned, and the `Guard` is contained within. The `Err` variant indicates
+/// that the primitive was poisoned. Note that the `Err` variant *also* carries
+/// the associated guard, and it can be acquired through the `into_guard`
+/// method.
+#[stable]
+pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
+
+/// A type alias for the result of a nonblocking locking method.
+///
+/// For more information, see `LockResult`. A `TryLockResult` doesn't
+/// necessarily hold the associated guard in the `Err` type as the lock may not
+/// have been acquired for other reasons.
+#[stable]
+pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
+
+impl<T> fmt::Show for PoisonError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "poisoned lock: another task failed inside".fmt(f)
+ }
+}
+
+impl<T> PoisonError<T> {
+ /// Consumes this error indicating that a lock is poisoned, returning the
+ /// underlying guard to allow access regardless.
+ #[stable]
+ pub fn into_guard(self) -> T { self.guard }
+}
+
+impl<T> FromError<PoisonError<T>> for TryLockError<T> {
+ fn from_error(err: PoisonError<T>) -> TryLockError<T> {
+ TryLockError::Poisoned(err)
}
+}
- pub fn done(&mut self) {
- if !self.panicking && Thread::panicking() {
- *self.flag = true;
+impl<T> fmt::Show for TryLockError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ TryLockError::Poisoned(ref p) => p.fmt(f),
+ TryLockError::WouldBlock => {
+ "try_lock failed because the operation would block".fmt(f)
+ }
}
}
}
+
+pub fn new_poison_error<T>(guard: T) -> PoisonError<T> {
+ PoisonError { guard: guard }
+}
+
+pub fn map_result<T, U, F>(result: LockResult<T>, f: F)
+ -> LockResult<U>
+ where F: FnOnce(T) -> U {
+ match result {
+ Ok(t) => Ok(f(t)),
+ Err(PoisonError { guard }) => Err(new_poison_error(f(guard)))
+ }
+}
use prelude::*;
-use kinds::marker;
use cell::UnsafeCell;
+use kinds::marker;
+use sync::poison::{mod, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;
-use sync::poison;
/// A reader-writer lock
///
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
///
+/// # Poisoning
+///
/// RWLocks, like Mutexes, will become poisoned on panics. Note, however, that
/// an RWLock may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
///
-/// # Example
+/// # Examples
///
/// ```
/// use std::sync::RWLock;
///
/// // many reader locks can be held at once
/// {
-/// let r1 = lock.read();
-/// let r2 = lock.read();
+/// let r1 = lock.read().unwrap();
+/// let r2 = lock.read().unwrap();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
-/// let mut w = lock.write();
+/// let mut w = lock.write().unwrap();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
+#[stable]
pub struct RWLock<T> {
inner: Box<StaticRWLock>,
data: UnsafeCell<T>,
}
+unsafe impl<T:'static+Send> Send for RWLock<T> {}
+unsafe impl<T> Sync for RWLock<T> {}
+
/// Structure representing a statically allocated RWLock.
///
/// This structure is intended to be used inside of a `static` and will provide
/// static LOCK: StaticRWLock = RWLOCK_INIT;
///
/// {
-/// let _g = LOCK.read();
+/// let _g = LOCK.read().unwrap();
/// // ... shared read access
/// }
/// {
-/// let _g = LOCK.write();
+/// let _g = LOCK.write().unwrap();
/// // ... exclusive write access
/// }
/// unsafe { LOCK.destroy() } // free all resources
/// ```
+#[unstable = "may be merged with RWLock in the future"]
pub struct StaticRWLock {
- inner: sys::RWLock,
- poison: UnsafeCell<poison::Flag>,
+ lock: sys::RWLock,
+ poison: poison::Flag,
}
+unsafe impl Send for StaticRWLock {}
+unsafe impl Sync for StaticRWLock {}
+
/// Constant initialization for a statically-initialized rwlock.
+#[unstable = "may be merged with RWLock in the future"]
pub const RWLOCK_INIT: StaticRWLock = StaticRWLock {
- inner: sys::RWLOCK_INIT,
- poison: UnsafeCell { value: poison::Flag { failed: false } },
+ lock: sys::RWLOCK_INIT,
+ poison: poison::FLAG_INIT,
};
/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
+#[stable]
pub struct RWLockReadGuard<'a, T: 'a> {
- __lock: &'a RWLock<T>,
- __guard: StaticRWLockReadGuard,
+ __lock: &'a StaticRWLock,
+ __data: &'a UnsafeCell<T>,
+ __marker: marker::NoSend,
}
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
+#[stable]
pub struct RWLockWriteGuard<'a, T: 'a> {
- __lock: &'a RWLock<T>,
- __guard: StaticRWLockWriteGuard,
-}
-
-/// RAII structure used to release the shared read access of a lock when
-/// dropped.
-#[must_use]
-pub struct StaticRWLockReadGuard {
- lock: &'static sys::RWLock,
- marker: marker::NoSend,
-}
-
-/// RAII structure used to release the exclusive write access of a lock when
-/// dropped.
-#[must_use]
-pub struct StaticRWLockWriteGuard {
- lock: &'static sys::RWLock,
- marker: marker::NoSend,
- poison: poison::Guard<'static>,
+ __lock: &'a StaticRWLock,
+ __data: &'a UnsafeCell<T>,
+ __poison: poison::Guard,
+ __marker: marker::NoSend,
}
impl<T: Send + Sync> RWLock<T> {
/// Creates a new instance of an RWLock which is unlocked and read to go.
+ #[stable]
pub fn new(t: T) -> RWLock<T> {
RWLock { inner: box RWLOCK_INIT, data: UnsafeCell::new(t) }
}
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. The
- /// panic will occur immediately after the lock has been acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// The failure will occur immediately after the lock has been acquired.
#[inline]
- pub fn read(&self) -> RWLockReadGuard<T> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- RWLockReadGuard::new(self, lock.read())
- }
+ #[stable]
+ pub fn read(&self) -> LockResult<RWLockReadGuard<T>> {
+ unsafe { self.inner.lock.read() }
+ RWLockReadGuard::new(&*self.inner, &self.data)
}
/// Attempt to acquire this lock with shared read access.
/// guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. A
- /// panic will only occur if the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
#[inline]
- pub fn try_read(&self) -> Option<RWLockReadGuard<T>> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- lock.try_read().map(|guard| {
- RWLockReadGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_read(&self) -> TryLockResult<RWLockReadGuard<T>> {
+ if unsafe { self.inner.lock.try_read() } {
+ Ok(try!(RWLockReadGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
/// Returns an RAII guard which will drop the write access of this rwlock
/// when dropped.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. The
- /// panic will occur when the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// An error will be returned when the lock is acquired.
#[inline]
- pub fn write(&self) -> RWLockWriteGuard<T> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- RWLockWriteGuard::new(self, lock.write())
- }
+ #[stable]
+ pub fn write(&self) -> LockResult<RWLockWriteGuard<T>> {
+ unsafe { self.inner.lock.write() }
+ RWLockWriteGuard::new(&*self.inner, &self.data)
}
/// Attempt to lock this rwlock with exclusive write access.
/// to `write` would otherwise block. If successful, an RAII guard is
/// returned.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. A
- /// panic will only occur if the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
#[inline]
- pub fn try_write(&self) -> Option<RWLockWriteGuard<T>> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- lock.try_write().map(|guard| {
- RWLockWriteGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_write(&self) -> TryLockResult<RWLockWriteGuard<T>> {
+        if unsafe { self.inner.lock.try_write() } {
+ Ok(try!(RWLockWriteGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
}
#[unsafe_destructor]
impl<T> Drop for RWLock<T> {
fn drop(&mut self) {
- unsafe { self.inner.inner.destroy() }
+ unsafe { self.inner.lock.destroy() }
}
}
+static DUMMY: UnsafeCell<()> = UnsafeCell { value: () };
+
impl StaticRWLock {
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
/// See `RWLock::read`.
#[inline]
- pub fn read(&'static self) -> StaticRWLockReadGuard {
- unsafe { self.inner.read() }
- StaticRWLockReadGuard::new(self)
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn read(&'static self) -> LockResult<RWLockReadGuard<'static, ()>> {
+ unsafe { self.lock.read() }
+ RWLockReadGuard::new(self, &DUMMY)
}
/// Attempt to acquire this lock with shared read access.
///
/// See `RWLock::try_read`.
#[inline]
- pub fn try_read(&'static self) -> Option<StaticRWLockReadGuard> {
- if unsafe { self.inner.try_read() } {
- Some(StaticRWLockReadGuard::new(self))
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn try_read(&'static self)
+ -> TryLockResult<RWLockReadGuard<'static, ()>> {
+ if unsafe { self.lock.try_read() } {
+ Ok(try!(RWLockReadGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
///
/// See `RWLock::write`.
#[inline]
- pub fn write(&'static self) -> StaticRWLockWriteGuard {
- unsafe { self.inner.write() }
- StaticRWLockWriteGuard::new(self)
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn write(&'static self) -> LockResult<RWLockWriteGuard<'static, ()>> {
+ unsafe { self.lock.write() }
+ RWLockWriteGuard::new(self, &DUMMY)
}
/// Attempt to lock this rwlock with exclusive write access.
///
/// See `RWLock::try_write`.
#[inline]
- pub fn try_write(&'static self) -> Option<StaticRWLockWriteGuard> {
- if unsafe { self.inner.try_write() } {
- Some(StaticRWLockWriteGuard::new(self))
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn try_write(&'static self)
+ -> TryLockResult<RWLockWriteGuard<'static, ()>> {
+ if unsafe { self.lock.try_write() } {
+ Ok(try!(RWLockWriteGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
/// active users of the lock, and this also doesn't prevent any future users
/// of this lock. This method is required to be called to not leak memory on
/// all platforms.
+ #[unstable = "may be merged with RWLock in the future"]
pub unsafe fn destroy(&'static self) {
- self.inner.destroy()
+ self.lock.destroy()
}
}
impl<'rwlock, T> RWLockReadGuard<'rwlock, T> {
- fn new(lock: &RWLock<T>, guard: StaticRWLockReadGuard)
- -> RWLockReadGuard<T> {
- RWLockReadGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
+ -> LockResult<RWLockReadGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.borrow(), |_| {
+ RWLockReadGuard {
+ __lock: lock,
+ __data: data,
+ __marker: marker::NoSend,
+ }
+ })
}
}
impl<'rwlock, T> RWLockWriteGuard<'rwlock, T> {
- fn new(lock: &RWLock<T>, guard: StaticRWLockWriteGuard)
- -> RWLockWriteGuard<T> {
- RWLockWriteGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
+ -> LockResult<RWLockWriteGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.borrow(), |guard| {
+ RWLockWriteGuard {
+ __lock: lock,
+ __data: data,
+ __poison: guard,
+ __marker: marker::NoSend,
+ }
+ })
}
}
impl<'rwlock, T> Deref<T> for RWLockReadGuard<'rwlock, T> {
- fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } }
+ fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
impl<'rwlock, T> Deref<T> for RWLockWriteGuard<'rwlock, T> {
- fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } }
+ fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
impl<'rwlock, T> DerefMut<T> for RWLockWriteGuard<'rwlock, T> {
- fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.__lock.data.get() } }
-}
-
-impl StaticRWLockReadGuard {
- fn new(lock: &'static StaticRWLock) -> StaticRWLockReadGuard {
- let guard = StaticRWLockReadGuard {
- lock: &lock.inner,
- marker: marker::NoSend,
- };
- unsafe { (*lock.poison.get()).borrow().check("rwlock"); }
- return guard;
- }
-}
-impl StaticRWLockWriteGuard {
- fn new(lock: &'static StaticRWLock) -> StaticRWLockWriteGuard {
- unsafe {
- let guard = StaticRWLockWriteGuard {
- lock: &lock.inner,
- marker: marker::NoSend,
- poison: (*lock.poison.get()).borrow(),
- };
- guard.poison.check("rwlock");
- return guard;
- }
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.__data.get() }
}
}
#[unsafe_destructor]
-impl Drop for StaticRWLockReadGuard {
+impl<'a, T> Drop for RWLockReadGuard<'a, T> {
fn drop(&mut self) {
- unsafe { self.lock.read_unlock(); }
+ unsafe { self.__lock.lock.read_unlock(); }
}
}
#[unsafe_destructor]
-impl Drop for StaticRWLockWriteGuard {
+impl<'a, T> Drop for RWLockWriteGuard<'a, T> {
fn drop(&mut self) {
- self.poison.done();
- unsafe { self.lock.write_unlock(); }
+ self.__lock.poison.done(&self.__poison);
+ unsafe { self.__lock.lock.write_unlock(); }
}
}
#[test]
fn smoke() {
let l = RWLock::new(());
- drop(l.read());
- drop(l.write());
- drop((l.read(), l.read()));
- drop(l.write());
+ drop(l.read().unwrap());
+ drop(l.write().unwrap());
+ drop((l.read().unwrap(), l.read().unwrap()));
+ drop(l.write().unwrap());
}
#[test]
fn static_smoke() {
static R: StaticRWLock = RWLOCK_INIT;
- drop(R.read());
- drop(R.write());
- drop((R.read(), R.read()));
- drop(R.write());
+ drop(R.read().unwrap());
+ drop(R.write().unwrap());
+ drop((R.read().unwrap(), R.read().unwrap()));
+ drop(R.write().unwrap());
unsafe { R.destroy(); }
}
let mut rng = rand::task_rng();
for _ in range(0, M) {
if rng.gen_weighted_bool(N) {
- drop(R.write());
+ drop(R.write().unwrap());
} else {
- drop(R.read());
+ drop(R.read().unwrap());
}
}
drop(tx);
}
#[test]
- #[should_fail]
fn test_rw_arc_poison_wr() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.write().unwrap();
+ panic!();
}).join();
- let lock = arc.read();
- assert_eq!(*lock, 1);
+ assert!(arc.read().is_err());
}
#[test]
- #[should_fail]
fn test_rw_arc_poison_ww() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.write().unwrap();
+ panic!();
}).join();
- let lock = arc.write();
- assert_eq!(*lock, 1);
+ assert!(arc.write().is_err());
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.read().unwrap();
+ panic!();
}).join();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 1);
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.read().unwrap();
+ panic!()
}).join();
- let lock = arc.write();
+ let lock = arc.write().unwrap();
assert_eq!(*lock, 1);
}
let (tx, rx) = channel();
Thread::spawn(move|| {
- let mut lock = arc2.write();
+ let mut lock = arc2.write().unwrap();
for _ in range(0u, 10) {
let tmp = *lock;
*lock = -1;
for _ in range(0u, 5) {
let arc3 = arc.clone();
children.push(Thread::spawn(move|| {
- let lock = arc3.read();
+ let lock = arc3.read().unwrap();
assert!(*lock >= 0);
}));
}
// Wait for writer to finish
rx.recv();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 10);
}
}
impl Drop for Unwinder {
fn drop(&mut self) {
- let mut lock = self.i.write();
+ let mut lock = self.i.write().unwrap();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 2);
}
}
/// This method will block until the internal count of the semaphore is at
/// least 1.
pub fn acquire(&self) {
- let mut count = self.lock.lock();
+ let mut count = self.lock.lock().unwrap();
while *count <= 0 {
- self.cvar.wait(&count);
+ count = self.cvar.wait(count).unwrap();
}
*count -= 1;
}
/// This will increment the number of resources in this semaphore by 1 and
/// will notify any pending waiters in `acquire` or `access` if necessary.
pub fn release(&self) {
- *self.lock.lock() += 1;
+ *self.lock.lock().unwrap() += 1;
self.cvar.notify_one();
}
let message = {
// Only lock jobs for the time it takes
// to get a job, not run it.
- let lock = jobs.lock();
+ let lock = jobs.lock().unwrap();
lock.recv_opt()
};
// in theory we can demangle any Unicode code point, but
// for simplicity we just catch the common ones.
- "$x20" => " ",
- "$x27" => "'",
- "$x5b" => "[",
- "$x5d" => "]"
+ "$u{20}" => " ",
+ "$u{27}" => "'",
+ "$u{5b}" => "[",
+ "$u{5d}" => "]"
)
} else {
let idx = match rest.find('$') {
pub shutdown: UnsafeCell<bool>,
}
+unsafe impl<M:Send> Send for Helper<M> { }
+
+unsafe impl<M:Send> Sync for Helper<M> { }
+
+struct RaceBox(helper_signal::signal);
+
+unsafe impl Send for RaceBox {}
+unsafe impl Sync for RaceBox {}
+
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
F: FnOnce() -> T,
{
unsafe {
- let _guard = self.lock.lock();
+ let _guard = self.lock.lock().unwrap();
if !*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = helper_signal::new();
*self.signal.get() = send as uint;
+ let receive = RaceBox(receive);
+
let t = f();
Thread::spawn(move |:| {
- helper(receive, rx, t);
- let _g = self.lock.lock();
+ helper(receive.0, rx, t);
+ let _g = self.lock.lock().unwrap();
*self.shutdown.get() = true;
self.cond.notify_one()
}).detach();
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
- let _guard = self.lock.lock();
+ let _guard = self.lock.lock().unwrap();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
- let guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
// Wait for the child to exit
while !*self.shutdown.get() {
- self.cond.wait(&guard);
+ guard = self.cond.wait(guard).unwrap();
}
drop(guard);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use kinds::Sync;
use sys::mutex as imp;
/// An OS-based mutual exclusion lock.
/// at the top level of the crate instead of this type.
pub struct Mutex(imp::Mutex);
+unsafe impl Sync for Mutex {}
+
/// Constant initializer for statically allocated mutexes.
pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT);
// Collect all the results we found
let mut addrs = Vec::new();
let mut rp = res;
- while rp.is_not_null() {
+ while !rp.is_null() {
unsafe {
let addr = try!(sockaddr_to_addr(mem::transmute((*rp).ai_addr),
(*rp).ai_addrlen as uint));
use iter::{Iterator, IteratorExt};
use os;
use path::GenericPath;
- use ptr::RawPtr;
+ use ptr::PtrExt;
use ptr;
use slice::SliceExt;
sa_restorer: *mut libc::c_void,
}
+ unsafe impl ::kinds::Send for sigaction { }
+ unsafe impl ::kinds::Sync for sigaction { }
+
#[repr(C)]
#[cfg(target_word_size = "32")]
pub struct sigset_t {
sa_resv: [libc::c_int, ..1],
}
+    unsafe impl ::kinds::Send for sigaction { }
+    unsafe impl ::kinds::Sync for sigaction { }
+
#[repr(C)]
pub struct sigset_t {
__val: [libc::c_ulong, ..32],
// except according to those terms.
use cell::UnsafeCell;
+use kinds::Sync;
use sys::sync as ffi;
use sys_common::mutex;
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
};
+unsafe impl Sync for Mutex {}
+
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
Ok(joined)
}
-#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
+#[cfg(target_os = "freebsd")]
pub fn load_self() -> Option<Vec<u8>> {
unsafe {
use libc::funcs::bsd44::*;
}
}
+#[cfg(target_os = "dragonfly")]
+pub fn load_self() -> Option<Vec<u8>> {
+ use std::io;
+
+ match io::fs::readlink(&Path::new("/proc/curproc/file")) {
+ Ok(path) => Some(path.into_vec()),
+ Err(..) => None
+ }
+}
+
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn load_self() -> Option<Vec<u8>> {
use std::io;
write_deadline: u64,
}
+unsafe impl Send for UnixStream {}
+unsafe impl Sync for UnixStream {}
+
impl UnixStream {
pub fn connect(addr: &CString,
timeout: Option<u64>) -> IoResult<UnixStream> {
path: CString,
}
+unsafe impl Send for UnixListener {}
+unsafe impl Sync for UnixListener {}
+
impl UnixListener {
pub fn bind(addr: &CString) -> IoResult<UnixListener> {
bind(addr, libc::SOCK_STREAM).map(|fd| {
closed: atomic::AtomicBool,
}
+unsafe impl Send for AcceptorInner {}
+unsafe impl Sync for AcceptorInner {}
+
impl UnixAcceptor {
pub fn fd(&self) -> fd_t { self.inner.listener.fd() }
pub static SIGSTKSZ: libc::size_t = 8192;
- pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
+ pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
// This definition is not as accurate as it could be, {si_addr} is
// actually a giant union. Currently we're only interested in that field,
pub inner: FileDesc,
}
+unsafe impl Sync for TcpListener {}
+
impl TcpListener {
pub fn bind(addr: ip::SocketAddr) -> IoResult<TcpListener> {
let fd = try!(net::socket(addr, libc::SOCK_STREAM));
closed: atomic::AtomicBool,
}
+unsafe impl Sync for AcceptorInner {}
+
impl TcpAcceptor {
pub fn fd(&self) -> sock_t { self.inner.listener.fd() }
pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT };
+unsafe impl Sync for Mutex {}
+
#[inline]
pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
m.get()
write_deadline: u64,
}
+unsafe impl Send for UnixStream {}
+unsafe impl Sync for UnixStream {}
+
impl UnixStream {
fn try_connect(p: *const u16) -> Option<libc::HANDLE> {
// Note that most of this is lifted from the libuv implementation.
name: CString,
}
+unsafe impl Send for UnixListener {}
+unsafe impl Sync for UnixListener {}
+
impl UnixListener {
pub fn bind(addr: &CString) -> IoResult<UnixListener> {
// Although we technically don't need the pipe until much later, we
deadline: u64,
}
+unsafe impl Send for UnixAcceptor {}
+unsafe impl Sync for UnixAcceptor {}
+
struct AcceptorState {
abort: Event,
closed: atomic::AtomicBool,
}
+unsafe impl Send for AcceptorState {}
+unsafe impl Sync for AcceptorState {}
+
impl UnixAcceptor {
pub fn accept(&mut self) -> IoResult<UnixStream> {
// This function has some funky implementation details when working with
pub struct Event(c::WSAEVENT);
+unsafe impl Send for Event {}
+unsafe impl Sync for Event {}
+
impl Event {
pub fn new() -> IoResult<Event> {
let event = unsafe { c::WSACreateEvent() };
pub struct TcpListener { sock: sock_t }
+unsafe impl Send for TcpListener {}
+unsafe impl Sync for TcpListener {}
+
impl TcpListener {
pub fn bind(addr: ip::SocketAddr) -> IoResult<TcpListener> {
sys::init_net();
deadline: u64,
}
+unsafe impl Send for TcpAcceptor {}
+unsafe impl Sync for TcpAcceptor {}
+
struct AcceptorInner {
listener: TcpListener,
abort: Event,
closed: atomic::AtomicBool,
}
+unsafe impl Send for AcceptorInner {}
+unsafe impl Sync for AcceptorInner {}
+
impl TcpAcceptor {
pub fn socket(&self) -> sock_t { self.inner.listener.socket() }
RemoveTimer(libc::HANDLE, Sender<()>),
}
+unsafe impl Send for Req {}
+
+
fn helper(input: libc::HANDLE, messages: Receiver<Req>, _: ()) {
let mut objs = vec![input];
let mut chans = vec![];
use boxed::Box;
use cell::UnsafeCell;
use clone::Clone;
-use kinds::Send;
+use kinds::{Send, Sync};
use ops::{Drop, FnOnce};
use option::Option::{mod, Some, None};
use result::Result::{Err, Ok};
}
fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> JoinGuard<T> {
- let my_packet = Arc::new(UnsafeCell::new(None));
- let their_packet = my_packet.clone();
+ let my_packet = Packet(Arc::new(UnsafeCell::new(None)));
+ let their_packet = Packet(my_packet.0.clone());
let Builder { name, stack_size, stdout, stderr } = self;
}
};
unsafe {
- *their_packet.get() = Some(match (output, try_result) {
+ *their_packet.0.get() = Some(match (output, try_result) {
(Some(data), Ok(_)) => Ok(data),
(None, Err(cause)) => Err(cause),
_ => unreachable!()
cvar: Condvar,
}
+unsafe impl Sync for Inner {}
+
#[deriving(Clone)]
/// A handle to a thread.
pub struct Thread {
inner: Arc<Inner>,
}
+unsafe impl Sync for Thread {}
+
impl Thread {
// Used only internally to construct a thread object without spawning
fn new(name: Option<String>) -> Thread {
}
/// Determines whether the current thread is panicking.
+ #[inline]
pub fn panicking() -> bool {
unwind::panicking()
}
// or futuxes, and in either case may allow spurious wakeups.
pub fn park() {
let thread = Thread::current();
- let mut guard = thread.inner.lock.lock();
+ let mut guard = thread.inner.lock.lock().unwrap();
while !*guard {
- thread.inner.cvar.wait(&guard);
+ guard = thread.inner.cvar.wait(guard).unwrap();
}
*guard = false;
}
///
/// See the module doc for more detail.
pub fn unpark(&self) {
- let mut guard = self.inner.lock.lock();
+ let mut guard = self.inner.lock.lock().unwrap();
if !*guard {
*guard = true;
self.inner.cvar.notify_one();
/// A thread that completes without panicking is considered to exit successfully.
pub type Result<T> = ::result::Result<T, Box<Any + Send>>;
+struct Packet<T>(Arc<UnsafeCell<Option<Result<T>>>>);
+
+unsafe impl<T:'static+Send> Send for Packet<T> {}
+unsafe impl<T> Sync for Packet<T> {}
+
#[must_use]
/// An RAII-style guard that will block until thread termination when dropped.
///
native: imp::rust_thread,
thread: Thread,
joined: bool,
- packet: Arc<UnsafeCell<Option<Result<T>>>>,
+ packet: Packet<T>,
}
+unsafe impl<T: Send> Sync for JoinGuard<T> {}
+
impl<T: Send> JoinGuard<T> {
/// Extract a handle to the thread this guard will join on.
pub fn thread(&self) -> &Thread {
unsafe { imp::join(self.native) };
self.joined = true;
unsafe {
- (*self.packet.get()).take().unwrap()
+ (*self.packet.0.get()).take().unwrap()
}
}
unsafe {
let slot = slot.get().expect("cannot access a TLS value during or \
after it is destroyed");
- if (*slot.get()).is_none() {
- *slot.get() = Some((self.init)());
- }
- f((*slot.get()).as_ref().unwrap())
+ f(match *slot.get() {
+ Some(ref inner) => inner,
+ None => self.init(slot),
+ })
}
}
+ unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T {
+ *slot.get() = Some((self.init)());
+ (*slot.get()).as_ref().unwrap()
+ }
+
/// Test this TLS key to determine whether its value has been destroyed for
/// the current thread or not.
///
pub dtor_running: UnsafeCell<bool>, // should be Cell
}
+ unsafe impl<T> ::kinds::Sync for Key<T> { }
+
#[doc(hidden)]
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
pub os: OsStaticKey,
}
+ unsafe impl<T> ::kinds::Sync for Key<T> { }
+
struct Value<T: 'static> {
key: &'static Key<T>,
value: T,
mod imp {
use std::cell::UnsafeCell;
- // FIXME: Should be a `Cell`, but that's not `Sync`
#[doc(hidden)]
pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
+ unsafe impl<T> ::kinds::Sync for KeyInner<T> { }
+
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub marker: marker::InvariantType<T>,
}
+ unsafe impl<T> ::kinds::Sync for KeyInner<T> { }
+
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
/// detects Copy, Send and Sync.
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub enum TyParamBound {
- TraitTyParamBound(PolyTraitRef),
+ TraitTyParamBound(PolyTraitRef, TraitBoundModifier),
RegionTyParamBound(Lifetime)
}
+/// A modifier on a bound, currently this is only used for `?Sized`, where the
+/// modifier is `Maybe`. Negative bounds should also be handled here.
+#[deriving(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
+pub enum TraitBoundModifier {
+ None,
+ Maybe,
+}
+
pub type TyParamBounds = OwnedSlice<TyParamBound>;
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub ident: Ident,
pub id: NodeId,
pub bounds: TyParamBounds,
- pub unbound: Option<TraitRef>,
pub default: Option<P<Ty>>,
pub span: Span
}
/// Expr with trailing semi-colon (may have any type):
StmtSemi(P<Expr>, NodeId),
- StmtMac(Mac, MacStmtStyle),
+ StmtMac(P<Mac>, MacStmtStyle),
}
#[deriving(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
ExprField(P<Expr>, SpannedIdent),
ExprTupField(P<Expr>, Spanned<uint>),
ExprIndex(P<Expr>, P<Expr>),
- ExprSlice(P<Expr>, Option<P<Expr>>, Option<P<Expr>>, Mutability),
- ExprRange(P<Expr>, Option<P<Expr>>),
+ ExprRange(Option<P<Expr>>, Option<P<Expr>>),
/// Variable reference, possibly containing `::` and/or
/// type parameters, e.g. foo::bar::<baz>
pub bound_lifetimes: Vec<LifetimeDef>,
/// The `Foo<&'a T>` in `<'a> Foo<&'a T>`
- pub trait_ref: TraitRef
+ pub trait_ref: TraitRef,
}
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
/// Represents a Trait Declaration
ItemTrait(Unsafety,
Generics,
- Option<TraitRef>, // (optional) default bound not required for Self.
- // Currently, only Sized makes sense here.
TyParamBounds,
Vec<TraitItem>),
ItemImpl(Unsafety,
None => {}
}
}
- ItemTrait(_, _, _, ref bounds, ref trait_items) => {
+ ItemTrait(_, _, ref bounds, ref trait_items) => {
for b in bounds.iter() {
- if let TraitTyParamBound(ref t) = *b {
+ if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b {
self.insert(t.trait_ref.ref_id, NodeItem(i));
}
}
.collect();
ast::ItemImpl(u, a, b, c, impl_items)
}
- ast::ItemTrait(u, a, b, c, methods) => {
+ ast::ItemTrait(u, a, b, methods) => {
let methods = methods.into_iter()
.filter(|m| trait_method_in_cfg(cx, m))
.collect();
- ast::ItemTrait(u, a, b, c, methods)
+ ast::ItemTrait(u, a, b, methods)
}
ast::ItemStruct(def, generics) => {
ast::ItemStruct(fold_struct(cx, def), generics)
syntax_expanders.insert(intern("include_bin"),
builtin_normal_expander(
ext::source_util::expand_include_bin));
+ syntax_expanders.insert(intern("include_bytes"),
+ builtin_normal_expander(
+ ext::source_util::expand_include_bytes));
syntax_expanders.insert(intern("module_path"),
builtin_normal_expander(
ext::source_util::expand_mod));
span: Span,
id: ast::Ident,
bounds: OwnedSlice<ast::TyParamBound>,
- unbound: Option<ast::TraitRef>,
default: Option<P<ast::Ty>>) -> ast::TyParam;
fn trait_ref(&self, path: ast::Path) -> ast::TraitRef;
span: Span,
id: ast::Ident,
bounds: OwnedSlice<ast::TyParamBound>,
- unbound: Option<ast::TraitRef>,
default: Option<P<ast::Ty>>) -> ast::TyParam {
ast::TyParam {
ident: id,
id: ast::DUMMY_NODE_ID,
bounds: bounds,
- unbound: unbound,
default: default,
span: span
}
}
fn typarambound(&self, path: ast::Path) -> ast::TyParamBound {
- ast::TraitTyParamBound(self.poly_trait_ref(path))
+ ast::TraitTyParamBound(self.poly_trait_ref(path), ast::TraitBoundModifier::None)
}
fn lifetime(&self, span: Span, name: ast::Name) -> ast::Lifetime {
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
+use std::ascii::AsciiExt;
pub fn expand_syntax_ext<'cx>(cx: &'cx mut ExtCtxt,
MetaWord(ref tname) => {
match tname.get() {
"Copy" => "Copy",
- "Send" => "Send",
- "Sync" => "Sync",
+ "Send" | "Sync" => {
+ return cx.span_err(span,
+ format!("{} is an unsafe trait and it \
+ should be implemented explicitly", *tname)[])
+ }
ref tname => {
cx.span_bug(span,
format!("expected built-in trait name but \
additional_bounds: Vec::new(),
generics: LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__D", None, vec!(Path::new_(
+ bounds: vec!(("__D", vec!(Path::new_(
vec!(krate, "Decoder"), None,
vec!(box Literal(Path::new_local("__E"))), true))),
- ("__E", None, vec!()))
+ ("__E", vec!()))
},
methods: vec!(
MethodDef {
additional_bounds: Vec::new(),
generics: LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__S", None, vec!(Path::new_(
+ bounds: vec!(("__S", vec!(Path::new_(
vec!(krate, "Encoder"), None,
vec!(box Literal(Path::new_local("__E"))), true))),
- ("__E", None, vec!()))
+ ("__E", vec!()))
},
methods: vec!(
MethodDef {
cx.typaram(self.span,
ty_param.ident,
OwnedSlice::from_vec(bounds),
- ty_param.unbound.clone(),
None)
}));
}
-fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
- bounds: &[Path], unbound: Option<ast::TraitRef>,
- self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
+fn mk_ty_param(cx: &ExtCtxt,
+ span: Span,
+ name: &str,
+ bounds: &[Path],
+ self_ident: Ident,
+ self_generics: &Generics)
+ -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
- cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
+ cx.typaram(span, cx.ident_of(name), bounds, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
- pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
+ pub bounds: Vec<(&'a str, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
- &(ref name, ref unbound, ref bounds) => {
+ &(ref name, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
- unbound.clone(),
self_ty,
self_generics)
}
vec!(box Literal(Path::new_local("__S"))), true),
LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__S", None,
+ bounds: vec!(("__S",
vec!(Path::new(vec!("std", "hash", "Writer"))))),
},
Path::new_local("__S"))
generics: LifetimeBounds {
lifetimes: Vec::new(),
bounds: vec!(("R",
- None,
vec!( Path::new(vec!("std", "rand", "Rng")) )))
},
explicit_self: None,
let format_string = cx.expr_str(span, s);
// phew, not our responsibility any more!
- format::expand_preparsed_format_args(cx, span,
- format::MethodCall(formatter, meth),
- format_string, exprs, Vec::new(),
- HashMap::new())
+
+ let args = vec![
+ format::expand_preparsed_format_args(cx, span, format_string,
+ exprs, vec![], HashMap::new())
+ ];
+ cx.expr_method_call(span, formatter, meth, args)
}
StmtMac(mac, style) => (mac, style),
_ => return expand_non_macro_stmt(s, fld)
};
- let expanded_stmt = match expand_mac_invoc(mac, s.span,
+ let expanded_stmt = match expand_mac_invoc(mac.and_then(|m| m), s.span,
|r| r.make_stmt(),
mark_stmt, fld) {
Some(stmt) => stmt,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-pub use self::Invocation::*;
use self::ArgumentType::*;
use self::Position::*;
use ptr::P;
use std::collections::HashMap;
-use std::string;
#[deriving(PartialEq)]
enum ArgumentType {
- Known(string::String),
+ Known(String),
Unsigned
}
enum Position {
Exact(uint),
- Named(string::String),
+ Named(String),
}
struct Context<'a, 'b:'a> {
/// Note that we keep a side-array of the ordering of the named arguments
/// found to be sure that we can translate them in the same order that they
/// were declared in.
- names: HashMap<string::String, P<ast::Expr>>,
- name_types: HashMap<string::String, ArgumentType>,
- name_ordering: Vec<string::String>,
+ names: HashMap<String, P<ast::Expr>>,
+ name_types: HashMap<String, ArgumentType>,
+ name_ordering: Vec<String>,
/// The latest consecutive literal strings, or empty if there weren't any.
- literal: string::String,
+ literal: String,
/// Collection of the compiled `rt::Argument` structures
pieces: Vec<P<ast::Expr>>,
/// Stays `true` if all formatting parameters are default (as in "{}{}").
all_pieces_simple: bool,
- name_positions: HashMap<string::String, uint>,
- method_statics: Vec<P<ast::Item>>,
+ name_positions: HashMap<String, uint>,
/// Updated as arguments are consumed or methods are entered
nest_level: uint,
next_arg: uint,
}
-pub enum Invocation {
- Call(P<ast::Expr>),
- MethodCall(P<ast::Expr>, ast::Ident),
-}
-
/// Parses the arguments from the given list of tokens, returning None
/// if there's a parse error so we can continue parsing other format!
/// expressions.
///
-/// If parsing succeeds, the second return value is:
+/// If parsing succeeds, the return value is:
///
/// Some((fmtstr, unnamed arguments, ordering of named arguments,
/// named arguments))
-fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
- tts: &[ast::TokenTree])
- -> (Invocation, Option<(P<ast::Expr>, Vec<P<ast::Expr>>, Vec<string::String>,
- HashMap<string::String, P<ast::Expr>>)>) {
+fn parse_args(ecx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
+ -> Option<(P<ast::Expr>, Vec<P<ast::Expr>>, Vec<String>,
+ HashMap<String, P<ast::Expr>>)> {
let mut args = Vec::new();
- let mut names = HashMap::<string::String, P<ast::Expr>>::new();
+ let mut names = HashMap::<String, P<ast::Expr>>::new();
let mut order = Vec::new();
let mut p = ecx.new_parser_from_tts(tts);
- // Parse the leading function expression (maybe a block, maybe a path)
- let invocation = if allow_method {
- let e = p.parse_expr();
- if !p.eat(&token::Comma) {
- ecx.span_err(sp, "expected token: `,`");
- return (Call(e), None);
- }
- MethodCall(e, p.parse_ident())
- } else {
- Call(p.parse_expr())
- };
- if !p.eat(&token::Comma) {
- ecx.span_err(sp, "expected token: `,`");
- return (invocation, None);
- }
if p.token == token::Eof {
ecx.span_err(sp, "requires at least a format string argument");
- return (invocation, None);
+ return None;
}
let fmtstr = p.parse_expr();
let mut named = false;
while p.token != token::Eof {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
- return (invocation, None);
+ return None;
}
if p.token == token::Eof { break } // accept trailing commas
if named || (p.token.is_ident() && p.look_ahead(1, |t| *t == token::Eq)) {
ecx.span_err(p.span,
"expected ident, positional arguments \
cannot follow named arguments");
- return (invocation, None);
+ return None;
}
_ => {
ecx.span_err(p.span,
format!("expected ident for named argument, found `{}`",
p.this_token_to_string())[]);
- return (invocation, None);
+ return None;
}
};
let interned_name = token::get_ident(ident);
args.push(p.parse_expr());
}
}
- return (invocation, Some((fmtstr, args, order, names)));
+ Some((fmtstr, args, order, names))
}
impl<'a, 'b> Context<'a, 'b> {
}
}
- fn describe_num_args(&self) -> string::String {
+ fn describe_num_args(&self) -> String {
match self.args.len() {
0 => "no arguments given".to_string(),
1 => "there is 1 argument".to_string(),
}
}
- fn item_static_array(ecx: &mut ExtCtxt,
- name: ast::Ident,
- piece_ty: P<ast::Ty>,
- pieces: Vec<P<ast::Expr>>)
- -> P<ast::Stmt> {
+ fn static_array(ecx: &mut ExtCtxt,
+ name: &str,
+ piece_ty: P<ast::Ty>,
+ pieces: Vec<P<ast::Expr>>)
+ -> P<ast::Expr> {
let fmtsp = piece_ty.span;
- let fmt = ecx.expr_vec(fmtsp, pieces);
- let fmt = ecx.expr_addr_of(fmtsp, fmt);
- let ty = ast::TyVec(piece_ty);
- let ty = ast::TyRptr(Some(ecx.lifetime(fmtsp, special_idents::static_lifetime.name)),
- ast::MutTy{ mutbl: ast::MutImmutable, ty: ecx.ty(fmtsp, ty) });
- let ty = ecx.ty(fmtsp, ty);
- let st = ast::ItemStatic(ty, ast::MutImmutable, fmt);
+ let ty = ecx.ty_rptr(fmtsp,
+ ecx.ty(fmtsp, ast::TyVec(piece_ty)),
+ Some(ecx.lifetime(fmtsp, special_idents::static_lifetime.name)),
+ ast::MutImmutable);
+ let slice = ecx.expr_vec_slice(fmtsp, pieces);
+ let st = ast::ItemStatic(ty, ast::MutImmutable, slice);
+
+ let name = ecx.ident_of(name);
let item = ecx.item(fmtsp, name, Context::static_attrs(ecx, fmtsp), st);
let decl = respan(fmtsp, ast::DeclItem(item));
- P(respan(fmtsp, ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID)))
+
+ // Wrap the declaration in a block so that it forms a single expression.
+ ecx.expr_block(ecx.block(fmtsp,
+ vec![P(respan(fmtsp, ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID)))],
+ Some(ecx.expr_ident(fmtsp, name))))
}
/// Actually builds the expression which the iformat! block will be expanded
/// to
- fn to_expr(mut self, invocation: Invocation) -> P<ast::Expr> {
- let mut lets = Vec::new();
+ fn into_expr(mut self) -> P<ast::Expr> {
let mut locals = Vec::new();
let mut names = Vec::from_fn(self.name_positions.len(), |_| None);
let mut pats = Vec::new();
let mut heads = Vec::new();
- // First, declare all of our methods that are statics
- for method in self.method_statics.into_iter() {
- let decl = respan(self.fmtsp, ast::DeclItem(method));
- lets.push(P(respan(self.fmtsp,
- ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID))));
- }
-
- // Next, build up the static array which will become our precompiled
+ // First, build up the static array which will become our precompiled
// format "string"
- let static_str_name = self.ecx.ident_of("__STATIC_FMTSTR");
- let static_lifetime = self.ecx.lifetime(self.fmtsp, self.ecx.ident_of("'static").name);
+ let static_lifetime = self.ecx.lifetime(self.fmtsp, special_idents::static_lifetime.name);
let piece_ty = self.ecx.ty_rptr(
self.fmtsp,
self.ecx.ty_ident(self.fmtsp, self.ecx.ident_of("str")),
Some(static_lifetime),
ast::MutImmutable);
- lets.push(Context::item_static_array(self.ecx,
- static_str_name,
- piece_ty,
- self.str_pieces));
-
- // Then, build up the static array which will store our precompiled
- // nonstandard placeholders, if there are any.
- let static_args_name = self.ecx.ident_of("__STATIC_FMTARGS");
- if !self.all_pieces_simple {
- let piece_ty = self.ecx.ty_path(self.ecx.path_all(
- self.fmtsp,
- true, Context::rtpath(self.ecx, "Argument"),
- vec![static_lifetime],
- vec![],
- vec![]
- ));
- lets.push(Context::item_static_array(self.ecx,
- static_args_name,
- piece_ty,
- self.pieces));
- }
+ let pieces = Context::static_array(self.ecx,
+ "__STATIC_FMTSTR",
+ piece_ty,
+ self.str_pieces);
+
// Right now there is a bug such that for the expression:
// foo(bar(&1))
// Now create a vector containing all the arguments
let args = locals.into_iter().chain(names.into_iter().map(|a| a.unwrap()));
- // Now create the fmt::Arguments struct with all our locals we created.
- let pieces = self.ecx.expr_ident(self.fmtsp, static_str_name);
- let args_slice = self.ecx.expr_vec_slice(self.fmtsp, args.collect());
-
- let (fn_name, fn_args) = if self.all_pieces_simple {
- ("new", vec![pieces, args_slice])
- } else {
- let fmt = self.ecx.expr_ident(self.fmtsp, static_args_name);
- ("with_placeholders", vec![pieces, fmt, args_slice])
- };
-
- let result = self.ecx.expr_call_global(self.fmtsp, vec!(
- self.ecx.ident_of("std"),
- self.ecx.ident_of("fmt"),
- self.ecx.ident_of("Arguments"),
- self.ecx.ident_of(fn_name)), fn_args);
-
- let result = match invocation {
- Call(e) => {
- let span = e.span;
- self.ecx.expr_call(span, e, vec![
- self.ecx.expr_addr_of(span, result)
- ])
- }
- MethodCall(e, m) => {
- let span = e.span;
- self.ecx.expr_method_call(span, e, m, vec![
- self.ecx.expr_addr_of(span, result)
- ])
- }
- };
- let body = self.ecx.expr_block(self.ecx.block(self.fmtsp, lets,
- Some(result)));
+ let args_array = self.ecx.expr_vec(self.fmtsp, args.collect());
// Constructs an AST equivalent to:
//
// match (&arg0, &arg1) {
- // (tmp0, tmp1) => body
+ // (tmp0, tmp1) => args_array
// }
//
// It was:
//
// let tmp0 = &arg0;
// let tmp1 = &arg1;
- // body
+ // args_array
//
// Because of #11585 the new temporary lifetime rule, the enclosing
// statements for these temporaries become the let's themselves.
// If one or more of them are RefCell's, RefCell borrow() will also
- // end there; they don't last long enough for body to use them. The
- // match expression solves the scope problem.
+ // end there; they don't last long enough for args_array to use them.
+ // The match expression solves the scope problem.
//
// Note, it may also very well be transformed to:
//
// match arg0 {
// ref tmp0 => {
// match arg1 => {
- // ref tmp1 => body } } }
+ // ref tmp1 => args_array } } }
//
// But the nested match expression is proved to perform not as well
// as series of let's; the first approach does.
let pat = self.ecx.pat_tuple(self.fmtsp, pats);
- let arm = self.ecx.arm(self.fmtsp, vec!(pat), body);
+ let arm = self.ecx.arm(self.fmtsp, vec!(pat), args_array);
let head = self.ecx.expr(self.fmtsp, ast::ExprTup(heads));
- self.ecx.expr_match(self.fmtsp, head, vec!(arm))
+ let result = self.ecx.expr_match(self.fmtsp, head, vec!(arm));
+
+ let args_slice = self.ecx.expr_addr_of(self.fmtsp, result);
+
+ // Now create the fmt::Arguments struct with all our locals we created.
+ let (fn_name, fn_args) = if self.all_pieces_simple {
+ ("new", vec![pieces, args_slice])
+ } else {
+ // Build up the static array which will store our precompiled
+ // nonstandard placeholders, if there are any.
+ let piece_ty = self.ecx.ty_path(self.ecx.path_all(
+ self.fmtsp,
+ true, Context::rtpath(self.ecx, "Argument"),
+ vec![static_lifetime],
+ vec![],
+ vec![]
+ ));
+ let fmt = Context::static_array(self.ecx,
+ "__STATIC_FMTARGS",
+ piece_ty,
+ self.pieces);
+
+ ("with_placeholders", vec![pieces, fmt, args_slice])
+ };
+
+ self.ecx.expr_call_global(self.fmtsp, vec!(
+ self.ecx.ident_of("std"),
+ self.ecx.ident_of("fmt"),
+ self.ecx.ident_of("Arguments"),
+ self.ecx.ident_of(fn_name)), fn_args)
}
fn format_arg(ecx: &ExtCtxt, sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
- match parse_args(ecx, sp, false, tts) {
- (invocation, Some((efmt, args, order, names))) => {
- MacExpr::new(expand_preparsed_format_args(ecx, sp, invocation, efmt,
+ match parse_args(ecx, sp, tts) {
+ Some((efmt, args, order, names)) => {
+ MacExpr::new(expand_preparsed_format_args(ecx, sp, efmt,
args, order, names))
}
- (_, None) => MacExpr::new(ecx.expr_uint(sp, 2))
+ None => DummyResult::expr(sp)
}
}
-/// Take the various parts of `format_args!(extra, efmt, args...,
-/// name=names...)` and construct the appropriate formatting
-/// expression.
+/// Take the various parts of `format_args!(efmt, args..., name=names...)`
+/// and construct the appropriate formatting expression.
pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt, sp: Span,
- invocation: Invocation,
efmt: P<ast::Expr>,
args: Vec<P<ast::Expr>>,
- name_ordering: Vec<string::String>,
- names: HashMap<string::String, P<ast::Expr>>)
+ name_ordering: Vec<String>,
+ names: HashMap<String, P<ast::Expr>>)
-> P<ast::Expr> {
let arg_types = Vec::from_fn(args.len(), |_| None);
let mut cx = Context {
name_ordering: name_ordering,
nest_level: 0,
next_arg: 0,
- literal: string::String::new(),
+ literal: String::new(),
pieces: Vec::new(),
str_pieces: Vec::new(),
all_pieces_simple: true,
- method_statics: Vec::new(),
fmtsp: sp,
};
cx.fmtsp = efmt.span;
let fmt = match expr_to_string(cx.ecx,
- efmt,
- "format argument must be a string literal.") {
+ efmt,
+ "format argument must be a string literal.") {
Some((fmt, _)) => fmt,
None => return DummyResult::raw_expr(sp)
};
}
}
- cx.to_expr(invocation)
+ cx.into_expr()
}
pub fn expand_include_bin(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
- let file = match get_single_str_from_tts(cx, sp, tts, "include_bin!") {
+ cx.span_warn(sp, "include_bin! is deprecated; use include_bytes! instead");
+ expand_include_bytes(cx, sp, tts)
+}
+
+pub fn expand_include_bytes(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
+ -> Box<base::MacResult+'static> {
+ let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::expr(sp)
};
use parse::token;
use std::slice;
+use std::ascii::AsciiExt;
+
// if you change this list without updating src/doc/reference.md, @cmr will be sad
static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
- ast::ExprSlice(..) => {
+ ast::ExprRange(..) => {
self.gate_feature("slicing_syntax",
e.span,
- "slicing syntax is experimental");
+ "range syntax is experimental");
}
_ => {}
}
-> TyParamBound
where T: Folder {
match tpb {
- TraitTyParamBound(ty) => TraitTyParamBound(fld.fold_poly_trait_ref(ty)),
+ TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier),
RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)),
}
}
pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam {
- let TyParam {id, ident, bounds, unbound, default, span} = tp;
+ let TyParam {id, ident, bounds, default, span} = tp;
TyParam {
id: fld.new_id(id),
ident: ident,
bounds: fld.fold_bounds(bounds),
- unbound: unbound.map(|x| fld.fold_trait_ref(x)),
default: default.map(|x| fld.fold_ty(x)),
span: span
}
folder.fold_ty(ty),
new_impl_items)
}
- ItemTrait(unsafety, generics, unbound, bounds, methods) => {
+ ItemTrait(unsafety, generics, bounds, methods) => {
let bounds = folder.fold_bounds(bounds);
let methods = methods.into_iter().flat_map(|method| {
let r = match method {
}).collect();
ItemTrait(unsafety,
folder.fold_generics(generics),
- unbound,
bounds,
methods)
}
ExprIndex(el, er) => {
ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
}
- ExprSlice(e, e1, e2, m) => {
- ExprSlice(folder.fold_expr(e),
- e1.map(|x| folder.fold_expr(x)),
- e2.map(|x| folder.fold_expr(x)),
- m)
- }
ExprRange(e1, e2) => {
- ExprRange(folder.fold_expr(e1),
+ ExprRange(e1.map(|x| folder.fold_expr(x)),
e2.map(|x| folder.fold_expr(x)))
}
ExprPath(pth) => ExprPath(folder.fold_path(pth)),
}))
}
StmtMac(mac, semi) => SmallVector::one(P(Spanned {
- node: StmtMac(folder.fold_mac(mac), semi),
+ node: StmtMac(mac.map(|m| folder.fold_mac(m)), semi),
span: span
}))
}
use abi;
use ast::{AssociatedType, BareFnTy, ClosureTy};
-use ast::{RegionTyParamBound, TraitTyParamBound};
+use ast::{RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
use ast::{ProvidedMethod, Public, Unsafety};
use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue};
use ast::{BiBitAnd, BiBitOr, BiBitXor, BiRem, Block};
use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain};
use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox};
use ast::{ExprBreak, ExprCall, ExprCast};
-use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex, ExprSlice};
+use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex};
use ast::{ExprLit, ExprLoop, ExprMac, ExprRange};
use ast::{ExprMethodCall, ExprParen, ExprPath};
use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary};
use ast::{Visibility, WhereClause};
use ast;
use ast_util::{mod, as_prec, ident_to_path, operator_prec};
-use codemap::{mod, Span, BytePos, Spanned, spanned, mk_sp};
+use codemap::{mod, Span, BytePos, Spanned, spanned, mk_sp, DUMMY_SP};
use diagnostic;
use ext::tt::macro_parser;
use parse;
LifetimeAndTypesWithColons,
}
+/// How to parse a bound, whether to allow bound modifiers such as `?`.
+#[deriving(Copy, PartialEq)]
+pub enum BoundParsingMode {
+ Bare,
+ Modified,
+}
+
enum ItemOrViewItem {
/// Indicates a failure to parse any kind of item. The attributes are
/// returned.
let poly_trait_ref = ast::PolyTraitRef { bound_lifetimes: lifetime_defs,
trait_ref: trait_ref };
let other_bounds = if self.eat(&token::BinOp(token::Plus)) {
- self.parse_ty_param_bounds()
+ self.parse_ty_param_bounds(BoundParsingMode::Bare)
} else {
OwnedSlice::empty()
};
let all_bounds =
- Some(TraitTyParamBound(poly_trait_ref)).into_iter()
+ Some(TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)).into_iter()
.chain(other_bounds.into_vec().into_iter())
.collect();
ast::TyPolyTraitRef(all_bounds)
// To be helpful, parse the proc as ever
let _ = self.parse_legacy_lifetime_defs(lifetime_defs);
let _ = self.parse_fn_args(false, false);
- let _ = self.parse_colon_then_ty_param_bounds();
+ let _ = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
let _ = self.parse_ret_ty();
self.obsolete(proc_span, ObsoleteProcType);
inputs
};
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
let output = self.parse_ret_ty();
let decl = P(FnDecl {
return lhs;
}
- let bounds = self.parse_ty_param_bounds();
+ let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare);
// In type grammar, `+` is treated like a binary operator,
// and hence both L and R side are required.
expr: P<Expr>,
start: Option<P<Expr>>,
end: Option<P<Expr>>,
- mutbl: Mutability)
+ _mutbl: Mutability)
-> ast::Expr_ {
- ExprSlice(expr, start, end, mutbl)
+ // FIXME: we could give more accurate span info here.
+ let (lo, hi) = match (&start, &end) {
+ (&Some(ref s), &Some(ref e)) => (s.span.lo, e.span.hi),
+ (&Some(ref s), &None) => (s.span.lo, s.span.hi),
+ (&None, &Some(ref e)) => (e.span.lo, e.span.hi),
+ (&None, &None) => (DUMMY_SP.lo, DUMMY_SP.hi),
+ };
+ ExprIndex(expr, self.mk_expr(lo, hi, ExprRange(start, end)))
}
pub fn mk_range(&mut self,
start: P<Expr>,
end: Option<P<Expr>>)
-> ast::Expr_ {
- ExprRange(start, end)
+ ExprRange(Some(start), end)
}
pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent) -> ast::Expr_ {
if id.name == token::special_idents::invalid.name {
P(spanned(lo,
hi,
- StmtMac(spanned(lo,
+ StmtMac(P(spanned(lo,
hi,
- MacInvocTT(pth, tts, EMPTY_CTXT)),
+ MacInvocTT(pth, tts, EMPTY_CTXT))),
style)))
} else {
// if it has a special ident, it's definitely an item
_ => {
let e = self.mk_mac_expr(span.lo,
span.hi,
- macro.node);
+ macro.and_then(|m| m.node));
let e =
self.parse_dot_or_call_expr_with(e);
self.handle_expression_like_statement(
expr = Some(
self.mk_mac_expr(span.lo,
span.hi,
- m.node));
+ m.and_then(|x| x.node)));
}
_ => {
stmts.push(P(Spanned {
// Parses a sequence of bounds if a `:` is found,
// otherwise returns empty list.
- fn parse_colon_then_ty_param_bounds(&mut self)
+ fn parse_colon_then_ty_param_bounds(&mut self,
+ mode: BoundParsingMode)
-> OwnedSlice<TyParamBound>
{
if !self.eat(&token::Colon) {
OwnedSlice::empty()
} else {
- self.parse_ty_param_bounds()
+ self.parse_ty_param_bounds(mode)
}
}
// where boundseq = ( polybound + boundseq ) | polybound
// and polybound = ( 'for' '<' 'region '>' )? bound
// and bound = 'region | trait_ref
- // NB: The None/Some distinction is important for issue #7264.
- fn parse_ty_param_bounds(&mut self)
+ fn parse_ty_param_bounds(&mut self,
+ mode: BoundParsingMode)
-> OwnedSlice<TyParamBound>
{
let mut result = vec!();
loop {
+ let question_span = self.span;
+ let ate_question = self.eat(&token::Question);
match self.token {
token::Lifetime(lifetime) => {
+ if ate_question {
+ self.span_err(question_span,
+ "`?` may only modify trait bounds, not lifetime bounds");
+ }
result.push(RegionTyParamBound(ast::Lifetime {
id: ast::DUMMY_NODE_ID,
span: self.span,
}
token::ModSep | token::Ident(..) => {
let poly_trait_ref = self.parse_poly_trait_ref();
- result.push(TraitTyParamBound(poly_trait_ref))
+ let modifier = if ate_question {
+ if mode == BoundParsingMode::Modified {
+ TraitBoundModifier::Maybe
+ } else {
+ self.span_err(question_span,
+ "unexpected `?`");
+ TraitBoundModifier::None
+ }
+ } else {
+ TraitBoundModifier::None
+ };
+ result.push(TraitTyParamBound(poly_trait_ref, modifier))
}
_ => break,
}
}
}
- /// Matches typaram = (unbound`?`)? IDENT optbounds ( EQ ty )?
+ /// Matches typaram = (unbound `?`)? IDENT (`?` unbound)? optbounds ( EQ ty )?
fn parse_ty_param(&mut self) -> TyParam {
// This is a bit hacky. Currently we are only interested in a single
// unbound, and it may only be `Sized`. To avoid backtracking and other
// complications, we parse an ident, then check for `?`. If we find it,
// we use the ident as the unbound, otherwise, we use it as the name of
- // type param.
+ // type param. Even worse, for now, we need to check for `?` before or
+ // after the bound.
let mut span = self.span;
let mut ident = self.parse_ident();
let mut unbound = None;
ident = self.parse_ident();
}
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let mut bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified);
+ if let Some(unbound) = unbound {
+ let mut bounds_as_vec = bounds.into_vec();
+ bounds_as_vec.push(TraitTyParamBound(PolyTraitRef { bound_lifetimes: vec![],
+ trait_ref: unbound },
+ TraitBoundModifier::Maybe));
+ bounds = OwnedSlice::from_vec(bounds_as_vec);
+ };
let default = if self.check(&token::Eq) {
self.bump();
ident: ident,
id: ast::DUMMY_NODE_ID,
bounds: bounds,
- unbound: unbound,
default: default,
span: span,
}
let bounded_ty = self.parse_ty();
if self.eat(&token::Colon) {
- let bounds = self.parse_ty_param_bounds();
+ let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare);
let hi = self.span.hi;
let span = mk_sp(lo, hi);
fn parse_item_trait(&mut self, unsafety: Unsafety) -> ItemInfo {
let ident = self.parse_ident();
let mut tps = self.parse_generics();
- let sized = self.parse_for_sized();
+ let unbound = self.parse_for_sized();
// Parse supertrait bounds.
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let mut bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
+
+ if let Some(unbound) = unbound {
+ let mut bounds_as_vec = bounds.into_vec();
+ bounds_as_vec.push(TraitTyParamBound(PolyTraitRef { bound_lifetimes: vec![],
+ trait_ref: unbound },
+ TraitBoundModifier::Maybe));
+ bounds = OwnedSlice::from_vec(bounds_as_vec);
+ };
self.parse_where_clause(&mut tps);
let meths = self.parse_trait_items();
- (ident, ItemTrait(unsafety, tps, sized, bounds, meths), None)
+ (ident, ItemTrait(unsafety, tps, bounds, meths), None)
}
fn parse_impl_items(&mut self) -> (Vec<ImplItem>, Vec<Attribute>) {
}
fn parse_for_sized(&mut self) -> Option<ast::TraitRef> {
+ // FIXME, this should really use TraitBoundModifier, but it will get
+ // re-jigged shortly in any case, so leaving the hacky version for now.
if self.eat_keyword(keywords::For) {
let span = self.span;
+ let mut ate_question = false;
+ if self.eat(&token::Question) {
+ ate_question = true;
+ }
let ident = self.parse_ident();
- if !self.eat(&token::Question) {
+ if self.eat(&token::Question) {
+ if ate_question {
+ self.span_err(span,
+ "unexpected `?`");
+ }
+ ate_question = true;
+ }
+ if !ate_question {
self.span_err(span,
- "expected 'Sized?' after `for` in trait item");
+ "expected `?Sized` after `for` in trait item");
return None;
}
let tref = Parser::trait_ref_from_ident(ident, span);
use abi;
use ast::{mod, FnUnboxedClosureKind, FnMutUnboxedClosureKind};
use ast::{FnOnceUnboxedClosureKind};
-use ast::{MethodImplItem, RegionTyParamBound, TraitTyParamBound};
+use ast::{MethodImplItem, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
use ast::{RequiredMethod, ProvidedMethod, TypeImplItem, TypeTraitItem};
use ast::{UnboxedClosureKind};
use ast_util;
}
try!(self.bclose(item.span));
}
- ast::ItemTrait(unsafety, ref generics, ref unbound, ref bounds, ref methods) => {
+ ast::ItemTrait(unsafety, ref generics, ref bounds, ref methods) => {
try!(self.head(""));
try!(self.print_visibility(item.vis));
try!(self.print_unsafety(unsafety));
try!(self.word_nbsp("trait"));
try!(self.print_ident(item.ident));
try!(self.print_generics(generics));
- if let &Some(ref tref) = unbound {
- try!(space(&mut self.s));
- try!(self.word_space("for"));
- try!(self.print_trait_ref(tref));
- try!(word(&mut self.s, "?"));
+ let bounds: Vec<_> = bounds.iter().map(|b| b.clone()).collect();
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+ for b in bounds.into_iter() {
+ if let TraitTyParamBound(ref ptr, ast::TraitBoundModifier::Maybe) = b {
+ try!(space(&mut self.s));
+ try!(self.word_space("for ?"));
+ try!(self.print_trait_ref(&ptr.trait_ref));
+ } else {
+ real_bounds.push(b);
+ }
}
- try!(self.print_bounds(":", bounds[]));
+ try!(self.print_bounds(":", real_bounds[]));
try!(self.print_where_clause(generics));
try!(word(&mut self.s, " "));
try!(self.bopen());
ast::MacStmtWithBraces => token::Brace,
_ => token::Paren
};
- try!(self.print_mac(mac, delim));
+ try!(self.print_mac(&**mac, delim));
match style {
ast::MacStmtWithBraces => {}
_ => try!(word(&mut self.s, ";")),
try!(self.print_expr(&**index));
try!(word(&mut self.s, "]"));
}
- ast::ExprSlice(ref e, ref start, ref end, ref mutbl) => {
- try!(self.print_expr(&**e));
- try!(word(&mut self.s, "["));
- if mutbl == &ast::MutMutable {
- try!(word(&mut self.s, "mut"));
- if start.is_some() || end.is_some() {
- try!(space(&mut self.s));
- }
- }
+ ast::ExprRange(ref start, ref end) => {
if let &Some(ref e) = start {
try!(self.print_expr(&**e));
}
if let &Some(ref e) = end {
try!(self.print_expr(&**e));
}
- try!(word(&mut self.s, "]"));
- }
- ast::ExprRange(ref start, ref end) => {
- try!(self.print_expr(&**start));
- try!(word(&mut self.s, ".."));
- if let &Some(ref e) = end {
- try!(self.print_expr(&**e));
- }
}
ast::ExprPath(ref path) => try!(self.print_path(path, true)),
ast::ExprBreak(opt_ident) => {
}
try!(match *bound {
- TraitTyParamBound(ref tref) => {
+ TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
+ self.print_poly_trait_ref(tref)
+ }
+ TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
+ try!(word(&mut self.s, "?"));
self.print_poly_trait_ref(tref)
}
RegionTyParamBound(ref lt) => {
}
pub fn print_ty_param(&mut self, param: &ast::TyParam) -> IoResult<()> {
- if let Some(ref tref) = param.unbound {
- try!(self.print_trait_ref(tref));
- try!(self.word_space("?"));
- }
try!(self.print_ident(param.ident));
try!(self.print_bounds(":", param.bounds[]));
match param.default {
fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) {
walk_ty_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef) {
- walk_poly_trait_ref(self, t)
+ fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) {
+ walk_poly_trait_ref(self, t, m)
}
fn visit_struct_def(&mut self, s: &'v StructDef, _: Ident, _: &'v Generics, _: NodeId) {
walk_struct_def(self, s)
/// Like with walk_method_helper this doesn't correspond to a method
/// in Visitor, and so it gets a _helper suffix.
pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V,
- trait_ref: &'v PolyTraitRef)
+ trait_ref: &'v PolyTraitRef,
+ _modifier: &'v TraitBoundModifier)
where V: Visitor<'v>
{
walk_lifetime_decls_helper(visitor, &trait_ref.bound_lifetimes);
generics,
item.id)
}
- ItemTrait(_, ref generics, _, ref bounds, ref methods) => {
+ ItemTrait(_, ref generics, ref bounds, ref methods) => {
visitor.visit_generics(generics);
walk_ty_param_bounds_helper(visitor, bounds);
for method in methods.iter() {
pub fn walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V,
bound: &'v TyParamBound) {
match *bound {
- TraitTyParamBound(ref typ) => {
- visitor.visit_poly_trait_ref(typ);
+ TraitTyParamBound(ref typ, ref modifier) => {
+ visitor.visit_poly_trait_ref(typ, modifier);
}
RegionTyParamBound(ref lifetime) => {
visitor.visit_lifetime_bound(lifetime);
StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
visitor.visit_expr(&**expression)
}
- StmtMac(ref macro, _) => visitor.visit_mac(macro),
+ StmtMac(ref macro, _) => visitor.visit_mac(&**macro),
}
}
visitor.visit_expr(&**main_expression);
visitor.visit_expr(&**index_expression)
}
- ExprSlice(ref main_expression, ref start, ref end, _) => {
- visitor.visit_expr(&**main_expression);
- walk_expr_opt(visitor, start);
- walk_expr_opt(visitor, end)
- }
ExprRange(ref start, ref end) => {
- visitor.visit_expr(&**start);
+ walk_expr_opt(visitor, start);
walk_expr_opt(visitor, end)
}
ExprPath(ref path) => {
use self::States::*;
use self::FormatState::*;
use self::FormatOp::*;
+use std::ascii::OwnedAsciiExt;
use std::mem::replace;
#[deriving(Copy, PartialEq)]
}
}
FormatHEX => {
- s = s.to_ascii()
- .iter()
- .map(|b| b.to_uppercase().as_byte())
- .collect();
+ s = s.into_ascii_uppercase();
if flags.alternate {
let s_ = replace(&mut s, vec!(b'0', b'X'));
s.extend(s_.into_iter());
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
+unsafe impl Send for MonitorMsg {}
+
fn run_tests<F>(opts: &TestOpts,
tests: Vec<TestDescAndFn> ,
mut callback: F) -> io::IoResult<()> where
use core::mem;
use core::num::Int;
use core::slice;
-use core::str::CharSplits;
+use core::str::Split;
use u_char::UnicodeChar;
use tables::grapheme::GraphemeCat;
/// An iterator over the words of a string, separated by a sequence of whitespace
-/// FIXME: This should be opaque
#[stable]
pub struct Words<'a> {
- inner: Filter<&'a str, CharSplits<'a, fn(char) -> bool>, fn(&&str) -> bool>,
+ inner: Filter<&'a str, Split<'a, fn(char) -> bool>, fn(&&str) -> bool>,
}
/// Methods for Unicode string slices
#[inline]
fn trim_left(&self) -> &str {
- self.trim_left_chars(|&: c: char| c.is_whitespace())
+ self.trim_left_matches(|&: c: char| c.is_whitespace())
}
#[inline]
fn trim_right(&self) -> &str {
- self.trim_right_chars(|&: c: char| c.is_whitespace())
+ self.trim_right_matches(|&: c: char| c.is_whitespace())
}
}
let mut buf = [0u16, ..2];
self.chars.next().map(|ch| {
- let n = ch.encode_utf16(buf[mut]).unwrap_or(0);
+ let n = ch.encode_utf16(buf.as_mut_slice()).unwrap_or(0);
if n == 2 { self.extra = buf[1]; }
buf[0]
})
fn send(p: &pipe, msg: uint) {
let &(ref lock, ref cond) = &**p;
- let mut arr = lock.lock();
+ let mut arr = lock.lock().unwrap();
arr.push(msg);
cond.notify_one();
}
fn recv(p: &pipe) -> uint {
let &(ref lock, ref cond) = &**p;
- let mut arr = lock.lock();
+ let mut arr = lock.lock().unwrap();
while arr.is_empty() {
- cond.wait(&arr);
+ arr = cond.wait(arr).unwrap();
}
arr.pop().unwrap()
}
fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
for i in range(1, perm.len()) {
- rotate(perm[mut ..i + 1]);
+ rotate(perm.slice_to_mut(i + 1));
let count_i = &mut count[i];
if *count_i >= i as i32 {
*count_i = 0;
fn reverse(tperm: &mut [i32], mut k: uint) {
- tperm[mut ..k].reverse()
+ tperm.slice_to_mut(k).reverse()
}
fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
copy_memory(buf.as_mut_slice(), alu);
let buf_len = buf.len();
- copy_memory(buf[mut alu_len..buf_len],
+ copy_memory(buf.slice_mut(alu_len, buf_len),
alu[..LINE_LEN]);
let mut pos = 0;
extern crate collections;
+use std::ascii::{AsciiExt, OwnedAsciiExt};
use std::collections::HashMap;
use std::mem::replace;
use std::num::Float;
let mut buffer = String::new();
for &(ref k, v) in pairs_sorted.iter() {
buffer.push_str(format!("{} {:0.3}\n",
- k.as_slice()
- .to_ascii()
- .to_uppercase()
- .into_string(), v).as_slice());
+ k.to_ascii_uppercase(),
+ v).as_slice());
}
return buffer
// given a map, search for the frequency of a pattern
fn find(mm: &HashMap<Vec<u8> , uint>, key: String) -> uint {
- let key = key.into_ascii().as_slice().to_lowercase().into_string();
+ let key = key.into_ascii_lowercase();
match mm.get(key.as_bytes()) {
option::Option::None => { return 0u; }
option::Option::Some(&num) => { return num; }
use std::io::{stdio, MemReader, BufferedReader};
let rdr = if os::getenv("RUST_BENCH").is_some() {
- let foo = include_bin!("shootout-k-nucleotide.data");
+ let foo = include_bytes!("shootout-k-nucleotide.data");
box MemReader::new(foo.to_vec()) as Box<Reader>
} else {
box stdio::stdin() as Box<Reader>
#![feature(slicing_syntax)]
+use std::ascii::OwnedAsciiExt;
use std::string::String;
use std::slice;
use std::sync::{Arc, Future};
{
res.push_all(l.as_slice().trim().as_bytes());
}
- for b in res.iter_mut() {
- *b = b.to_ascii().to_uppercase().to_byte();
- }
- res
+ res.into_ascii_uppercase()
}
fn main() {
use std::io::stdio::{stdin_raw, stdout_raw};
use std::num::{div_rem};
-use std::ptr::{copy_memory};
+use std::ptr::{copy_memory, Unique};
use std::io::{IoResult, EndOfFile};
struct Tables {
}
}
+
+struct Racy<T>(T);
+
+unsafe impl<T: 'static> Send for Racy<T> {}
+
/// Executes a closure in parallel over the given iterator over mutable slice.
/// The closure `f` is run in parallel with an element of `iter`.
fn parallel<'a, I, T, F>(mut iter: I, f: F)
- where T: Send + Sync,
+ where T: 'a+Send + Sync,
I: Iterator<&'a mut [T]>,
F: Fn(&'a mut [T]) + Sync {
use std::mem;
// Need to convert `f` and `chunk` to something that can cross the task
// boundary.
- let f = &f as *const F as *const uint;
- let raw = chunk.repr();
+ let f = Racy(&f as *const F as *const uint);
+ let raw = Racy(chunk.repr());
spawn(move|| {
- let f = f as *const F;
- unsafe { (*f)(mem::transmute(raw)) }
+ let f = f.0 as *const F;
+ unsafe { (*f)(mem::transmute(raw.0)) }
drop(tx)
});
}
fn main() {
let mut data = read_to_end(&mut stdin_raw()).unwrap();
let tables = &Tables::new();
- parallel(mut_dna_seqs(data[mut]), |&: seq| reverse_complement(seq, tables));
+ parallel(mut_dna_seqs(data.as_mut_slice()), |&: seq| reverse_complement(seq, tables));
stdout_raw().write(data.as_mut_slice()).unwrap();
}
v.iter().zip(u.iter()).map(|(a, b)| *a * *b).sum()
}
+
+struct Racy<T>(T);
+
+unsafe impl<T: 'static> Send for Racy<T> {}
+
// Executes a closure in parallel over the given mutable slice. The closure `f`
// is run in parallel and yielded the starting index within `v` as well as a
// sub-slice of `v`.
// Need to convert `f` and `chunk` to something that can cross the task
// boundary.
- let f = &f as *const _ as *const uint;
- let raw = chunk.repr();
+ let f = Racy(&f as *const _ as *const uint);
+ let raw = Racy(chunk.repr());
spawn(move|| {
- let f = f as *const F;
- unsafe { (*f)(i * size, mem::transmute(raw)) }
+ let f = f.0 as *const F;
+ unsafe { (*f)(i * size, mem::transmute(raw.0)) }
drop(tx)
});
}
pub fn main() {
let bar = box 3;
let _g = || {
- let _h = move|| -> int { *bar }; //~ ERROR cannot move out of captured outer variable
+ let _h = move |:| -> int { *bar }; //~ ERROR cannot move out of captured outer variable
};
}
fn main() {
let x: || -> ! = || panic!();
x();
- println!("Foo bar"); //~ ERROR: unreachable statement
+ std::io::println("Foo bar"); //~ ERROR: unreachable statement
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#[deriving(Sync(Bad),Send,Copy)]
+#[deriving(Copy(Bad))]
//~^ ERROR unexpected value in deriving, expected a trait
struct Test;
+#[deriving(Sync)]
+//~^ ERROR Sync is an unsafe trait and it should be implemented explicitly
+struct Test1;
+
pub fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+ pub use self::bar::X;
+ use self::bar::X;
+ //~^ ERROR a value named `X` has already been imported in this module
+ //~| ERROR a type named `X` has already been imported in this module
+
+ mod bar {
+ pub struct X;
+ }
+}
+
+fn main() {
+ let _ = foo::X;
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {}
+
+fn foo<T: Foo + Foo>() {} //~ ERROR `Foo` already appears in the list of bounds
+
+fn main() {}
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// except according to those terms.
fn main() {
- format_args!("test"); //~ ERROR: expected token
- format_args!("", || {}); //~ ERROR: must be a string literal
+ format_args!(); //~ ERROR: requires at least a format string argument
+ format_args!(|| {}); //~ ERROR: must be a string literal
}
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- format_args!("{}", ""); //~ ERROR: expected function
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait FromStructReader<'a> { }
+trait ResponseHook {
+ fn get<'a, T: FromStructReader<'a>>(&'a self);
+}
+fn foo(res : Box<ResponseHook>) { res.get } //~ ERROR attempted to take value of method
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "lib"]
+
+enum NodeContents<'a> {
+ Children(Vec<Node<'a>>),
+}
+
+impl<'a> Drop for NodeContents<'a> {
+ //~^ ERROR cannot implement a destructor on a structure with type parameters
+ fn drop( &mut self ) {
+ }
+}
+
+struct Node<'a> {
+ contents: NodeContents<'a>,
+}
+
+impl<'a> Node<'a> {
+ fn noName(contents: NodeContents<'a>) -> Node<'a> {
+ Node{ contents: contents,}
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct AutoBuilder<'a> {
+ context: &'a int
+}
+
+impl<'a> Drop for AutoBuilder<'a> {
+ //~^ ERROR cannot implement a destructor on a structure with type parameters
+ fn drop(&mut self) {
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Deserializer<'a> { }
+
+trait Deserializable {
+ fn deserialize_token<'a, D: Deserializer<'a>>(D, &'a str) -> Self;
+}
+
+impl<'a, T: Deserializable> Deserializable for &'a str {
+ //~^ ERROR unable to infer enough type information
+ fn deserialize_token<D: Deserializer<'a>>(_x: D, _y: &'a str) -> &'a str {
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Node {
+ fn zomg();
+}
+
+trait Graph<N: Node> {
+ fn nodes<'a, I: Iterator<&'a N>>(&'a self) -> I;
+}
+
+impl<N: Node> Graph<N> for Vec<N> {
+ fn nodes<'a, I: Iterator<&'a N>>(&self) -> I {
+ self.iter() //~ ERROR mismatched types
+ }
+}
+
+struct Stuff;
+
+impl Node for Stuff {
+ fn zomg() {
+ println!("zomg");
+ }
+}
+
+fn iterate<N: Node, G: Graph<N>>(graph: &G) {
+ for node in graph.iter() { //~ ERROR does not implement any method in scope named
+ node.zomg();
+ }
+}
+
+pub fn main() {
+ let graph = Vec::new();
+
+ graph.push(Stuff);
+
+ iterate(graph); //~ ERROR mismatched types
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern {
+ pub static symbol: ();
+}
+static CRASH: () = symbol; //~ cannot refer to other statics by value
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Lexer<'a> {
+ input: &'a str,
+}
+
+impl<'a> Lexer<'a> {
+ pub fn new(input: &'a str) -> Lexer<'a> {
+ Lexer { input: input }
+ }
+}
+
+struct Parser<'a> {
+ lexer: &'a mut Lexer<'a>,
+}
+
+impl<'a> Parser<'a> {
+ pub fn new(lexer: &'a mut Lexer) -> Parser<'a> {
+ Parser { lexer: lexer }
+ //~^ ERROR cannot infer an appropriate lifetime for lifetime parameter
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod Y {
+ type X = uint;
+ extern {
+ static x: *const uint;
+ }
+ fn foo(value: *const X) -> *const X {
+ value
+ }
+}
+
+static foo: *const Y::X = Y::foo(Y::x as *const Y::X);
+//~^ ERROR cannot refer to other statics by value
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+
+fn main() {}
static FOO: uint = 3;
static BAR: Foo = Foo { marker: marker::NoSync };
-//~^ ERROR: shared static items must have a type which implements Sync
+//~^ ERROR: the trait `core::kinds::Sync` is not implemented
fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt::{Show, Formatter, Error};
+use std::collections::HashMap;
+
+trait HasInventory {
+ fn getInventory<'s>(&'s self) -> &'s mut Inventory;
+ fn addToInventory(&self, item: &Item);
+ fn removeFromInventory(&self, itemName: &str) -> bool;
+}
+
+trait TraversesWorld {
+ fn attemptTraverse(&self, room: &Room, directionStr: &str) -> Result<&Room, &str> {
+ let direction = str_to_direction(directionStr);
+ let maybe_room = room.direction_to_room.find(&direction);
+ //~^ ERROR cannot infer an appropriate lifetime for autoref due to conflicting requirements
+ match maybe_room {
+ Some(entry) => Ok(entry),
+ _ => Err("Direction does not exist in room.")
+ }
+ }
+}
+
+
+#[deriving(Show, Eq, PartialEq, Hash)]
+enum RoomDirection {
+ West,
+ East,
+ North,
+ South,
+ Up,
+ Down,
+ In,
+ Out,
+
+ None
+}
+
+struct Room {
+ description: String,
+ items: Vec<Item>,
+ direction_to_room: HashMap<RoomDirection, Room>,
+}
+
+impl Room {
+ fn new(description: &'static str) -> Room {
+ Room {
+ description: description.to_string(),
+ items: Vec::new(),
+ direction_to_room: HashMap::new()
+ }
+ }
+
+ fn add_direction(&mut self, direction: RoomDirection, room: Room) {
+ self.direction_to_room.insert(direction, room);
+ }
+}
+
+struct Item {
+ name: String,
+}
+
+struct Inventory {
+ items: Vec<Item>,
+}
+
+impl Inventory {
+ fn new() -> Inventory {
+ Inventory {
+ items: Vec::new()
+ }
+ }
+}
+
+struct Player {
+ name: String,
+ inventory: Inventory,
+}
+
+impl Player {
+ fn new(name: &'static str) -> Player {
+ Player {
+ name: name.to_string(),
+ inventory: Inventory::new()
+ }
+ }
+}
+
+impl TraversesWorld for Player {
+}
+
+impl Show for Player {
+ fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
+ formatter.write_str("Player{ name:");
+ formatter.write_str(self.name.as_slice());
+ formatter.write_str(" }");
+ Ok(())
+ }
+}
+
+fn str_to_direction(to_parse: &str) -> RoomDirection {
+ match to_parse {
+ "w" | "west" => RoomDirection::West,
+ "e" | "east" => RoomDirection::East,
+ "n" | "north" => RoomDirection::North,
+ "s" | "south" => RoomDirection::South,
+ "in" => RoomDirection::In,
+ "out" => RoomDirection::Out,
+ "up" => RoomDirection::Up,
+ "down" => RoomDirection::Down,
+ _ => None //~ ERROR mismatched types
+ }
+}
+
+fn main() {
+ let mut player = Player::new("Test player");
+ let mut room = Room::new("A test room");
+ println!("Made a player: {}", player);
+ println!("Direction parse: {}", str_to_direction("east"));
+ match player.attemptTraverse(&room, "west") {
+ Ok(_) => println!("Was able to move west"),
+ Err(msg) => println!("Not able to move west: {}", msg)
+ };
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a> {
+ data: &'a[u8],
+}
+
+impl <'a> Foo<'a>{
+ fn bar(self: &mut Foo) {
+ //~^ mismatched types: expected `Foo<'a>`, found `Foo<'_>` (lifetime mismatch)
+ //~| mismatched types: expected `Foo<'a>`, found `Foo<'_>` (lifetime mismatch)
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let n = 0u;
+
+ let f = move || n += 1; //~error boxed closures can't capture by value
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Homura;
+
+fn akemi(homura: Homura) {
+ let Some(ref madoka) = Some(homura.kaname()); //~ ERROR does not implement any method
+ madoka.clone(); //~ ERROR the type of this value must be known
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo(t: &mut int){
+ println!("{}", t);
+}
+
+fn main() {
+ let test = 10;
+
+ let h = move || { //~error boxed closures can't capture by value
+ let mut r = &mut test.clone();
+ foo(r);
+ };
+
+ h();
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct S;
+
+impl S {
+ fn foo(&self) {
+ let _ = move || { self }; //~error boxed closures can't capture by value
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct S;
+
+impl S {
+ fn foo(&self) {
+ let _ = move || { self.foo() }; //~error boxed closures can't capture by value
+ }
+}
+
+fn main() {
+}
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-test
-
-use std::io::ReaderUtil;
-use std::io::Reader;
-
-fn bar(r:@ReaderUtil) -> String { r.read_line() }
+trait Foo {}
+impl Foo for u8 {}
fn main() {
- let r : @Reader = io::stdin();
- let _m = bar(r as @ReaderUtil);
+ let r: Box<Foo> = box 5;
+ let _m: Box<Foo> = r as Box<Foo>;
+ //~^ ERROR `core::kinds::Sized` is not implemented for the type `Foo`
+ //~| ERROR `Foo` is not implemented for the type `Foo`
}
fn main() {
let a = A {v: box B{v: None} as Box<Foo+Send>};
//~^ ERROR the trait `core::kinds::Send` is not implemented
+ //~^^ ERROR the trait `core::kinds::Send` is not implemented
}
// Regresion test for issue 7364
static boxed: Box<RefCell<int>> = box RefCell::new(0);
//~^ ERROR statics are not allowed to have custom pointers
-//~^^ ERROR: shared static items must have a type which implements Sync
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
fn main() { }
// except according to those terms.
-use std::rc::Rc;
-
-struct Foo {
- f: Rc<int>,
-}
-
-impl Drop for Foo {
-//~^ ERROR the trait `core::kinds::Send` is not implemented
-//~^^ NOTE cannot implement a destructor on a structure or enumeration that does not satisfy Send
- fn drop(&mut self) {
- }
-}
-
struct Bar<'a> {
f: &'a int,
}
fn main() {
let x = Rc::new(3u);
- bar(move|| foo(x)); //~ ERROR `core::kinds::Send` is not implemented
+ bar(move|| foo(x));
+ //~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+extern crate core;
+
fn assert_send<T:Send>() { }
-// unsafe ptrs are ok unless they point at unsendable things
-fn test70() {
- assert_send::<*mut int>();
-}
fn test71<'a>() {
- assert_send::<*mut &'a int>(); //~ ERROR declared lifetime bound not satisfied
+ assert_send::<*mut &'a int>();
+ //~^ ERROR the trait `core::kinds::Send` is not implemented for the type
}
fn main() {
include_str!(invalid); //~ ERROR
include_str!("i'd be quite surprised if a file with this name existed"); //~ ERROR
- include_bin!(invalid); //~ ERROR
- include_bin!("i'd be quite surprised if a file with this name existed"); //~ ERROR
+ include_bytes!(invalid); //~ ERROR
+ include_bytes!("i'd be quite surprised if a file with this name existed"); //~ ERROR
trace_macros!(invalid); //~ ERROR
}
fn main() {
let x = RefCell::new(0i);
- f(x); //~ ERROR `core::kinds::Sync` is not implemented
+ f(x);
+ //~^ ERROR `core::kinds::Sync` is not implemented
+ //~^^ ERROR `core::kinds::Sync` is not implemented
+ //~^^^ ERROR `core::kinds::Sync` is not implemented
}
task::spawn(move|| {
//~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
let y = x;
println!("{}", y);
});
let x = Rc::new(5i);
bar(x);
//~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
}
let x = Rc::new(RefCell::new(5i));
bar(x);
//~^ ERROR the trait `core::kinds::Sync` is not implemented
+ //~^^ ERROR the trait `core::kinds::Sync` is not implemented
}
// except according to those terms.
// Test range syntax - type errors.
+#![feature(slicing_syntax)]
pub fn main() {
// Mixed types.
// except according to those terms.
// Test range syntax - borrow errors.
+#![feature(slicing_syntax)]
pub fn main() {
let r = {
// in this file all test region bound and lifetime violations that are
// detected during type check.
+extern crate core;
+use core::ptr::Unique;
+
fn assert_send<T:Send>() { }
-trait Dummy { }
+trait Dummy:Send { }
// lifetime pointers with 'static lifetime are ok
fn object_with_random_bound_not_ok<'a>() {
assert_send::<&'a (Dummy+'a)>();
- //~^ ERROR not implemented
+ //~^ ERROR reference has a longer lifetime
}
fn object_with_send_bound_not_ok<'a>() {
// unsafe pointers are ok unless they point at unsendable things
-fn unsafe_ok1<'a>(_: &'a int) {
- assert_send::<*const int>();
- assert_send::<*mut int>();
-}
+struct UniqueUnsafePtr(Unique<*const int>);
-fn unsafe_ok2<'a>(_: &'a int) {
- assert_send::<*const &'a int>(); //~ ERROR declared lifetime bound not satisfied
-}
+unsafe impl Send for UniqueUnsafePtr {}
-fn unsafe_ok3<'a>(_: &'a int) {
- assert_send::<*mut &'a int>(); //~ ERROR declared lifetime bound not satisfied
+fn unsafe_ok1<'a>(_: &'a int) {
+ assert_send::<UniqueUnsafePtr>();
}
fn main() {
x[Foo..]; //~ ERROR cannot take a slice of a value with type `Foo`
x[..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
x[Foo..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
- x[mut]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut Foo..]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut ..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut Foo..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
}
fn main() {
let x: &[int] = &[1, 2, 3, 4, 5];
// Can't mutably slice an immutable slice
- let y = x[mut 2..4]; //~ ERROR cannot borrow
+ let slice: &mut [int] = &mut [0, 1];
+ x[2..4] = slice; //~ ERROR cannot borrow
}
let x: &[int] = &[1, 2, 3, 4, 5];
// Immutable slices are not mutable.
let y: &mut[_] = x[2..4]; //~ ERROR cannot borrow immutable dereference of `&`-pointer as mutabl
-
- let x: &mut [int] = &mut [1, 2, 3, 4, 5];
- // Can't borrow mutably twice
- let y = x[mut 1..2];
- let y = x[mut 4..5]; //~ERROR cannot borrow
}
pub fn main() {
test_send::<rand::TaskRng>();
//~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
}
fn main() {
let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0i)});
test(us);
+ //~^ ERROR `core::kinds::Sync` is not implemented
let uns = UnsafeCell::new(NoSync{m: marker::NoSync});
test(uns);
+ //~^ ERROR `core::kinds::Sync` is not implemented
let ms = MySync{u: uns};
test(ms);
+ //~^ ERROR `core::kinds::Sync` is not implemented
let ns = NoSync{m: marker::NoSync};
test(ns);
fn main() {
let i = box Rc::new(100i);
- f(i); //~ ERROR `core::kinds::Send` is not implemented
+ f(i);
+ //~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
}
fn main() {
let cat = "kitty".to_string();
- let (tx, _) = channel(); //~ ERROR `core::kinds::Send` is not implemented
+ let (tx, _) = channel();
+ //~^ ERROR `core::kinds::Send` is not implemented
+ //~^^ ERROR `core::kinds::Send` is not implemented
tx.send(foo(42, Rc::new(cat)));
}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-android: FIXME(#10381)
+// min-lldb-version: 310
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:run
+
+// DESTRUCTURED STRUCT
+// gdb-command:print x
+// gdb-check:$1 = 400
+// gdb-command:print y
+// gdb-check:$2 = 401.5
+// gdb-command:print z
+// gdb-check:$3 = true
+// gdb-command:continue
+
+// DESTRUCTURED TUPLE
+// gdb-command:print/x _i8
+// gdb-check:$4 = 0x6f
+// gdb-command:print/x _u8
+// gdb-check:$5 = 0x70
+// gdb-command:print _i16
+// gdb-check:$6 = -113
+// gdb-command:print _u16
+// gdb-check:$7 = 114
+// gdb-command:print _i32
+// gdb-check:$8 = -115
+// gdb-command:print _u32
+// gdb-check:$9 = 116
+// gdb-command:print _i64
+// gdb-check:$10 = -117
+// gdb-command:print _u64
+// gdb-check:$11 = 118
+// gdb-command:print _f32
+// gdb-check:$12 = 119.5
+// gdb-command:print _f64
+// gdb-check:$13 = 120.5
+// gdb-command:continue
+
+// MORE COMPLEX CASE
+// gdb-command:print v1
+// gdb-check:$14 = 80000
+// gdb-command:print x1
+// gdb-check:$15 = 8000
+// gdb-command:print *y1
+// gdb-check:$16 = 80001.5
+// gdb-command:print z1
+// gdb-check:$17 = false
+// gdb-command:print *x2
+// gdb-check:$18 = -30000
+// gdb-command:print y2
+// gdb-check:$19 = -300001.5
+// gdb-command:print *z2
+// gdb-check:$20 = true
+// gdb-command:print v2
+// gdb-check:$21 = 854237.5
+// gdb-command:continue
+
+// SIMPLE IDENTIFIER
+// gdb-command:print i
+// gdb-check:$22 = 1234
+// gdb-command:continue
+
+// gdb-command:print simple_struct_ident
+// gdb-check:$23 = {x = 3537, y = 35437.5, z = true}
+// gdb-command:continue
+
+// gdb-command:print simple_tuple_ident
+// gdb-check:$24 = {34903493, 232323}
+// gdb-command:continue
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:type format add --format hex char
+// lldb-command:type format add --format hex 'unsigned char'
+
+// lldb-command:run
+
+// DESTRUCTURED STRUCT
+// lldb-command:print x
+// lldb-check:[...]$0 = 400
+// lldb-command:print y
+// lldb-check:[...]$1 = 401.5
+// lldb-command:print z
+// lldb-check:[...]$2 = true
+// lldb-command:continue
+
+// DESTRUCTURED TUPLE
+// lldb-command:print _i8
+// lldb-check:[...]$3 = 0x6f
+// lldb-command:print _u8
+// lldb-check:[...]$4 = 0x70
+// lldb-command:print _i16
+// lldb-check:[...]$5 = -113
+// lldb-command:print _u16
+// lldb-check:[...]$6 = 114
+// lldb-command:print _i32
+// lldb-check:[...]$7 = -115
+// lldb-command:print _u32
+// lldb-check:[...]$8 = 116
+// lldb-command:print _i64
+// lldb-check:[...]$9 = -117
+// lldb-command:print _u64
+// lldb-check:[...]$10 = 118
+// lldb-command:print _f32
+// lldb-check:[...]$11 = 119.5
+// lldb-command:print _f64
+// lldb-check:[...]$12 = 120.5
+// lldb-command:continue
+
+// MORE COMPLEX CASE
+// lldb-command:print v1
+// lldb-check:[...]$13 = 80000
+// lldb-command:print x1
+// lldb-check:[...]$14 = 8000
+// lldb-command:print *y1
+// lldb-check:[...]$15 = 80001.5
+// lldb-command:print z1
+// lldb-check:[...]$16 = false
+// lldb-command:print *x2
+// lldb-check:[...]$17 = -30000
+// lldb-command:print y2
+// lldb-check:[...]$18 = -300001.5
+// lldb-command:print *z2
+// lldb-check:[...]$19 = true
+// lldb-command:print v2
+// lldb-check:[...]$20 = 854237.5
+// lldb-command:continue
+
+// SIMPLE IDENTIFIER
+// lldb-command:print i
+// lldb-check:[...]$21 = 1234
+// lldb-command:continue
+
+// lldb-command:print simple_struct_ident
+// lldb-check:[...]$22 = Struct { x: 3537, y: 35437.5, z: true }
+// lldb-command:continue
+
+// lldb-command:print simple_tuple_ident
+// lldb-check:[...]$23 = (34903493, 232323)
+// lldb-command:continue
+
+struct Struct {
+ x: i16,
+ y: f32,
+ z: bool
+}
+
+fn main() {
+
+ let s = Struct {
+ x: 400,
+ y: 401.5,
+ z: true
+ };
+
+ for &Struct { x, y, z } in [s].iter() {
+ zzz(); // #break
+ }
+
+ let tuple: (i8, u8, i16, u16, i32, u32, i64, u64, f32, f64) =
+ (0x6f, 0x70, -113, 114, -115, 116, -117, 118, 119.5, 120.5);
+
+ for &(_i8, _u8, _i16, _u16, _i32, _u32, _i64, _u64, _f32, _f64) in [tuple].iter() {
+ zzz(); // #break
+ }
+
+ let more_complex: (i32, &Struct, Struct, Box<f64>) =
+ (80000,
+ &Struct {
+ x: 8000,
+ y: 80001.5,
+ z: false
+ },
+ Struct {
+ x: -30000,
+ y: -300001.5,
+ z: true
+ },
+ box 854237.5);
+
+ for &(v1,
+ &Struct { x: x1, y: ref y1, z: z1 },
+ Struct { x: ref x2, y: y2, z: ref z2 },
+ box v2) in [more_complex].iter() {
+ zzz(); // #break
+ }
+
+ for i in range(1234, 1235i) {
+ zzz(); // #break
+ }
+
+ for simple_struct_ident in
+ vec![Struct {
+ x: 3537,
+ y: 35437.5,
+ z: true
+ }].into_iter() {
+ zzz(); // #break
+ }
+
+ for simple_tuple_ident in vec![(34903493u32, 232323i64)].into_iter() {
+ zzz(); // #break
+ }
+}
+
+fn zzz() {()}
// except according to those terms.
// ignore-android: FIXME(#10381)
-// ignore-test: Not sure what is going on here --pcwalton
// min-lldb-version: 310
// compile-flags:-g
[int; 3]) as &[int; 3]) as *const _ as *const [int; 3]) as
*const [int; (3u as uint)] as *const [int; 3]);
- (match (() as ()) {
- () => {
- #[inline]
- #[allow(dead_code)]
- static __STATIC_FMTSTR: &'static [&'static str] =
- (&([("test" as &'static str)] as [&'static str; 1]) as
- &'static [&'static str; 1]);
- ((::std::fmt::format as
- fn(&core::fmt::Arguments<'_>) -> collections::string::String {std::fmt::format})((&((::std::fmt::Arguments::new
- as
- fn(&[&str], &[core::fmt::Argument<'_>]) -> core::fmt::Arguments<'_> {core::fmt::Arguments<'a>::new})((__STATIC_FMTSTR
- as
- &'static [&'static str]),
- (&([]
- as
- [core::fmt::Argument<'_>; 0])
- as
- &[core::fmt::Argument<'_>; 0]))
- as
- core::fmt::Arguments<'_>)
- as
- &core::fmt::Arguments<'_>))
- as collections::string::String)
- }
- } as collections::string::String);
+ ((::std::fmt::format as
+ fn(core::fmt::Arguments<'_>) -> collections::string::String {std::fmt::format})(((::std::fmt::Arguments::new
+ as
+ fn(&[&str], &[core::fmt::Argument<'_>]) -> core::fmt::Arguments<'_> {core::fmt::Arguments<'a>::new})(({
+ #[inline]
+ #[allow(dead_code)]
+ static __STATIC_FMTSTR:
+ &'static [&'static str]
+ =
+ (&([("test"
+ as
+ &'static str)]
+ as
+ [&'static str; 1])
+ as
+ &'static [&'static str; 1]);
+ (__STATIC_FMTSTR
+ as
+ &'static [&'static str])
+ }
+ as
+ &[&str]),
+ (&(match (()
+ as
+ ())
+ {
+ ()
+ =>
+ ([]
+ as
+ [core::fmt::Argument<'_>; 0]),
+ }
+ as
+ [core::fmt::Argument<'_>; 0])
+ as
+ &[core::fmt::Argument<'_>; 0]))
+ as
+ core::fmt::Arguments<'_>))
+ as collections::string::String);
}
pub type Foo = [int; (3u as uint)];
pub struct Bar {
// except according to those terms.
pub static X: &'static str = "foobarbaz";
-pub static Y: &'static [u8] = include_bin!("lib.rs");
+pub static Y: &'static [u8] = include_bytes!("lib.rs");
trait Foo {}
impl Foo for uint {}
#![allow(dead_code)]
#![allow(unused_unsafe)]
+use std::kinds::Sync;
+
struct Foo {
a: uint,
b: *const ()
}
+unsafe impl Sync for Foo {}
+
fn foo<T>(a: T) -> T {
a
}
use std::ptr;
-static a: *const u8 = 0 as *const u8;
+struct TestStruct {
+ x: *const u8
+}
+
+unsafe impl Sync for TestStruct {}
+
+static a: TestStruct = TestStruct{x: 0 as *const u8};
pub fn main() {
- assert_eq!(a, ptr::null());
+ assert_eq!(a.x, ptr::null());
}
extern crate libc;
-extern fn foo() {}
+struct TestStruct {
+ x: *const libc::c_void
+}
+unsafe impl Sync for TestStruct {}
+
+extern fn foo() {}
const x: extern "C" fn() = foo;
-static y: *const libc::c_void = x as *const libc::c_void;
-const a: &'static int = &10;
-static b: *const int = a as *const int;
+static y: TestStruct = TestStruct { x: x as *const libc::c_void };
pub fn main() {
- assert_eq!(x as *const libc::c_void, y);
- assert_eq!(a as *const int, b);
+ assert_eq!(x as *const libc::c_void, y.x);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#[deriving(Sync,Send,Copy)]
+#[deriving(Copy)]
struct Test;
pub fn main() {}
}
}
-struct Foo<Sized? T> {
+struct Foo<T: ?Sized> {
f: T
}
// As dst-struct.rs, but the unsized field is the only field in the struct.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
ptr: T
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
f1: int,
f2: &'static str,
ptr: T
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
f1: int,
f2: &'static str,
ptr: T
// except according to those terms.
+extern crate core;
+
+use core::nonzero::NonZero;
use std::mem::size_of;
+use std::rc::Rc;
+use std::sync::Arc;
trait Trait {}
// Pointers - Box<T>
assert_eq!(size_of::<Box<int>>(), size_of::<Option<Box<int>>>());
-
// The optimization can't apply to raw pointers
assert!(size_of::<Option<*const int>>() != size_of::<*const int>());
assert!(Some(0 as *const int).is_some()); // Can't collapse None to null
+ struct Foo {
+ _a: Box<int>
+ }
+ struct Bar(Box<int>);
+
+ // Should apply through structs
+ assert_eq!(size_of::<Foo>(), size_of::<Option<Foo>>());
+ assert_eq!(size_of::<Bar>(), size_of::<Option<Bar>>());
+ // and tuples
+ assert_eq!(size_of::<(u8, Box<int>)>(), size_of::<Option<(u8, Box<int>)>>());
+ // and fixed-size arrays
+ assert_eq!(size_of::<[Box<int>, ..1]>(), size_of::<Option<[Box<int>, ..1]>>());
+
+ // Should apply to NonZero
+ assert_eq!(size_of::<NonZero<uint>>(), size_of::<Option<NonZero<uint>>>());
+ assert_eq!(size_of::<NonZero<*mut i8>>(), size_of::<Option<NonZero<*mut i8>>>());
+
+ // Should apply to types that use NonZero internally
+ assert_eq!(size_of::<Vec<int>>(), size_of::<Option<Vec<int>>>());
+ assert_eq!(size_of::<Arc<int>>(), size_of::<Option<Arc<int>>>());
+ assert_eq!(size_of::<Rc<int>>(), size_of::<Option<Rc<int>>>());
+
+ // Should apply to types that have NonZero transitively
+ assert_eq!(size_of::<String>(), size_of::<Option<String>>());
+
}
let mut buf = Vec::new();
{
let w = &mut buf as &mut io::Writer;
- format_args!(|args| { write!(w, "{}", args); }, "{}", 1i);
- format_args!(|args| { write!(w, "{}", args); }, "test");
- format_args!(|args| { write!(w, "{}", args); }, "{test}", test=3i);
+ write!(w, "{}", format_args!("{}", 1i));
+ write!(w, "{}", format_args!("test"));
+ write!(w, "{}", format_args!("{test}", test=3i));
}
let s = String::from_utf8(buf).unwrap();
t!(s, "1test3");
- let s = format_args!(fmt::format, "hello {}", "world");
+ let s = fmt::format(format_args!("hello {}", "world"));
t!(s, "hello world");
- let s = format_args!(|args| {
- format!("{}: {}", "args were", args)
- }, "hello {}", "world");
+ let s = format!("{}: {}", "args were", format_args!("hello {}", "world"));
t!(s, "args were: hello world");
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo(_: &[&str]) {}
+
+fn bad(a: &str, b: &str) {
+ foo(&[a, b]);
+}
+
+fn good(a: &str, b: &str) {
+ foo(&[a.as_slice(), b.as_slice()]);
+}
+
+fn main() {}
static NAME: &'static str = "hello world";
fn main() {
- match NAME.to_ascii_lower().as_slice() {
+ match NAME.to_ascii_lowercase().as_slice() {
"foo" => {}
_ => {}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+use std::ops::Fn;
+
+struct Foo<T>(T);
+
+impl<T: Copy> Fn<(), T> for Foo<T> {
+ extern "rust-call" fn call(&self, _: ()) -> T {
+ match *self {
+ Foo(t) => t
+ }
+ }
+}
+
+fn main() {
+ let t: u8 = 1;
+ println!("{}", Foo(t)());
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo<'r>() {
+ let maybe_value_ref: Option<&'r u8> = None;
+
+ let _ = maybe_value_ref.map(|& ref v| v);
+ let _ = maybe_value_ref.map(|& ref v| -> &'r u8 {v});
+ let _ = maybe_value_ref.map(|& ref v: &'r u8| -> &'r u8 {v});
+ let _ = maybe_value_ref.map(|& ref v: &'r u8| {v});
+}
+
+fn main() {
+ foo();
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a> {
+ listener: ||: 'a
+}
+
+impl<'a> Foo<'a> {
+ fn new(listener: ||: 'a) -> Foo<'a> {
+ Foo { listener: listener }
+ }
+}
+
+fn main() {
+ let a = Foo::new(|| {});
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-static TEST_VALUE : *const [int; 2] = 0x1234 as *const [int; 2];
+struct TestStruct {
+ x: *const [int; 2]
+}
+
+unsafe impl Sync for TestStruct {}
+
+static TEST_VALUE : TestStruct = TestStruct{x: 0x1234 as *const [int; 2]};
fn main() {}
impl TraitWithSend for IndirectBlah {}
impl IndirectTraitWithSend for IndirectBlah {}
-fn test_trait<Sized? T: Send>() { println!("got here!") }
+fn test_trait<T: Send + ?Sized>() { println!("got here!") }
fn main() {
test_trait::<TraitWithSend>();
// Test that astconv doesn't forget about mutability of &mut str
fn main() {
- fn foo<Sized? T>(_: &mut T) {}
+ fn foo<T: ?Sized>(_: &mut T) {}
let _f: fn(&mut str) = foo;
}
use std::kinds::marker;
use std::cell::UnsafeCell;
+struct MyUnsafePack<T>(UnsafeCell<T>);
+
+unsafe impl<T: Send> Sync for MyUnsafePack<T> {}
+
struct MyUnsafe<T> {
- value: UnsafeCell<T>
+ value: MyUnsafePack<T>
}
impl<T> MyUnsafe<T> {
fn forbidden(&self) {}
}
+unsafe impl<T: Send> Sync for MyUnsafe<T> {}
+
enum UnsafeEnum<T> {
VariantSafe,
VariantUnsafe(UnsafeCell<T>)
}
+unsafe impl<T: Send> Sync for UnsafeEnum<T> {}
+
static STATIC1: UnsafeEnum<int> = UnsafeEnum::VariantSafe;
-static STATIC2: UnsafeCell<int> = UnsafeCell { value: 1 };
-const CONST: UnsafeCell<int> = UnsafeCell { value: 1 };
+static STATIC2: MyUnsafePack<int> = MyUnsafePack(UnsafeCell { value: 1 });
+const CONST: MyUnsafePack<int> = MyUnsafePack(UnsafeCell { value: 1 });
static STATIC3: MyUnsafe<int> = MyUnsafe{value: CONST};
-static STATIC4: &'static UnsafeCell<int> = &STATIC2;
+static STATIC4: &'static MyUnsafePack<int> = &STATIC2;
struct Wrap<T> {
value: T
}
-static UNSAFE: UnsafeCell<int> = UnsafeCell{value: 1};
-static WRAPPED_UNSAFE: Wrap<&'static UnsafeCell<int>> = Wrap { value: &UNSAFE };
+unsafe impl<T: Send> Sync for Wrap<T> {}
+
+static UNSAFE: MyUnsafePack<int> = MyUnsafePack(UnsafeCell{value: 2});
+static WRAPPED_UNSAFE: Wrap<&'static MyUnsafePack<int>> = Wrap { value: &UNSAFE };
fn main() {
let a = &STATIC1;
STATIC3.forbidden()
}
-
-
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Borrow<Sized? Borrowed> {
+ fn borrow(&self) -> &Borrowed;
+}
+
+impl<T: Sized> Borrow<T> for T {
+ fn borrow(&self) -> &T { self }
+}
+
+trait Foo {
+ fn foo(&self, other: &Self);
+}
+
+fn bar<K, Q>(k: &K, q: &Q) where K: Borrow<Q>, Q: Foo {
+ q.foo(k.borrow())
+}
+
+struct MyTree<K>;
+
+impl<K> MyTree<K> {
+ // This caused a failure in #18906
+ fn bar<Q>(k: &K, q: &Q) where K: Borrow<Q>, Q: Foo {
+ q.foo(k.borrow())
+ }
+}
+
+fn main() {}
use std::mem::{replace, swap};
use std::mem;
use std::task;
+ use std::kinds::Send;
pub struct Stuff<T> {
state: state,
payload: Option<T>
}
+ unsafe impl<T:Send> Send for packet<T> {}
+
pub fn packet<T:Send>() -> *const packet<T> {
unsafe {
let p: *const packet<T> = mem::transmute(box Stuff{
use std::mem;
pub struct ping(::pipes::send_packet<pong>);
+
+ unsafe impl Send for ping {}
+
pub struct pong(::pipes::send_packet<ping>);
+ unsafe impl Send for pong {}
+
pub fn liberate_ping(p: ping) -> ::pipes::send_packet<pong> {
unsafe {
let _addr : *const ::pipes::send_packet<pong> = match &p {
use std::sync::Mutex;
pub fn main() {
- unsafe {
- let x = Some(Mutex::new(true));
- match x {
- Some(ref z) if *z.lock() => {
- assert!(*z.lock());
- },
- _ => panic!()
- }
+ let x = Some(Mutex::new(true));
+ match x {
+ Some(ref z) if *z.lock().unwrap() => {
+ assert!(*z.lock().unwrap());
+ },
+ _ => panic!()
}
}
use std::kinds::Sized;
// Note: this must be generic for the problem to show up
-trait Foo<A> for Sized? {
+trait Foo<A> for ?Sized {
fn foo(&self);
}
// Test range syntax.
+#![feature(slicing_syntax)]
+
fn foo() -> int { 42 }
pub fn main() {
.as_slice()
.starts_with("/* this is for "));
assert!(
- include_bin!("syntax-extension-source-utils-files/includeme.fragment")
+ include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().as_slice().ends_with("m1::m2")));
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod base {
+ pub trait HasNew<T> {
+ fn new() -> T;
+ }
+
+ pub struct Foo {
+ dummy: (),
+ }
+
+ impl HasNew<Foo> for Foo {
+ fn new() -> Foo {
+ Foo { dummy: () }
+ }
+ }
+
+ pub struct Bar {
+ dummy: (),
+ }
+
+ impl HasNew<Bar> for Bar {
+ fn new() -> Bar {
+ Bar { dummy: () }
+ }
+ }
+}
+
+pub fn main() {
+ let _f: base::Foo = base::HasNew::new();
+ let _b: base::Bar = base::HasNew::new();
+}
// This test checks that the `_` type placeholder works
// correctly for enabling type inference.
-static CONSTEXPR: *const int = &413 as *const _;
+struct TestStruct {
+ x: *const int
+}
+
+unsafe impl Sync for TestStruct {}
+
+static CONSTEXPR: TestStruct = TestStruct{x: &413 as *const _};
+
pub fn main() {
let x: Vec<_> = range(0u, 5).collect();
//
// ignore-lexer-test FIXME #15879
-// Test syntax checks for `Sized?` syntax.
+// Test syntax checks for `?Sized` syntax.
-trait T1 for Sized? {}
-pub trait T2 for Sized? {}
-trait T3<X: T1> for Sized?: T2 {}
-trait T4<Sized? X> {}
-trait T5<Sized? X, Y> {}
-trait T6<Y, Sized? X> {}
-trait T7<Sized? X, Sized? Y> {}
-trait T8<Sized? X: T2> {}
-struct S1<Sized? X>;
-enum E<Sized? X> {}
-impl <Sized? X> T1 for S1<X> {}
-fn f<Sized? X>() {}
-type TT<Sized? T> = T;
+trait T1 for ?Sized {}
+pub trait T2 for ?Sized {}
+trait T3<X: T1> for ?Sized: T2 {}
+trait T4<X: ?Sized> {}
+trait T5<X: ?Sized, Y> {}
+trait T6<Y, X: ?Sized> {}
+trait T7<X: ?Sized, Y: ?Sized> {}
+trait T8<X: ?Sized+T2> {}
+trait T9<X: T2 + ?Sized> {}
+struct S1<X: ?Sized>;
+enum E<X: ?Sized> {}
+impl <X: ?Sized> T1 for S1<X> {}
+fn f<X: ?Sized>() {}
+type TT<T: ?Sized> = T;
pub fn main() {
}
// Test sized-ness checking in substitution.
// Unbounded.
-fn f1<Sized? X>(x: &X) {
+fn f1<X: ?Sized>(x: &X) {
f1::<X>(x);
}
fn f2<X>(x: &X) {
}
// Bounded.
-trait T for Sized? {}
-fn f3<Sized? X: T>(x: &X) {
+trait T for ?Sized {}
+fn f3<X: T+?Sized>(x: &X) {
f3::<X>(x);
}
fn f4<X: T>(x: &X) {
}
// Self type.
-trait T2 for Sized? {
+trait T2 for ?Sized {
fn f() -> Box<Self>;
}
struct S;
box S
}
}
-fn f5<Sized? X: T2>(x: &X) {
+fn f5<X: ?Sized+T2>(x: &X) {
let _: Box<X> = T2::f();
}
fn f6<X: T2>(x: &X) {
let _: Box<X> = T2::f();
}
-trait T3 for Sized? {
+trait T3 for ?Sized {
fn f() -> Box<Self>;
}
impl T3 for S {
box S
}
}
-fn f7<Sized? X: T3>(x: &X) {
+fn f7<X: ?Sized+T3>(x: &X) {
// This is valid, but the unsized bound on X is irrelevant because any type
// which implements T3 must have statically known size.
let _: Box<X> = T3::f();
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
-trait T5<Sized? X> {
+trait T5<X: ?Sized> {
// not an error (for now)
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
-trait T7<Sized? X: T> {
+trait T7<X: ?Sized+T> {
// not an error (for now)
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
// The last field in a struct or variant may be unsized
-struct S2<Sized? X> {
+struct S2<X: ?Sized> {
f: X,
}
-struct S3<Sized? X> {
+struct S3<X: ?Sized> {
f1: int,
f2: X,
}
-enum E<Sized? X> {
+enum E<X: ?Sized> {
V1(X),
V2{x: X},
V3(int, X),
fn f(p: &mut Point) { p.z = 13; }
pub fn main() {
- unsafe {
- let x = Some(Mutex::new(true));
- match x {
- Some(ref z) if *z.lock() => {
- assert!(*z.lock());
- },
- _ => panic!()
- }
+ let x = Some(Mutex::new(true));
+ match x {
+ Some(ref z) if *z.lock().unwrap() => {
+ assert!(*z.lock().unwrap());
+ },
+ _ => panic!()
}
}