language: generic
sudo: required
-dist: trusty
+services:
+ - docker
# LLVM takes awhile to check out and otherwise we'll manage the submodules in
# our configure script, so disable auto submodule management.
submodules: false
before_install:
- - echo 0 | sudo tee /proc/sys/net/ipv6/conf/lo/disable_ipv6
- - echo 'deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main' | sudo tee -a /etc/apt/sources.list
- - echo 'deb-src http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main' | sudo tee -a /etc/apt/sources.list
- - sudo apt-get update
- - sudo apt-get --force-yes install curl make g++ python2.7 git zlib1g-dev libedit-dev llvm-3.7-tools
+ - docker build -t rust -f src/etc/Dockerfile src/etc
script:
- - ./configure --llvm-root=/usr/lib/llvm-3.7
- - make tidy && make check-notidy -j4
+ - docker run -v `pwd`:/build rust
+ sh -c "
+ ./configure --llvm-root=/usr/lib/llvm-3.7 &&
+ make tidy &&
+ make check-notidy -j4
+ "
# Real testing happens on http://buildbot.rust-lang.org/
#
download(sha_path, sha_url, verbose)
download(temp_path, url, verbose)
verify(temp_path, sha_path, verbose)
- print("moving " + temp_path + " to " + path)
+ print("moving {} to {}".format(temp_path, path))
shutil.move(temp_path, path)
finally:
delete_if_present(sha_path)
def download(path, url, verbose):
- print("downloading " + url + " to " + path)
+ print("downloading {} to {}".format(url, path))
# see http://serverfault.com/questions/301128/how-to-download
if sys.platform == 'win32':
run(["PowerShell.exe", "/nologo", "-Command",
if os.path.exists(self.bin_root()):
shutil.rmtree(self.bin_root())
channel = self.stage0_rustc_channel()
- filename = "rust-std-" + channel + "-" + self.build + ".tar.gz"
+ filename = "rust-std-{}-{}.tar.gz".format(channel, self.build)
url = "https://static.rust-lang.org/dist/" + self.stage0_rustc_date()
tarball = os.path.join(rustc_cache, filename)
if not os.path.exists(tarball):
- get(url + "/" + filename, tarball, verbose=self.verbose)
+ get("{}/{}".format(url, filename), tarball, verbose=self.verbose)
unpack(tarball, self.bin_root(),
match="rust-std-" + self.build,
verbose=self.verbose)
- filename = "rustc-" + channel + "-" + self.build + ".tar.gz"
+ filename = "rustc-{}-{}.tar.gz".format(channel, self.build)
url = "https://static.rust-lang.org/dist/" + self.stage0_rustc_date()
tarball = os.path.join(rustc_cache, filename)
if not os.path.exists(tarball):
- get(url + "/" + filename, tarball, verbose=self.verbose)
+ get("{}/{}".format(url, filename), tarball, verbose=self.verbose)
unpack(tarball, self.bin_root(), match="rustc", verbose=self.verbose)
with open(self.rustc_stamp(), 'w') as f:
f.write(self.stage0_rustc_date())
if self.cargo().startswith(self.bin_root()) and \
(not os.path.exists(self.cargo()) or self.cargo_out_of_date()):
channel = self.stage0_cargo_channel()
- filename = "cargo-" + channel + "-" + self.build + ".tar.gz"
+ filename = "cargo-{}-{}.tar.gz".format(channel, self.build)
url = "https://static.rust-lang.org/cargo-dist/" + self.stage0_cargo_date()
tarball = os.path.join(cargo_cache, filename)
if not os.path.exists(tarball):
- get(url + "/" + filename, tarball, verbose=self.verbose)
+ get("{}/{}".format(url, filename), tarball, verbose=self.verbose)
unpack(tarball, self.bin_root(), match="cargo", verbose=self.verbose)
with open(self.cargo_stamp(), 'w') as f:
f.write(self.stage0_cargo_date())
return os.path.join(self.bin_root(), '.cargo-stamp')
def rustc_out_of_date(self):
- if not os.path.exists(self.rustc_stamp()):
+ if not os.path.exists(self.rustc_stamp()) or self.clean:
return True
with open(self.rustc_stamp(), 'r') as f:
return self.stage0_rustc_date() != f.read()
def cargo_out_of_date(self):
- if not os.path.exists(self.cargo_stamp()):
+ if not os.path.exists(self.cargo_stamp()) or self.clean:
return True
with open(self.cargo_stamp(), 'r') as f:
return self.stage0_cargo_date() != f.read()
return ''
def build_bootstrap(self):
+ build_dir = os.path.join(self.build_dir, "bootstrap")
+ if self.clean and os.path.exists(build_dir):
+ shutil.rmtree(build_dir)
env = os.environ.copy()
- env["CARGO_TARGET_DIR"] = os.path.join(self.build_dir, "bootstrap")
+ env["CARGO_TARGET_DIR"] = build_dir
env["RUSTC"] = self.rustc()
env["LD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib")
env["DYLD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib")
raise ValueError(err)
sys.exit(err)
- return cputype + '-' + ostype
+ return "{}-{}".format(cputype, ostype)
def main():
parser = argparse.ArgumentParser(description='Build rust')
parser.add_argument('--config')
+ parser.add_argument('--clean', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
args = [a for a in sys.argv if a != '-h']
rb.rust_root = os.path.abspath(os.path.join(__file__, '../../..'))
rb.build_dir = os.path.join(os.getcwd(), "build")
rb.verbose = args.verbose
+ rb.clean = args.clean
try:
with open(args.config or 'config.toml') as config:
not a dynamic executable
$ ./example
hi!
-thread '<main>' panicked at 'failed', example.rs:1
+thread 'main' panicked at 'failed', example.rs:1
```
Success! This binary can be copied to almost any Linux machine with the same
At runtime each borrow causes a modification/check of the refcount.
-[cell-mod]: ../std/cell/
+[cell-mod]: ../std/cell/index.html
[cell]: ../std/cell/struct.Cell.html
[refcell]: ../std/cell/struct.RefCell.html
correct; documentation comments apply to the thing after them, and there's
nothing after that last comment.
-[rc-new]: https://doc.rust-lang.org/nightly/std/rc/struct.Rc.html#method.new
+[rc-new]: ../std/rc/struct.Rc.html#method.new
### Writing documentation comments
If you try running this code, the program will crash with a message like this:
```text
-thread '<main>' panicked at 'Invalid number: 11', src/bin/panic-simple.rs:5
+thread 'main' panicked at 'Invalid number: 11', src/bin/panic-simple.rs:5
```
Here's another example that is slightly less contrived. A program that accepts
the string doesn't parse as a number, you'll get a panic:
```text
-thread '<main>' panicked at 'called `Result::unwrap()` on an `Err` value: ParseIntError { kind: InvalidDigit }', /home/rustbuild/src/rust-buildbot/slave/beta-dist-rustc-linux/build/src/libcore/result.rs:729
+thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: ParseIntError { kind: InvalidDigit }', /home/rustbuild/src/rust-buildbot/slave/beta-dist-rustc-linux/build/src/libcore/result.rs:729
```
This is rather unsightly, and if this happened inside a library you're
[3]: ../std/option/enum.Option.html#method.unwrap_or
[4]: ../std/option/enum.Option.html#method.unwrap_or_else
[5]: ../std/option/enum.Option.html
-[6]: ../std/result/
+[6]: ../std/result/index.html
[7]: ../std/result/enum.Result.html#method.unwrap
[8]: ../std/fmt/trait.Debug.html
[9]: ../std/primitive.str.html#method.parse
some output that looks like this:
```text
-thread ‘<main>’ panicked at ‘This function never returns!’, hello.rs:2
+thread ‘main’ panicked at ‘This function never returns!’, hello.rs:2
```
If you want more information, you can get a backtrace by setting the
```text
$ RUST_BACKTRACE=1 ./diverges
-thread '<main>' panicked at 'This function never returns!', hello.rs:2
+thread 'main' panicked at 'This function never returns!', hello.rs:2
stack backtrace:
1: 0x7f402773a829 - sys::backtrace::write::h0942de78b6c02817K8r
2: 0x7f402773d7fc - panicking::on_panic::h3f23f9d0b5f4c91bu9w
$ export RUST_BACKTRACE=1
...
$ RUST_BACKTRACE=0 ./diverges
-thread '<main>' panicked at 'This function never returns!', hello.rs:2
+thread 'main' panicked at 'This function never returns!', hello.rs:2
note: Run with `RUST_BACKTRACE=1` for a backtrace.
```
```text
$ RUST_BACKTRACE=1 cargo run
Running `target/debug/diverges`
-thread '<main>' panicked at 'This function never returns!', hello.rs:2
+thread 'main' panicked at 'This function never returns!', hello.rs:2
stack backtrace:
1: 0x7f402773a829 - sys::backtrace::write::h0942de78b6c02817K8r
2: 0x7f402773d7fc - panicking::on_panic::h3f23f9d0b5f4c91bu9w
You win!
Please input your guess.
quit
-thread '<main>' panicked at 'Please type a number!'
+thread 'main' panicked at 'Please type a number!'
```
Ha! `quit` actually quits. As does any other non-number input. Well, this is
with this error:
```text
-thread '<main>' panicked at 'index 0 and/or 2 in `忠犬ハチ公` do not lie on
+thread 'main' panicked at 'index 0 and/or 2 in `忠犬ハチ公` do not lie on
character boundary'
```
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured
-thread '<main>' panicked at 'Some tests failed', /home/steve/src/rust/src/libtest/lib.rs:247
+thread 'main' panicked at 'Some tests failed', /home/steve/src/rust/src/libtest/lib.rs:247
```
Rust indicates that our test failed:
```
Much of the functionality that’s exposed in the standard library is also
-available via the [`core` crate](../core/). When we’re using the standard
-library, Rust automatically brings `std` into scope, allowing you to use
-its features without an explicit import. By the same token, when using
+available via the [`core` crate](../core/index.html). When we’re using the
+standard library, Rust automatically brings `std` into scope, allowing you to
+use its features without an explicit import. By the same token, when using
`#![no_std]`, Rust will bring `core` into scope for you, as well as [its
-prelude](../core/prelude/v1/). This means that a lot of code will Just Work:
+prelude](../core/prelude/v1/index.html). This means that a lot of code will Just
+Work:
```rust
#![no_std]
then the current thread will [panic] with a message like this:
```text
-thread '<main>' panicked at 'index out of bounds: the len is 3 but the index is 7'
+thread 'main' panicked at 'index out of bounds: the len is 3 but the index is 7'
```
If you want to handle out-of-bounds errors without panicking, you can use
[box]: ../std/boxed/index.html
[generic]: generics.html
[panic]: concurrency.html#panics
-[get]: http://doc.rust-lang.org/std/vec/struct.Vec.html#method.get
-[get_mut]: http://doc.rust-lang.org/std/vec/struct.Vec.html#method.get_mut
+[get]: ../std/vec/struct.Vec.html#method.get
+[get_mut]: ../std/vec/struct.Vec.html#method.get_mut
We will also be spending a lot of time talking about the different kinds
of safety and guarantees.
-[trpl]: ../book/
+[trpl]: ../book/index.html
--- /dev/null
+FROM ubuntu:xenial
+
+# curl
+# Download stage0, see src/bootstrap/bootstrap.py
+# g++
+# Compile LLVM binding in src/rustllvm
+# git
+# Get commit hash and commit date in version string
+# make
+# Run build scripts in mk
+# libedit-dev zlib1g-dev
+# LLVM dependencies as packaged in Ubuntu
+# (They are optional, but Ubuntu package enables them)
+# llvm-3.7-dev (installed by llvm-3.7-tools)
+# LLVM
+# llvm-3.7-tools
+# FileCheck is used to run tests in src/test/codegen
+
+RUN apt-get update && apt-get -y install \
+ curl g++ git make \
+ libedit-dev zlib1g-dev \
+ llvm-3.7-tools
+
+RUN mkdir /build
+WORKDIR /build
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
-// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
-// guard for this in case we're running on a platform which can use all 4GB in
-// user-space. e.g. PAE or x32
+// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
+// an extra guard for this in case we're running on a platform which can use
+// all 4GB in user-space. e.g. PAE or x32
#[inline]
fn alloc_guard(alloc_size: usize) {
// Check if right-most child is underfull.
let mut last_edge = internal.last_edge();
let right_child_len = last_edge.reborrow().descend().len();
- if right_child_len < node::CAPACITY / 2 {
+ if right_child_len < node::MIN_LEN {
// We need to steal.
let mut last_kv = match last_edge.left_kv() {
Ok(left) => left,
Err(_) => unreachable!(),
};
- last_kv.bulk_steal_left(node::CAPACITY/2 - right_child_len);
+ last_kv.bulk_steal_left(node::MIN_LEN - right_child_len);
last_edge = last_kv.right_edge();
}
cur_node = last_edge.descend();
}
}
+
+ /// Splits the collection into two at the given key. Returns everything after the given key,
+ /// including the key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(btree_split_off)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ /// a.insert(17, "d");
+ /// a.insert(41, "e");
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ ///
+ /// assert_eq!(b[&3], "c");
+ /// assert_eq!(b[&17], "d");
+ /// assert_eq!(b[&41], "e");
+ /// ```
+ #[unstable(feature = "btree_split_off",
+ reason = "recently added as part of collections reform 2",
+ issue = "19986")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self where K: Borrow<Q> {
+ if self.is_empty() {
+ return Self::new();
+ }
+
+ let total_num = self.len();
+
+ let mut right = Self::new();
+ for _ in 0..(self.root.as_ref().height()) {
+ right.root.push_level();
+ }
+
+ {
+ let mut left_node = self.root.as_mut();
+ let mut right_node = right.root.as_mut();
+
+ loop {
+ let mut split_edge = match search::search_node(left_node, key) {
+ // key is going to the right tree
+ Found(handle) => handle.left_edge(),
+ GoDown(handle) => handle
+ };
+
+ split_edge.move_suffix(&mut right_node);
+
+ match (split_edge.force(), right_node.force()) {
+ (Internal(edge), Internal(node)) => {
+ left_node = edge.descend();
+ right_node = node.first_edge().descend();
+ }
+ (Leaf(_), Leaf(_)) => { break; },
+ _ => { unreachable!(); }
+ }
+ }
+ }
+
+ self.fix_right_border();
+ right.fix_left_border();
+
+ if self.root.as_ref().height() < right.root.as_ref().height() {
+ self.recalc_length();
+ right.length = total_num - self.len();
+ } else {
+ right.recalc_length();
+ self.length = total_num - right.len();
+ }
+
+ right
+ }
+
+ /// Recomputes the stored element count by traversing the whole tree, for use when it may be stale.
+ fn recalc_length(&mut self) {
+ fn dfs<K, V>(node: NodeRef<marker::Immut, K, V, marker::LeafOrInternal>) -> usize {
+ let mut res = node.len();
+
+ if let Internal(node) = node.force() {
+ let mut edge = node.first_edge();
+ loop {
+ res += dfs(edge.reborrow().descend());
+ match edge.right_kv() {
+ Ok(right_kv) => { edge = right_kv.right_edge(); },
+ Err(_) => { break; }
+ }
+ }
+ }
+
+ res
+ }
+
+ self.length = dfs(self.root.as_ref());
+ }
+
+ /// Removes empty levels on the top.
+ fn fix_top(&mut self) {
+ loop {
+ {
+ let node = self.root.as_ref();
+ if node.height() == 0 || node.len() > 0 {
+ break;
+ }
+ }
+ self.root.pop_level();
+ }
+ }
+
+ fn fix_right_border(&mut self) {
+ self.fix_top();
+
+ {
+ let mut cur_node = self.root.as_mut();
+
+ while let Internal(node) = cur_node.force() {
+ let mut last_kv = node.last_kv();
+
+ if last_kv.can_merge() {
+ cur_node = last_kv.merge().descend();
+ } else {
+ let right_len = last_kv.reborrow().right_edge().descend().len();
+ // `MIN_LEN + 1` to avoid readjusting if a merge happens on the next level.
+ if right_len < node::MIN_LEN + 1 {
+ last_kv.bulk_steal_left(node::MIN_LEN + 1 - right_len);
+ }
+ cur_node = last_kv.right_edge().descend();
+ }
+ }
+ }
+
+ self.fix_top();
+ }
+
+ /// The mirror image of `fix_right_border`: repairs the left border instead.
+ fn fix_left_border(&mut self) {
+ self.fix_top();
+
+ {
+ let mut cur_node = self.root.as_mut();
+
+ while let Internal(node) = cur_node.force() {
+ let mut first_kv = node.first_kv();
+
+ if first_kv.can_merge() {
+ cur_node = first_kv.merge().descend();
+ } else {
+ let left_len = first_kv.reborrow().left_edge().descend().len();
+ if left_len < node::MIN_LEN + 1 {
+ first_kv.bulk_steal_right(node::MIN_LEN + 1 - left_len);
+ }
+ cur_node = first_kv.left_edge().descend();
+ }
+ }
+ }
+
+ self.fix_top();
+ }
}
impl<'a, K: 'a, V: 'a> IntoIterator for &'a BTreeMap<K, V> {
use boxed::Box;
const B: usize = 6;
+pub const MIN_LEN: usize = B - 1;
pub const CAPACITY: usize = 2 * B - 1;
/// The underlying representation of leaf nodes. Note that it is often unsafe to actually store
let len = self.len();
Handle::new_edge(self, len)
}
+
+ /// Note that `self` must be nonempty.
+ pub fn first_kv(self) -> Handle<Self, marker::KV> {
+ debug_assert!(self.len() > 0);
+ Handle::new_kv(self, 0)
+ }
+
+ /// Note that `self` must be nonempty.
+ pub fn last_kv(self) -> Handle<Self, marker::KV> {
+ let len = self.len();
+ debug_assert!(len > 0);
+ Handle::new_kv(self, len - 1)
+ }
}
impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
}
}
+ fn correct_childrens_parent_links(&mut self, first: usize, after_last: usize) {
+ for i in first..after_last {
+ Handle::new_edge(unsafe { self.reborrow_mut() }, i).correct_parent_link();
+ }
+ }
+
+ fn correct_all_childrens_parent_links(&mut self) {
+ let len = self.len();
+ self.correct_childrens_parent_links(0, len + 1);
+ }
+
/// Adds a key/value pair and an edge to go to the left of that pair to
/// the beginning of the node.
pub fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) {
self.as_leaf_mut().len += 1;
- for i in 0..self.len()+1 {
- Handle::new_edge(self.reborrow_mut(), i).correct_parent_link();
- }
+ self.correct_all_childrens_parent_links();
}
-
}
}
(key, val, edge)
}
}
+
+ fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) {
+ (
+ self.keys_mut().as_mut_ptr(),
+ self.vals_mut().as_mut_ptr()
+ )
+ }
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
}
/// This does stealing similar to `steal_left` but steals multiple elements at once.
- pub fn bulk_steal_left(&mut self, n: usize) {
+ pub fn bulk_steal_left(&mut self, count: usize) {
unsafe {
- // Get raw pointers to left child's keys, values and edges.
- let (left_len, left_k, left_v, left_e) = {
- let mut left = self.reborrow_mut().left_edge().descend();
-
- (left.len(),
- left.keys_mut().as_mut_ptr(),
- left.vals_mut().as_mut_ptr(),
- match left.force() {
- ForceResult::Leaf(_) => None,
- ForceResult::Internal(mut i) => Some(i.as_internal_mut().edges.as_mut_ptr()),
- })
- };
-
- // Get raw pointers to right child's keys, values and edges.
- let (right_len, right_k, right_v, right_e) = {
- let mut right = self.reborrow_mut().right_edge().descend();
-
- (right.len(),
- right.keys_mut().as_mut_ptr(),
- right.vals_mut().as_mut_ptr(),
- match right.force() {
- ForceResult::Leaf(_) => None,
- ForceResult::Internal(mut i) => Some(i.as_internal_mut().edges.as_mut_ptr()),
- })
- };
-
- // Get raw pointers to parent's key and value.
- let (parent_k, parent_v) = {
- let kv = self.reborrow_mut().into_kv_mut();
- (kv.0 as *mut K, kv.1 as *mut V)
- };
+ let mut left_node = ptr::read(self).left_edge().descend();
+ let left_len = left_node.len();
+ let mut right_node = ptr::read(self).right_edge().descend();
+ let right_len = right_node.len();
// Make sure that we may steal safely.
- debug_assert!(right_len + n <= CAPACITY);
- debug_assert!(left_len >= n);
-
- // Make room for stolen elements in right child.
- ptr::copy(right_k,
- right_k.offset(n as isize),
- right_len);
- ptr::copy(right_v,
- right_v.offset(n as isize),
- right_len);
- if let Some(edges) = right_e {
- ptr::copy(edges,
- edges.offset(n as isize),
- right_len+1);
+ debug_assert!(right_len + count <= CAPACITY);
+ debug_assert!(left_len >= count);
+
+ let new_left_len = left_len - count;
+
+ // Move data.
+ {
+ let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
+ let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
+ let parent_kv = {
+ let kv = self.reborrow_mut().into_kv_mut();
+ (kv.0 as *mut K, kv.1 as *mut V)
+ };
+
+ // Make room for stolen elements in the right child.
+ ptr::copy(right_kv.0,
+ right_kv.0.offset(count as isize),
+ right_len);
+ ptr::copy(right_kv.1,
+ right_kv.1.offset(count as isize),
+ right_len);
+
+ // Move elements from the left child to the right one.
+ move_kv(left_kv, new_left_len + 1, right_kv, 0, count - 1);
+
+ // Move parent's key/value pair to the right child.
+ move_kv(parent_kv, 0, right_kv, count - 1, 1);
+
+ // Move the left-most stolen pair to the parent.
+ move_kv(left_kv, new_left_len, parent_kv, 0, 1);
}
- // Move elements from the left child to the right one.
- let left_ind = (left_len - n) as isize;
- ptr::copy_nonoverlapping(left_k.offset(left_ind + 1),
- right_k,
- n - 1);
- ptr::copy_nonoverlapping(left_v.offset(left_ind + 1),
- right_v,
- n - 1);
- match (left_e, right_e) {
- (Some(left), Some(right)) => {
- ptr::copy_nonoverlapping(left.offset(left_ind + 1),
- right,
- n);
+ left_node.reborrow_mut().as_leaf_mut().len -= count as u16;
+ right_node.reborrow_mut().as_leaf_mut().len += count as u16;
+
+ match (left_node.force(), right_node.force()) {
+ (ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
+ // Make room for stolen edges.
+ let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
+ ptr::copy(right_edges,
+ right_edges.offset(count as isize),
+ right_len + 1);
+ right.correct_childrens_parent_links(count, count + right_len + 1);
+
+ move_edges(left, new_left_len + 1, right, 0, count);
},
- (Some(_), None) => unreachable!(),
- (None, Some(_)) => unreachable!(),
- (None, None) => {},
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
+ _ => { unreachable!(); }
}
+ }
+ }
- // Copy parent key/value pair to right child.
- ptr::copy_nonoverlapping(parent_k,
- right_k.offset(n as isize - 1),
- 1);
- ptr::copy_nonoverlapping(parent_v,
- right_v.offset(n as isize - 1),
- 1);
- // Copy left-most stolen pair to parent.
- ptr::copy_nonoverlapping(left_k.offset(left_ind),
- parent_k,
- 1);
- ptr::copy_nonoverlapping(left_v.offset(left_ind),
- parent_v,
- 1);
-
- // Fix lengths of left and right child and parent pointers in children of the right
- // child.
- self.reborrow_mut().left_edge().descend().as_leaf_mut().len -= n as u16;
- let mut right = self.reborrow_mut().right_edge().descend();
- right.as_leaf_mut().len += n as u16;
- if let ForceResult::Internal(mut node) = right.force() {
- for i in 0..(right_len+n+1) {
- Handle::new_edge(node.reborrow_mut(), i as usize).correct_parent_link();
- }
+ /// The symmetric clone of `bulk_steal_left`.
+ pub fn bulk_steal_right(&mut self, count: usize) {
+ unsafe {
+ let mut left_node = ptr::read(self).left_edge().descend();
+ let left_len = left_node.len();
+ let mut right_node = ptr::read(self).right_edge().descend();
+ let right_len = right_node.len();
+
+ // Make sure that we may steal safely.
+ debug_assert!(left_len + count <= CAPACITY);
+ debug_assert!(right_len >= count);
+
+ let new_right_len = right_len - count;
+
+ // Move data.
+ {
+ let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
+ let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
+ let parent_kv = {
+ let kv = self.reborrow_mut().into_kv_mut();
+ (kv.0 as *mut K, kv.1 as *mut V)
+ };
+
+ // Move parent's key/value pair to the left child.
+ move_kv(parent_kv, 0, left_kv, left_len, 1);
+
+ // Move elements from the right child to the left one.
+ move_kv(right_kv, 0, left_kv, left_len + 1, count - 1);
+
+ // Move the right-most stolen pair to the parent.
+ move_kv(right_kv, count - 1, parent_kv, 0, 1);
+
+ // Fix right indexing
+ ptr::copy(right_kv.0.offset(count as isize),
+ right_kv.0,
+ new_right_len);
+ ptr::copy(right_kv.1.offset(count as isize),
+ right_kv.1,
+ new_right_len);
+ }
+
+ left_node.reborrow_mut().as_leaf_mut().len += count as u16;
+ right_node.reborrow_mut().as_leaf_mut().len -= count as u16;
+
+ match (left_node.force(), right_node.force()) {
+ (ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
+ move_edges(right.reborrow_mut(), 0, left, left_len + 1, count);
+
+ // Fix right indexing.
+ let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
+ ptr::copy(right_edges.offset(count as isize),
+ right_edges,
+ new_right_len + 1);
+ right.correct_childrens_parent_links(0, new_right_len + 1);
+ },
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
+ _ => { unreachable!(); }
}
}
}
}
+unsafe fn move_kv<K, V>(
+ source: (*mut K, *mut V), source_offset: usize,
+ dest: (*mut K, *mut V), dest_offset: usize,
+ count: usize)
+{
+ ptr::copy_nonoverlapping(source.0.offset(source_offset as isize),
+ dest.0.offset(dest_offset as isize),
+ count);
+ ptr::copy_nonoverlapping(source.1.offset(source_offset as isize),
+ dest.1.offset(dest_offset as isize),
+ count);
+}
+
+// Source and destination must have the same height.
+unsafe fn move_edges<K, V>(
+ mut source: NodeRef<marker::Mut, K, V, marker::Internal>, source_offset: usize,
+ mut dest: NodeRef<marker::Mut, K, V, marker::Internal>, dest_offset: usize,
+ count: usize)
+{
+ let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
+ let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
+ ptr::copy_nonoverlapping(source_ptr.offset(source_offset as isize),
+ dest_ptr.offset(dest_offset as isize),
+ count);
+ dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
+}
+
impl<BorrowType, K, V, HandleType>
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, HandleType> {
}
}
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ /// Move the suffix after `self` from one node to another one. `right` must be empty.
+ /// The first edge of `right` remains unchanged.
+ pub fn move_suffix(&mut self,
+ right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>) {
+ unsafe {
+ let left_new_len = self.idx;
+ let mut left_node = self.reborrow_mut().into_node();
+
+ let right_new_len = left_node.len() - left_new_len;
+ let mut right_node = right.reborrow_mut();
+
+ debug_assert!(right_node.len() == 0);
+ debug_assert!(left_node.height == right_node.height);
+
+ let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
+ let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
+
+
+ move_kv(left_kv, left_new_len, right_kv, 0, right_new_len);
+
+ left_node.reborrow_mut().as_leaf_mut().len = left_new_len as u16;
+ right_node.reborrow_mut().as_leaf_mut().len = right_new_len as u16;
+
+ match (left_node.force(), right_node.force()) {
+ (ForceResult::Internal(left), ForceResult::Internal(right)) => {
+ move_edges(left, left_new_len + 1, right, 1, right_new_len);
+ },
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { }
+ _ => { unreachable!(); }
+ }
+ }
+ }
+}
+
pub enum ForceResult<Leaf, Internal> {
Leaf(Leaf),
Internal(Internal)
pub fn append(&mut self, other: &mut Self) {
self.map.append(&mut other.map);
}
+
+ /// Splits the collection into two at the given key. Returns everything after the given key,
+ /// including the key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(btree_split_off)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ /// a.insert(17, "d");
+ /// a.insert(41, "e");
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ ///
+ /// assert_eq!(b[&3], "c");
+ /// assert_eq!(b[&17], "d");
+ /// assert_eq!(b[&41], "e");
+ /// ```
+ #[unstable(feature = "btree_split_off",
+ reason = "recently added as part of collections reform 2",
+ issue = "19986")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self where T: Borrow<Q> {
+ BTreeSet { map: self.map.split_off(key) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
}
}
-/// Deprecated, renamed to EncodeUtf16
-#[unstable(feature = "str_utf16", issue = "27714")]
-#[rustc_deprecated(since = "1.8.0", reason = "renamed to EncodeUtf16")]
-pub type Utf16Units<'a> = EncodeUtf16<'a>;
-
/// External iterator for a string's UTF-16 code units.
///
/// For use with the `std::iter` module.
core_str::StrExt::slice_mut_unchecked(self, begin, end)
}
- /// Given a byte position, returns the next `char` and its index.
- ///
- /// # Panics
- ///
- /// If `i` is greater than or equal to the length of the string.
- /// If `i` is not the index of the beginning of a valid UTF-8 sequence.
- ///
- /// # Examples
- ///
- /// This example manually iterates through the code points of a string;
- /// this should normally be
- /// done by `.chars()` or `.char_indices()`.
- ///
- /// ```
- /// #![feature(str_char)]
- /// #![allow(deprecated)]
- ///
- /// use std::str::CharRange;
- ///
- /// let s = "中华Việt Nam";
- /// let mut i = 0;
- /// while i < s.len() {
- /// let CharRange {ch, next} = s.char_range_at(i);
- /// println!("{}: {}", i, ch);
- /// i = next;
- /// }
- /// ```
- ///
- /// This outputs:
- ///
- /// ```text
- /// 0: 中
- /// 3: 华
- /// 6: V
- /// 7: i
- /// 8: e
- /// 9:
- /// 11:
- /// 13: t
- /// 14:
- /// 15: N
- /// 16: a
- /// 17: m
- /// ```
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at() or eventually \
- removed altogether",
- issue = "27754")]
- #[inline]
- #[rustc_deprecated(reason = "use slicing plus chars() plus len_utf8",
- since = "1.9.0")]
- #[allow(deprecated)]
- pub fn char_range_at(&self, start: usize) -> CharRange {
- core_str::StrExt::char_range_at(self, start)
- }
-
- /// Given a byte position, returns the previous `char` and its position.
- ///
- /// Note that Unicode has many features, such as combining marks, ligatures,
- /// and direction marks, that need to be taken into account to correctly reverse a string.
- ///
- /// Returns 0 for next index if called on start index 0.
- ///
- /// # Panics
- ///
- /// If `i` is greater than the length of the string.
- /// If `i` is not an index following a valid UTF-8 sequence.
- ///
- /// # Examples
- ///
- /// This example manually iterates through the code points of a string;
- /// this should normally be
- /// done by `.chars().rev()` or `.char_indices()`.
- ///
- /// ```
- /// #![feature(str_char)]
- /// #![allow(deprecated)]
- ///
- /// use std::str::CharRange;
- ///
- /// let s = "中华Việt Nam";
- /// let mut i = s.len();
- /// while i > 0 {
- /// let CharRange {ch, next} = s.char_range_at_reverse(i);
- /// println!("{}: {}", i, ch);
- /// i = next;
- /// }
- /// ```
- ///
- /// This outputs:
- ///
- /// ```text
- /// 18: m
- /// 17: a
- /// 16: N
- /// 15:
- /// 14: t
- /// 13:
- /// 11:
- /// 9: e
- /// 8: i
- /// 7: V
- /// 6: 华
- /// 3: 中
- /// ```
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at_reverse() or \
- eventually removed altogether",
- issue = "27754")]
- #[inline]
- #[rustc_deprecated(reason = "use slicing plus chars().rev() plus len_utf8",
- since = "1.9.0")]
- #[allow(deprecated)]
- pub fn char_range_at_reverse(&self, start: usize) -> CharRange {
- core_str::StrExt::char_range_at_reverse(self, start)
- }
-
- /// Given a byte position, returns the `char` at that position.
- ///
- /// # Panics
- ///
- /// If `i` is greater than or equal to the length of the string.
- /// If `i` is not the index of the beginning of a valid UTF-8 sequence.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(str_char)]
- /// #![allow(deprecated)]
- ///
- /// let s = "abπc";
- /// assert_eq!(s.char_at(1), 'b');
- /// assert_eq!(s.char_at(2), 'π');
- /// assert_eq!(s.char_at(4), 'c');
- /// ```
- #[unstable(feature = "str_char",
- reason = "frequently replaced by the chars() iterator, this \
- method may be removed or possibly renamed in the \
- future; it is normally replaced by chars/char_indices \
- iterators or by getting the first char from a \
- subslice",
- issue = "27754")]
- #[inline]
- #[allow(deprecated)]
- #[rustc_deprecated(reason = "use slicing plus chars()",
- since = "1.9.0")]
- pub fn char_at(&self, i: usize) -> char {
- core_str::StrExt::char_at(self, i)
- }
-
- /// Given a byte position, returns the `char` at that position, counting
- /// from the end.
- ///
- /// # Panics
- ///
- /// If `i` is greater than the length of the string.
- /// If `i` is not an index following a valid UTF-8 sequence.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(str_char)]
- /// #![allow(deprecated)]
- ///
- /// let s = "abπc";
- /// assert_eq!(s.char_at_reverse(1), 'a');
- /// assert_eq!(s.char_at_reverse(2), 'b');
- /// assert_eq!(s.char_at_reverse(3), 'π');
- /// ```
- #[unstable(feature = "str_char",
- reason = "see char_at for more details, but reverse semantics \
- are also somewhat unclear, especially with which \
- cases generate panics",
- issue = "27754")]
- #[inline]
- #[rustc_deprecated(reason = "use slicing plus chars().rev()",
- since = "1.9.0")]
- #[allow(deprecated)]
- pub fn char_at_reverse(&self, i: usize) -> char {
- core_str::StrExt::char_at_reverse(self, i)
- }
-
- /// Retrieves the first `char` from a `&str` and returns it.
- ///
- /// Note that a single Unicode character (grapheme cluster)
- /// can be composed of multiple `char`s.
- ///
- /// This does not allocate a new string; instead, it returns a slice that
- /// points one code point beyond the code point that was shifted.
- ///
- /// `None` is returned if the slice is empty.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(str_char)]
- /// #![allow(deprecated)]
- ///
- /// let s = "Łódź"; // \u{141}o\u{301}dz\u{301}
- /// let (c, s1) = s.slice_shift_char().unwrap();
- ///
- /// assert_eq!(c, 'Ł');
- /// assert_eq!(s1, "ódź");
- ///
- /// let (c, s2) = s1.slice_shift_char().unwrap();
- ///
- /// assert_eq!(c, 'o');
- /// assert_eq!(s2, "\u{301}dz\u{301}");
- /// ```
- #[unstable(feature = "str_char",
- reason = "awaiting conventions about shifting and slices and \
- may not be warranted with the existence of the chars \
- and/or char_indices iterators",
- issue = "27754")]
- #[inline]
- #[rustc_deprecated(reason = "use chars() plus Chars::as_str",
- since = "1.9.0")]
- #[allow(deprecated)]
- pub fn slice_shift_char(&self) -> Option<(char, &str)> {
- core_str::StrExt::slice_shift_char(self)
- }
-
/// Divide one string slice into two at an index.
///
/// The argument, `mid`, should be a byte offset from the start of the
core_str::StrExt::lines_any(self)
}
- /// Returns an iterator of `u16` over the string encoded as UTF-16.
- #[unstable(feature = "str_utf16",
- reason = "this functionality may only be provided by libunicode",
- issue = "27714")]
- #[rustc_deprecated(since = "1.8.0", reason = "renamed to encode_utf16")]
- #[allow(deprecated)]
- pub fn utf16_units(&self) -> Utf16Units {
- Utf16Units { encoder: Utf16Encoder::new(self[..].chars()) }
- }
-
/// Returns an iterator of `u16` over the string encoded as UTF-16.
#[stable(feature = "encode_utf16", since = "1.8.0")]
pub fn encode_utf16(&self) -> EncodeUtf16 {
use std::collections::btree_map::Entry::{Occupied, Vacant};
use std::rc::Rc;
+use std::iter::FromIterator;
+use super::DeterministicRng;
+
#[test]
fn test_basic_large() {
let mut map = BTreeMap::new();
create_append_test!(test_append_239, 239);
create_append_test!(test_append_1700, 1700);
+fn rand_data(len: usize) -> Vec<(u32, u32)> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter(
+ (0..len).map(|_| (rng.next(), rng.next()))
+ )
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&(data.iter().max().unwrap().0 + 1));
+
+ data.sort();
+ assert!(map.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&data.iter().min().unwrap().0);
+
+ data.sort();
+ assert!(map.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ let mut data = rand_data(1529);
+ // special case with maximum height.
+ data.sort();
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let key = data[data.len() / 2].0;
+ let right = map.split_off(&key);
+
+ assert!(map.into_iter().eq(data.clone().into_iter().filter(|x| x.0 < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| x.0 >= key)));
+}
+
mod bench {
use std::collections::BTreeMap;
use std::__rand::{Rng, thread_rng};
mod map;
mod set;
+
+/// XorShiftRng
+struct DeterministicRng {
+ x: u32,
+ y: u32,
+ z: u32,
+ w: u32,
+}
+
+impl DeterministicRng {
+ fn new() -> Self {
+ DeterministicRng {
+ x: 0x193a6754,
+ y: 0xa8a7d469,
+ z: 0x97830e05,
+ w: 0x113ba7bb
+ }
+ }
+
+ fn next(&mut self) -> u32 {
+ let x = self.x;
+ let t = x ^ (x << 11);
+ self.x = self.y;
+ self.y = self.z;
+ self.z = self.w;
+ let w_ = self.w;
+ self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
+ self.w
+ }
+}
use std::collections::BTreeSet;
+use std::iter::FromIterator;
+use super::DeterministicRng;
+
#[test]
fn test_clone_eq() {
let mut m = BTreeSet::new();
assert_eq!(a.contains(&4), true);
assert_eq!(a.contains(&5), true);
}
+
+fn rand_data(len: usize) -> Vec<u32> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter(
+ (0..len).map(|_| rng.next())
+ )
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(&(data.iter().max().unwrap() + 1));
+
+ data.sort();
+ assert!(set.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(data.iter().min().unwrap());
+
+ data.sort();
+ assert!(set.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ let mut data = rand_data(1529);
+ // special case with maximum height.
+ data.sort();
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let key = data[data.len() / 2];
+ let right = set.split_off(&key);
+
+ assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key)));
+}
#![feature(binary_heap_append)]
#![feature(box_syntax)]
#![feature(btree_append)]
+#![feature(btree_split_off)]
#![feature(btree_range)]
#![feature(collections)]
#![feature(collections_bound)]
#![feature(pattern)]
#![feature(rand)]
#![feature(step_by)]
-#![feature(str_char)]
#![feature(str_escape)]
#![feature(test)]
#![feature(unboxed_closures)]
assert!(!" _ ".chars().all(|c| c.is_whitespace()));
}
-#[test]
-#[allow(deprecated)]
-fn test_slice_shift_char() {
- let data = "ประเทศไทย中";
- assert_eq!(data.slice_shift_char(), Some(('ป', "ระเทศไทย中")));
-}
-
-#[test]
-#[allow(deprecated)]
-fn test_slice_shift_char_2() {
- let empty = "";
- assert_eq!(empty.slice_shift_char(), None);
-}
-
#[test]
fn test_is_utf8() {
// deny overlong encodings
assert!(!"".contains('a'));
}
-#[test]
-#[allow(deprecated)]
-fn test_char_at() {
- let s = "ศไทย中华Việt Nam";
- let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
- let mut pos = 0;
- for ch in &v {
- assert!(s.char_at(pos) == *ch);
- pos += ch.to_string().len();
- }
-}
-
-#[test]
-#[allow(deprecated)]
-fn test_char_at_reverse() {
- let s = "ศไทย中华Việt Nam";
- let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
- let mut pos = s.len();
- for ch in v.iter().rev() {
- assert!(s.char_at_reverse(pos) == *ch);
- pos -= ch.to_string().len();
- }
-}
-
#[test]
fn test_split_at() {
let s = "ศไทย中华Việt Nam";
assert_eq!("22".cmp("1234"), Greater);
}
-#[test]
-#[allow(deprecated)]
-fn test_char_range_at() {
- let data = "b¢€𤭢𤭢€¢b";
- assert_eq!('b', data.char_range_at(0).ch);
- assert_eq!('¢', data.char_range_at(1).ch);
- assert_eq!('€', data.char_range_at(3).ch);
- assert_eq!('𤭢', data.char_range_at(6).ch);
- assert_eq!('𤭢', data.char_range_at(10).ch);
- assert_eq!('€', data.char_range_at(14).ch);
- assert_eq!('¢', data.char_range_at(17).ch);
- assert_eq!('b', data.char_range_at(19).ch);
-}
-
-#[test]
-#[allow(deprecated)]
-fn test_char_range_at_reverse_underflow() {
- assert_eq!("abc".char_range_at_reverse(0).next, 0);
-}
-
#[test]
fn test_iterator() {
let s = "ศไทย中华Việt Nam";
borrow: orig.borrow,
}
}
-
- /// Make a new `Ref` for an optional component of the borrowed data, e.g. an
- /// enum variant.
- ///
- /// The `RefCell` is already immutably borrowed, so this cannot fail.
- ///
- /// This is an associated function that needs to be used as
- /// `Ref::filter_map(...)`. A method would interfere with methods of the
- /// same name on the contents of a `RefCell` used through `Deref`.
- ///
- /// # Example
- ///
- /// ```
- /// # #![feature(cell_extras)]
- /// use std::cell::{RefCell, Ref};
- ///
- /// let c = RefCell::new(Ok(5));
- /// let b1: Ref<Result<u32, ()>> = c.borrow();
- /// let b2: Ref<u32> = Ref::filter_map(b1, |o| o.as_ref().ok()).unwrap();
- /// assert_eq!(*b2, 5)
- /// ```
- #[unstable(feature = "cell_extras", reason = "recently added",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0", reason = "can be built on `Ref::map`: \
- https://crates.io/crates/ref_filter_map")]
- #[inline]
- pub fn filter_map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Option<Ref<'b, U>>
- where F: FnOnce(&T) -> Option<&U>
- {
- f(orig.value).map(move |new| Ref {
- value: new,
- borrow: orig.borrow,
- })
- }
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
borrow: orig.borrow,
}
}
-
- /// Make a new `RefMut` for an optional component of the borrowed data, e.g.
- /// an enum variant.
- ///
- /// The `RefCell` is already mutably borrowed, so this cannot fail.
- ///
- /// This is an associated function that needs to be used as
- /// `RefMut::filter_map(...)`. A method would interfere with methods of the
- /// same name on the contents of a `RefCell` used through `Deref`.
- ///
- /// # Example
- ///
- /// ```
- /// # #![feature(cell_extras)]
- /// use std::cell::{RefCell, RefMut};
- ///
- /// let c = RefCell::new(Ok(5));
- /// {
- /// let b1: RefMut<Result<u32, ()>> = c.borrow_mut();
- /// let mut b2: RefMut<u32> = RefMut::filter_map(b1, |o| {
- /// o.as_mut().ok()
- /// }).unwrap();
- /// assert_eq!(*b2, 5);
- /// *b2 = 42;
- /// }
- /// assert_eq!(*c.borrow(), Ok(42));
- /// ```
- #[unstable(feature = "cell_extras", reason = "recently added",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0", reason = "can be built on `RefMut::map`: \
- https://crates.io/crates/ref_filter_map")]
- #[inline]
- pub fn filter_map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> Option<RefMut<'b, U>>
- where F: FnOnce(&mut T) -> Option<&mut U>
- {
- let RefMut { value, borrow } = orig;
- f(value).map(move |new| RefMut {
- value: new,
- borrow: borrow,
- })
- }
}
struct BorrowRefMut<'b> {
Sub<Output=Self> + Copy {
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
+ fn to_u16(&self) -> u16;
fn to_u32(&self) -> u32;
fn to_u64(&self) -> u64;
}
($($t:ident)*) => ($(impl Int for $t {
fn from_u8(u: u8) -> $t { u as $t }
fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
})*)
impl_Display!(i8, u8, i16, u16, i32, u32: to_u32);
impl_Display!(i64, u64: to_u64);
+#[cfg(target_pointer_width = "16")]
+impl_Display!(isize, usize: to_u16);
#[cfg(target_pointer_width = "32")]
impl_Display!(isize, usize: to_u32);
#[cfg(target_pointer_width = "64")]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
+macro_rules! repeat_u8_as_u16 {
+ ($name:expr) => { (($name as u16) << 8 |
+ ($name as u16)) }
+}
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
+pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8);
+#[unstable(feature = "filling_drop", issue = "5016")]
+#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
+#[cfg(target_pointer_width = "16")]
+#[unstable(feature = "filling_drop", issue = "5016")]
+#[allow(missing_docs)]
+pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize;
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(target_pointer_width = "16")]
+int_module! { isize, 16 }
#[cfg(target_pointer_width = "32")]
int_module! { isize, 32 }
#[cfg(target_pointer_width = "64")]
intrinsics::mul_with_overflow }
}
+#[cfg(target_pointer_width = "16")]
+#[lang = "isize"]
+impl isize {
+ int_impl! { i16, u16, 16,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
impl isize {
intrinsics::mul_with_overflow }
}
+#[cfg(target_pointer_width = "16")]
+#[lang = "usize"]
+impl usize {
+ uint_impl! { u16, 16,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
impl usize {
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(target_pointer_width = "16")]
+uint_module! { usize, 16 }
#[cfg(target_pointer_width = "32")]
uint_module! { usize, 32 }
#[cfg(target_pointer_width = "64")]
mod shift_max {
#![allow(non_upper_case_globals)]
+ #[cfg(target_pointer_width = "16")]
+ mod platform {
+ pub const usize: u32 = super::u16;
+ pub const isize: u32 = super::i16;
+ }
+
#[cfg(target_pointer_width = "32")]
mod platform {
pub const usize: u32 = super::u32;
//!
//! Their definition should always match the ABI defined in `rustc::back::abi`.
-use clone::Clone;
-use marker::Copy;
-use mem;
-
-/// The representation of a slice like `&[T]`.
-///
-/// This struct is guaranteed to have the layout of types like `&[T]`,
-/// `&str`, and `Box<[T]>`, but is not the type of such slices
-/// (e.g. the fields are not directly accessible on a `&[T]`) nor does
-/// it control that layout (changing the definition will not change
-/// the layout of a `&[T]`). It is only designed to be used by unsafe
-/// code that needs to manipulate the low-level details.
-///
-/// However, it is not recommended to use this type for such code,
-/// since there are alternatives which may be safer:
-///
-/// - Creating a slice from a data pointer and length can be done with
-/// `std::slice::from_raw_parts` or `std::slice::from_raw_parts_mut`
-/// instead of `std::mem::transmute`ing a value of type `Slice`.
-/// - Extracting the data pointer and length from a slice can be
-/// performed with the `as_ptr` (or `as_mut_ptr`) and `len`
-/// methods.
-///
-/// If one does decide to convert a slice value to a `Slice`, the
-/// `Repr` trait in this module provides a method for a safe
-/// conversion from `&[T]` (and `&str`) to a `Slice`, more type-safe
-/// than a call to `transmute`.
-///
-/// # Examples
-///
-/// ```
-/// #![feature(raw)]
-///
-/// use std::raw::{self, Repr};
-///
-/// let slice: &[u16] = &[1, 2, 3, 4];
-///
-/// let repr: raw::Slice<u16> = slice.repr();
-/// println!("data pointer = {:?}, length = {}", repr.data, repr.len);
-/// ```
-#[repr(C)]
-#[allow(missing_debug_implementations)]
-#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
- since = "1.9.0")]
-#[unstable(feature = "raw", issue = "27751")]
-pub struct Slice<T> {
- pub data: *const T,
- pub len: usize,
-}
-
-#[allow(deprecated)]
-impl<T> Copy for Slice<T> {}
-#[allow(deprecated)]
-impl<T> Clone for Slice<T> {
- fn clone(&self) -> Slice<T> { *self }
-}
-
/// The representation of a trait object like `&SomeTrait`.
///
/// This struct has the same layout as types like `&SomeTrait` and
pub data: *mut (),
pub vtable: *mut (),
}
-
-/// This trait is meant to map equivalences between raw structs and their
-/// corresponding rust values.
-#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
- since = "1.9.0")]
-#[unstable(feature = "raw", issue = "27751")]
-pub unsafe trait Repr<T> {
- /// This function "unwraps" a rust value (without consuming it) into its raw
- /// struct representation. This can be used to read/write different values
- /// for the struct. This is a safe method because by default it does not
- /// enable write-access to the fields of the return value in safe code.
- #[inline]
- fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
-}
-
-#[allow(deprecated)]
-unsafe impl<T> Repr<Slice<T>> for [T] {}
-#[allow(deprecated)]
-unsafe impl Repr<Slice<u8>> for str {}
use self::pattern::Pattern;
use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
-use char::{self, CharExt};
+use char;
use clone::Clone;
use convert::AsRef;
use default::Default;
where P::Searcher: ReverseSearcher<'a>;
#[stable(feature = "is_char_boundary", since = "1.9.0")]
fn is_char_boundary(&self, index: usize) -> bool;
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at() or eventually \
- removed altogether",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars() plus len_utf8",
- since = "1.9.0")]
- fn char_range_at(&self, start: usize) -> CharRange;
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at_reverse() or \
- eventually removed altogether",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars().rev() plus len_utf8",
- since = "1.9.0")]
- fn char_range_at_reverse(&self, start: usize) -> CharRange;
- #[unstable(feature = "str_char",
- reason = "frequently replaced by the chars() iterator, this \
- method may be removed or possibly renamed in the \
- future; it is normally replaced by chars/char_indices \
- iterators or by getting the first char from a \
- subslice",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars()",
- since = "1.9.0")]
- fn char_at(&self, i: usize) -> char;
- #[unstable(feature = "str_char",
- reason = "see char_at for more details, but reverse semantics \
- are also somewhat unclear, especially with which \
- cases generate panics",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars().rev()",
- since = "1.9.0")]
- fn char_at_reverse(&self, i: usize) -> char;
#[stable(feature = "core", since = "1.6.0")]
fn as_bytes(&self) -> &[u8];
#[stable(feature = "core", since = "1.6.0")]
fn split_at(&self, mid: usize) -> (&str, &str);
#[stable(feature = "core", since = "1.6.0")]
fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str);
- #[unstable(feature = "str_char",
- reason = "awaiting conventions about shifting and slices and \
- may not be warranted with the existence of the chars \
- and/or char_indices iterators",
- issue = "27754")]
- #[rustc_deprecated(reason = "use chars() plus Chars::as_str",
- since = "1.9.0")]
- fn slice_shift_char(&self) -> Option<(char, &str)>;
#[stable(feature = "core", since = "1.6.0")]
fn as_ptr(&self) -> *const u8;
#[stable(feature = "core", since = "1.6.0")]
}
}
- #[inline]
- fn char_range_at(&self, i: usize) -> CharRange {
- let (c, n) = char_range_at_raw(self.as_bytes(), i);
- CharRange { ch: unsafe { char::from_u32_unchecked(c) }, next: n }
- }
-
- #[inline]
- fn char_range_at_reverse(&self, start: usize) -> CharRange {
- let mut prev = start;
-
- prev = prev.saturating_sub(1);
- if self.as_bytes()[prev] < 128 {
- return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
- }
-
- // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
- fn multibyte_char_range_at_reverse(s: &str, mut i: usize) -> CharRange {
- // while there is a previous byte == 10......
- while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
- i -= 1;
- }
-
- let first= s.as_bytes()[i];
- let w = UTF8_CHAR_WIDTH[first as usize];
- assert!(w != 0);
-
- let mut val = utf8_first_byte(first, w as u32);
- val = utf8_acc_cont_byte(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 3]); }
-
- CharRange {ch: unsafe { char::from_u32_unchecked(val) }, next: i}
- }
-
- multibyte_char_range_at_reverse(self, prev)
- }
-
- #[inline]
- #[allow(deprecated)]
- fn char_at(&self, i: usize) -> char {
- self.char_range_at(i).ch
- }
-
- #[inline]
- #[allow(deprecated)]
- fn char_at_reverse(&self, i: usize) -> char {
- self.char_range_at_reverse(i).ch
- }
-
#[inline]
fn as_bytes(&self) -> &[u8] {
unsafe { mem::transmute(self) }
}
}
- #[inline]
- #[allow(deprecated)]
- fn slice_shift_char(&self) -> Option<(char, &str)> {
- if self.is_empty() {
- None
- } else {
- let ch = self.char_at(0);
- let next_s = unsafe { self.slice_unchecked(ch.len_utf8(), self.len()) };
- Some((ch, next_s))
- }
- }
-
#[inline]
fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
}
-/// Pluck a code point out of a UTF-8-like byte slice and return the
-/// index of the next code point.
-#[inline]
-fn char_range_at_raw(bytes: &[u8], i: usize) -> (u32, usize) {
- if bytes[i] < 128 {
- return (bytes[i] as u32, i + 1);
- }
-
- // Multibyte case is a fn to allow char_range_at to inline cleanly
- fn multibyte_char_range_at(bytes: &[u8], i: usize) -> (u32, usize) {
- let first = bytes[i];
- let w = UTF8_CHAR_WIDTH[first as usize];
- assert!(w != 0);
-
- let mut val = utf8_first_byte(first, w as u32);
- val = utf8_acc_cont_byte(val, bytes[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte(val, bytes[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte(val, bytes[i + 3]); }
-
- (val, i + w as usize)
- }
-
- multibyte_char_range_at(bytes, i)
-}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Default for &'a str {
fn default() -> &'a str { "" }
assert_eq!(*d, 7);
}
-#[test]
-#[allow(deprecated)]
-fn ref_filter_map_accessor() {
- struct X(RefCell<Result<u32, ()>>);
- impl X {
- fn accessor(&self) -> Option<Ref<u32>> {
- Ref::filter_map(self.0.borrow(), |r| r.as_ref().ok())
- }
- }
- let x = X(RefCell::new(Ok(7)));
- let d: Ref<u32> = x.accessor().unwrap();
- assert_eq!(*d, 7);
-}
-
#[test]
fn ref_mut_map_accessor() {
struct X(RefCell<(u32, char)>);
assert_eq!(*x.0.borrow(), (8, 'z'));
}
-#[test]
-#[allow(deprecated)]
-fn ref_mut_filter_map_accessor() {
- struct X(RefCell<Result<u32, ()>>);
- impl X {
- fn accessor(&self) -> Option<RefMut<u32>> {
- RefMut::filter_map(self.0.borrow_mut(), |r| r.as_mut().ok())
- }
- }
- let x = X(RefCell::new(Ok(7)));
- {
- let mut d: RefMut<u32> = x.accessor().unwrap();
- assert_eq!(*d, 7);
- *d += 1;
- }
- assert_eq!(*x.0.borrow(), Ok(8));
-}
-
#[test]
fn as_unsafe_cell() {
let c1: Cell<usize> = Cell::new(0);
assert_eq!(size_of::<u64>(), 8);
}
+#[test]
+#[cfg(target_pointer_width = "16")]
+fn size_of_16() {
+ assert_eq!(size_of::<usize>(), 2);
+ assert_eq!(size_of::<*const usize>(), 2);
+}
+
#[test]
#[cfg(target_pointer_width = "32")]
fn size_of_32() {
assert_eq!(align_of::<u32>(), 4);
}
+#[test]
+#[cfg(target_pointer_width = "16")]
+fn align_of_16() {
+ assert_eq!(align_of::<usize>(), 2);
+ assert_eq!(align_of::<*const usize>(), 2);
+}
+
#[test]
#[cfg(target_pointer_width = "32")]
fn align_of_32() {
-Subproject commit b19b5465a1235be3323363cdc11838739b593029
+Subproject commit 45d85899e99d33e291b2bf3259881b46cc5365d7
use std::fmt::Debug;
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+macro_rules! try_opt {
+ ($e:expr) => (
+ match $e {
+ Some(r) => r,
+ None => return None,
+ }
+ )
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum DepNode<D: Clone + Debug> {
// The `D` type is "how definitions are identified".
// During compilation, it is always `DefId`, but when serializing
// which would yield an overly conservative dep-graph.
TraitItems(D),
ReprHints(D),
- TraitSelect(D),
+ TraitSelect(D, Vec<D>),
}
impl<D: Clone + Debug> DepNode<D> {
TraitImpls(ref d) => op(d).map(TraitImpls),
TraitItems(ref d) => op(d).map(TraitItems),
ReprHints(ref d) => op(d).map(ReprHints),
- TraitSelect(ref d) => op(d).map(TraitSelect),
+ TraitSelect(ref d, ref type_ds) => {
+ let d = try_opt!(op(d));
+ let type_ds = try_opt!(type_ds.iter().map(|d| op(d)).collect());
+ Some(TraitSelect(d, type_ds))
+ }
}
}
}
self.indices.contains_key(&node)
}
- pub fn nodes(&self) -> Vec<DepNode<D>> {
+ pub fn nodes(&self) -> Vec<&DepNode<D>> {
self.graph.all_nodes()
.iter()
- .map(|n| n.data.clone())
+ .map(|n| &n.data)
.collect()
}
- pub fn edges(&self) -> Vec<(DepNode<D>,DepNode<D>)> {
+ pub fn edges(&self) -> Vec<(&DepNode<D>,&DepNode<D>)> {
self.graph.all_edges()
.iter()
.map(|edge| (edge.source(), edge.target()))
- .map(|(s, t)| (self.graph.node_data(s).clone(),
- self.graph.node_data(t).clone()))
+ .map(|(s, t)| (self.graph.node_data(s),
+ self.graph.node_data(t)))
.collect()
}
- fn reachable_nodes(&self, node: DepNode<D>, direction: Direction) -> Vec<DepNode<D>> {
- if let Some(&index) = self.indices.get(&node) {
+ fn reachable_nodes(&self, node: &DepNode<D>, direction: Direction) -> Vec<&DepNode<D>> {
+ if let Some(&index) = self.indices.get(node) {
self.graph.depth_traverse(index, direction)
- .map(|s| self.graph.node_data(s).clone())
+ .map(|s| self.graph.node_data(s))
.collect()
} else {
vec![]
/// All nodes reachable from `node`. In other words, things that
/// will have to be recomputed if `node` changes.
- pub fn transitive_successors(&self, node: DepNode<D>) -> Vec<DepNode<D>> {
+ pub fn transitive_successors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
self.reachable_nodes(node, OUTGOING)
}
/// All nodes that can reach `node`.
- pub fn transitive_predecessors(&self, node: DepNode<D>) -> Vec<DepNode<D>> {
+ pub fn transitive_predecessors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
self.reachable_nodes(node, INCOMING)
}
/// Just the outgoing edges from `node`.
- pub fn immediate_successors(&self, node: DepNode<D>) -> Vec<DepNode<D>> {
+ pub fn immediate_successors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
if let Some(&index) = self.indices.get(&node) {
self.graph.successor_nodes(index)
- .map(|s| self.graph.node_data(s).clone())
+ .map(|s| self.graph.node_data(s))
.collect()
} else {
vec![]
pub struct DepTask<'graph> {
data: &'graph DepGraphThreadData,
- key: DepNode<DefId>,
+ key: Option<DepNode<DefId>>,
}
impl<'graph> DepTask<'graph> {
pub fn new(data: &'graph DepGraphThreadData, key: DepNode<DefId>)
-> DepTask<'graph> {
- data.enqueue(DepMessage::PushTask(key));
- DepTask { data: data, key: key }
+ data.enqueue(DepMessage::PushTask(key.clone()));
+ DepTask { data: data, key: Some(key) }
}
}
impl<'graph> Drop for DepTask<'graph> {
fn drop(&mut self) {
- self.data.enqueue(DepMessage::PopTask(self.key));
+ self.data.enqueue(DepMessage::PopTask(self.key.take().unwrap()));
}
}
fn visit_item(&mut self, i: &'tcx hir::Item) {
let item_def_id = self.tcx.map.local_def_id(i.id);
let task_id = (self.dep_node_fn)(item_def_id);
- let _task = self.tcx.dep_graph.in_task(task_id);
+ let _task = self.tcx.dep_graph.in_task(task_id.clone());
debug!("Started task {:?}", task_id);
self.tcx.dep_graph.read(DepNode::Hir(item_def_id));
self.visitor.visit_item(i);
E0490, // a value of type `..` is borrowed for too long
E0491, // in type `..`, reference has a longer lifetime than the data it...
E0495, // cannot infer an appropriate lifetime due to conflicting requirements
- E0525, // expected a closure that implements `..` but this closure only implements `..`
+ E0525 // expected a closure that implements `..` but this closure only implements `..`
}
fn visit_generics(&mut self, g: &'v Generics) {
walk_generics(self, g)
}
+ fn visit_where_predicate(&mut self, predicate: &'v WherePredicate) {
+ walk_where_predicate(self, predicate)
+ }
fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _: NodeId) {
walk_fn(self, fk, fd, b, s)
}
walk_list!(visitor, visit_ty, ¶m.default);
}
walk_list!(visitor, visit_lifetime_def, &generics.lifetimes);
- for predicate in &generics.where_clause.predicates {
- match predicate {
- &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
- ref bounds,
- ref bound_lifetimes,
- ..}) => {
- visitor.visit_ty(bounded_ty);
- walk_list!(visitor, visit_ty_param_bound, bounds);
- walk_list!(visitor, visit_lifetime_def, bound_lifetimes);
- }
- &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
- ref bounds,
- ..}) => {
- visitor.visit_lifetime(lifetime);
- walk_list!(visitor, visit_lifetime, bounds);
- }
- &WherePredicate::EqPredicate(WhereEqPredicate{id,
- ref path,
- ref ty,
- ..}) => {
- visitor.visit_path(path, id);
- visitor.visit_ty(ty);
- }
+ walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates);
+}
+
+pub fn walk_where_predicate<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ predicate: &'v WherePredicate)
+{
+ match predicate {
+ &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
+ ref bounds,
+ ref bound_lifetimes,
+ ..}) => {
+ visitor.visit_ty(bounded_ty);
+ walk_list!(visitor, visit_ty_param_bound, bounds);
+ walk_list!(visitor, visit_lifetime_def, bound_lifetimes);
+ }
+ &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
+ ref bounds,
+ ..}) => {
+ visitor.visit_lifetime(lifetime);
+ walk_list!(visitor, visit_lifetime, bounds);
+ }
+ &WherePredicate::EqPredicate(WhereEqPredicate{id,
+ ref path,
+ ref ty,
+ ..}) => {
+ visitor.visit_path(path, id);
+ visitor.visit_ty(ty);
}
}
}
use hir;
use hir::print as pprust;
+use lint;
use hir::def::Def;
use hir::def_id::DefId;
use infer::{self, TypeOrigin};
let (fn_decl, generics) = rebuilder.rebuild();
self.give_expl_lifetime_param(err, &fn_decl, unsafety, constness, name, &generics, span);
}
+
+ pub fn issue_32330_warnings(&self, span: Span, issue32330s: &[ty::Issue32330]) {
+ for issue32330 in issue32330s {
+ match *issue32330 {
+ ty::Issue32330::WontChange => { }
+ ty::Issue32330::WillChange { fn_def_id, region_name } => {
+ self.tcx.sess.add_lint(
+ lint::builtin::HR_LIFETIME_IN_ASSOC_TYPE,
+ ast::CRATE_NODE_ID,
+ span,
+ format!("lifetime parameter `{0}` declared on fn `{1}` \
+ appears only in the return type, \
+ but here is required to be higher-ranked, \
+ which means that `{0}` must appear in both \
+ argument and return types",
+ region_name,
+ self.tcx.item_path_str(fn_def_id)));
+ }
+ }
+ }
+ }
}
struct RebuildPathInfo<'a> {
ty::BrAnon(i) => {
anon_nums.insert(i);
}
- ty::BrNamed(_, name) => {
+ ty::BrNamed(_, name, _) => {
region_names.insert(name);
}
_ => ()
for sr in self.same_regions {
for br in &sr.regions {
match *br {
- ty::BrNamed(_, name) => {
+ ty::BrNamed(_, name, _) => {
all_region_names.insert(name);
}
_ => ()
span: codemap::DUMMY_SP,
name: name }
}
+
//! Helper routines for higher-ranked things. See the `doc` module at
//! the end of the file for details.
-use super::{CombinedSnapshot, InferCtxt, HigherRankedType, SkolemizationMap};
+use super::{CombinedSnapshot,
+ InferCtxt,
+ LateBoundRegion,
+ HigherRankedType,
+ SubregionOrigin,
+ SkolemizationMap};
use super::combine::CombineFields;
+use super::region_inference::{TaintDirections};
use ty::{self, TyCtxt, Binder, TypeFoldable};
use ty::error::TypeError;
use syntax::codemap::Span;
use util::nodemap::{FnvHashMap, FnvHashSet};
+pub struct HrMatchResult<U> {
+ pub value: U,
+
+ /// Normally, when we do a higher-ranked match operation, we
+ /// expect all higher-ranked regions to be constrained as part of
+ /// the match operation. However, in the transition period for
+ /// #32330, it can happen that we sometimes have unconstrained
+ /// regions that get instantiated with fresh variables. In that
+ /// case, we collect the set of unconstrained bound regions here
+ /// and replace them with fresh variables.
+ pub unconstrained_regions: Vec<ty::BoundRegion>,
+}
+
impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> {
pub fn higher_ranked_sub<T>(&self, a: &Binder<T>, b: &Binder<T>)
-> RelateResult<'tcx, Binder<T>>
// Start a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx.commit_if_ok(|snapshot| {
+ let span = self.trace.origin.span();
+
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_prime, _) =
self.infcx.replace_late_bound_regions_with_fresh_var(
- self.trace.origin.span(),
+ span,
HigherRankedType,
a);
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
- self.infcx.leak_check(!self.a_is_expected, &skol_map, snapshot)?;
+ self.infcx.leak_check(!self.a_is_expected, span, &skol_map, snapshot)?;
+
+ // We are finished with the skolemized regions now so pop
+ // them off.
+ self.infcx.pop_skolemized(skol_map, snapshot);
debug!("higher_ranked_sub: OK result={:?}", result);
});
}
+ /// The value consists of a pair `(t, u)` where `t` is the
+ /// *matcher* and `u` is a *value*. The idea is to find a
+ /// substitution `S` such that `S(t) == b`, and then return
+ /// `S(u)`. In other words, find values for the late-bound regions
+ /// in `a` that can make `t == b` and then replace the LBR in `u`
+ /// with those values.
+ ///
+ /// This routine is (as of this writing) used in trait matching,
+ /// particularly projection.
+ ///
+ /// NB. It should not happen that there are LBR appearing in `U`
+ /// that do not appear in `T`. If that happens, those regions are
+ /// unconstrained, and this routine replaces them with `'static`.
+ pub fn higher_ranked_match<T, U>(&self,
+ span: Span,
+ a_pair: &Binder<(T, U)>,
+ b_match: &T)
+ -> RelateResult<'tcx, HrMatchResult<U>>
+ where T: Relate<'tcx>,
+ U: TypeFoldable<'tcx>
+ {
+ debug!("higher_ranked_match(a={:?}, b={:?})",
+ a_pair, b_match);
+
+ // Start a snapshot so we can examine "all bindings that were
+ // created as part of this type comparison".
+ return self.infcx.commit_if_ok(|snapshot| {
+ // First, we instantiate each bound region in the matcher
+ // with a skolemized region.
+ let ((a_match, a_value), skol_map) =
+ self.infcx.skolemize_late_bound_regions(a_pair, snapshot);
+
+ debug!("higher_ranked_match: a_match={:?}", a_match);
+ debug!("higher_ranked_match: skol_map={:?}", skol_map);
+
+ // Equate types now that bound regions have been replaced.
+ try!(self.equate().relate(&a_match, &b_match));
+
+ // Map each skolemized region to a vector of other regions that it
+ // must be equated with. (Note that this vector may include other
+ // skolemized regions from `skol_map`.)
+ let skol_resolution_map: FnvHashMap<_, _> =
+ skol_map
+ .iter()
+ .map(|(&br, &skol)| {
+ let tainted_regions =
+ self.infcx.tainted_regions(snapshot,
+ skol,
+ TaintDirections::incoming()); // [1]
+
+ // [1] this routine executes after the skolemized
+ // regions have been *equated* with something
+ // else, so examining the incoming edges ought to
+ // be enough to collect all constraints
+
+ (skol, (br, tainted_regions))
+ })
+ .collect();
+
+ // For each skolemized region, pick a representative -- which can
+ // be any region from the sets above, except for other members of
+ // `skol_map`. There should always be a representative if things
+ // are properly well-formed.
+ let mut unconstrained_regions = vec![];
+ let skol_representatives: FnvHashMap<_, _> =
+ skol_resolution_map
+ .iter()
+ .map(|(&skol, &(br, ref regions))| {
+ let representative =
+ regions.iter()
+ .filter(|r| !skol_resolution_map.contains_key(r))
+ .cloned()
+ .next()
+ .unwrap_or_else(|| { // [1]
+ unconstrained_regions.push(br);
+ self.infcx.next_region_var(
+ LateBoundRegion(span, br, HigherRankedType))
+ });
+
+ // [1] There should always be a representative,
+ // unless the higher-ranked region did not appear
+ // in the values being matched. We should reject
+ // as ill-formed cases that can lead to this, but
+ // right now we sometimes issue warnings (see
+ // #32330).
+
+ (skol, representative)
+ })
+ .collect();
+
+ // Equate all the members of each skolemization set with the
+ // representative.
+ for (skol, &(_br, ref regions)) in &skol_resolution_map {
+ let representative = &skol_representatives[skol];
+ debug!("higher_ranked_match: \
+ skol={:?} representative={:?} regions={:?}",
+ skol, representative, regions);
+ for region in regions.iter()
+ .filter(|&r| !skol_resolution_map.contains_key(r))
+ .filter(|&r| r != representative)
+ {
+ let origin = SubregionOrigin::Subtype(self.trace.clone());
+ self.infcx.region_vars.make_eqregion(origin,
+ *representative,
+ *region);
+ }
+ }
+
+ // Replace the skolemized regions appearing in value with
+ // their representatives
+ let a_value =
+ fold_regions_in(
+ self.tcx(),
+ &a_value,
+ |r, _| skol_representatives.get(&r).cloned().unwrap_or(r));
+
+ debug!("higher_ranked_match: value={:?}", a_value);
+
+ // We are now done with these skolemized variables.
+ self.infcx.pop_skolemized(skol_map, snapshot);
+
+ Ok(HrMatchResult {
+ value: a_value,
+ unconstrained_regions: unconstrained_regions,
+ })
+ });
+ }
+
pub fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>)
-> RelateResult<'tcx, Binder<T>>
where T: Relate<'tcx>
return r0;
}
- let tainted = infcx.tainted_regions(snapshot, r0);
+ let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both());
// Variables created during LUB computation which are
// *related* to regions that pre-date the LUB computation
return r0;
}
- let tainted = infcx.tainted_regions(snapshot, r0);
+ let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both());
let mut a_r = None;
let mut b_r = None;
}
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
- fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
- self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
+ fn tainted_regions(&self,
+ snapshot: &CombinedSnapshot,
+ r: ty::Region,
+ directions: TaintDirections)
+ -> FnvHashSet<ty::Region> {
+ self.region_vars.tainted(&snapshot.region_vars_snapshot, r, directions)
}
fn region_vars_confined_to_snapshot(&self,
region_vars
}
+ /// Replace all regions bound by `binder` with skolemized regions and
+ /// return a map indicating which bound-region was replaced with what
+ /// skolemized region. This is the first step of checking subtyping
+ /// when higher-ranked things are involved.
+ ///
+ /// **Important:** you must call this function from within a snapshot.
+ /// Moreover, before committing the snapshot, you must eventually call
+ /// either `plug_leaks` or `pop_skolemized` to remove the skolemized
+ /// regions. If you rollback the snapshot (or are using a probe), then
+ /// the pop occurs as part of the rollback, so an explicit call is not
+ /// needed (but is also permitted).
+ ///
+ /// See `README.md` for more details.
pub fn skolemize_late_bound_regions<T>(&self,
binder: &ty::Binder<T>,
snapshot: &CombinedSnapshot)
-> (T, SkolemizationMap)
where T : TypeFoldable<'tcx>
{
- /*!
- * Replace all regions bound by `binder` with skolemized regions and
- * return a map indicating which bound-region was replaced with what
- * skolemized region. This is the first step of checking subtyping
- * when higher-ranked things are involved. See `README.md` for more
- * details.
- */
-
let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| {
- self.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
+ self.region_vars.push_skolemized(br, &snapshot.region_vars_snapshot)
});
debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})",
(result, map)
}
+ /// Searches the region constraints created since `snapshot` was started
+ /// and checks to determine whether any of the skolemized regions created
+ /// in `skol_map` would "escape" -- meaning that they are related to
+ /// other regions in some way. If so, the higher-ranked subtyping doesn't
+ /// hold. See `README.md` for more details.
pub fn leak_check(&self,
overly_polymorphic: bool,
+ span: Span,
skol_map: &SkolemizationMap,
snapshot: &CombinedSnapshot)
-> RelateResult<'tcx, ()>
{
- /*!
- * Searches the region constriants created since `snapshot` was started
- * and checks to determine whether any of the skolemized regions created
- * in `skol_map` would "escape" -- meaning that they are related to
- * other regions in some way. If so, the higher-ranked subtyping doesn't
- * hold. See `README.md` for more details.
- */
-
debug!("leak_check: skol_map={:?}",
skol_map);
+ // ## Issue #32330 warnings
+ //
+ // When Issue #32330 is fixed, a certain number of late-bound
+ // regions (LBR) will become early-bound. We wish to issue
+ // warnings when the result of `leak_check` relies on such LBR, as
+ // that means that compilation will likely start to fail.
+ //
+ // Recall that when we do a "HR subtype" check, we replace all
+ // late-bound regions (LBR) in the subtype with fresh variables,
+ // and skolemize the late-bound regions in the supertype. If those
+ // skolemized regions from the supertype wind up being
+ // super-regions (directly or indirectly) of either
+ //
+ // - another skolemized region; or,
+ // - some region that pre-exists the HR subtype check
+ // - e.g., a region variable that is not one of those created
+ // to represent bound regions in the subtype
+ //
+ // then leak-check (and hence the subtype check) fails.
+ //
+ // What will change when we fix #32330 is that some of the LBR in the
+ // subtype may become early-bound. In that case, they would no longer be in
+ // the "permitted set" of variables that can be related to a skolemized
+ // type.
+ //
+ // So the foundation for this warning is to collect variables that we found
+ // to be related to a skolemized type. For each of them, we have a
+ // `BoundRegion` which carries a `Issue32330` flag. We check whether any of
+ // those flags indicate that this variable was created from a lifetime
+ // that will change from late- to early-bound. If so, we issue a warning
+ // indicating that the results of compilation may change.
+ //
+ // This is imperfect, since there are other kinds of code that will not
+ // compile once #32330 is fixed. However, it fixes the errors observed in
+ // practice on crater runs.
+ let mut warnings = vec![];
+
let new_vars = self.region_vars_confined_to_snapshot(snapshot);
for (&skol_br, &skol) in skol_map {
- let tainted = self.tainted_regions(snapshot, skol);
- for &tainted_region in &tainted {
+ // The inputs to a skolemized variable can only
+ // be itself or other new variables.
+ let incoming_taints = self.tainted_regions(snapshot,
+ skol,
+ TaintDirections::both());
+ for &tainted_region in &incoming_taints {
// Each skolemized should only be relatable to itself
// or new variables:
match tainted_region {
ty::ReVar(vid) => {
- if new_vars.iter().any(|&x| x == vid) { continue; }
+ if new_vars.contains(&vid) {
+ warnings.extend(
+ match self.region_vars.var_origin(vid) {
+ LateBoundRegion(_,
+ ty::BrNamed(_, _, wc),
+ _) => Some(wc),
+ _ => None,
+ });
+ continue;
+ }
}
_ => {
if tainted_region == skol { continue; }
}
}
}
+
+ self.issue_32330_warnings(span, &warnings);
+
Ok(())
}
value: &T) -> T
where T : TypeFoldable<'tcx>
{
- debug_assert!(self.leak_check(false, &skol_map, snapshot).is_ok());
-
debug!("plug_leaks(skol_map={:?}, value={:?})",
skol_map,
value);
// these taint sets are mutually disjoint.
let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
skol_map
- .into_iter()
- .flat_map(|(skol_br, skol)| {
- self.tainted_regions(snapshot, skol)
+ .iter()
+ .flat_map(|(&skol_br, &skol)| {
+ self.tainted_regions(snapshot, skol, TaintDirections::both())
.into_iter()
.map(move |tainted_region| (tainted_region, skol_br))
})
// binders, so this assert is satisfied.
assert!(current_depth > 1);
+ // since leak-check passed, this skolemized region
+ // should only have incoming edges from variables
+ // (which ought not to escape the snapshot, but we
+ // don't check that) or itself
+ assert!(
+ match r {
+ ty::ReVar(_) => true,
+ ty::ReSkolemized(_, ref br1) => br == br1,
+ _ => false,
+ },
+ "leak-check would have us replace {:?} with {:?}",
+ r, br);
+
ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
}
}
debug!("plug_leaks: result={:?}",
result);
+ self.pop_skolemized(skol_map, snapshot);
+
+ debug!("plug_leaks: result={:?}", result);
+
result
}
+
+ /// Pops the skolemized regions found in `skol_map` from the region
+ /// inference context. Whenever you create skolemized regions via
+ /// `skolemize_late_bound_regions`, they must be popped before you
+ /// commit the enclosing snapshot (if you do not commit, e.g. within a
+ /// probe or as a result of an error, then this is not necessary, as
+ /// popping happens as part of the rollback).
+ ///
+ /// Note: popping also occurs implicitly as part of `leak_check`.
+ pub fn pop_skolemized(&self,
+ skol_map: SkolemizationMap,
+ snapshot: &CombinedSnapshot)
+ {
+ debug!("pop_skolemized({:?})", skol_map);
+ let skol_regions: FnvHashSet<_> = skol_map.values().cloned().collect();
+ self.region_vars.pop_skolemized(&skol_regions, &snapshot.region_vars_snapshot);
+ }
}
use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap};
use self::combine::CombineFields;
+use self::higher_ranked::HrMatchResult;
use self::region_inference::{RegionVarBindings, RegionSnapshot};
use self::unify_key::ToType;
pub mod type_variable;
pub mod unify_key;
+#[must_use]
pub struct InferOk<'tcx, T> {
pub value: T,
pub obligations: PredicateObligations<'tcx>,
pub tables: InferTables<'a, 'gcx, 'tcx>,
+ // Cache for projections. This cache is snapshotted along with the
+ // infcx.
+ //
+ // Public so that `traits::project` can use it.
+ pub projection_cache: RefCell<traits::ProjectionCache<'tcx>>,
+
// We instantiate UnificationTable with bounds<Ty> because the
// types that might instantiate a general type variable have an
// order, represented by its upper and lower bounds.
parameter_environment: param_env,
selection_cache: traits::SelectionCache::new(),
evaluation_cache: traits::EvaluationCache::new(),
+ projection_cache: RefCell::new(traits::ProjectionCache::new()),
reported_trait_errors: RefCell::new(FnvHashSet()),
normalize: false,
projection_mode: ProjectionMode::AnyFinal,
global_tcx.enter_local(arenas, |tcx| f(InferCtxt {
tcx: tcx,
tables: tables,
+ projection_cache: RefCell::new(traits::ProjectionCache::new()),
type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
int_unification_table: RefCell::new(UnificationTable::new()),
float_unification_table: RefCell::new(UnificationTable::new()),
}
impl<'tcx, T> InferOk<'tcx, T> {
- fn unit(self) -> InferOk<'tcx, ()> {
+ pub fn unit(self) -> InferOk<'tcx, ()> {
InferOk { value: (), obligations: self.obligations }
}
}
#[must_use = "once you start a snapshot, you should always consume it"]
pub struct CombinedSnapshot {
+ projection_cache_snapshot: traits::ProjectionCacheSnapshot,
type_snapshot: type_variable::Snapshot,
int_snapshot: unify::Snapshot<ty::IntVid>,
float_snapshot: unify::Snapshot<ty::FloatVid>,
-> T::Lifted
where T: TypeFoldable<'tcx> + ty::Lift<'gcx>
{
+ debug!("drain_fulfillment_cx_or_panic()");
+
let when = "resolving bounds after type-checking";
let v = match self.drain_fulfillment_cx(fulfill_cx, result) {
Ok(v) => v,
}
fn start_snapshot(&self) -> CombinedSnapshot {
+ debug!("start_snapshot()");
+
let obligations_in_snapshot = self.obligations_in_snapshot.get();
self.obligations_in_snapshot.set(false);
CombinedSnapshot {
+ projection_cache_snapshot: self.projection_cache.borrow_mut().snapshot(),
type_snapshot: self.type_variables.borrow_mut().snapshot(),
int_snapshot: self.int_unification_table.borrow_mut().snapshot(),
float_snapshot: self.float_unification_table.borrow_mut().snapshot(),
fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) {
debug!("rollback_to(cause={})", cause);
- let CombinedSnapshot { type_snapshot,
+ let CombinedSnapshot { projection_cache_snapshot,
+ type_snapshot,
int_snapshot,
float_snapshot,
region_vars_snapshot,
assert!(!self.obligations_in_snapshot.get());
self.obligations_in_snapshot.set(obligations_in_snapshot);
+ self.projection_cache
+ .borrow_mut()
+ .rollback_to(projection_cache_snapshot);
self.type_variables
.borrow_mut()
.rollback_to(type_snapshot);
}
fn commit_from(&self, snapshot: CombinedSnapshot) {
- debug!("commit_from!");
- let CombinedSnapshot { type_snapshot,
+ debug!("commit_from()");
+ let CombinedSnapshot { projection_cache_snapshot,
+ type_snapshot,
int_snapshot,
float_snapshot,
region_vars_snapshot,
self.obligations_in_snapshot.set(obligations_in_snapshot);
+ self.projection_cache
+ .borrow_mut()
+ .commit(projection_cache_snapshot);
self.type_variables
.borrow_mut()
.commit(type_snapshot);
F: FnOnce() -> Result<T, E>
{
debug!("commit_regions_if_ok()");
- let CombinedSnapshot { type_snapshot,
+ let CombinedSnapshot { projection_cache_snapshot,
+ type_snapshot,
int_snapshot,
float_snapshot,
region_vars_snapshot,
// Roll back any non-region bindings - they should be resolved
// inside `f`, with, e.g. `resolve_type_vars_if_possible`.
+ self.projection_cache
+ .borrow_mut()
+ .rollback_to(projection_cache_snapshot);
self.type_variables
.borrow_mut()
.rollback_to(type_snapshot);
self.skolemize_late_bound_regions(predicate, snapshot);
let origin = TypeOrigin::EquatePredicate(span);
let eqty_ok = self.eq_types(false, origin, a, b)?;
- self.leak_check(false, &skol_map, snapshot).map(|_| eqty_ok.unit())
+ self.leak_check(false, span, &skol_map, snapshot)?;
+ self.pop_skolemized(skol_map, snapshot);
+ Ok(eqty_ok.unit())
})
}
self.skolemize_late_bound_regions(predicate, snapshot);
let origin = RelateRegionParamBound(span);
self.sub_regions(origin, r_b, r_a); // `b : a` ==> `a <= b`
- self.leak_check(false, &skol_map, snapshot)
+ self.leak_check(false, span, &skol_map, snapshot)?;
+ Ok(self.pop_skolemized(skol_map, snapshot))
})
}
|br| self.next_region_var(LateBoundRegion(span, br, lbrct)))
}
+ /// Given a higher-ranked projection predicate like:
+ ///
+ /// for<'a> <T as Fn<&'a u32>>::Output = &'a u32
+ ///
+ /// and a target trait-ref like:
+ ///
+ /// <T as Fn<&'x u32>>
+ ///
+ /// find a substitution `S` for the higher-ranked regions (here,
+ /// `['a => 'x]`) such that the predicate matches the trait-ref,
+ /// and then return the value (here, `&'a u32`) but with the
+ /// substitution applied (hence, `&'x u32`).
+ ///
+ /// See `higher_ranked_match` in `higher_ranked/mod.rs` for more
+ /// details.
+ pub fn match_poly_projection_predicate(&self,
+ origin: TypeOrigin,
+ match_a: ty::PolyProjectionPredicate<'tcx>,
+ match_b: ty::TraitRef<'tcx>)
+ -> InferResult<'tcx, HrMatchResult<Ty<'tcx>>>
+ {
+ let span = origin.span();
+ let match_trait_ref = match_a.skip_binder().projection_ty.trait_ref;
+ let trace = TypeTrace {
+ origin: origin,
+ values: TraitRefs(ExpectedFound::new(true, match_trait_ref, match_b))
+ };
+
+ let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref, p.ty));
+ let combine = self.combine_fields(true, trace);
+ let result = combine.higher_ranked_match(span, &match_pair, &match_b)?;
+ Ok(InferOk { value: result, obligations: combine.obligations })
+ }
+
/// See `verify_generic_bound` method in `region_inference`
pub fn verify_generic_bound(&self,
origin: SubregionOrigin<'tcx>,
match *c {
Constraint::ConstrainVarSubVar(rv_1, rv_2) =>
(Node::RegionVid(rv_1), Node::RegionVid(rv_2)),
- Constraint::ConstrainRegSubVar(r_1, rv_2) => (Node::Region(r_1), Node::RegionVid(rv_2)),
- Constraint::ConstrainVarSubReg(rv_1, r_2) => (Node::RegionVid(rv_1), Node::Region(r_2)),
+ Constraint::ConstrainRegSubVar(r_1, rv_2) =>
+ (Node::Region(r_1), Node::RegionVid(rv_2)),
+ Constraint::ConstrainVarSubReg(rv_1, r_2) =>
+ (Node::RegionVid(rv_1), Node::Region(r_2)),
+ Constraint::ConstrainRegSubReg(r_1, r_2) =>
+ (Node::Region(r_1), Node::Region(r_2)),
}
}
//! See README.md
pub use self::Constraint::*;
-pub use self::Verify::*;
pub use self::UndoLogEntry::*;
pub use self::CombineMapType::*;
pub use self::RegionResolutionError::*;
use super::{RegionVariableOrigin, SubregionOrigin, MiscVariable};
use super::unify_key;
+use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet};
use rustc_data_structures::graph::{self, Direction, NodeIndex, OUTGOING};
use rustc_data_structures::unify::{self, UnificationTable};
use middle::free_region::FreeRegionMap;
use ty::{BoundRegion, Region, RegionVid};
use ty::{ReEmpty, ReStatic, ReFree, ReEarlyBound};
use ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh};
-use util::common::indenter;
-use util::nodemap::{FnvHashMap, FnvHashSet};
use std::cell::{Cell, RefCell};
use std::cmp::Ordering::{self, Less, Greater, Equal};
use std::fmt;
+use std::mem;
use std::u32;
use syntax::ast;
// Concrete region is subregion of region variable
ConstrainRegSubVar(Region, RegionVid),
- // Region variable is subregion of concrete region
- //
- // FIXME(#29436) -- should be remove in favor of a Verify
+ // Region variable is subregion of concrete region. This does not
+ // directly affect inference, but instead is checked after
+ // inference is complete.
ConstrainVarSubReg(RegionVid, Region),
+
+ // A constraint where neither side is a variable. This does not
+ // directly affect inference, but instead is checked after
+ // inference is complete.
+ ConstrainRegSubReg(Region, Region),
}
-// Something we have to verify after region inference is done, but
-// which does not directly influence the inference process
-pub enum Verify<'tcx> {
- // VerifyRegSubReg(a, b): Verify that `a <= b`. Neither `a` nor
- // `b` are inference variables.
- VerifyRegSubReg(SubregionOrigin<'tcx>, Region, Region),
-
- // VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
- // associated type) must outlive the region `R`. `T` is known to
- // outlive `RS`. Therefore verify that `R <= RS[i]` for some
- // `i`. Inference variables may be involved (but this verification
- // step doesn't influence inference).
- VerifyGenericBound(GenericKind<'tcx>, SubregionOrigin<'tcx>, Region, VerifyBound),
+// VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
+// associated type) must outlive the region `R`. `T` is known to
+// outlive `RS`. Therefore verify that `R <= RS[i]` for some
+// `i`. Inference variables may be involved (but this verification
+// step doesn't influence inference).
+#[derive(Debug)]
+pub struct Verify<'tcx> {
+ kind: GenericKind<'tcx>,
+ origin: SubregionOrigin<'tcx>,
+ region: Region,
+ bound: VerifyBound,
}
#[derive(Copy, Clone, PartialEq, Eq)]
#[derive(Copy, Clone, PartialEq)]
pub enum UndoLogEntry {
+ /// Pushed when we start a snapshot.
OpenSnapshot,
+
+ /// Replaces an `OpenSnapshot` when a snapshot is committed, but
+ /// that snapshot is not the root. If the root snapshot is
+ /// unrolled, all nested snapshots must be committed.
CommitedSnapshot,
+
+ /// We added `RegionVid`
AddVar(RegionVid),
+
+ /// We added the given `constraint`
AddConstraint(Constraint),
+
+ /// We added the given `verify`
AddVerify(usize),
+
+ /// We added the given `given`
AddGiven(ty::FreeRegion, ty::RegionVid),
+
+ /// We added a GLB/LUB "combination variable"
AddCombination(CombineMapType, TwoRegions),
+
+ /// During skolemization, we sometimes purge entries from the undo
+ /// log in a kind of minisnapshot (unlike other snapshots, this
+ /// purging actually takes place *on success*). In that case, we
+ /// replace the corresponding entry with `Noop` so as to avoid the
+ /// need to do a bunch of swapping. (We can't use `swap_remove` as
+ /// the order of the vector is important.)
+ Purged,
}
#[derive(Copy, Clone, PartialEq)]
skolemization_count: u32,
}
+/// When working with skolemized regions, we often wish to find all of
+/// the regions that are either reachable from a skolemized region, or
+/// which can reach a skolemized region, or both. We call such regions
+/// *tainted* regions. This struct allows you to decide what set of
+/// tainted regions you want.
+#[derive(Debug)]
+pub struct TaintDirections {
+ incoming: bool,
+ outgoing: bool,
+}
+
+impl TaintDirections {
+ pub fn incoming() -> Self {
+ TaintDirections { incoming: true, outgoing: false }
+ }
+
+ pub fn outgoing() -> Self {
+ TaintDirections { incoming: false, outgoing: true }
+ }
+
+ pub fn both() -> Self {
+ TaintDirections { incoming: true, outgoing: true }
+ }
+}
+
+struct TaintSet {
+ directions: TaintDirections,
+ regions: FnvHashSet<ty::Region>
+}
+
+impl TaintSet {
+ fn new(directions: TaintDirections,
+ initial_region: ty::Region)
+ -> Self {
+ let mut regions = FnvHashSet();
+ regions.insert(initial_region);
+ TaintSet { directions: directions, regions: regions }
+ }
+
+ fn fixed_point(&mut self,
+ undo_log: &[UndoLogEntry],
+ verifys: &[Verify]) {
+ let mut prev_len = 0;
+ while prev_len < self.len() {
+ debug!("tainted: prev_len = {:?} new_len = {:?}",
+ prev_len, self.len());
+
+ prev_len = self.len();
+
+ for undo_entry in undo_log {
+ match undo_entry {
+ &AddConstraint(ConstrainVarSubVar(a, b)) => {
+ self.add_edge(ReVar(a), ReVar(b));
+ }
+ &AddConstraint(ConstrainRegSubVar(a, b)) => {
+ self.add_edge(a, ReVar(b));
+ }
+ &AddConstraint(ConstrainVarSubReg(a, b)) => {
+ self.add_edge(ReVar(a), b);
+ }
+ &AddConstraint(ConstrainRegSubReg(a, b)) => {
+ self.add_edge(a, b);
+ }
+ &AddGiven(a, b) => {
+ self.add_edge(ReFree(a), ReVar(b));
+ }
+ &AddVerify(i) => {
+ verifys[i].bound.for_each_region(&mut |b| {
+ self.add_edge(verifys[i].region, b);
+ });
+ }
+ &Purged |
+ &AddCombination(..) |
+ &AddVar(..) |
+ &OpenSnapshot |
+ &CommitedSnapshot => {}
+ }
+ }
+ }
+ }
+
+ fn into_set(self) -> FnvHashSet<ty::Region> {
+ self.regions
+ }
+
+ fn len(&self) -> usize {
+ self.regions.len()
+ }
+
+ fn add_edge(&mut self,
+ source: ty::Region,
+ target: ty::Region) {
+ if self.directions.incoming {
+ if self.regions.contains(&target) {
+ self.regions.insert(source);
+ }
+ }
+
+ if self.directions.outgoing {
+ if self.regions.contains(&source) {
+ self.regions.insert(target);
+ }
+ }
+ }
+}
+
impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> RegionVarBindings<'a, 'gcx, 'tcx> {
RegionVarBindings {
debug!("RegionVarBindings: commit({})", snapshot.length);
assert!(self.undo_log.borrow().len() > snapshot.length);
assert!((*self.undo_log.borrow())[snapshot.length] == OpenSnapshot);
+ assert!(self.skolemization_count.get() == snapshot.skolemization_count,
+ "failed to pop skolemized regions: {} now vs {} at start",
+ self.skolemization_count.get(),
+ snapshot.skolemization_count);
let mut undo_log = self.undo_log.borrow_mut();
if snapshot.length == 0 {
} else {
(*undo_log)[snapshot.length] = CommitedSnapshot;
}
- self.skolemization_count.set(snapshot.skolemization_count);
self.unification_table.borrow_mut().commit(snapshot.region_snapshot);
}
assert!(undo_log.len() > snapshot.length);
assert!((*undo_log)[snapshot.length] == OpenSnapshot);
while undo_log.len() > snapshot.length + 1 {
- match undo_log.pop().unwrap() {
- OpenSnapshot => {
- bug!("Failure to observe stack discipline");
- }
- CommitedSnapshot => {}
- AddVar(vid) => {
- let mut var_origins = self.var_origins.borrow_mut();
- var_origins.pop().unwrap();
- assert_eq!(var_origins.len(), vid.index as usize);
- }
- AddConstraint(ref constraint) => {
- self.constraints.borrow_mut().remove(constraint);
- }
- AddVerify(index) => {
- self.verifys.borrow_mut().pop();
- assert_eq!(self.verifys.borrow().len(), index);
- }
- AddGiven(sub, sup) => {
- self.givens.borrow_mut().remove(&(sub, sup));
- }
- AddCombination(Glb, ref regions) => {
- self.glbs.borrow_mut().remove(regions);
- }
- AddCombination(Lub, ref regions) => {
- self.lubs.borrow_mut().remove(regions);
- }
- }
+ self.rollback_undo_entry(undo_log.pop().unwrap());
}
let c = undo_log.pop().unwrap();
assert!(c == OpenSnapshot);
.rollback_to(snapshot.region_snapshot);
}
+ pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry) {
+ match undo_entry {
+ OpenSnapshot => {
+ panic!("Failure to observe stack discipline");
+ }
+ Purged | CommitedSnapshot => {
+ // nothing to do here
+ }
+ AddVar(vid) => {
+ let mut var_origins = self.var_origins.borrow_mut();
+ var_origins.pop().unwrap();
+ assert_eq!(var_origins.len(), vid.index as usize);
+ }
+ AddConstraint(ref constraint) => {
+ self.constraints.borrow_mut().remove(constraint);
+ }
+ AddVerify(index) => {
+ self.verifys.borrow_mut().pop();
+ assert_eq!(self.verifys.borrow().len(), index);
+ }
+ AddGiven(sub, sup) => {
+ self.givens.borrow_mut().remove(&(sub, sup));
+ }
+ AddCombination(Glb, ref regions) => {
+ self.glbs.borrow_mut().remove(regions);
+ }
+ AddCombination(Lub, ref regions) => {
+ self.lubs.borrow_mut().remove(regions);
+ }
+ }
+ }
+
pub fn num_vars(&self) -> u32 {
let len = self.var_origins.borrow().len();
// enforce no overflow
return vid;
}
+ pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin {
+ self.var_origins.borrow()[vid.index as usize].clone()
+ }
+
/// Creates a new skolemized region. Skolemized regions are fresh
/// regions used when performing higher-ranked computations. They
/// must be used in a very particular way and are never supposed
/// to "escape" out into error messages or the code at large.
///
/// The idea is to always create a snapshot. Skolemized regions
- /// can be created in the context of this snapshot, but once the
- /// snapshot is committed or rolled back, their numbers will be
- /// recycled, so you must be finished with them. See the extensive
- /// comments in `higher_ranked.rs` to see how it works (in
- /// particular, the subtyping comparison).
+ /// can be created in the context of this snapshot, but before the
+ /// snapshot is committed or rolled back, they must be popped
+ /// (using `pop_skolemized`), so that their numbers can be
+ /// recycled. Normally you don't have to think about this: you use
+ /// the APIs in `higher_ranked/mod.rs`, such as
+ /// `skolemize_late_bound_regions` and `plug_leaks`, which will
+ /// guide you on this path (ensure that the `SkolemizationMap` is
+ /// consumed and you are good). There are also somewhat extensive
+ /// comments in `higher_ranked/README.md`.
///
/// The `snapshot` argument to this function is not really used;
/// it's just there to make it explicit which snapshot bounds the
- /// skolemized region that results.
- pub fn new_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region {
+ /// skolemized region that results. It should always be the top-most snapshot.
+ pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region {
assert!(self.in_snapshot());
assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot);
ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)
}
+ /// Removes all the edges to/from the skolemized regions that are
+ /// in `skols`. This is used after a higher-ranked operation
+ /// completes to remove all trace of the skolemized regions
+ /// created in that time.
+ pub fn pop_skolemized(&self,
+ skols: &FnvHashSet<ty::Region>,
+ snapshot: &RegionSnapshot) {
+ debug!("pop_skolemized_regions(skols={:?})", skols);
+
+ assert!(self.in_snapshot());
+ assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot);
+ assert!(self.skolemization_count.get() as usize >= skols.len(),
+ "popping more skolemized variables than actually exist, \
+ sc now = {}, skols.len = {}",
+ self.skolemization_count.get(),
+ skols.len());
+
+ let last_to_pop = self.skolemization_count.get();
+ let first_to_pop = last_to_pop - (skols.len() as u32);
+
+ assert!(first_to_pop >= snapshot.skolemization_count,
+ "popping more regions than snapshot contains, \
+ sc now = {}, sc then = {}, skols.len = {}",
+ self.skolemization_count.get(),
+ snapshot.skolemization_count,
+ skols.len());
+ debug_assert! {
+ skols.iter()
+ .all(|k| match *k {
+ ty::ReSkolemized(index, _) =>
+ index.index >= first_to_pop &&
+ index.index < last_to_pop,
+ _ =>
+ false
+ }),
+ "invalid skolemization keys or keys out of range ({}..{}): {:?}",
+ snapshot.skolemization_count,
+ self.skolemization_count.get(),
+ skols
+ }
+
+ let mut undo_log = self.undo_log.borrow_mut();
+
+ let constraints_to_kill: Vec<usize> =
+ undo_log.iter()
+ .enumerate()
+ .rev()
+ .filter(|&(_, undo_entry)| kill_constraint(skols, undo_entry))
+ .map(|(index, _)| index)
+ .collect();
+
+ for index in constraints_to_kill {
+ let undo_entry = mem::replace(&mut undo_log[index], Purged);
+ self.rollback_undo_entry(undo_entry);
+ }
+
+ self.skolemization_count.set(snapshot.skolemization_count);
+ return;
+
+ fn kill_constraint(skols: &FnvHashSet<ty::Region>,
+ undo_entry: &UndoLogEntry)
+ -> bool {
+ match undo_entry {
+ &AddConstraint(ConstrainVarSubVar(_, _)) =>
+ false,
+ &AddConstraint(ConstrainRegSubVar(a, _)) =>
+ skols.contains(&a),
+ &AddConstraint(ConstrainVarSubReg(_, b)) =>
+ skols.contains(&b),
+ &AddConstraint(ConstrainRegSubReg(a, b)) =>
+ skols.contains(&a) || skols.contains(&b),
+ &AddGiven(_, _) =>
+ false,
+ &AddVerify(_) =>
+ false,
+ &AddCombination(_, ref two_regions) =>
+ skols.contains(&two_regions.a) ||
+ skols.contains(&two_regions.b),
+ &AddVar(..) |
+ &OpenSnapshot |
+ &Purged |
+ &CommitedSnapshot =>
+ false,
+ }
+ }
+
+ }
+
pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region {
// Creates a fresh bound variable for use in GLB computations.
// See discussion of GLB computation in the large comment at
debug!("RegionVarBindings: add_verify({:?})", verify);
// skip no-op cases known to be satisfied
- match verify {
- VerifyGenericBound(_, _, _, VerifyBound::AllBounds(ref bs)) if bs.len() == 0 => {
- return;
- }
- _ => {}
+ match verify.bound {
+ VerifyBound::AllBounds(ref bs) if bs.len() == 0 => { return; }
+ _ => { }
}
let mut verifys = self.verifys.borrow_mut();
self.add_constraint(ConstrainVarSubReg(sub_id, r), origin);
}
_ => {
- self.add_verify(VerifyRegSubReg(origin, sub, sup));
+ self.add_constraint(ConstrainRegSubReg(sub, sup), origin);
}
}
}
kind: GenericKind<'tcx>,
sub: Region,
bound: VerifyBound) {
- self.add_verify(VerifyGenericBound(kind, origin, sub, bound));
+ self.add_verify(Verify {
+ kind: kind,
+ origin: origin,
+ region: sub,
+ bound: bound
+ });
}
pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
.collect()
}
- /// Computes all regions that have been related to `r0` in any way since the mark `mark` was
- /// made---`r0` itself will be the first entry. This is used when checking whether skolemized
- /// regions are being improperly related to other regions.
- pub fn tainted(&self, mark: &RegionSnapshot, r0: Region) -> Vec<Region> {
- debug!("tainted(mark={:?}, r0={:?})", mark, r0);
- let _indenter = indenter();
+ /// Computes all regions that have been related to `r0` since the
+ /// mark `mark` was made---`r0` itself will be the first
+ /// entry. The `directions` parameter controls what kind of
+ /// relations are considered. For example, one can say that only
+ /// "incoming" edges to `r0` are desired, in which case one will
+ /// get the set of regions `{r|r <= r0}`. This is used when
+ /// checking whether skolemized regions are being improperly
+ /// related to other regions.
+ pub fn tainted(&self,
+ mark: &RegionSnapshot,
+ r0: Region,
+ directions: TaintDirections)
+ -> FnvHashSet<ty::Region> {
+ debug!("tainted(mark={:?}, r0={:?}, directions={:?})",
+ mark, r0, directions);
// `result_set` acts as a worklist: we explore all outgoing
// edges and add any new regions we find to result_set. This
// is not a terribly efficient implementation.
- let mut result_set = vec![r0];
- let mut result_index = 0;
- while result_index < result_set.len() {
- // nb: can't use usize::range() here because result_set grows
- let r = result_set[result_index];
- debug!("result_index={}, r={:?}", result_index, r);
-
- for undo_entry in self.undo_log.borrow()[mark.length..].iter() {
- match undo_entry {
- &AddConstraint(ConstrainVarSubVar(a, b)) => {
- consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), ReVar(b));
- }
- &AddConstraint(ConstrainRegSubVar(a, b)) => {
- consider_adding_bidirectional_edges(&mut result_set, r, a, ReVar(b));
- }
- &AddConstraint(ConstrainVarSubReg(a, b)) => {
- consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), b);
- }
- &AddGiven(a, b) => {
- consider_adding_bidirectional_edges(&mut result_set,
- r,
- ReFree(a),
- ReVar(b));
- }
- &AddVerify(i) => {
- match (*self.verifys.borrow())[i] {
- VerifyRegSubReg(_, a, b) => {
- consider_adding_bidirectional_edges(&mut result_set, r, a, b);
- }
- VerifyGenericBound(_, _, a, ref bound) => {
- bound.for_each_region(&mut |b| {
- consider_adding_bidirectional_edges(&mut result_set, r, a, b)
- });
- }
- }
- }
- &AddCombination(..) |
- &AddVar(..) |
- &OpenSnapshot |
- &CommitedSnapshot => {}
- }
- }
-
- result_index += 1;
- }
-
- return result_set;
-
- fn consider_adding_bidirectional_edges(result_set: &mut Vec<Region>,
- r: Region,
- r1: Region,
- r2: Region) {
- consider_adding_directed_edge(result_set, r, r1, r2);
- consider_adding_directed_edge(result_set, r, r2, r1);
- }
-
- fn consider_adding_directed_edge(result_set: &mut Vec<Region>,
- r: Region,
- r1: Region,
- r2: Region) {
- if r == r1 {
- // Clearly, this is potentially inefficient.
- if !result_set.iter().any(|x| *x == r2) {
- result_set.push(r2);
- }
- }
- }
+ let mut taint_set = TaintSet::new(directions, r0);
+ taint_set.fixed_point(&self.undo_log.borrow()[mark.length..],
+ &self.verifys.borrow());
+ debug!("tainted: result={:?}", taint_set.regions);
+ return taint_set.into_set();
}
/// This function performs the actual region resolution. It must be
ErrorValue,
}
-struct VarData {
- value: VarValue,
-}
-
struct RegionAndOrigin<'tcx> {
region: Region,
origin: SubregionOrigin<'tcx>,
let graph = self.construct_graph();
self.expand_givens(&graph);
self.expansion(free_regions, &mut var_data);
- self.contraction(free_regions, &mut var_data);
- let values = self.extract_values_and_collect_conflicts(free_regions,
- &var_data,
- &graph,
- errors);
- self.collect_concrete_region_errors(free_regions, &values, errors);
- values
+ self.collect_errors(free_regions, &mut var_data, errors);
+ self.collect_var_errors(free_regions, &var_data, &graph, errors);
+ var_data
}
- fn construct_var_data(&self) -> Vec<VarData> {
+ fn construct_var_data(&self) -> Vec<VarValue> {
(0..self.num_vars() as usize)
- .map(|_| VarData { value: Value(ty::ReEmpty) })
+ .map(|_| Value(ty::ReEmpty))
.collect()
}
}
}
- fn expansion(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
- self.iterate_until_fixed_point("Expansion", |constraint| {
+ fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue]) {
+ self.iterate_until_fixed_point("Expansion", |constraint, origin| {
debug!("expansion: constraint={:?} origin={:?}",
- constraint,
- self.constraints
- .borrow()
- .get(constraint)
- .unwrap());
+ constraint, origin);
match *constraint {
ConstrainRegSubVar(a_region, b_vid) => {
- let b_data = &mut var_data[b_vid.index as usize];
+ let b_data = &mut var_values[b_vid.index as usize];
self.expand_node(free_regions, a_region, b_vid, b_data)
}
ConstrainVarSubVar(a_vid, b_vid) => {
- match var_data[a_vid.index as usize].value {
+ match var_values[a_vid.index as usize] {
ErrorValue => false,
Value(a_region) => {
- let b_node = &mut var_data[b_vid.index as usize];
+ let b_node = &mut var_values[b_vid.index as usize];
self.expand_node(free_regions, a_region, b_vid, b_node)
}
}
}
+ ConstrainRegSubReg(..) |
ConstrainVarSubReg(..) => {
- // This is a contraction constraint. Ignore it.
+ // These constraints are checked after expansion
+ // is done, in `collect_errors`.
false
}
}
free_regions: &FreeRegionMap,
a_region: Region,
b_vid: RegionVid,
- b_data: &mut VarData)
+ b_data: &mut VarValue)
-> bool {
debug!("expand_node({:?}, {:?} == {:?})",
a_region,
b_vid,
- b_data.value);
+ b_data);
// Check if this relationship is implied by a given.
match a_region {
_ => {}
}
- match b_data.value {
+ match *b_data {
Value(cur_region) => {
let lub = self.lub_concrete_regions(free_regions, a_region, cur_region);
if lub == cur_region {
cur_region,
lub);
- b_data.value = Value(lub);
+ *b_data = Value(lub);
return true;
}
}
}
- // FIXME(#29436) -- this fn would just go away if we removed ConstrainVarSubReg
- fn contraction(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) {
- self.iterate_until_fixed_point("Contraction", |constraint| {
- debug!("contraction: constraint={:?} origin={:?}",
- constraint,
- self.constraints
- .borrow()
- .get(constraint)
- .unwrap());
+ /// After expansion is complete, go and check upper bounds (i.e.,
+ /// cases where the region cannot grow larger than a fixed point)
+ /// and check that they are satisfied.
+ fn collect_errors(&self,
+ free_regions: &FreeRegionMap,
+ var_data: &mut Vec<VarValue>,
+ errors: &mut Vec<RegionResolutionError<'tcx>>) {
+ let constraints = self.constraints.borrow();
+ for (constraint, origin) in constraints.iter() {
+ debug!("collect_errors: constraint={:?} origin={:?}",
+ constraint, origin);
match *constraint {
ConstrainRegSubVar(..) |
ConstrainVarSubVar(..) => {
// Expansion will ensure that these constraints hold. Ignore.
}
+
+ ConstrainRegSubReg(sub, sup) => {
+ if free_regions.is_subregion_of(self.tcx, sub, sup) {
+ continue;
+ }
+
+ debug!("collect_errors: region error at {:?}: \
+ cannot verify that {:?} <= {:?}",
+ origin,
+ sub,
+ sup);
+
+ errors.push(ConcreteFailure((*origin).clone(), sub, sup));
+ }
+
ConstrainVarSubReg(a_vid, b_region) => {
let a_data = &mut var_data[a_vid.index as usize];
debug!("contraction: {:?} == {:?}, {:?}",
a_vid,
- a_data.value,
+ a_data,
b_region);
- let a_region = match a_data.value {
- ErrorValue => return false,
+ let a_region = match *a_data {
+ ErrorValue => continue,
Value(a_region) => a_region,
};
+ // Do not report these errors immediately:
+ // instead, set the variable value to error and
+ // collect them later.
if !free_regions.is_subregion_of(self.tcx, a_region, b_region) {
- debug!("Setting {:?} to ErrorValue: {:?} not subregion of {:?}",
+ debug!("collect_errors: region error at {:?}: \
+ cannot verify that {:?}={:?} <= {:?}",
+ origin,
a_vid,
a_region,
b_region);
- a_data.value = ErrorValue;
+ *a_data = ErrorValue;
}
}
}
+ }
- false
- })
- }
-
- fn collect_concrete_region_errors(&self,
- free_regions: &FreeRegionMap,
- values: &Vec<VarValue>,
- errors: &mut Vec<RegionResolutionError<'tcx>>) {
- let mut reg_reg_dups = FnvHashSet();
for verify in self.verifys.borrow().iter() {
- match *verify {
- VerifyRegSubReg(ref origin, sub, sup) => {
- if free_regions.is_subregion_of(self.tcx, sub, sup) {
- continue;
- }
-
- if !reg_reg_dups.insert((sub, sup)) {
- continue;
- }
-
- debug!("region inference error at {:?}: {:?} <= {:?} is not true",
- origin,
- sub,
- sup);
-
- errors.push(ConcreteFailure((*origin).clone(), sub, sup));
- }
+ debug!("collect_errors: verify={:?}", verify);
+ let sub = normalize(var_data, verify.region);
+ if verify.bound.is_met(self.tcx, free_regions, var_data, sub) {
+ continue;
+ }
- VerifyGenericBound(ref kind, ref origin, sub, ref bound) => {
- let sub = normalize(values, sub);
- if bound.is_met(self.tcx, free_regions, values, sub) {
- continue;
- }
+ debug!("collect_errors: region error at {:?}: \
+ cannot verify that {:?} <= {:?}",
+ verify.origin,
+ verify.region,
+ verify.bound);
- debug!("region inference error at {:?}: verifying {:?} <= {:?}",
- origin,
- sub,
- bound);
-
- errors.push(GenericBoundFailure((*origin).clone(), kind.clone(), sub));
- }
- }
+ errors.push(GenericBoundFailure(verify.origin.clone(),
+ verify.kind.clone(),
+ sub));
}
}
- fn extract_values_and_collect_conflicts(&self,
- free_regions: &FreeRegionMap,
- var_data: &[VarData],
- graph: &RegionGraph,
- errors: &mut Vec<RegionResolutionError<'tcx>>)
- -> Vec<VarValue> {
- debug!("extract_values_and_collect_conflicts()");
+ /// Go over the variables that were declared to be error variables
+ /// and create a `RegionResolutionError` for each of them.
+ fn collect_var_errors(&self,
+ free_regions: &FreeRegionMap,
+ var_data: &[VarValue],
+ graph: &RegionGraph,
+ errors: &mut Vec<RegionResolutionError<'tcx>>) {
+ debug!("collect_var_errors");
// This is the best way that I have found to suppress
// duplicate and related errors. Basically we keep a set of
let mut dup_vec = vec![u32::MAX; self.num_vars() as usize];
for idx in 0..self.num_vars() as usize {
- match var_data[idx].value {
+ match var_data[idx] {
Value(_) => {
/* Inference successful */
}
}
}
}
-
- (0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
}
fn construct_graph(&self) -> RegionGraph {
ConstrainVarSubReg(a_id, _) => {
graph.add_edge(NodeIndex(a_id.index as usize), dummy_sink, *constraint);
}
+ ConstrainRegSubReg(..) => {
+ // this would be an edge from `dummy_source` to
+ // `dummy_sink`; just ignore it.
+ }
}
}
origin: this.constraints.borrow().get(&edge.data).unwrap().clone(),
});
}
+
+ ConstrainRegSubReg(..) => {
+ panic!("cannot reach reg-sub-reg edge in region inference \
+ post-processing")
+ }
}
}
}
}
fn iterate_until_fixed_point<F>(&self, tag: &str, mut body: F)
- where F: FnMut(&Constraint) -> bool
+ where F: FnMut(&Constraint, &SubregionOrigin<'tcx>) -> bool
{
let mut iteration = 0;
let mut changed = true;
changed = false;
iteration += 1;
debug!("---- {} Iteration {}{}", "#", tag, iteration);
- for (constraint, _) in self.constraints.borrow().iter() {
- let edge_changed = body(constraint);
+ for (constraint, origin) in self.constraints.borrow().iter() {
+ let edge_changed = body(constraint, origin);
if edge_changed {
debug!("Updated due to constraint {:?}", constraint);
changed = true;
}
-impl<'tcx> fmt::Debug for Verify<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- VerifyRegSubReg(_, ref a, ref b) => {
- write!(f, "VerifyRegSubReg({:?}, {:?})", a, b)
- }
- VerifyGenericBound(_, ref p, ref a, ref bs) => {
- write!(f, "VerifyGenericBound({:?}, {:?}, {:?})", p, a, bs)
- }
- }
- }
-}
-
fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
match r {
ty::ReVar(rid) => lookup(values, rid),
value: Bounded { relations: vec![], default: default },
diverging: diverging
});
- ty::TyVid { index: index as u32 }
+ let v = ty::TyVid { index: index as u32 };
+ debug!("new_var() -> {:?}", v);
+ v
}
pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
}
pub fn rollback_to(&mut self, s: Snapshot) {
+ debug!("rollback_to{:?}", {
+ for action in self.values.actions_since_snapshot(&s.snapshot) {
+ match *action {
+ sv::UndoLog::NewElem(index) => {
+ debug!("inference variable _#{}t popped", index)
+ }
+ _ => { }
+ }
+ }
+ });
+
self.values.rollback_to(s.snapshot);
self.eq_relations.rollback_to(s.eq_snapshot);
}
use hir::map::Map;
use session::Session;
use hir::def::{Def, DefMap};
+use hir::def_id::DefId;
use middle::region;
use ty::subst;
use ty;
use syntax::parse::token::keywords;
use util::nodemap::NodeMap;
+use rustc_data_structures::fnv::FnvHashSet;
use hir;
use hir::print::lifetime_to_string;
use hir::intravisit::{self, Visitor, FnKind};
// Maps the id of each lifetime reference to the lifetime decl
// that it corresponds to.
-pub type NamedRegionMap = NodeMap<DefRegion>;
+pub struct NamedRegionMap {
+ // maps from every use of a named (not anonymous) lifetime to a
+ // `DefRegion` describing how that region is bound
+ pub defs: NodeMap<DefRegion>,
+
+ // the set of lifetime def ids that are late-bound; late-bound ids
+ // are named regions appearing in fn arguments that do not appear
+ // in where-clauses
+ pub late_bound: NodeMap<ty::Issue32330>,
+}
-struct LifetimeContext<'a> {
+struct LifetimeContext<'a, 'tcx: 'a> {
sess: &'a Session,
- named_region_map: &'a mut NamedRegionMap,
+ hir_map: &'a Map<'tcx>,
+ map: &'a mut NamedRegionMap,
scope: Scope<'a>,
def_map: &'a DefMap,
// Deep breath. Our representation for poly trait refs contains a single
-> Result<NamedRegionMap, usize> {
let _task = hir_map.dep_graph.in_task(DepNode::ResolveLifetimes);
let krate = hir_map.krate();
- let mut named_region_map = NodeMap();
+ let mut map = NamedRegionMap {
+ defs: NodeMap(),
+ late_bound: NodeMap(),
+ };
sess.track_errors(|| {
krate.visit_all_items(&mut LifetimeContext {
sess: sess,
- named_region_map: &mut named_region_map,
+ hir_map: hir_map,
+ map: &mut map,
scope: &ROOT_SCOPE,
def_map: def_map,
trait_ref_hack: false,
labels_in_fn: vec![],
});
})?;
- Ok(named_region_map)
+ Ok(map)
}
-impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
+impl<'a, 'tcx, 'v> Visitor<'v> for LifetimeContext<'a, 'tcx> {
fn visit_item(&mut self, item: &hir::Item) {
assert!(self.labels_in_fn.is_empty());
// Items always introduce a new root scope
self.with(RootScope, |_, this| {
match item.node {
- hir::ForeignItemFn(_, ref generics) => {
- this.visit_early_late(subst::FnSpace, generics, |this| {
+ hir::ForeignItemFn(ref decl, ref generics) => {
+ this.visit_early_late(item.id,
+ subst::FnSpace,
+ decl,
+ generics,
+ |this| {
intravisit::walk_foreign_item(this, item);
})
}
replace(&mut self.labels_in_fn, saved);
}
- fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
+ fn visit_fn(&mut self, fk: FnKind<'v>, decl: &'v hir::FnDecl,
b: &'v hir::Block, s: Span, fn_id: ast::NodeId) {
match fk {
FnKind::ItemFn(_, generics, _, _, _, _, _) => {
- self.visit_early_late(subst::FnSpace, generics, |this| {
- this.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
+ self.visit_early_late(fn_id, subst::FnSpace, decl, generics, |this| {
+ this.add_scope_and_walk_fn(fk, decl, b, s, fn_id)
})
}
FnKind::Method(_, sig, _, _) => {
- self.visit_early_late(subst::FnSpace, &sig.generics, |this| {
- this.add_scope_and_walk_fn(fk, fd, b, s, fn_id)
- })
+ self.visit_early_late(
+ fn_id,
+ subst::FnSpace,
+ decl,
+ &sig.generics,
+ |this| this.add_scope_and_walk_fn(fk, decl, b, s, fn_id));
}
FnKind::Closure(_) => {
// Closures have their own set of labels, save labels just
// like for foreign items above.
let saved = replace(&mut self.labels_in_fn, vec![]);
- let result = self.add_scope_and_walk_fn(fk, fd, b, s, fn_id);
+ let result = self.add_scope_and_walk_fn(fk, decl, b, s, fn_id);
replace(&mut self.labels_in_fn, saved);
result
}
if let hir::MethodTraitItem(ref sig, None) = trait_item.node {
self.visit_early_late(
- subst::FnSpace, &sig.generics,
+ trait_item.id, subst::FnSpace,
+ &sig.decl, &sig.generics,
|this| intravisit::walk_trait_item(this, trait_item))
} else {
intravisit::walk_trait_item(self, trait_item);
// Adds all labels in `b` to `ctxt.labels_in_fn`, signalling a warning
// if one of the label shadows a lifetime or another label.
-fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) {
-
+fn extract_labels(ctxt: &mut LifetimeContext, b: &hir::Block) {
struct GatherLabels<'a> {
sess: &'a Session,
scope: Scope<'a>,
}
}
-impl<'a> LifetimeContext<'a> {
+impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
fn add_scope_and_walk_fn<'b>(&mut self,
fk: FnKind,
fd: &hir::FnDecl,
fn with<F>(&mut self, wrap_scope: ScopeChain, f: F) where
F: FnOnce(Scope, &mut LifetimeContext),
{
- let LifetimeContext {sess, ref mut named_region_map, ..} = *self;
+ let LifetimeContext {sess, hir_map, ref mut map, ..} = *self;
let mut this = LifetimeContext {
sess: sess,
- named_region_map: *named_region_map,
+ hir_map: hir_map,
+ map: *map,
scope: &wrap_scope,
def_map: self.def_map,
trait_ref_hack: self.trait_ref_hack,
/// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
/// ordering is not important there.
fn visit_early_late<F>(&mut self,
+ fn_id: ast::NodeId,
early_space: subst::ParamSpace,
+ decl: &hir::FnDecl,
generics: &hir::Generics,
walk: F) where
F: FnOnce(&mut LifetimeContext),
{
- let referenced_idents = early_bound_lifetime_names(generics);
-
- debug!("visit_early_late: referenced_idents={:?}",
- referenced_idents);
-
- let (early, late): (Vec<_>, _) = generics.lifetimes.iter().cloned().partition(
- |l| referenced_idents.iter().any(|&i| i == l.lifetime.name));
-
- self.with(EarlyScope(early_space, &early, self.scope), move |old_scope, this| {
+ let fn_def_id = self.hir_map.local_def_id(fn_id);
+ insert_late_bound_lifetimes(self.map,
+ fn_def_id,
+ decl,
+ generics);
+
+ let (late, early): (Vec<_>, _) =
+ generics.lifetimes
+ .iter()
+ .cloned()
+ .partition(|l| self.map.late_bound.contains_key(&l.lifetime.id));
+
+ let this = self;
+ this.with(EarlyScope(early_space, &early, this.scope), move |old_scope, this| {
this.with(LateScope(&late, this.scope), move |_, this| {
this.check_lifetime_defs(old_scope, &generics.lifetimes);
walk(this);
probably a bug in syntax::fold");
}
- debug!("lifetime_ref={:?} id={:?} resolved to {:?}",
- lifetime_to_string(lifetime_ref),
- lifetime_ref.id,
- def);
- self.named_region_map.insert(lifetime_ref.id, def);
+ debug!("lifetime_ref={:?} id={:?} resolved to {:?} span={:?}",
+ lifetime_to_string(lifetime_ref),
+ lifetime_ref.id,
+ def,
+ self.sess.codemap().span_to_string(lifetime_ref.span));
+ self.map.defs.insert(lifetime_ref.id, def);
}
}
///////////////////////////////////////////////////////////////////////////
-pub fn early_bound_lifetimes<'a>(generics: &'a hir::Generics) -> Vec<hir::LifetimeDef> {
- let referenced_idents = early_bound_lifetime_names(generics);
- if referenced_idents.is_empty() {
- return Vec::new();
+/// Detects late-bound lifetimes and inserts them into
+/// `map.late_bound`.
+///
+/// A region declared on a fn is **late-bound** if:
+/// - it is constrained by an argument type;
+/// - it does not appear in a where-clause.
+///
+/// "Constrained" basically means that it appears in any type but
+/// not amongst the inputs to a projection. In other words, `<&'a
+/// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
+fn insert_late_bound_lifetimes(map: &mut NamedRegionMap,
+ fn_def_id: DefId,
+ decl: &hir::FnDecl,
+ generics: &hir::Generics) {
+ debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics);
+
+ let mut constrained_by_input = ConstrainedCollector { regions: FnvHashSet() };
+ for arg in &decl.inputs {
+ constrained_by_input.visit_ty(&arg.ty);
}
- generics.lifetimes.iter()
- .filter(|l| referenced_idents.iter().any(|&i| i == l.lifetime.name))
- .cloned()
- .collect()
-}
-
-/// Given a set of generic declarations, returns a list of names containing all early bound
-/// lifetime names for those generics. (In fact, this list may also contain other names.)
-fn early_bound_lifetime_names(generics: &hir::Generics) -> Vec<ast::Name> {
- // Create two lists, dividing the lifetimes into early/late bound.
- // Initially, all of them are considered late, but we will move
- // things from late into early as we go if we find references to
- // them.
- let mut early_bound = Vec::new();
- let mut late_bound = generics.lifetimes.iter()
- .map(|l| l.lifetime.name)
- .collect();
-
- // Any lifetime that appears in a type bound is early.
- {
- let mut collector =
- FreeLifetimeCollector { early_bound: &mut early_bound,
- late_bound: &mut late_bound };
- for ty_param in generics.ty_params.iter() {
- walk_list!(&mut collector, visit_ty_param_bound, &ty_param.bounds);
+ let mut appears_in_output = AllCollector { regions: FnvHashSet() };
+ intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
+
+ debug!("insert_late_bound_lifetimes: constrained_by_input={:?}",
+ constrained_by_input.regions);
+
+ // Walk the lifetimes that appear in where clauses.
+ //
+ // Subtle point: because we disallow nested bindings, we can just
+ // ignore binders here and scrape up all names we see.
+ let mut appears_in_where_clause = AllCollector { regions: FnvHashSet() };
+ for ty_param in generics.ty_params.iter() {
+ walk_list!(&mut appears_in_where_clause,
+ visit_ty_param_bound,
+ &ty_param.bounds);
+ }
+ walk_list!(&mut appears_in_where_clause,
+ visit_where_predicate,
+ &generics.where_clause.predicates);
+ for lifetime_def in &generics.lifetimes {
+ if !lifetime_def.bounds.is_empty() {
+ // `'a: 'b` means both `'a` and `'b` are referenced
+ appears_in_where_clause.visit_lifetime_def(lifetime_def);
}
- for predicate in &generics.where_clause.predicates {
- match predicate {
- &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bounds,
- ref bounded_ty,
- ..}) => {
- collector.visit_ty(&bounded_ty);
- walk_list!(&mut collector, visit_ty_param_bound, bounds);
+ }
+
+ debug!("insert_late_bound_lifetimes: appears_in_where_clause={:?}",
+ appears_in_where_clause.regions);
+
+ // Late bound regions are those that:
+ // - appear in the inputs
+ // - do not appear in the where-clauses
+ for lifetime in &generics.lifetimes {
+ let name = lifetime.lifetime.name;
+
+ // appears in the where clauses? early-bound.
+ if appears_in_where_clause.regions.contains(&name) { continue; }
+
+ // does not appear in the inputs, but appears in the return
+ // type? eventually this will be early-bound, but for now we
+ // just mark it so we can issue warnings.
+ let constrained_by_input = constrained_by_input.regions.contains(&name);
+ let appears_in_output = appears_in_output.regions.contains(&name);
+ let will_change = !constrained_by_input && appears_in_output;
+ let issue_32330 = if will_change {
+ ty::Issue32330::WillChange {
+ fn_def_id: fn_def_id,
+ region_name: name,
+ }
+ } else {
+ ty::Issue32330::WontChange
+ };
+
+ debug!("insert_late_bound_lifetimes: \
+ lifetime {:?} with id {:?} is late-bound ({:?}",
+ lifetime.lifetime.name, lifetime.lifetime.id, issue_32330);
+
+ let prev = map.late_bound.insert(lifetime.lifetime.id, issue_32330);
+ assert!(prev.is_none(), "visited lifetime {:?} twice", lifetime.lifetime.id);
+ }
+
+ return;
+
+ struct ConstrainedCollector {
+ regions: FnvHashSet<ast::Name>,
+ }
+
+ impl<'v> Visitor<'v> for ConstrainedCollector {
+ fn visit_ty(&mut self, ty: &'v hir::Ty) {
+ match ty.node {
+ hir::TyPath(Some(_), _) => {
+ // ignore lifetimes appearing in associated type
+ // projections, as they are not *constrained*
+ // (defined above)
}
- &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
- ref bounds,
- ..}) => {
- collector.visit_lifetime(lifetime);
- for bound in bounds {
- collector.visit_lifetime(bound);
+ hir::TyPath(None, ref path) => {
+ // consider only the lifetimes on the final
+ // segment; I am not sure it's even currently
+ // valid to have them elsewhere, but even if it
+ // is, those would be potentially inputs to
+ // projections
+ if let Some(last_segment) = path.segments.last() {
+ self.visit_path_segment(path.span, last_segment);
}
}
- &hir::WherePredicate::EqPredicate(_) => bug!("unimplemented")
- }
- }
- }
- // Any lifetime that either has a bound or is referenced by a
- // bound is early.
- for lifetime_def in &generics.lifetimes {
- if !lifetime_def.bounds.is_empty() {
- shuffle(&mut early_bound, &mut late_bound,
- lifetime_def.lifetime.name);
- for bound in &lifetime_def.bounds {
- shuffle(&mut early_bound, &mut late_bound,
- bound.name);
+ _ => {
+ intravisit::walk_ty(self, ty);
+ }
}
}
- }
- return early_bound;
- struct FreeLifetimeCollector<'a> {
- early_bound: &'a mut Vec<ast::Name>,
- late_bound: &'a mut Vec<ast::Name>,
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ self.regions.insert(lifetime_ref.name);
+ }
}
- impl<'a, 'v> Visitor<'v> for FreeLifetimeCollector<'a> {
- fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
- shuffle(self.early_bound, self.late_bound,
- lifetime_ref.name);
- }
+ struct AllCollector {
+ regions: FnvHashSet<ast::Name>,
}
- fn shuffle(early_bound: &mut Vec<ast::Name>,
- late_bound: &mut Vec<ast::Name>,
- name: ast::Name) {
- match late_bound.iter().position(|n| *n == name) {
- Some(index) => {
- late_bound.swap_remove(index);
- early_bound.push(name);
- }
- None => { }
+ impl<'v> Visitor<'v> for AllCollector {
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ self.regions.insert(lifetime_ref.name);
}
}
}
};
let (int_type, uint_type) = match &target.target_pointer_width[..] {
+ "16" => (ast::IntTy::I16, ast::UintTy::U16),
"32" => (ast::IntTy::I32, ast::UintTy::U32),
"64" => (ast::IntTy::I64, ast::UintTy::U64),
w => panic!(sp.fatal(&format!("target specification was invalid: \
// debug output much nicer to read and so on.
let obligation = infcx.resolve_type_vars_if_possible(&obligation);
+ debug!("register_predicate_obligation(obligation={:?})", obligation);
+
infcx.obligations_in_snapshot.set(true);
- if infcx.tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate)
- {
+ if infcx.tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
+ debug!("register_predicate_obligation: duplicate");
return
}
// also includes references to its upvars as part
// of its type, and those types are resolved at
// the same time.
+ //
+ // FIXME(#32286) logic seems false if no upvars
pending_obligation.stalled_on =
trait_ref_type_vars(selcx, data.to_poly_trait_ref());
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Trait Resolution. See the Book for more.
+//! Trait Resolution. See README.md for an overview of how this works.
pub use self::SelectionError::*;
pub use self::FulfillmentErrorCode::*;
pub use self::coherence::overlapping_impls;
pub use self::coherence::OrphanCheckErr;
pub use self::fulfill::{FulfillmentContext, GlobalFulfilledPredicates, RegionObligation};
-pub use self::project::{MismatchedProjectionTypes, ProjectionMode};
+pub use self::project::MismatchedProjectionTypes;
pub use self::project::{normalize, normalize_projection_type, Normalized};
+pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, ProjectionMode};
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
use super::util;
use hir::def_id::DefId;
-use infer::{self, InferOk, TypeOrigin};
+use infer::{InferOk, TypeOrigin};
+use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap};
+use syntax::parse::token;
+use syntax::ast;
use ty::subst::Subst;
use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder};
-use syntax::parse::token;
-use syntax::ast;
use util::common::FN_OUTPUT_NAME;
use std::rc::Rc;
let skol_obligation = obligation.with(skol_predicate);
match project_and_unify_type(selcx, &skol_obligation) {
Ok(result) => {
- match infcx.leak_check(false, &skol_map, snapshot) {
+ let span = obligation.cause.span;
+ match infcx.leak_check(false, span, &skol_map, snapshot) {
Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)),
Err(e) => Err(MismatchedProjectionTypes { err: e }),
}
where T : TypeFoldable<'tcx>
{
+ debug!("normalize_with_depth(depth={}, value={:?})", depth, value);
let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth);
let result = normalizer.fold(value);
-
+ debug!("normalize_with_depth: depth={} result={:?} with {} obligations",
+ depth, result, normalizer.obligations.len());
+ debug!("normalize_with_depth: depth={} obligations={:?}",
+ depth, normalizer.obligations);
Normalized {
value: result,
obligations: normalizer.obligations,
// binder). It would be better to normalize in a
// binding-aware fashion.
- let Normalized { value: ty, obligations } =
+ let Normalized { value: normalized_ty, obligations } =
normalize_projection_type(self.selcx,
data.clone(),
self.cause.clone(),
self.depth);
+ debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?} \
+ with {} add'l obligations",
+ self.depth, ty, normalized_ty, obligations.len());
self.obligations.extend(obligations);
- ty
+ normalized_ty
}
_ => {
depth: usize)
-> Option<NormalizedTy<'tcx>>
{
- debug!("normalize_projection_type(\
+ let infcx = selcx.infcx();
+
+ let projection_ty = infcx.resolve_type_vars_if_possible(&projection_ty);
+
+ debug!("opt_normalize_projection_type(\
projection_ty={:?}, \
depth={})",
projection_ty,
depth);
+ // FIXME(#20304) For now, I am caching here, which is good, but it
+ // means we don't capture the type variables that are created in
+ // the case of ambiguity. Which means we may create a large stream
+ // of such variables. OTOH, if we move the caching up a level, we
+ // would not benefit from caching when proving `T: Trait<U=Foo>`
+ // bounds. It might be the case that we want two distinct caches,
+ // or else another kind of cache entry.
+
+ match infcx.projection_cache.borrow_mut().try_start(projection_ty) {
+ Ok(()) => { }
+ Err(ProjectionCacheEntry::Ambiguous) => {
+ // If we found ambiguity the last time, that generally
+ // means we will continue to do so until some type in the
+ // key changes (and we know it hasn't, because we just
+ // fully resolved it). One exception though is closure
+ // types, which can transition from having a fixed kind to
+ // no kind with no visible change in the key.
+ //
+ // FIXME(#32286) refactor this so that closure type
+ // changes
+ debug!("opt_normalize_projection_type: \
+ found cache entry: ambiguous");
+ if !projection_ty.has_closure_types() {
+ return None;
+ }
+ }
+ Err(ProjectionCacheEntry::InProgress) => {
+ // If while normalized A::B, we are asked to normalize
+ // A::B, just return A::B itself. This is a conservative
+ // answer, in the sense that A::B *is* clearly equivalent
+ // to A::B, though there may be a better value we can
+ // find.
+
+ // Under lazy normalization, this can arise when
+ // bootstrapping. That is, imagine an environment with a
+ // where-clause like `A::B == u32`. Now, if we are asked
+ // to normalize `A::B`, we will want to check the
+ // where-clauses in scope. So we will try to unify `A::B`
+ // with `A::B`, which can trigger a recursive
+ // normalization. In that case, I think we will want this code:
+ //
+ // ```
+ // let ty = selcx.tcx().mk_projection(projection_ty.trait_ref,
+ // projection_ty.item_name);
+ // return Some(NormalizedTy { value: v, obligations: vec![] });
+ // ```
+
+ debug!("opt_normalize_projection_type: \
+ found cache entry: in-progress");
+
+ // But for now, let's classify this as an overflow:
+ let recursion_limit = selcx.tcx().sess.recursion_limit.get();
+ let obligation = Obligation::with_depth(cause.clone(),
+ recursion_limit,
+ projection_ty);
+ selcx.infcx().report_overflow_error(&obligation, false);
+ }
+ Err(ProjectionCacheEntry::NormalizedTy(ty)) => {
+ // If we find the value in the cache, then the obligations
+ // have already been returned from the previous entry (and
+ // should therefore have been honored).
+ debug!("opt_normalize_projection_type: \
+ found normalized ty `{:?}`",
+ ty);
+ return Some(NormalizedTy { value: ty, obligations: vec![] });
+ }
+ Err(ProjectionCacheEntry::Error) => {
+ debug!("opt_normalize_projection_type: \
+ found error");
+ return Some(normalize_to_error(selcx, projection_ty, cause, depth));
+ }
+ }
+
let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone());
match project_type(selcx, &obligation) {
- Ok(ProjectedTy::Progress(projected_ty, mut obligations)) => {
+ Ok(ProjectedTy::Progress(Progress { ty: projected_ty,
+ mut obligations,
+ cacheable })) => {
// if projection succeeded, then what we get out of this
// is also non-normalized (consider: it was derived from
// an impl, where-clause etc) and hence we must
// re-normalize it
- debug!("normalize_projection_type: projected_ty={:?} depth={} obligations={:?}",
+ debug!("opt_normalize_projection_type: \
+ projected_ty={:?} \
+ depth={} \
+ obligations={:?} \
+ cacheable={:?}",
projected_ty,
depth,
- obligations);
+ obligations,
+ cacheable);
- if projected_ty.has_projection_types() {
+ let result = if projected_ty.has_projection_types() {
let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth+1);
let normalized_ty = normalizer.fold(&projected_ty);
- debug!("normalize_projection_type: normalized_ty={:?} depth={}",
+ debug!("opt_normalize_projection_type: \
+ normalized_ty={:?} depth={}",
normalized_ty,
depth);
obligations.extend(normalizer.obligations);
- Some(Normalized {
+ Normalized {
value: normalized_ty,
obligations: obligations,
- })
+ }
} else {
- Some(Normalized {
+ Normalized {
value: projected_ty,
obligations: obligations,
- })
- }
+ }
+ };
+ infcx.projection_cache.borrow_mut()
+ .complete(projection_ty, &result, cacheable);
+ Some(result)
}
Ok(ProjectedTy::NoProgress(projected_ty)) => {
- debug!("normalize_projection_type: projected_ty={:?} no progress",
+ debug!("opt_normalize_projection_type: \
+ projected_ty={:?} no progress",
projected_ty);
- Some(Normalized {
+ let result = Normalized {
value: projected_ty,
obligations: vec!()
- })
+ };
+ infcx.projection_cache.borrow_mut()
+ .complete(projection_ty, &result, true);
+ Some(result)
}
Err(ProjectionTyError::TooManyCandidates) => {
- debug!("normalize_projection_type: too many candidates");
+ debug!("opt_normalize_projection_type: \
+ too many candidates");
+ infcx.projection_cache.borrow_mut()
+ .ambiguous(projection_ty);
None
}
Err(ProjectionTyError::TraitSelectionError(_)) => {
- debug!("normalize_projection_type: ERROR");
+ debug!("opt_normalize_projection_type: ERROR");
// if we got an error processing the `T as Trait` part,
// just return `ty::err` but add the obligation `T :
// Trait`, which when processed will cause the error to be
// reported later
+ infcx.projection_cache.borrow_mut()
+ .error(projection_ty);
Some(normalize_to_error(selcx, projection_ty, cause, depth))
}
}
}
enum ProjectedTy<'tcx> {
- Progress(Ty<'tcx>, Vec<PredicateObligation<'tcx>>),
+ Progress(Progress<'tcx>),
NoProgress(Ty<'tcx>),
}
+struct Progress<'tcx> {
+ ty: Ty<'tcx>,
+ obligations: Vec<PredicateObligation<'tcx>>,
+ cacheable: bool,
+}
+
+impl<'tcx> Progress<'tcx> {
+ fn error<'a,'gcx>(tcx: TyCtxt<'a,'gcx,'tcx>) -> Self {
+ Progress {
+ ty: tcx.types.err,
+ obligations: vec![],
+ cacheable: true
+ }
+ }
+
+ fn with_addl_obligations(mut self,
+ mut obligations: Vec<PredicateObligation<'tcx>>)
+ -> Self {
+ debug!("with_addl_obligations: self.obligations.len={} obligations.len={}",
+ self.obligations.len(), obligations.len());
+
+ debug!("with_addl_obligations: self.obligations={:?} obligations={:?}",
+ self.obligations, obligations);
+
+ self.obligations.append(&mut obligations);
+ self
+ }
+}
+
/// Compute the result of a projection type (if we can).
+///
+/// IMPORTANT:
+/// - `obligation` must be fully normalized
fn project_type<'cx, 'gcx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>)
selcx.infcx().report_overflow_error(&obligation, true);
}
- let obligation_trait_ref =
- selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate.trait_ref);
+ let obligation_trait_ref = &obligation.predicate.trait_ref;
debug!("project: obligation_trait_ref={:?}", obligation_trait_ref);
if obligation_trait_ref.references_error() {
- return Ok(ProjectedTy::Progress(selcx.tcx().types.err, vec!()));
+ return Ok(ProjectedTy::Progress(Progress::error(selcx.tcx())));
}
let mut candidates = ProjectionTyCandidateSet {
match candidates.vec.pop() {
Some(candidate) => {
- let (ty, obligations) = confirm_candidate(selcx,
- obligation,
- &obligation_trait_ref,
- candidate);
- Ok(ProjectedTy::Progress(ty, obligations))
+ Ok(ProjectedTy::Progress(
+ confirm_candidate(selcx,
+ obligation,
+ &obligation_trait_ref,
+ candidate)))
}
None => {
- Ok(ProjectedTy::NoProgress(selcx.tcx().mk_projection(
- obligation.predicate.trait_ref.clone(),
- obligation.predicate.item_name)))
+ Ok(ProjectedTy::NoProgress(
+ selcx.tcx().mk_projection(
+ obligation.predicate.trait_ref.clone(),
+ obligation.predicate.item_name)))
}
}
}
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>,
candidate: ProjectionTyCandidate<'tcx>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
debug!("confirm_candidate(candidate={:?}, obligation={:?})",
candidate,
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate());
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
obligation_trait_ref: &ty::TraitRef<'tcx>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let self_ty = obligation_trait_ref.self_ty();
let object_ty = selcx.infcx().shallow_resolve(self_ty);
span_bug!(
obligation.cause.span,
"confirm_object_candidate called with non-object: {:?}",
- object_ty);
+ object_ty)
}
};
let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty);
debug!("confirm_object_candidate: no env-predicate \
found in object type `{:?}`; ill-formed",
object_ty);
- return (selcx.tcx().types.err, vec!());
+ return Progress::error(selcx.tcx());
}
}
};
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
fn_pointer_vtable: VtableFnPointerData<'tcx, PredicateObligation<'tcx>>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
// FIXME(#32730) drop this assertion once obligations are propagated from inference (fn pointer
// vtable nested obligations ONLY come from unification in inference)
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let closure_typer = selcx.closure_typer();
let closure_type = closure_typer.closure_type(vtable.closure_def_id, vtable.substs);
let Normalized {
value: closure_type,
- mut obligations
+ obligations
} = normalize_with_depth(selcx,
obligation.cause.clone(),
obligation.recursion_depth+1,
&closure_type);
- let (ty, mut cc_obligations) = confirm_callable_candidate(selcx,
- obligation,
- &closure_type.sig,
- util::TupleArgumentsFlag::No);
- obligations.append(&mut cc_obligations);
- (ty, obligations)
+
+ debug!("confirm_closure_candidate: obligation={:?},closure_type={:?},obligations={:?}",
+ obligation,
+ closure_type,
+ obligations);
+
+ confirm_callable_candidate(selcx,
+ obligation,
+ &closure_type.sig,
+ util::TupleArgumentsFlag::No)
+ .with_addl_obligations(obligations)
+ .with_addl_obligations(vtable.nested)
}
fn confirm_callable_candidate<'cx, 'gcx, 'tcx>(
obligation: &ProjectionTyObligation<'tcx>,
fn_sig: &ty::PolyFnSig<'tcx>,
flag: util::TupleArgumentsFlag)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let tcx = selcx.tcx();
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
poly_projection: ty::PolyProjectionPredicate<'tcx>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let infcx = selcx.infcx();
-
- let projection =
- infcx.replace_late_bound_regions_with_fresh_var(
- obligation.cause.span,
- infer::LateBoundRegionConversionTime::HigherRankedType,
- &poly_projection).0;
-
- assert_eq!(projection.projection_ty.item_name,
- obligation.predicate.item_name);
-
let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span);
- let obligations = match infcx.eq_trait_refs(false,
- origin,
- obligation.predicate.trait_ref.clone(),
- projection.projection_ty.trait_ref.clone()) {
- Ok(InferOk { obligations, .. }) => {
- // FIXME(#32730) once obligations are generated in inference, remove this assertion
+ let trait_ref = obligation.predicate.trait_ref;
+ match infcx.match_poly_projection_predicate(origin, poly_projection, trait_ref) {
+ Ok(InferOk { value: ty_match, obligations }) => {
+ // FIXME(#32730) once obligations are generated in inference, drop this assertion
assert!(obligations.is_empty());
- obligations
+ Progress {
+ ty: ty_match.value,
+ obligations: obligations,
+ cacheable: ty_match.unconstrained_regions.is_empty(),
+ }
}
Err(e) => {
span_bug!(
obligation.cause.span,
- "Failed to unify `{:?}` and `{:?}` in projection: {}",
+ "Failed to unify obligation `{:?}` \
+ with poly_projection `{:?}`: {:?}",
obligation,
- projection,
+ poly_projection,
e);
}
- };
-
- (projection.ty, obligations)
+ }
}
fn confirm_impl_candidate<'cx, 'gcx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>)
- -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>)
+ -> Progress<'tcx>
{
let VtableImplData { substs, nested, impl_def_id } = impl_vtable;
tcx.types.err
});
let substs = translate_substs(selcx.infcx(), impl_def_id, substs, node_item.node);
- (ty.subst(tcx, substs), nested)
+ Progress {
+ ty: ty.subst(tcx, substs),
+ obligations: nested,
+ cacheable: true
+ }
}
None => {
span_bug!(obligation.cause.span,
.next()
}
}
+
+// # Cache
+
+pub struct ProjectionCache<'tcx> {
+ map: SnapshotMap<ty::ProjectionTy<'tcx>, ProjectionCacheEntry<'tcx>>,
+}
+
+#[derive(Clone, Debug)]
+enum ProjectionCacheEntry<'tcx> {
+ InProgress,
+ Ambiguous,
+ Error,
+ NormalizedTy(Ty<'tcx>),
+}
+
+// NB: intentionally not Clone
+pub struct ProjectionCacheSnapshot {
+ snapshot: Snapshot
+}
+
+impl<'tcx> ProjectionCache<'tcx> {
+ pub fn new() -> Self {
+ ProjectionCache {
+ map: SnapshotMap::new()
+ }
+ }
+
+ pub fn snapshot(&mut self) -> ProjectionCacheSnapshot {
+ ProjectionCacheSnapshot { snapshot: self.map.snapshot() }
+ }
+
+ pub fn rollback_to(&mut self, snapshot: ProjectionCacheSnapshot) {
+ self.map.rollback_to(snapshot.snapshot);
+ }
+
+ pub fn commit(&mut self, snapshot: ProjectionCacheSnapshot) {
+ self.map.commit(snapshot.snapshot);
+ }
+
+ /// Try to start normalizing `key`; returns an error if
+ /// normalization has already occurred (this error corresponds to a
+ /// cache hit, so it's actually a good thing).
+ fn try_start(&mut self, key: ty::ProjectionTy<'tcx>)
+ -> Result<(), ProjectionCacheEntry<'tcx>> {
+ match self.map.get(&key) {
+ Some(entry) => return Err(entry.clone()),
+ None => { }
+ }
+
+ self.map.insert(key, ProjectionCacheEntry::InProgress);
+ Ok(())
+ }
+
+ /// Indicates that `key` was normalized to `value`. If `cacheable` is false,
+ /// then this result is sadly not cacheable.
+ fn complete(&mut self,
+ key: ty::ProjectionTy<'tcx>,
+ value: &NormalizedTy<'tcx>,
+ cacheable: bool) {
+ let fresh_key = if cacheable {
+ debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}",
+ key, value);
+ self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value))
+ } else {
+ debug!("ProjectionCacheEntry::complete: cannot cache: key={:?}, value={:?}",
+ key, value);
+ !self.map.remove(key)
+ };
+
+ assert!(!fresh_key, "never started projecting `{:?}`", key);
+ }
+
+ /// Indicates that trying to normalize `key` resulted in
+ /// ambiguity. No point in trying it again then until we gain more
+ /// type information (in which case, the "fully resolved" key will
+ /// be different).
+ fn ambiguous(&mut self, key: ty::ProjectionTy<'tcx>) {
+ let fresh = self.map.insert(key, ProjectionCacheEntry::Ambiguous);
+ assert!(!fresh, "never started projecting `{:?}`", key);
+ }
+
+ /// Indicates that trying to normalize `key` resulted in
+ /// error.
+ fn error(&mut self, key: ty::ProjectionTy<'tcx>) {
+ let fresh = self.map.insert(key, ProjectionCacheEntry::Error);
+ assert!(!fresh, "never started projecting `{:?}`", key);
+ }
+}
use std::cell::RefCell;
use std::fmt;
use std::marker::PhantomData;
+use std::mem;
use std::rc::Rc;
use syntax::abi::Abi;
use hir;
skol_trait_predicate.trait_ref.clone(),
&skol_map,
snapshot);
+
+ self.infcx.pop_skolemized(skol_map, snapshot);
+
assert!(result);
true
}
Err(_) => { return false; }
}
- self.infcx.leak_check(false, skol_map, snapshot).is_ok()
+ self.infcx.leak_check(false, obligation.cause.span, skol_map, snapshot).is_ok()
}
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
self.tcx(),
obligation.predicate.0.trait_ref.self_ty(),
|impl_def_id| {
- self.probe(|this, snapshot| {
- if let Ok(_) = this.match_impl(impl_def_id, obligation, snapshot) {
- candidates.vec.push(ImplCandidate(impl_def_id));
+ self.probe(|this, snapshot| { /* [1] */
+ match this.match_impl(impl_def_id, obligation, snapshot) {
+ Ok(skol_map) => {
+ candidates.vec.push(ImplCandidate(impl_def_id));
+
+ // NB: we can safely drop the skol map
+ // since we are in a probe [1]
+ mem::drop(skol_map);
+ }
+ Err(_) => { }
}
});
}
return;
}
- self.probe(|this, snapshot| {
- let (self_ty, _) =
- this.infcx().skolemize_late_bound_regions(&obligation.self_ty(), snapshot);
+ self.probe(|this, _snapshot| {
+ // the code below doesn't care about regions, and the
+ // self-ty here doesn't escape this probe, so just erase
+ // any LBR.
+ let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
let poly_trait_ref = match self_ty.sty {
ty::TyTrait(ref data) => {
match this.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) {
})?;
self.inferred_obligations.extend(obligations);
- if let Err(e) = self.infcx.leak_check(false, &skol_map, snapshot) {
+ if let Err(e) = self.infcx.leak_check(false,
+ obligation.cause.span,
+ &skol_map,
+ snapshot) {
debug!("match_impl: failed leak check due to `{}`", e);
return Err(());
}
fn add_region(&mut self, r: ty::Region) {
match r {
- ty::ReVar(..) |
+ ty::ReVar(..) => {
+ self.add_flags(TypeFlags::HAS_RE_INFER);
+ self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX);
+ }
ty::ReSkolemized(..) => {
self.add_flags(TypeFlags::HAS_RE_INFER);
+ self.add_flags(TypeFlags::HAS_RE_SKOL);
self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX);
}
ty::ReLateBound(debruijn, _) => { self.add_depth(debruijn.depth); }
/// address space on 64-bit ARMv8 and x86_64.
pub fn obj_size_bound(&self) -> u64 {
match self.pointer_size.bits() {
+ 16 => 1 << 15,
32 => 1 << 31,
64 => 1 << 47,
bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
pub fn ptr_sized_integer(&self) -> Integer {
match self.pointer_size.bits() {
+ 16 => I16,
32 => I32,
64 => I64,
bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
pub use self::sty::{ClosureSubsts, TypeAndMut};
pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef};
pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
+pub use self::sty::Issue32330;
pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid};
pub use self::sty::BoundRegion::*;
pub use self::sty::FnOutput::*;
const HAS_SELF = 1 << 1,
const HAS_TY_INFER = 1 << 2,
const HAS_RE_INFER = 1 << 3,
- const HAS_RE_EARLY_BOUND = 1 << 4,
- const HAS_FREE_REGIONS = 1 << 5,
- const HAS_TY_ERR = 1 << 6,
- const HAS_PROJECTION = 1 << 7,
- const HAS_TY_CLOSURE = 1 << 8,
+ const HAS_RE_SKOL = 1 << 4,
+ const HAS_RE_EARLY_BOUND = 1 << 5,
+ const HAS_FREE_REGIONS = 1 << 6,
+ const HAS_TY_ERR = 1 << 7,
+ const HAS_PROJECTION = 1 << 8,
+ const HAS_TY_CLOSURE = 1 << 9,
// true if there are "names" of types and regions and so forth
// that are local to a particular fn
- const HAS_LOCAL_NAMES = 1 << 9,
+ const HAS_LOCAL_NAMES = 1 << 10,
// Present if the type belongs in a local type context.
// Only set for TyInfer other than Fresh.
- const KEEP_IN_LOCAL_TCX = 1 << 10,
+ const KEEP_IN_LOCAL_TCX = 1 << 11,
const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits |
TypeFlags::HAS_SELF.bits |
})
}
pub fn to_bound_region(&self) -> ty::BoundRegion {
- ty::BoundRegion::BrNamed(self.def_id, self.name)
+ // this is an early bound region, so unaffected by #32330
+ ty::BoundRegion::BrNamed(self.def_id, self.name, Issue32330::WontChange)
}
}
/// Creates the dep-node for selecting/evaluating this trait reference.
fn dep_node(&self) -> DepNode<DefId> {
- DepNode::TraitSelect(self.def_id())
+ // Ideally, the dep-node would just have all the input types
+ // in it. But they are limited to including def-ids. So as an
+ // approximation we include the def-ids for all nominal types
+ // found somewhere. This means that we will e.g. conflate the
+ // dep-nodes for `u32: SomeTrait` and `u64: SomeTrait`, but we
+ // would have distinct dep-nodes for `Vec<u32>: SomeTrait`,
+ // `Rc<u32>: SomeTrait`, and `(Vec<u32>, Rc<u32>): SomeTrait`.
+ // Note that it's always sound to conflate dep-nodes, it just
+ // leads to more recompilation.
+ let def_ids: Vec<_> =
+ self.input_types()
+ .iter()
+ .flat_map(|t| t.walk())
+ .filter_map(|t| match t.sty {
+ ty::TyStruct(adt_def, _) |
+ ty::TyEnum(adt_def, _) =>
+ Some(adt_def.did),
+ _ =>
+ None
+ })
+ .collect();
+ DepNode::TraitSelect(self.def_id(), def_ids)
}
pub fn input_types(&self) -> &[Ty<'tcx>] {
/// equality between arbitrary types. Processing an instance of Form
/// #2 eventually yields one of these `ProjectionPredicate`
/// instances to normalize the LHS.
-#[derive(Clone, PartialEq, Eq, Hash)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct ProjectionPredicate<'tcx> {
pub projection_ty: ProjectionTy<'tcx>,
pub ty: Ty<'tcx>,
stack: &mut Vec<AdtDefMaster<'tcx>>)
{
- let dep_node = DepNode::SizedConstraint(self.did);
-
- if self.sized_constraint.get(dep_node).is_some() {
+ let dep_node = || DepNode::SizedConstraint(self.did);
+ if self.sized_constraint.get(dep_node()).is_some() {
return;
}
//
// Consider the type as Sized in the meanwhile to avoid
// further errors.
- self.sized_constraint.fulfill(dep_node, tcx.types.err);
+ self.sized_constraint.fulfill(dep_node(), tcx.types.err);
return;
}
_ => tcx.mk_tup(tys)
};
- match self.sized_constraint.get(dep_node) {
+ match self.sized_constraint.get(dep_node()) {
Some(old_ty) => {
debug!("calculate_sized_constraint: {:?} recurred", self);
assert_eq!(old_ty, tcx.types.err)
}
None => {
debug!("calculate_sized_constraint: {:?} => {:?}", self, ty);
- self.sized_constraint.fulfill(dep_node, ty)
+ self.sized_constraint.fulfill(dep_node(), ty)
}
}
}
for def in generics.regions.as_slice() {
let region =
ReFree(FreeRegion { scope: free_id_outlive,
- bound_region: BrNamed(def.def_id, def.name) });
+ bound_region: def.to_bound_region() });
debug!("push_region_params {:?}", region);
regions.push(def.space, region);
}
def_id: self.def_id,
space: self.space,
index: self.index,
- bounds: self.bounds.fold_with(folder)
+ bounds: self.bounds.fold_with(folder),
}
}
///
/// The def-id is needed to distinguish free regions in
/// the event of shadowing.
- BrNamed(DefId, Name),
+ BrNamed(DefId, Name, Issue32330),
/// Fresh bound identifiers created during GLB computations.
BrFresh(u32),
BrEnv
}
+/// Indicates whether this late-bound region is unconstrained, and hence
+/// will become early-bound once #32330 is fixed.
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+pub enum Issue32330 {
+ WontChange,
+
+ /// this region will change from late-bound to early-bound once
+ /// #32330 is fixed.
+ WillChange {
+ /// the fn in which the region is declared
+ fn_def_id: DefId,
+
+ /// name of region; duplicates the info in BrNamed but convenient
+ /// to have it here, and this code is only temporary
+ region_name: ast::Name,
+ }
+}
+
// NB: If you change this, you'll probably want to change the corresponding
// AST structure in libsyntax/ast.rs as well.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct TyVid {
- pub index: u32
+ pub index: u32,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
SignedInt(ast::IntTy::I32) => ConstInt::I32(0),
SignedInt(ast::IntTy::I64) => ConstInt::I64(0),
SignedInt(ast::IntTy::Is) => match tcx.sess.target.int_type {
+ ast::IntTy::I16 => ConstInt::Isize(ConstIsize::Is16(0)),
ast::IntTy::I32 => ConstInt::Isize(ConstIsize::Is32(0)),
ast::IntTy::I64 => ConstInt::Isize(ConstIsize::Is64(0)),
_ => bug!(),
UnsignedInt(ast::UintTy::U32) => ConstInt::U32(0),
UnsignedInt(ast::UintTy::U64) => ConstInt::U64(0),
UnsignedInt(ast::UintTy::Us) => match tcx.sess.target.uint_type {
+ ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(0)),
ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(0)),
ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(0)),
_ => bug!(),
let new_value = tcx.replace_late_bound_regions(&value, |br| {
let _ = start_or_continue(f, "for<", ", ");
ty::ReLateBound(ty::DebruijnIndex::new(1), match br {
- ty::BrNamed(_, name) => {
+ ty::BrNamed(_, name, _) => {
let _ = write!(f, "{}", name);
br
}
ty::BrEnv => {
let name = token::intern("'r");
let _ = write!(f, "{}", name);
- ty::BrNamed(tcx.map.local_def_id(CRATE_NODE_ID), name)
+ ty::BrNamed(tcx.map.local_def_id(CRATE_NODE_ID),
+ name,
+ ty::Issue32330::WontChange)
}
})
}).0;
}
match *self {
- BrNamed(_, name) => write!(f, "{}", name),
+ BrNamed(_, name, _) => write!(f, "{}", name),
BrAnon(_) | BrFresh(_) | BrEnv => Ok(())
}
}
match *self {
BrAnon(n) => write!(f, "BrAnon({:?})", n),
BrFresh(n) => write!(f, "BrFresh({:?})", n),
- BrNamed(did, name) => {
- write!(f, "BrNamed({:?}:{:?}, {:?})", did.krate, did.index, name)
+ BrNamed(did, name, issue32330) => {
+ write!(f, "BrNamed({:?}:{:?}, {:?}, {:?})",
+ did.krate, did.index, name, issue32330)
}
BrEnv => "BrEnv".fmt(f),
}
// This fails to compile because the match is irrefutable.
while let Irrefutable(x) = irr {
- ...
+ // ...
}
+```
Try this instead:
-```
+```no_run
struct Irrefutable(i32);
let irr = Irrefutable(0);
loop {
let Irrefutable(x) = irr;
- ...
+ // ...
}
```
"##,
(&LitKind::Int(n, Unsuffixed), Some(&ty::TyInt(IntTy::Is))) |
(&LitKind::Int(n, Signed(IntTy::Is)), _) => {
match tcx.sess.target.int_type {
+ IntTy::I16 => if n == I16_OVERFLOW {
+ return Ok(Integral(Isize(Is16(::std::i16::MIN))));
+ },
IntTy::I32 => if n == I32_OVERFLOW {
return Ok(Integral(Isize(Is32(::std::i32::MIN))));
},
(Infer(a @ 0...as_u64::I16MAX), I16(_)) => I16(a as i64 as i16),
(Infer(a @ 0...as_u64::I32MAX), I32(_)) => I32(a as i64 as i32),
(Infer(a @ 0...as_u64::I64MAX), I64(_)) => I64(a as i64),
+ (Infer(a @ 0...as_u64::I16MAX), Isize(Is16(_))) => Isize(Is16(a as i64 as i16)),
(Infer(a @ 0...as_u64::I32MAX), Isize(Is32(_))) => Isize(Is32(a as i64 as i32)),
(Infer(a @ 0...as_u64::I64MAX), Isize(Is64(_))) => Isize(Is64(a as i64)),
(Infer(a @ 0...as_u64::U8MAX), U8(_)) => U8(a as u8),
(Infer(a @ 0...as_u64::U16MAX), U16(_)) => U16(a as u16),
(Infer(a @ 0...as_u64::U32MAX), U32(_)) => U32(a as u32),
(Infer(a), U64(_)) => U64(a),
+ (Infer(a @ 0...as_u64::U16MAX), Usize(Us16(_))) => Usize(Us16(a as u16)),
(Infer(a @ 0...as_u64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)),
(Infer(a), Usize(Us64(_))) => Usize(Us64(a)),
(InferSigned(a @ as_i64::I16MIN...as_i64::I16MAX), I16(_)) => I16(a as i16),
(InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), I32(_)) => I32(a as i32),
(InferSigned(a), I64(_)) => I64(a),
+ (InferSigned(a @ as_i64::I16MIN...as_i64::I16MAX), Isize(Is16(_))) => {
+ Isize(Is16(a as i16))
+ },
(InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), Isize(Is32(_))) => {
Isize(Is32(a as i32))
},
(InferSigned(a @ 0...as_i64::U16MAX), U16(_)) => U16(a as u16),
(InferSigned(a @ 0...as_i64::U32MAX), U32(_)) => U32(a as u32),
(InferSigned(a @ 0...as_i64::I64MAX), U64(_)) => U64(a as u64),
+ (InferSigned(a @ 0...as_i64::U16MAX), Usize(Us16(_))) => Usize(Us16(a as u16)),
(InferSigned(a @ 0...as_i64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)),
(InferSigned(a @ 0...as_i64::I64MAX), Usize(Us64(_))) => Usize(Us64(a as u64)),
(InferSigned(_), _) => return Err(ConstMathErr::NotInRange),
I16(i) if i < 0 => InferSigned(i as i64),
I32(i) if i < 0 => InferSigned(i as i64),
I64(i) if i < 0 => InferSigned(i as i64),
+ Isize(Is16(i)) if i < 0 => InferSigned(i as i64),
Isize(Is32(i)) if i < 0 => InferSigned(i as i64),
Isize(Is64(i)) if i < 0 => InferSigned(i as i64),
InferSigned(i) => Infer(i as u64),
I16(i) => Infer(i as u64),
I32(i) => Infer(i as u64),
I64(i) => Infer(i as u64),
+ Isize(Is16(i)) => Infer(i as u64),
Isize(Is32(i)) => Infer(i as u64),
Isize(Is64(i)) => Infer(i as u64),
U8(i) => Infer(i as u64),
U16(i) => Infer(i as u64),
U32(i) => Infer(i as u64),
U64(i) => Infer(i as u64),
+ Usize(Us16(i)) => Infer(i as u64),
Usize(Us32(i)) => Infer(i as u64),
Usize(Us64(i)) => Infer(i),
}
| Isize(Is64(v))
| I64(v) if v >= 0 && v <= ::std::u32::MAX as i64 => Some(v as u32),
Isize(Is32(v)) if v >= 0 => Some(v as u32),
+ Isize(Is16(v)) if v >= 0 => Some(v as u32),
U8(v) => Some(v as u32),
U16(v) => Some(v as u32),
U32(v) => Some(v),
| Usize(Us64(v))
| U64(v) if v <= ::std::u32::MAX as u64 => Some(v as u32),
Usize(Us32(v)) => Some(v),
+ Usize(Us16(v)) => Some(v as u32),
_ => None,
}
}
I16(v) if v >= 0 => Some(v as u64),
I32(v) if v >= 0 => Some(v as u64),
I64(v) if v >= 0 => Some(v as u64),
+ Isize(Is16(v)) if v >= 0 => Some(v as u64),
Isize(Is32(v)) if v >= 0 => Some(v as u64),
Isize(Is64(v)) if v >= 0 => Some(v as u64),
U8(v) => Some(v as u64),
U16(v) => Some(v as u64),
U32(v) => Some(v as u64),
U64(v) => Some(v),
+ Usize(Us16(v)) => Some(v as u64),
Usize(Us32(v)) => Some(v as u64),
Usize(Us64(v)) => Some(v),
_ => None,
I16(v) => v < 0,
I32(v) => v < 0,
I64(v) => v < 0,
+ Isize(Is16(v)) => v < 0,
Isize(Is32(v)) => v < 0,
Isize(Is64(v)) => v < 0,
InferSigned(v) => v < 0,
(I16(a), I16(b)) => Ok(a.cmp(&b)),
(I32(a), I32(b)) => Ok(a.cmp(&b)),
(I64(a), I64(b)) => Ok(a.cmp(&b)),
+ (Isize(Is16(a)), Isize(Is16(b))) => Ok(a.cmp(&b)),
(Isize(Is32(a)), Isize(Is32(b))) => Ok(a.cmp(&b)),
(Isize(Is64(a)), Isize(Is64(b))) => Ok(a.cmp(&b)),
(U8(a), U8(b)) => Ok(a.cmp(&b)),
(U16(a), U16(b)) => Ok(a.cmp(&b)),
(U32(a), U32(b)) => Ok(a.cmp(&b)),
(U64(a), U64(b)) => Ok(a.cmp(&b)),
+ (Usize(Us16(a)), Usize(Us16(b))) => Ok(a.cmp(&b)),
(Usize(Us32(a)), Usize(Us32(b))) => Ok(a.cmp(&b)),
(Usize(Us64(a)), Usize(Us64(b))) => Ok(a.cmp(&b)),
(Infer(a), Infer(b)) => Ok(a.cmp(&b)),
ConstInt::I16(i) => ConstInt::I16(add1!(i)),
ConstInt::I32(i) => ConstInt::I32(add1!(i)),
ConstInt::I64(i) => ConstInt::I64(add1!(i)),
+ ConstInt::Isize(ConstIsize::Is16(i)) => ConstInt::Isize(ConstIsize::Is16(add1!(i))),
ConstInt::Isize(ConstIsize::Is32(i)) => ConstInt::Isize(ConstIsize::Is32(add1!(i))),
ConstInt::Isize(ConstIsize::Is64(i)) => ConstInt::Isize(ConstIsize::Is64(add1!(i))),
ConstInt::U8(i) => ConstInt::U8(add1!(i)),
ConstInt::U16(i) => ConstInt::U16(add1!(i)),
ConstInt::U32(i) => ConstInt::U32(add1!(i)),
ConstInt::U64(i) => ConstInt::U64(add1!(i)),
+ ConstInt::Usize(ConstUsize::Us16(i)) => ConstInt::Usize(ConstUsize::Us16(add1!(i))),
ConstInt::Usize(ConstUsize::Us32(i)) => ConstInt::Usize(ConstUsize::Us32(add1!(i))),
ConstInt::Usize(ConstUsize::Us64(i)) => ConstInt::Usize(ConstUsize::Us64(add1!(i))),
ConstInt::Infer(_) | ConstInt::InferSigned(_) => panic!("no type info for const int"),
I64(i) => write!(fmt, "{}i64", i),
Isize(ConstIsize::Is64(i)) => write!(fmt, "{}isize", i),
Isize(ConstIsize::Is32(i)) => write!(fmt, "{}isize", i),
+ Isize(ConstIsize::Is16(i)) => write!(fmt, "{}isize", i),
U8(i) => write!(fmt, "{}u8", i),
U16(i) => write!(fmt, "{}u16", i),
U32(i) => write!(fmt, "{}u32", i),
U64(i) => write!(fmt, "{}u64", i),
Usize(ConstUsize::Us64(i)) => write!(fmt, "{}usize", i),
Usize(ConstUsize::Us32(i)) => write!(fmt, "{}usize", i),
+ Usize(ConstUsize::Us16(i)) => write!(fmt, "{}usize", i),
}
}
}
(I16(a), I16(b)) => a.$checked_func(b).map(I16),
(I32(a), I32(b)) => a.$checked_func(b).map(I32),
(I64(a), I64(b)) => a.$checked_func(b).map(I64),
+ (Isize(Is16(a)), Isize(Is16(b))) => a.$checked_func(b).map(Is16).map(Isize),
(Isize(Is32(a)), Isize(Is32(b))) => a.$checked_func(b).map(Is32).map(Isize),
(Isize(Is64(a)), Isize(Is64(b))) => a.$checked_func(b).map(Is64).map(Isize),
(U8(a), U8(b)) => a.$checked_func(b).map(U8),
(U16(a), U16(b)) => a.$checked_func(b).map(U16),
(U32(a), U32(b)) => a.$checked_func(b).map(U32),
(U64(a), U64(b)) => a.$checked_func(b).map(U64),
+ (Usize(Us16(a)), Usize(Us16(b))) => a.$checked_func(b).map(Us16).map(Usize),
(Usize(Us32(a)), Usize(Us32(b))) => a.$checked_func(b).map(Us32).map(Usize),
(Usize(Us64(a)), Usize(Us64(b))) => a.$checked_func(b).map(Us64).map(Usize),
(Infer(a), Infer(b)) => a.$checked_func(b).map(Infer),
(I16(a), I16(b)) => Ok(I16(a.$func(b))),
(I32(a), I32(b)) => Ok(I32(a.$func(b))),
(I64(a), I64(b)) => Ok(I64(a.$func(b))),
+ (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a.$func(b)))),
(Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a.$func(b)))),
(Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a.$func(b)))),
(U8(a), U8(b)) => Ok(U8(a.$func(b))),
(U16(a), U16(b)) => Ok(U16(a.$func(b))),
(U32(a), U32(b)) => Ok(U32(a.$func(b))),
(U64(a), U64(b)) => Ok(U64(a.$func(b))),
+ (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a.$func(b)))),
(Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a.$func(b)))),
(Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a.$func(b)))),
(Infer(a), Infer(b)) => Ok(Infer(a.$func(b))),
(I16(_), I16(0)) => Err(zerr),
(I32(_), I32(0)) => Err(zerr),
(I64(_), I64(0)) => Err(zerr),
+ (Isize(_), Isize(Is16(0))) => Err(zerr),
(Isize(_), Isize(Is32(0))) => Err(zerr),
(Isize(_), Isize(Is64(0))) => Err(zerr),
(InferSigned(_), InferSigned(0)) => Err(zerr),
(U16(_), U16(0)) => Err(zerr),
(U32(_), U32(0)) => Err(zerr),
(U64(_), U64(0)) => Err(zerr),
+ (Usize(_), Usize(Us16(0))) => Err(zerr),
(Usize(_), Usize(Us32(0))) => Err(zerr),
(Usize(_), Usize(Us64(0))) => Err(zerr),
(Infer(_), Infer(0)) => Err(zerr),
(I16(::std::i16::MIN), I16(-1)) => Err(Overflow(op)),
(I32(::std::i32::MIN), I32(-1)) => Err(Overflow(op)),
(I64(::std::i64::MIN), I64(-1)) => Err(Overflow(op)),
+ (Isize(Is16(::std::i16::MIN)), Isize(Is16(-1))) => Err(Overflow(op)),
(Isize(Is32(::std::i32::MIN)), Isize(Is32(-1))) => Err(Overflow(op)),
(Isize(Is64(::std::i64::MIN)), Isize(Is64(-1))) => Err(Overflow(op)),
(InferSigned(::std::i64::MIN), InferSigned(-1)) => Err(Overflow(op)),
(I16(a), I16(b)) => Ok(I16(a/b)),
(I32(a), I32(b)) => Ok(I32(a/b)),
(I64(a), I64(b)) => Ok(I64(a/b)),
+ (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a/b))),
(Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a/b))),
(Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a/b))),
(InferSigned(a), InferSigned(b)) => Ok(InferSigned(a/b)),
(U16(a), U16(b)) => Ok(U16(a/b)),
(U32(a), U32(b)) => Ok(U32(a/b)),
(U64(a), U64(b)) => Ok(U64(a/b)),
+ (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a/b))),
(Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a/b))),
(Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a/b))),
(Infer(a), Infer(b)) => Ok(Infer(a/b)),
(I16(a), I16(b)) => Ok(I16(a%b)),
(I32(a), I32(b)) => Ok(I32(a%b)),
(I64(a), I64(b)) => Ok(I64(a%b)),
+ (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a%b))),
(Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a%b))),
(Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a%b))),
(InferSigned(a), InferSigned(b)) => Ok(InferSigned(a%b)),
(U16(a), U16(b)) => Ok(U16(a%b)),
(U32(a), U32(b)) => Ok(U32(a%b)),
(U64(a), U64(b)) => Ok(U64(a%b)),
+ (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a%b))),
(Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a%b))),
(Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a%b))),
(Infer(a), Infer(b)) => Ok(Infer(a%b)),
I16(a) => Ok(I16(overflowing!(a.overflowing_shl(b), Op::Shl))),
I32(a) => Ok(I32(overflowing!(a.overflowing_shl(b), Op::Shl))),
I64(a) => Ok(I64(overflowing!(a.overflowing_shl(b), Op::Shl))),
+ Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shl(b), Op::Shl)))),
Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shl(b), Op::Shl)))),
Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shl(b), Op::Shl)))),
U8(a) => Ok(U8(overflowing!(a.overflowing_shl(b), Op::Shl))),
U16(a) => Ok(U16(overflowing!(a.overflowing_shl(b), Op::Shl))),
U32(a) => Ok(U32(overflowing!(a.overflowing_shl(b), Op::Shl))),
U64(a) => Ok(U64(overflowing!(a.overflowing_shl(b), Op::Shl))),
+ Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shl(b), Op::Shl)))),
Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shl(b), Op::Shl)))),
Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shl(b), Op::Shl)))),
Infer(a) => Ok(Infer(overflowing!(a.overflowing_shl(b), Op::Shl))),
I16(a) => Ok(I16(overflowing!(a.overflowing_shr(b), Op::Shr))),
I32(a) => Ok(I32(overflowing!(a.overflowing_shr(b), Op::Shr))),
I64(a) => Ok(I64(overflowing!(a.overflowing_shr(b), Op::Shr))),
+ Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shr(b), Op::Shr)))),
Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shr(b), Op::Shr)))),
Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shr(b), Op::Shr)))),
U8(a) => Ok(U8(overflowing!(a.overflowing_shr(b), Op::Shr))),
U16(a) => Ok(U16(overflowing!(a.overflowing_shr(b), Op::Shr))),
U32(a) => Ok(U32(overflowing!(a.overflowing_shr(b), Op::Shr))),
U64(a) => Ok(U64(overflowing!(a.overflowing_shr(b), Op::Shr))),
+ Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shr(b), Op::Shr)))),
Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shr(b), Op::Shr)))),
Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shr(b), Op::Shr)))),
Infer(a) => Ok(Infer(overflowing!(a.overflowing_shr(b), Op::Shr))),
I16(a) => Ok(I16(overflowing!(a.overflowing_neg(), Op::Neg))),
I32(a) => Ok(I32(overflowing!(a.overflowing_neg(), Op::Neg))),
I64(a) => Ok(I64(overflowing!(a.overflowing_neg(), Op::Neg))),
+ Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_neg(), Op::Neg)))),
Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_neg(), Op::Neg)))),
Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_neg(), Op::Neg)))),
U8(0) => Ok(U8(0)),
U16(0) => Ok(U16(0)),
U32(0) => Ok(U32(0)),
U64(0) => Ok(U64(0)),
+ Usize(Us16(0)) => Ok(Usize(Us16(0))),
Usize(Us32(0)) => Ok(Usize(Us32(0))),
Usize(Us64(0)) => Ok(Usize(Us64(0))),
U8(_) => Err(UnsignedNegation),
I16(a) => Ok(I16(!a)),
I32(a) => Ok(I32(!a)),
I64(a) => Ok(I64(!a)),
+ Isize(Is16(a)) => Ok(Isize(Is16(!a))),
Isize(Is32(a)) => Ok(Isize(Is32(!a))),
Isize(Is64(a)) => Ok(Isize(Is64(!a))),
U8(a) => Ok(U8(!a)),
U16(a) => Ok(U16(!a)),
U32(a) => Ok(U32(!a)),
U64(a) => Ok(U64(!a)),
+ Usize(Us16(a)) => Ok(Usize(Us16(!a))),
Usize(Us32(a)) => Ok(Usize(Us32(!a))),
Usize(Us64(a)) => Ok(Usize(Us64(!a))),
Infer(a) => Ok(Infer(!a)),
/// Anything else is an error. This invariant is checked at several locations
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)]
pub enum ConstIsize {
+ Is16(i16),
Is32(i32),
Is64(i64),
}
impl ConstIsize {
pub fn as_i64(self, target_int_ty: ast::IntTy) -> i64 {
match (self, target_int_ty) {
+ (Is16(i), ast::IntTy::I16) => i as i64,
(Is32(i), ast::IntTy::I32) => i as i64,
(Is64(i), ast::IntTy::I64) => i,
_ => panic!("got invalid isize size for target"),
}
pub fn new(i: i64, target_int_ty: ast::IntTy) -> Result<Self, ConstMathErr> {
match target_int_ty {
+ ast::IntTy::I16 if i as i16 as i64 == i => Ok(Is16(i as i16)),
+ ast::IntTy::I16 => Err(LitOutOfRange(ast::IntTy::Is)),
ast::IntTy::I32 if i as i32 as i64 == i => Ok(Is32(i as i32)),
ast::IntTy::I32 => Err(LitOutOfRange(ast::IntTy::Is)),
ast::IntTy::I64 => Ok(Is64(i)),
/// Anything else is an error. This invariant is checked at several locations
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)]
pub enum ConstUsize {
+ Us16(u16),
Us32(u32),
Us64(u64),
}
impl ConstUsize {
pub fn as_u64(self, target_uint_ty: ast::UintTy) -> u64 {
match (self, target_uint_ty) {
+ (Us16(i), ast::UintTy::U16) => i as u64,
(Us32(i), ast::UintTy::U32) => i as u64,
(Us64(i), ast::UintTy::U64) => i,
_ => panic!("got invalid usize size for target"),
}
pub fn new(i: u64, target_uint_ty: ast::UintTy) -> Result<Self, ConstMathErr> {
match target_uint_ty {
+ ast::UintTy::U16 if i as u16 as u64 == i => Ok(Us16(i as u16)),
+ ast::UintTy::U16 => Err(ULitOutOfRange(ast::UintTy::Us)),
ast::UintTy::U32 if i as u32 as u64 == i => Ok(Us32(i as u32)),
ast::UintTy::U32 => Err(ULitOutOfRange(ast::UintTy::Us)),
ast::UintTy::U64 => Ok(Us64(i)),
pub mod graph;
pub mod ivar;
pub mod obligation_forest;
+pub mod snapshot_map;
pub mod snapshot_vec;
pub mod transitive_relation;
pub mod unify;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use fnv::FnvHashMap;
+use std::hash::Hash;
+use std::ops;
+
+#[cfg(test)]
+mod test;
+
+/// A hash map supporting snapshots: mutations made after `snapshot()`
+/// can be undone with `rollback_to()`. Undo information is recorded in
+/// `undo_log`; the log is only appended to while at least one snapshot
+/// is open (see the `is_empty` checks in `insert`/`remove`).
+pub struct SnapshotMap<K, V>
+ where K: Hash + Clone + Eq
+{
+ map: FnvHashMap<K, V>,
+ undo_log: Vec<UndoLog<K, V>>,
+}
+
+/// Token returned by `SnapshotMap::snapshot()`.
+/// `len` is the index of the corresponding `OpenSnapshot` entry in the
+/// undo log (see `snapshot()`, which pushes the marker and stores
+/// `undo_log.len() - 1`).
+pub struct Snapshot {
+ len: usize
+}
+
+/// One entry in the undo log.
+enum UndoLog<K, V> {
+ // Marks the start of a still-open snapshot.
+ OpenSnapshot,
+ // A snapshot that was committed in place; rollback of an enclosing
+ // snapshot skips over it (see `rollback_to`).
+ CommittedSnapshot,
+ // Key was newly inserted; undo by removing it.
+ Inserted(K),
+ // Key's previous value, saved before overwrite/removal; undo by
+ // re-inserting it.
+ Overwrite(K, V),
+}
+
+impl<K, V> SnapshotMap<K, V>
+ where K: Hash + Clone + Eq
+{
+ /// Creates an empty map with no open snapshots.
+ pub fn new() -> Self {
+ SnapshotMap {
+ map: FnvHashMap(),
+ undo_log: vec![]
+ }
+ }
+
+ /// Inserts `key -> value`. Returns `true` if the key was not
+ /// previously present. Undo information is recorded only when a
+ /// snapshot is open (`undo_log` non-empty); outside a snapshot the
+ /// change is unconditional and cheap.
+ pub fn insert(&mut self, key: K, value: V) -> bool {
+ match self.map.insert(key.clone(), value) {
+ None => {
+ if !self.undo_log.is_empty() {
+ self.undo_log.push(UndoLog::Inserted(key));
+ }
+ true
+ }
+ Some(old_value) => {
+ if !self.undo_log.is_empty() {
+ self.undo_log.push(UndoLog::Overwrite(key, old_value));
+ }
+ false
+ }
+ }
+ }
+
+ /// Removes `key`, returning `true` if it was present. The removed
+ /// value is saved as an `Overwrite` entry so rollback restores it.
+ pub fn remove(&mut self, key: K) -> bool {
+ match self.map.remove(&key) {
+ Some(old_value) => {
+ if !self.undo_log.is_empty() {
+ self.undo_log.push(UndoLog::Overwrite(key, old_value));
+ }
+ true
+ }
+ None => {
+ false
+ }
+ }
+ }
+
+ /// Plain lookup; snapshots do not affect reads.
+ pub fn get(&self, key: &K) -> Option<&V> {
+ self.map.get(key)
+ }
+
+ /// Opens a snapshot. Subsequent mutations are logged until the
+ /// snapshot is committed or rolled back. Snapshots nest and must be
+ /// closed in LIFO order (enforced by `assert_open_snapshot`).
+ pub fn snapshot(&mut self) -> Snapshot {
+ self.undo_log.push(UndoLog::OpenSnapshot);
+ let len = self.undo_log.len() - 1;
+ Snapshot { len: len }
+ }
+
+ // Invariant check: `snapshot` must refer to an `OpenSnapshot` entry
+ // still present in the log.
+ fn assert_open_snapshot(&self, snapshot: &Snapshot) {
+ assert!(snapshot.len < self.undo_log.len());
+ assert!(match self.undo_log[snapshot.len] {
+ UndoLog::OpenSnapshot => true,
+ _ => false
+ });
+ }
+
+ /// Commits `snapshot`: its changes become permanent with respect to
+ /// that snapshot, though an enclosing open snapshot can still undo
+ /// them (the entry is merely downgraded to `CommittedSnapshot`).
+ pub fn commit(&mut self, snapshot: Snapshot) {
+ self.assert_open_snapshot(&snapshot);
+ if snapshot.len == 0 {
+ // The root snapshot. No enclosing snapshot exists, so the
+ // whole undo log can be discarded.
+ self.undo_log.truncate(0);
+ } else {
+ self.undo_log[snapshot.len] = UndoLog::CommittedSnapshot;
+ }
+ }
+
+ /// Rolls back all changes made since `snapshot` was opened, popping
+ /// and undoing log entries in reverse order. Panics if a more
+ /// recently opened snapshot is still uncommitted (LIFO discipline).
+ pub fn rollback_to(&mut self, snapshot: Snapshot) {
+ self.assert_open_snapshot(&snapshot);
+ while self.undo_log.len() > snapshot.len + 1 {
+ match self.undo_log.pop().unwrap() {
+ UndoLog::OpenSnapshot => {
+ panic!("cannot rollback an uncommitted snapshot");
+ }
+
+ // Inner snapshot already committed; its individual
+ // Inserted/Overwrite entries are still in the log and get
+ // undone as we continue popping.
+ UndoLog::CommittedSnapshot => { }
+
+ UndoLog::Inserted(key) => {
+ self.map.remove(&key);
+ }
+
+ UndoLog::Overwrite(key, old_value) => {
+ self.map.insert(key, old_value);
+ }
+ }
+ }
+
+ // Pop our own OpenSnapshot marker and re-check the invariants.
+ let v = self.undo_log.pop().unwrap();
+ assert!(match v { UndoLog::OpenSnapshot => true, _ => false });
+ assert!(self.undo_log.len() == snapshot.len);
+ }
+}
+
+/// `map[&key]` sugar. Panics if the key is absent (delegates to the
+/// underlying hash map's `Index` impl).
+impl<'k, K, V> ops::Index<&'k K> for SnapshotMap<K, V>
+ where K: Hash + Clone + Eq
+{
+ type Output = V;
+ fn index(&self, key: &'k K) -> &V {
+ &self.map[key]
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::SnapshotMap;
+
+// Overwrites and inserts after a snapshot are fully undone by
+// rollback_to: 22 reverts to its pre-snapshot value, 44 disappears.
+#[test]
+fn basic() {
+ let mut map = SnapshotMap::new();
+ map.insert(22, "twenty-two");
+ let snapshot = map.snapshot();
+ map.insert(22, "thirty-three");
+ assert_eq!(map[&22], "thirty-three");
+ map.insert(44, "fourty-four");
+ assert_eq!(map[&44], "fourty-four");
+ assert_eq!(map.get(&33), None);
+ map.rollback_to(snapshot);
+ assert_eq!(map[&22], "twenty-two");
+ assert_eq!(map.get(&33), None);
+ assert_eq!(map.get(&44), None);
+}
+
+// Snapshots must close in LIFO order: rolling back snapshot1 while
+// snapshot2 is still open hits the "cannot rollback an uncommitted
+// snapshot" panic in rollback_to.
+#[test]
+#[should_panic]
+fn out_of_order() {
+ let mut map = SnapshotMap::new();
+ map.insert(22, "twenty-two");
+ let snapshot1 = map.snapshot();
+ let _snapshot2 = map.snapshot();
+ map.rollback_to(snapshot1);
+}
+
+// Committing an inner snapshot does not make its changes permanent
+// against the outer one: rolling back snapshot1 still undoes the
+// overwrite committed under snapshot2.
+#[test]
+fn nested_commit_then_rollback() {
+ let mut map = SnapshotMap::new();
+ map.insert(22, "twenty-two");
+ let snapshot1 = map.snapshot();
+ let snapshot2 = map.snapshot();
+ map.insert(22, "thirty-three");
+ map.commit(snapshot2);
+ assert_eq!(map[&22], "thirty-three");
+ map.rollback_to(snapshot1);
+ assert_eq!(map[&22], "twenty-two");
+}
// large chunks of memory alive and we want to free them as soon as
// possible to keep the peak memory usage low
let (outputs, trans) = {
- let (outputs, expanded_crate, id) = {
- let krate = match phase_1_parse_input(sess, cfg, input) {
- Ok(krate) => krate,
- Err(mut parse_error) => {
- parse_error.emit();
- return Err(1);
- }
- };
+ let krate = match phase_1_parse_input(sess, cfg, input) {
+ Ok(krate) => krate,
+ Err(mut parse_error) => {
+ parse_error.emit();
+ return Err(1);
+ }
+ };
+ let krate = {
let mut compile_state = CompileState::state_after_parse(input,
sess,
outdir,
sess,
compile_state,
Ok(()));
- let krate = compile_state.krate.unwrap();
- let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess);
- let id = link::find_crate_name(Some(sess), &krate.attrs, input);
- let expanded_crate = phase_2_configure_and_expand(sess,
- &cstore,
- krate,
- &id,
- addl_plugins)?;
+ compile_state.krate.unwrap()
+ };
- (outputs, expanded_crate, id)
+ let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess);
+ let id = link::find_crate_name(Some(sess), &krate.attrs, input);
+ let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = {
+ let make_glob_map = control.make_glob_map;
+ phase_2_configure_and_expand(sess, &cstore, krate, &id, addl_plugins, make_glob_map)?
};
controller_entry_point!(after_expand,
&id),
Ok(()));
- let expanded_crate = assign_node_ids(sess, expanded_crate);
-
- // Collect defintions for def ids.
- let mut defs = time(sess.time_passes(),
- "collecting defs",
- || hir_map::collect_definitions(&expanded_crate));
-
- time(sess.time_passes(),
- "external crate/lib resolution",
- || read_local_crates(sess, &cstore, &defs, &expanded_crate, &id, &sess.dep_graph));
-
- time(sess.time_passes(),
- "early lint checks",
- || lint::check_ast_crate(sess, &expanded_crate));
-
- time(sess.time_passes(),
- "AST validation",
- || ast_validation::check_crate(sess, &expanded_crate));
-
- let (analysis, resolutions, mut hir_forest) = {
- lower_and_resolve(sess, &id, &mut defs, &expanded_crate,
- &sess.dep_graph, control.make_glob_map)
- };
-
- // Discard MTWT tables that aren't required past lowering to HIR.
- if !keep_mtwt_tables(sess) {
- syntax::ext::mtwt::clear_tables();
- }
-
let arenas = ty::CtxtArenas::new();
// Construct the HIR map
- let hir_forest = &mut hir_forest;
let hir_map = time(sess.time_passes(),
"indexing hir",
- move || hir_map::map_crate(hir_forest, defs));
+ || hir_map::map_crate(&mut hir_forest, defs));
{
let _ignore = hir_map.dep_graph.in_ignore();
// For continuing compilation after a parsed crate has been
// modified
+pub struct ExpansionResult<'a> {
+ pub expanded_crate: ast::Crate,
+ pub defs: hir_map::Definitions,
+ pub analysis: ty::CrateAnalysis<'a>,
+ pub resolutions: Resolutions,
+ pub hir_forest: hir_map::Forest,
+}
+
/// Run the "early phases" of the compiler: initial `cfg` processing,
/// loading compiler plugins (including those from `addl_plugins`),
/// syntax expansion, secondary `cfg` expansion, synthesis of a test
-/// harness if one is to be provided and injection of a dependency on the
-/// standard library and prelude.
+/// harness if one is to be provided, injection of a dependency on the
+/// standard library and prelude, and name resolution.
///
/// Returns `None` if we're aborting after handling -W help.
-pub fn phase_2_configure_and_expand(sess: &Session,
- cstore: &CStore,
- mut krate: ast::Crate,
- crate_name: &str,
- addl_plugins: Option<Vec<String>>)
- -> Result<ast::Crate, usize> {
+pub fn phase_2_configure_and_expand<'a>(sess: &Session,
+ cstore: &CStore,
+ mut krate: ast::Crate,
+ crate_name: &'a str,
+ addl_plugins: Option<Vec<String>>,
+ make_glob_map: resolve::MakeGlobMap)
+ -> Result<ExpansionResult<'a>, usize> {
let time_passes = sess.time_passes();
// strip before anything else because crate metadata may use #[cfg_attr]
"prelude injection",
|| syntax::std_inject::maybe_inject_prelude(&sess.parse_sess, krate));
- time(time_passes,
- "checking that all macro invocations are gone",
- || syntax::ext::expand::check_for_macros(&sess.parse_sess, &krate));
-
time(time_passes,
"checking for inline asm in case the target doesn't support it",
|| no_asm::check_crate(sess, &krate));
println!("Post-expansion node count: {}", count_nodes(&krate));
}
- Ok(krate)
+ krate = assign_node_ids(sess, krate);
+
+ // Collect definitions for def ids.
+ let mut defs =
+ time(sess.time_passes(), "collecting defs", || hir_map::collect_definitions(&krate));
+
+ time(sess.time_passes(),
+ "external crate/lib resolution",
+ || read_local_crates(sess, &cstore, &defs, &krate, crate_name, &sess.dep_graph));
+
+ time(sess.time_passes(),
+ "early lint checks",
+ || lint::check_ast_crate(sess, &krate));
+
+ time(sess.time_passes(),
+ "AST validation",
+ || ast_validation::check_crate(sess, &krate));
+
+ let (analysis, resolutions, hir_forest) =
+ lower_and_resolve(sess, crate_name, &mut defs, &krate, &sess.dep_graph, make_glob_map);
+
+ // Discard MTWT tables that aren't required past lowering to HIR.
+ if !keep_mtwt_tables(sess) {
+ syntax::ext::mtwt::clear_tables();
+ }
+
+ Ok(ExpansionResult {
+ expanded_crate: krate,
+ defs: defs,
+ analysis: analysis,
+ resolutions: resolutions,
+ hir_forest: hir_forest
+ })
}
pub fn assign_node_ids(sess: &Session, krate: ast::Crate) -> ast::Crate {
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::infer::{self, InferOk, InferResult, TypeOrigin};
use rustc_metadata::cstore::CStore;
-use rustc_metadata::creader::read_local_crates;
use rustc::hir::map as hir_map;
use rustc::session::{self, config};
use std::rc::Rc;
input: source_string.to_string(),
};
let krate = driver::phase_1_parse_input(&sess, krate_config, &input).unwrap();
- let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate, "test", None)
- .expect("phase 2 aborted");
-
- let krate = driver::assign_node_ids(&sess, krate);
- let mut defs = hir_map::collect_definitions(&krate);
- read_local_crates(&sess, &cstore, &defs, &krate, "test_crate", &dep_graph);
+ let driver::ExpansionResult { defs, resolutions, mut hir_forest, .. } =
+ driver::phase_2_configure_and_expand(&sess, &cstore, krate, "test", None, MakeGlobMap::No)
+ .expect("phase 2 aborted");
let _ignore = dep_graph.in_ignore();
- let (_, resolutions, mut hir_forest) = {
- driver::lower_and_resolve(&sess, "test-crate", &mut defs, &krate,
- &sess.dep_graph, MakeGlobMap::No)
- };
-
let arenas = ty::CtxtArenas::new();
let ast_map = hir_map::map_crate(&mut hir_forest, defs);
}
};
- for &(_, source_def_id, source_dep_node) in sources {
+ for &(_, source_def_id, ref source_dep_node) in sources {
let dependents = query.transitive_successors(source_dep_node);
for &(target_span, ref target_pass, _, ref target_dep_node) in targets {
if !dependents.contains(&target_dep_node) {
{ // dump a .txt file with just the edges:
let txt_path = format!("{}.txt", path);
let mut file = File::create(&txt_path).unwrap();
- for &(source, target) in &edges {
+ for &(ref source, ref target) in &edges {
write!(file, "{:?} -> {:?}\n", source, target).unwrap();
}
}
}
}
-pub struct GraphvizDepGraph(FnvHashSet<DepNode<DefId>>,
- Vec<(DepNode<DefId>, DepNode<DefId>)>);
+pub struct GraphvizDepGraph<'q>(FnvHashSet<&'q DepNode<DefId>>,
+ Vec<(&'q DepNode<DefId>, &'q DepNode<DefId>)>);
-impl<'a, 'tcx> dot::GraphWalk<'a> for GraphvizDepGraph {
- type Node = DepNode<DefId>;
- type Edge = (DepNode<DefId>, DepNode<DefId>);
- fn nodes(&self) -> dot::Nodes<DepNode<DefId>> {
+impl<'a, 'tcx, 'q> dot::GraphWalk<'a> for GraphvizDepGraph<'q> {
+ type Node = &'q DepNode<DefId>;
+ type Edge = (&'q DepNode<DefId>, &'q DepNode<DefId>);
+ fn nodes(&self) -> dot::Nodes<&'q DepNode<DefId>> {
let nodes: Vec<_> = self.0.iter().cloned().collect();
nodes.into_cow()
}
- fn edges(&self) -> dot::Edges<(DepNode<DefId>, DepNode<DefId>)> {
+ fn edges(&self) -> dot::Edges<(&'q DepNode<DefId>, &'q DepNode<DefId>)> {
self.1[..].into_cow()
}
- fn source(&self, edge: &(DepNode<DefId>, DepNode<DefId>)) -> DepNode<DefId> {
+ fn source(&self, edge: &(&'q DepNode<DefId>, &'q DepNode<DefId>)) -> &'q DepNode<DefId> {
edge.0
}
- fn target(&self, edge: &(DepNode<DefId>, DepNode<DefId>)) -> DepNode<DefId> {
+ fn target(&self, edge: &(&'q DepNode<DefId>, &'q DepNode<DefId>)) -> &'q DepNode<DefId> {
edge.1
}
}
-impl<'a, 'tcx> dot::Labeller<'a> for GraphvizDepGraph {
- type Node = DepNode<DefId>;
- type Edge = (DepNode<DefId>, DepNode<DefId>);
+impl<'a, 'tcx, 'q> dot::Labeller<'a> for GraphvizDepGraph<'q> {
+ type Node = &'q DepNode<DefId>;
+ type Edge = (&'q DepNode<DefId>, &'q DepNode<DefId>);
fn graph_id(&self) -> dot::Id {
dot::Id::new("DependencyGraph").unwrap()
}
- fn node_id(&self, n: &DepNode<DefId>) -> dot::Id {
+ fn node_id(&self, n: &&'q DepNode<DefId>) -> dot::Id {
let s: String =
format!("{:?}", n).chars()
.map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
debug!("n={:?} s={:?}", n, s);
dot::Id::new(s).unwrap()
}
- fn node_label(&self, n: &DepNode<DefId>) -> dot::LabelText {
+ fn node_label(&self, n: &&'q DepNode<DefId>) -> dot::LabelText {
dot::LabelText::label(format!("{:?}", n))
}
}
// Given an optional filter like `"x,y,z"`, returns either `None` (no
// filter) or the set of nodes whose labels contain all of those
// substrings.
-fn node_set(query: &DepGraphQuery<DefId>, filter: &DepNodeFilter)
- -> Option<FnvHashSet<DepNode<DefId>>>
+fn node_set<'q>(query: &'q DepGraphQuery<DefId>, filter: &DepNodeFilter)
+ -> Option<FnvHashSet<&'q DepNode<DefId>>>
{
debug!("node_set(filter={:?})", filter);
Some(query.nodes().into_iter().filter(|n| filter.test(n)).collect())
}
-fn filter_nodes(query: &DepGraphQuery<DefId>,
- sources: &Option<FnvHashSet<DepNode<DefId>>>,
- targets: &Option<FnvHashSet<DepNode<DefId>>>)
- -> FnvHashSet<DepNode<DefId>>
+fn filter_nodes<'q>(query: &'q DepGraphQuery<DefId>,
+ sources: &Option<FnvHashSet<&'q DepNode<DefId>>>,
+ targets: &Option<FnvHashSet<&'q DepNode<DefId>>>)
+ -> FnvHashSet<&'q DepNode<DefId>>
{
if let &Some(ref sources) = sources {
if let &Some(ref targets) = targets {
}
}
-fn walk_nodes(query: &DepGraphQuery<DefId>,
- starts: &FnvHashSet<DepNode<DefId>>,
- direction: Direction)
- -> FnvHashSet<DepNode<DefId>>
+fn walk_nodes<'q>(query: &'q DepGraphQuery<DefId>,
+ starts: &FnvHashSet<&'q DepNode<DefId>>,
+ direction: Direction)
+ -> FnvHashSet<&'q DepNode<DefId>>
{
let mut set = FnvHashSet();
- for start in starts {
+ for &start in starts {
debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
- if set.insert(*start) {
+ if set.insert(start) {
let mut stack = vec![query.indices[start]];
while let Some(index) = stack.pop() {
for (_, edge) in query.graph.adjacent_edges(index, direction) {
let neighbor_index = edge.source_or_target(direction);
let neighbor = query.graph.node_data(neighbor_index);
- if set.insert(*neighbor) {
+ if set.insert(neighbor) {
stack.push(neighbor_index);
}
}
set
}
-fn walk_between(query: &DepGraphQuery<DefId>,
- sources: &FnvHashSet<DepNode<DefId>>,
- targets: &FnvHashSet<DepNode<DefId>>)
- -> FnvHashSet<DepNode<DefId>>
+fn walk_between<'q>(query: &'q DepGraphQuery<DefId>,
+ sources: &FnvHashSet<&'q DepNode<DefId>>,
+ targets: &FnvHashSet<&'q DepNode<DefId>>)
+ -> FnvHashSet<&'q DepNode<DefId>>
{
// This is a bit tricky. We want to include a node only if it is:
// (a) reachable from a source and (b) will reach a target. And we
let mut node_states = vec![State::Undecided; query.graph.len_nodes()];
for &target in targets {
- node_states[query.indices[&target].0] = State::Included;
+ node_states[query.indices[target].0] = State::Included;
}
- for source in sources.iter().map(|n| query.indices[n]) {
+ for source in sources.iter().map(|&n| query.indices[n]) {
recurse(query, &mut node_states, source);
}
return query.nodes()
.into_iter()
- .filter(|n| {
+ .filter(|&n| {
let index = query.indices[n];
node_states[index.0] == State::Included
})
}
}
-fn filter_edges(query: &DepGraphQuery<DefId>,
- nodes: &FnvHashSet<DepNode<DefId>>)
- -> Vec<(DepNode<DefId>, DepNode<DefId>)>
+fn filter_edges<'q>(query: &'q DepGraphQuery<DefId>,
+ nodes: &FnvHashSet<&'q DepNode<DefId>>)
+ -> Vec<(&'q DepNode<DefId>, &'q DepNode<DefId>)>
{
query.edges()
.into_iter()
- .filter(|&(source, target)| nodes.contains(&source) && nodes.contains(&target))
+ .filter(|&(source, target)| nodes.contains(source) && nodes.contains(target))
.collect()
}
self.ids[index.index as usize]
}
- pub fn map(&self, node: DepNode<DefPathIndex>) -> Option<DepNode<DefId>> {
+ pub fn map(&self, node: &DepNode<DefPathIndex>) -> Option<DepNode<DefId>> {
node.map_def(|&index| self.def_id(index))
}
}
.clone()
}
- pub fn map(&mut self, node: DepNode<DefId>) -> DepNode<DefPathIndex> {
+ pub fn map(&mut self, node: &DepNode<DefId>) -> DepNode<DefPathIndex> {
node.map_def(|&def_id| Some(self.add(def_id))).unwrap()
}
}
}
- pub fn hash(&mut self, dep_node: DepNode<DefId>) -> Option<u64> {
- match dep_node {
+ pub fn hash(&mut self, dep_node: &DepNode<DefId>) -> Option<u64> {
+ match *dep_node {
// HIR nodes (which always come from our crate) are an input:
DepNode::Hir(def_id) => {
assert!(def_id.is_local());
let clean_nodes =
serialized_dep_graph.nodes
.iter()
- .filter_map(|&node| retraced.map(node))
+ .filter_map(|node| retraced.map(node))
.filter(|node| !dirty_nodes.contains(node))
- .map(|node| (node, node));
+ .map(|node| (node.clone(), node));
// Add nodes and edges that are not dirty into our main graph.
let dep_graph = tcx.dep_graph.clone();
for (source, target) in clean_edges.into_iter().chain(clean_nodes) {
- let _task = dep_graph.in_task(target);
- dep_graph.read(source);
+ let _task = dep_graph.in_task(target.clone());
+ dep_graph.read(source.clone());
debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source, target);
}
for hash in hashes {
match hash.node.map_def(|&i| retraced.def_id(i)) {
Some(dep_node) => {
- let current_hash = hcx.hash(dep_node).unwrap();
+ let current_hash = hcx.hash(&dep_node).unwrap();
debug!("initial_dirty_nodes: hash of {:?} is {:?}, was {:?}",
dep_node, current_hash, hash.hash);
if current_hash != hash.hash {
// target) if neither node has been removed. If the source has
// been removed, add target to the list of dirty nodes.
let mut clean_edges = Vec::with_capacity(serialized_edges.len());
- for &(serialized_source, serialized_target) in serialized_edges {
+ for &(ref serialized_source, ref serialized_target) in serialized_edges {
if let Some(target) = retraced.map(serialized_target) {
if let Some(source) = retraced.map(serialized_source) {
clean_edges.push((source, target))
query.nodes()
.into_iter()
.filter_map(|dep_node| {
- hcx.hash(dep_node)
+ hcx.hash(&dep_node)
.map(|hash| {
let node = builder.map(dep_node);
SerializedHash { node: node, hash: hash }
let meta_data_def_ids =
query.nodes()
.into_iter()
- .filter_map(|dep_node| match dep_node {
+ .filter_map(|dep_node| match *dep_node {
DepNode::MetaData(def_id) if def_id.is_local() => Some(def_id),
_ => None,
});
let dep_node = DepNode::MetaData(def_id);
let mut state = SipHasher::new();
debug!("save: computing metadata hash for {:?}", dep_node);
- for node in query.transitive_predecessors(dep_node) {
- if let Some(hash) = hcx.hash(node) {
+ for node in query.transitive_predecessors(&dep_node) {
+ if let Some(hash) = hcx.hash(&node) {
debug!("save: predecessor {:?} has hash {}", node, hash);
state.write_u64(hash.to_le());
} else {
}
let mut regions = subst::VecPerParamSpace::empty();
- for rp_doc in reader::tagged_docs(doc, tag_region_param_def) {
- let ident_str_doc = reader::get_doc(rp_doc,
- tag_region_param_def_ident);
- let name = item_name(&token::get_ident_interner(), ident_str_doc);
- let def_id_doc = reader::get_doc(rp_doc,
- tag_region_param_def_def_id);
- let def_id = translated_def_id(cdata, def_id_doc);
-
- let doc = reader::get_doc(rp_doc, tag_region_param_def_space);
- let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as usize);
-
- let doc = reader::get_doc(rp_doc, tag_region_param_def_index);
- let index = reader::doc_as_u64(doc) as u32;
-
- let bounds = reader::tagged_docs(rp_doc, tag_items_data_region).map(|p| {
+ for p in reader::tagged_docs(doc, tag_region_param_def) {
+ let bd =
TyDecoder::with_doc(tcx, cdata.cnum, p,
&mut |did| translate_def_id(cdata, did))
- .parse_region()
- }).collect();
-
- regions.push(space, ty::RegionParameterDef { name: name,
- def_id: def_id,
- space: space,
- index: index,
- bounds: bounds });
+ .parse_region_param_def();
+ regions.push(bd.space, bd);
}
ty::Generics { types: types, regions: regions }
rbml_w.end_tag();
}
-fn encode_region(ecx: &EncodeContext,
- rbml_w: &mut Encoder,
- r: ty::Region) {
- rbml_w.start_tag(tag_items_data_region);
- tyencode::enc_region(rbml_w.writer, &ecx.ty_str_ctxt(), r);
- rbml_w.mark_stable_position();
- rbml_w.end_tag();
-}
-
fn encode_disr_val(_: &EncodeContext,
rbml_w: &mut Encoder,
disr_val: ty::Disr) {
// Region parameters
for param in &generics.regions {
rbml_w.start_tag(tag_region_param_def);
-
- rbml_w.start_tag(tag_region_param_def_ident);
- encode_name(rbml_w, param.name);
- rbml_w.end_tag();
-
- rbml_w.wr_tagged_u64(tag_region_param_def_def_id,
- def_to_u64(param.def_id));
-
- rbml_w.wr_tagged_u64(tag_region_param_def_space,
- param.space.to_uint() as u64);
-
- rbml_w.wr_tagged_u64(tag_region_param_def_index,
- param.index as u64);
-
- for &bound_region in ¶m.bounds {
- encode_region(ecx, rbml_w, bound_region);
- }
-
+ tyencode::enc_region_param_def(rbml_w.writer, &ecx.ty_str_ctxt(), param);
+ rbml_w.mark_stable_position();
rbml_w.end_tag();
}
}
'[' => {
let def = self.parse_def();
- let name = token::intern(&self.parse_str(']'));
- ty::BrNamed(def, name)
+ let name = token::intern(&self.parse_str('|'));
+ let issue32330 = match self.next() {
+ 'n' => {
+ assert_eq!(self.next(), ']');
+ ty::Issue32330::WontChange
+ }
+ 'y' => {
+ ty::Issue32330::WillChange {
+ fn_def_id: self.parse_def(),
+ region_name: token::intern(&self.parse_str(']')),
+ }
+ }
+ c => panic!("expected n or y not {}", c)
+ };
+ ty::BrNamed(def, name, issue32330)
}
'f' => {
let id = self.parse_u32();
def_id: def_id,
space: space,
index: index,
- bounds: bounds
+ bounds: bounds,
}
}
ty::BrAnon(idx) => {
write!(w, "a{}|", idx);
}
- ty::BrNamed(d, name) => {
- write!(w, "[{}|{}]",
- (cx.ds)(cx.tcx, d),
- name);
+ ty::BrNamed(d, name, issue32330) => {
+ write!(w, "[{}|{}|",
+ (cx.ds)(cx.tcx, d),
+ name);
+
+ match issue32330 {
+ ty::Issue32330::WontChange =>
+ write!(w, "n]"),
+ ty::Issue32330::WillChange { fn_def_id, region_name } =>
+ write!(w, "y{}|{}]", (cx.ds)(cx.tcx, fn_def_id), region_name),
+ };
}
ty::BrFresh(id) => {
write!(w, "f{}|", id);
}
TestKind::SwitchInt { switch_ty, ref options, indices: _ } => {
- let otherwise = self.cfg.start_new_block();
- let targets: Vec<_> =
- options.iter()
- .map(|_| self.cfg.start_new_block())
- .chain(Some(otherwise))
- .collect();
+ let (targets, term) = match switch_ty.sty {
+ // If we're matching on boolean we can
+ // use the If TerminatorKind instead
+ ty::TyBool => {
+ assert!(options.len() > 0 && options.len() <= 2);
+
+ let (true_bb, else_bb) =
+ (self.cfg.start_new_block(),
+ self.cfg.start_new_block());
+
+ let targets = match &options[0] {
+ &ConstVal::Bool(true) => vec![true_bb, else_bb],
+ &ConstVal::Bool(false) => vec![else_bb, true_bb],
+ v => span_bug!(test.span, "expected boolean value but got {:?}", v)
+ };
+
+ (targets,
+ TerminatorKind::If {
+ cond: Operand::Consume(lvalue.clone()),
+ targets: (true_bb, else_bb)
+ })
+
+ }
+ _ => {
+ // The switch may be inexhaustive so we
+ // add a catch all block
+ let otherwise = self.cfg.start_new_block();
+ let targets: Vec<_> =
+ options.iter()
+ .map(|_| self.cfg.start_new_block())
+ .chain(Some(otherwise))
+ .collect();
+
+ (targets.clone(),
+ TerminatorKind::SwitchInt {
+ discr: lvalue.clone(),
+ switch_ty: switch_ty,
+ values: options.clone(),
+ targets: targets
+ })
+ }
+ };
+
self.cfg.terminate(block,
scope_id,
test.span,
- TerminatorKind::SwitchInt {
- discr: lvalue.clone(),
- switch_ty: switch_ty,
- values: options.clone(),
- targets: targets.clone(),
- });
+ term);
targets
}
message: &'a str,
context: UnresolvedNameContext<'a>,
is_static_method: bool,
- is_field: bool
+ is_field: bool,
+ def: Def,
},
/// error E0426: use of undeclared label
UndeclaredLabel(&'a str),
argument is missing?")
}
ResolutionError::UnresolvedName { path, message: msg, context, is_static_method,
- is_field } => {
+ is_field, def } => {
let mut err = struct_span_err!(resolver.session,
span,
E0425,
UnresolvedNameContext::PathIsMod(parent) => {
err.help(&match parent.map(|parent| &parent.node) {
Some(&ExprKind::Field(_, ident)) => {
- format!("To reference an item from the `{module}` module, \
+ format!("to reference an item from the `{module}` module, \
use `{module}::{ident}`",
module = path,
ident = ident.node)
}
Some(&ExprKind::MethodCall(ident, _, _)) => {
- format!("To call a function from the `{module}` module, \
+ format!("to call a function from the `{module}` module, \
use `{module}::{ident}(..)`",
module = path,
ident = ident.node)
}
_ => {
- format!("Module `{module}` cannot be used as an expression",
+ format!("{def} `{module}` cannot be used as an expression",
+ def = def.kind_name(),
module = path)
}
});
message: "",
context: UnresolvedNameContext::Other,
is_static_method: false,
- is_field: false
+ is_field: false,
+ def: Def::Err,
};
resolve_error(self, path.span, error);
Def::Err
};
let mut context = UnresolvedNameContext::Other;
+ let mut def = Def::Err;
if !msg.is_empty() {
msg = format!(". Did you mean {}?", msg);
} else {
match self.resolve_module_path(&name_path[..],
UseLexicalScope,
expr.span) {
- Success(_) => {
+ Success(e) => {
+ if let Some(def_type) = e.def {
+ def = def_type;
+ }
context = UnresolvedNameContext::PathIsMod(parent);
},
_ => {},
context: context,
is_static_method: method_scope && is_static,
is_field: is_field,
+ def: def,
});
}
}
let trait_ref = tcx.erase_regions(&trait_ref);
scx.trait_cache().memoize(trait_ref, || {
- debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
+ debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
trait_ref, trait_ref.def_id());
// Do the initial selection for the obligation. This yields the
}
};
+ debug!("fulfill_obligation: selection={:?}", selection);
+
// Currently, we use a fulfillment context to completely resolve
// all nested obligations. This is because they can inform the
// inference of the impl's type parameters.
let mut fulfill_cx = traits::FulfillmentContext::new();
let vtable = selection.map(|predicate| {
+ debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
fulfill_cx.register_predicate_obligation(&infcx, predicate);
});
let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
let new_sty = match ty.sty {
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
+ "16" => TyInt(I16),
"32" => TyInt(I32),
"64" => TyInt(I64),
_ => bug!("unsupported target word size")
},
TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
+ "16" => TyUint(U16),
"32" => TyUint(U32),
"64" => TyUint(U64),
_ => bug!("unsupported target word size")
TyInt(t) => Some((match t {
ast::IntTy::Is => {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+ "16" => 16,
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for isize: {}", tws),
TyUint(t) => Some((match t {
ast::UintTy::Us => {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+ "16" => 16,
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for usize: {}", tws),
pub fn int(ccx: &CrateContext) -> Type {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+ "16" => Type::i16(ccx),
"32" => Type::i32(ccx),
"64" => Type::i64(ccx),
tws => bug!("Unsupported target word size for int: {}", tws),
pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime)
-> ty::Region {
- let r = match tcx.named_region_map.get(&lifetime.id) {
+ let r = match tcx.named_region_map.defs.get(&lifetime.id) {
None => {
// should have been recorded by the `resolve_lifetime` pass
span_bug!(lifetime.span, "unresolved lifetime");
}
Some(&rl::DefLateBoundRegion(debruijn, id)) => {
- ty::ReLateBound(debruijn, ty::BrNamed(tcx.map.local_def_id(id), lifetime.name))
+ // If this region is declared on a function, it will have
+ // an entry in `late_bound`, but if it comes from
+ // `for<'a>` in some type or something, it won't
+ // necessarily have one. In that case though, we won't be
+ // changed from late to early bound, so we can just
+ // substitute false.
+ let issue_32330 = tcx.named_region_map
+ .late_bound
+ .get(&id)
+ .cloned()
+ .unwrap_or(ty::Issue32330::WontChange);
+ ty::ReLateBound(debruijn, ty::BrNamed(tcx.map.local_def_id(id),
+ lifetime.name,
+ issue_32330))
}
Some(&rl::DefEarlyBoundRegion(space, index, _)) => {
}
Some(&rl::DefFreeRegion(scope, id)) => {
+ // As in DefLateBoundRegion above, could be missing for some late-bound
+ // regions, but also for early-bound regions.
+ let issue_32330 = tcx.named_region_map
+ .late_bound
+ .get(&id)
+ .cloned()
+ .unwrap_or(ty::Issue32330::WontChange);
ty::ReFree(ty::FreeRegion {
scope: scope.to_code_extent(&tcx.region_maps),
bound_region: ty::BrNamed(tcx.map.local_def_id(id),
- lifetime.name)
- })
+ lifetime.name,
+ issue_32330)
+ })
+
+ // (*) -- not late-bound, won't change
}
};
debug!("late_bound_in_ty = {:?}", late_bound_in_ty);
for br in late_bound_in_ty.difference(&late_bound_in_trait_ref) {
let br_name = match *br {
- ty::BrNamed(_, name) => name,
+ ty::BrNamed(_, name, _) => name,
_ => {
span_bug!(
binding.span,
let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output);
for br in late_bound_in_ret.difference(&late_bound_in_args) {
let br_name = match *br {
- ty::BrNamed(_, name) => name,
+ ty::BrNamed(_, name, _) => name,
_ => {
span_bug!(
bf.decl.output.span(),
use hir::def_id::DefId;
use constrained_type_params as ctp;
use middle::lang_items::SizedTraitLangItem;
-use middle::resolve_lifetime;
use middle::const_val::ConstVal;
use rustc_const_eval::EvalHint::UncheckedExprHint;
use rustc_const_eval::{eval_const_expr_partial, ConstEvalErr};
/// the lifetimes that are declared. For fns or methods, we have to
/// screen out those that do not appear in any where-clauses etc using
/// `resolve_lifetime::early_bound_lifetimes`.
-fn early_bound_lifetimes_from_generics(space: ParamSpace,
- ast_generics: &hir::Generics)
- -> Vec<hir::LifetimeDef>
+fn early_bound_lifetimes_from_generics<'a, 'tcx, 'hir>(
+ ccx: &CrateCtxt<'a, 'tcx>,
+ ast_generics: &'hir hir::Generics)
+ -> Vec<&'hir hir::LifetimeDef>
{
- match space {
- SelfSpace | TypeSpace => ast_generics.lifetimes.to_vec(),
- FnSpace => resolve_lifetime::early_bound_lifetimes(ast_generics),
- }
+ ast_generics
+ .lifetimes
+ .iter()
+ .filter(|l| !ccx.tcx.named_region_map.late_bound.contains_key(&l.lifetime.id))
+ .collect()
}
fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
// Collect the region predicates that were declared inline as
// well. In the case of parameters declared on a fn or method, we
// have to be careful to only iterate over early-bound regions.
- let early_lifetimes = early_bound_lifetimes_from_generics(space, ast_generics);
+ let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics);
for (index, param) in early_lifetimes.iter().enumerate() {
let index = index as u32;
let region =
let tcx = ccx.tcx;
let mut result = base_generics.clone();
- let early_lifetimes = early_bound_lifetimes_from_generics(space, ast_generics);
+ let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics);
for (i, l) in early_lifetimes.iter().enumerate() {
let bounds = l.bounds.iter()
.map(|l| ast_region_to_region(tcx, l))
// the trait
fn foo(&self) {}
}
+```
"##,
E0186: r##"
fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId {
let tcx = self.terms_cx.tcx;
assert!(is_lifetime(&tcx.map, param_id));
- match tcx.named_region_map.get(¶m_id) {
+ match tcx.named_region_map.defs.get(¶m_id) {
Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id))
=> lifetime_decl_id,
Some(_) => bug!("should not encounter non early-bound cases"),
fn clean(&self, cx: &DocContext) -> Option<Lifetime> {
match *self {
ty::ReStatic => Some(Lifetime::statik()),
- ty::ReLateBound(_, ty::BrNamed(_, name)) => Some(Lifetime(name.to_string())),
+ ty::ReLateBound(_, ty::BrNamed(_, name, _)) => Some(Lifetime(name.to_string())),
ty::ReEarlyBound(ref data) => Some(Lifetime(data.name.clean(cx))),
ty::ReLateBound(..) |
use rustc_trans::back::link;
use rustc_resolve as resolve;
use rustc_metadata::cstore::CStore;
-use rustc_metadata::creader::read_local_crates;
use syntax::{ast, codemap, errors};
use syntax::errors::emitter::ColorConfig;
let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
- let name = link::find_crate_name(Some(&sess), &krate.attrs,
- &input);
+ let name = link::find_crate_name(Some(&sess), &krate.attrs, &input);
- let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate, &name, None)
- .expect("phase_2_configure_and_expand aborted in rustdoc!");
-
- let krate = driver::assign_node_ids(&sess, krate);
-
- let mut defs = hir_map::collect_definitions(&krate);
- read_local_crates(&sess, &cstore, &defs, &krate, &name, &dep_graph);
-
- // Lower ast -> hir and resolve.
- let (analysis, resolutions, mut hir_forest) = {
- driver::lower_and_resolve(&sess, &name, &mut defs, &krate,
- &sess.dep_graph, resolve::MakeGlobMap::No)
+ let driver::ExpansionResult { defs, analysis, resolutions, mut hir_forest, .. } = {
+ let make_glob_map = resolve::MakeGlobMap::No;
+ driver::phase_2_configure_and_expand(&sess, &cstore, krate, &name, None, make_glob_map)
+ .expect("phase_2_configure_and_expand aborted in rustdoc!")
};
let arenas = ty::CtxtArenas::new();
use rustc::session::{self, config};
use rustc::session::config::{get_unstable_features_setting, OutputType};
use rustc::session::search_paths::{SearchPaths, PathKind};
-use rustc::hir::lowering::{lower_crate, DummyResolver};
use rustc_back::dynamic_lib::DynamicLibrary;
use rustc_back::tempdir::TempDir;
use rustc_driver::{driver, Compilation};
+use rustc_driver::driver::phase_2_configure_and_expand;
use rustc_metadata::cstore::CStore;
+use rustc_resolve::MakeGlobMap;
use syntax::codemap::CodeMap;
use syntax::errors;
use syntax::errors::emitter::ColorConfig;
let mut cfg = config::build_configuration(&sess);
cfg.extend(config::parse_cfgspecs(cfgs.clone()));
let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
- let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate,
- "rustdoc-test", None)
- .expect("phase_2_configure_and_expand aborted in rustdoc!");
- let krate = driver::assign_node_ids(&sess, krate);
- let dep_graph = DepGraph::new(false);
- let defs = hir_map::collect_definitions(&krate);
-
- let mut dummy_resolver = DummyResolver;
- let krate = lower_crate(&sess, &krate, &sess, &mut dummy_resolver);
-
- let opts = scrape_test_config(&krate);
+ let driver::ExpansionResult { defs, mut hir_forest, .. } = {
+ let make_glob_map = MakeGlobMap::No;
+ phase_2_configure_and_expand(&sess, &cstore, krate, "rustdoc-test", None, make_glob_map)
+ .expect("phase_2_configure_and_expand aborted in rustdoc!")
+ };
+ let dep_graph = DepGraph::new(false);
+ let opts = scrape_test_config(hir_forest.krate());
let _ignore = dep_graph.in_ignore();
- let mut forest = hir_map::Forest::new(krate, &dep_graph);
- let map = hir_map::map_crate(&mut forest, defs);
+ let map = hir_map::map_crate(&mut hir_forest, defs);
let ctx = core::DocContext {
map: &map,
/// it was opened with. Files also implement `Seek` to alter the logical cursor
/// that the file contains internally.
///
+/// Files are automatically closed when they go out of scope.
+///
/// # Examples
///
/// ```no_run
/// if dir.is_dir() {
/// for entry in try!(fs::read_dir(dir)) {
/// let entry = try!(entry);
-/// if try!(entry.file_type()).is_dir() {
-/// try!(visit_dirs(&entry.path(), cb));
+/// let path = entry.path();
+/// if path.is_dir() {
+/// try!(visit_dirs(&path, cb));
/// } else {
/// cb(&entry);
/// }
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub use panicking::{take_hook, set_hook, PanicInfo, Location};
-///
-#[rustc_deprecated(since = "1.9.0", reason = "renamed to set_hook")]
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
-pub fn set_handler<F>(handler: F) where F: Fn(&PanicInfo) + 'static + Sync + Send {
- set_hook(Box::new(handler))
-}
-
-///
-#[rustc_deprecated(since = "1.9.0", reason = "renamed to take_hook")]
-#[unstable(feature = "panic_handler", reason = "awaiting feedback", issue = "30449")]
-pub fn take_handler() -> Box<Fn(&PanicInfo) + 'static + Sync + Send> {
- take_hook()
-}
-
-/// A marker trait which represents "unwind safe" types in Rust.
+/// A marker trait which represents "panic safe" types in Rust.
///
/// This trait is implemented by default for many types and behaves similarly in
/// terms of inference of implementation to the `Send` and `Sync` traits. The
across an unwind boundary"]
pub trait UnwindSafe {}
-/// Deprecated, renamed to UnwindSafe
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
-#[rustc_deprecated(reason = "renamed to `UnwindSafe`", since = "1.9.0")]
-pub trait RecoverSafe {}
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
-#[allow(deprecated)]
-impl<T: UnwindSafe> RecoverSafe for T {}
-
/// A marker trait representing types where a shared reference is considered
/// unwind safe.
///
pub T
);
-/// Deprecated, renamed to `AssertUnwindSafe`
-#[unstable(feature = "recover", issue = "27719")]
-#[rustc_deprecated(reason = "renamed to `AssertUnwindSafe`", since = "1.9.0")]
-pub struct AssertRecoverSafe<T>(pub T);
-
// Implementations of the `UnwindSafe` trait:
//
// * By default everything is unwind safe
impl<T: ?Sized> UnwindSafe for RwLock<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T> UnwindSafe for AssertUnwindSafe<T> {}
-#[unstable(feature = "recover", issue = "27719")]
-#[allow(deprecated)]
-impl<T> UnwindSafe for AssertRecoverSafe<T> {}
// not covered via the Shared impl above b/c the inner contents use
// Cell/AtomicUsize, but the usage here is unwind safe so we can lift the
impl<T: ?Sized> !RefUnwindSafe for UnsafeCell<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T> RefUnwindSafe for AssertUnwindSafe<T> {}
-#[unstable(feature = "recover", issue = "27719")]
-#[allow(deprecated)]
-impl<T> RefUnwindSafe for AssertRecoverSafe<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T> Deref for AssertUnwindSafe<T> {
}
}
-#[allow(deprecated)]
-impl<T> AssertRecoverSafe<T> {
- /// Creates a new `AssertRecoverSafe` wrapper around the provided type.
- #[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
- #[rustc_deprecated(reason = "the type's field is now public, construct it directly",
- since = "1.9.0")]
- pub fn new(t: T) -> AssertRecoverSafe<T> {
- AssertRecoverSafe(t)
- }
-
- /// Consumes the `AssertRecoverSafe`, returning the wrapped value.
- #[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
- #[rustc_deprecated(reason = "the type's field is now public, access it directly",
- since = "1.9.0")]
- pub fn into_inner(self) -> T {
- self.0
- }
-}
-
-#[unstable(feature = "recover", issue = "27719")]
-#[allow(deprecated)]
-impl<T> Deref for AssertRecoverSafe<T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- &self.0
- }
-}
-
-#[unstable(feature = "recover", issue = "27719")]
-#[allow(deprecated)]
-impl<T> DerefMut for AssertRecoverSafe<T> {
- fn deref_mut(&mut self) -> &mut T {
- &mut self.0
- }
-}
-
-#[unstable(feature = "recover", issue = "27719")]
-#[allow(deprecated)]
-impl<R, F: FnOnce() -> R> FnOnce<()> for AssertRecoverSafe<F> {
- type Output = R;
-
- extern "rust-call" fn call_once(self, _args: ()) -> R {
- (self.0)()
- }
-}
-
/// Invokes a closure, capturing the cause of an unwinding panic if one occurs.
///
/// This function will return `Ok` with the closure's result if the closure
}
}
-/// Deprecated, renamed to `catch_unwind`
-#[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
-#[rustc_deprecated(reason = "renamed to `catch_unwind`", since = "1.9.0")]
-pub fn recover<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
- catch_unwind(f)
-}
-
/// Triggers a panic without invoking the panic hook.
///
/// This is designed to be used in conjunction with `catch_unwind` to, for
pub fn resume_unwind(payload: Box<Any + Send>) -> ! {
panicking::rust_panic(payload)
}
-
-/// Deprecated, use resume_unwind instead
-#[unstable(feature = "panic_propagate", reason = "awaiting feedback", issue = "30752")]
-#[rustc_deprecated(reason = "renamed to `resume_unwind`", since = "1.9.0")]
-pub fn propagate(payload: Box<Any + Send>) -> ! {
- resume_unwind(payload)
-}
/// ```
///
/// [`assert!`]: macro.assert!.html
-/// [`if` conditionals]: ../book/if.html
+/// [`if`]: ../book/if.html
/// [`BitAnd`]: ops/trait.BitAnd.html
/// [`BitOr`]: ops/trait.BitOr.html
/// [`Not`]: ops/trait.Not.html
/// .arg("-c")
/// .arg("echo hello")
/// .output()
-/// .expect("failed to execute proces");
+/// .expect("failed to execute process");
///
/// let hello = output.stdout;
/// ```
// created. Note that this isn't necessary in general for new threads,
// but we just do this to name the main thread and to give it correct
// info about the stack bounds.
- let thread: Thread = NewThread::new(Some("<main>".to_owned()));
+ let thread: Thread = NewThread::new(Some("main".to_owned()));
thread_info::set(main_guard, thread);
// Store our args if necessary in a squirreled away location
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
- Mutex {
+ let mut m = Mutex {
inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
+ };
+ unsafe {
+ m.inner.lock.init();
}
+ m
}
}
}
})
}
-
- /// Transform this guard to hold a sub-borrow of the original data.
- ///
- /// Applies the supplied closure to the data, returning a new lock
- /// guard referencing the borrow returned by the closure.
- ///
- /// # Examples
- ///
- /// ```rust
- /// # #![feature(guard_map)]
- /// # use std::sync::{RwLockReadGuard, RwLock};
- /// let x = RwLock::new(vec![1, 2]);
- ///
- /// let y = RwLockReadGuard::map(x.read().unwrap(), |v| &v[0]);
- /// assert_eq!(*y, 1);
- /// ```
- #[unstable(feature = "guard_map",
- reason = "recently added, needs RFC for stabilization,
- questionable interaction with Condvar",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0",
- reason = "unsound on Mutex because of Condvar and \
- RwLock may also with to be used with Condvar \
- one day")]
- pub fn map<U: ?Sized, F>(this: Self, cb: F) -> RwLockReadGuard<'rwlock, U>
- where F: FnOnce(&T) -> &U
- {
- let new = RwLockReadGuard {
- __lock: this.__lock,
- __data: cb(this.__data)
- };
-
- mem::forget(this);
-
- new
- }
}
#[allow(deprecated)]
}
})
}
-
- /// Transform this guard to hold a sub-borrow of the original data.
- ///
- /// Applies the supplied closure to the data, returning a new lock
- /// guard referencing the borrow returned by the closure.
- ///
- /// # Examples
- ///
- /// ```rust
- /// # #![feature(guard_map)]
- /// # use std::sync::{RwLockWriteGuard, RwLock};
- /// let x = RwLock::new(vec![1, 2]);
- ///
- /// {
- /// let mut y = RwLockWriteGuard::map(x.write().unwrap(), |v| &mut v[0]);
- /// assert_eq!(*y, 1);
- ///
- /// *y = 10;
- /// }
- ///
- /// assert_eq!(&**x.read().unwrap(), &[10, 2]);
- /// ```
- #[unstable(feature = "guard_map",
- reason = "recently added, needs RFC for stabilization,
- questionable interaction with Condvar",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0",
- reason = "unsound on Mutex because of Condvar and \
- RwLock may also with to be used with Condvar \
- one day")]
- pub fn map<U: ?Sized, F>(this: Self, cb: F) -> RwLockWriteGuard<'rwlock, U>
- where F: FnOnce(&mut T) -> &mut U
- {
- // Compute the new data while still owning the original lock
- // in order to correctly poison if the callback panics.
- let data = unsafe { ptr::read(&this.__data) };
- let new_data = cb(data);
-
- // We don't want to unlock the lock by running the destructor of the
- // original lock, so just read the fields we need and forget it.
- let (poison, lock) = unsafe {
- (ptr::read(&this.__poison), ptr::read(&this.__lock))
- };
- mem::forget(this);
-
- RwLockWriteGuard {
- __lock: lock,
- __data: new_data,
- __poison: poison
- }
- }
}
#[stable(feature = "rust1", since = "1.0.0")]
use rand::{self, Rng};
use sync::mpsc::channel;
use thread;
- use sync::{Arc, RwLock, StaticRwLock, TryLockError, RwLockWriteGuard};
+ use sync::{Arc, RwLock, StaticRwLock, TryLockError};
use sync::atomic::{AtomicUsize, Ordering};
#[derive(Eq, PartialEq, Debug)]
Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
}
}
-
- #[test]
- fn test_rwlock_write_map_poison() {
- let rwlock = Arc::new(RwLock::new(vec![1, 2]));
- let rwlock2 = rwlock.clone();
-
- thread::spawn(move || {
- let _ = RwLockWriteGuard::map::<usize, _>(rwlock2.write().unwrap(), |_| panic!());
- }).join().unwrap_err();
-
- match rwlock.read() {
- Ok(r) => panic!("Read lock on poisioned RwLock is Ok: {:?}", &*r),
- Err(_) => {}
- };
- }
}
-
/// first used with any of the functions below.
pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
+ /// Prepare the mutex for use.
+ ///
+ /// This should be called once the mutex is at a stable memory address.
+ #[inline]
+ pub unsafe fn init(&mut self) { self.0.init() }
+
/// Locks the mutex blocking the current thread until it is available.
///
/// Behavior is undefined if the mutex has been moved between this and any
// implemented as an illegal instruction.
#[cfg(unix)]
unsafe fn abort_internal() -> ! {
- use libc;
- libc::abort()
+ ::libc::abort()
}
-// On Windows, we want to avoid using libc, and there isn't a direct
-// equivalent of libc::abort. The __failfast intrinsic may be a reasonable
-// substitute, but desireability of using it over the abort instrinsic is
-// debateable; see https://github.com/rust-lang/rust/pull/31519 for details.
-#[cfg(not(unix))]
+// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
+// and later, this will terminate the process immediately without running any
+// in-process exception handlers. In earlier versions of Windows, this
+// sequence of instructions will be treated as an access violation,
+// terminating the process but without necessarily bypassing all exception
+// handlers.
+//
+// https://msdn.microsoft.com/en-us/library/dn774154.aspx
+#[cfg(all(windows, any(target_arch = "x86", target_arch = "x86_64")))]
unsafe fn abort_internal() -> ! {
- use intrinsics;
- intrinsics::abort()
+ asm!("int $$0x29" :: "{ecx}"(7) ::: volatile); // 7 is FAST_FAIL_FATAL_APP_EXIT
+ ::intrinsics::unreachable();
}
+// Other platforms should use the appropriate platform-specific mechanism for
+// aborting the process. If no platform-specific mechanism is available,
+// ::intrinsics::abort() may be used instead. The above implementations cover
+// all targets currently supported by libstd.
+
pub fn abort(args: fmt::Arguments) -> ! {
dumb_print(format_args!("fatal runtime error: {}\n", args));
unsafe { abort_internal(); }
#[stable(feature = "rust1", since = "1.0.0")]
fn gid(&mut self, id: u32) -> &mut process::Command;
- /// Create a new session (cf. `setsid(2)`) for the child process. This means
- /// that the child is the leader of a new process group. The parent process
- /// remains the child reaper of the new process.
- ///
- /// This is not enough to create a daemon process. The *init* process should
- /// be the child reaper of a daemon. This can be achieved if the parent
- /// process exit. Moreover, a daemon should not have a controlling terminal.
- /// To achieve this, a session leader (the child) must spawn another process
- /// (the daemon) in the same session.
- #[unstable(feature = "process_session_leader", reason = "recently added",
- issue = "27811")]
- #[rustc_deprecated(reason = "use `before_exec` instead",
- since = "1.9.0")]
- fn session_leader(&mut self, on: bool) -> &mut process::Command;
-
/// Schedules a closure to be run just before the `exec` function is
/// invoked.
///
self
}
- fn session_leader(&mut self, on: bool) -> &mut process::Command {
- self.as_inner_mut().session_leader(on);
- self
- }
-
fn before_exec<F>(&mut self, f: F) -> &mut process::Command
where F: FnMut() -> io::Result<()> + Send + Sync + 'static
{
Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
#[inline]
+ pub unsafe fn init(&mut self) {
+ // Issue #33770
+ //
+ // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+ // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+ // try to re-lock it from the same thread when you already hold a lock.
+ //
+ // In practice, glibc takes advantage of this undefined behavior to
+ // implement hardware lock elision, which uses hardware transactional
+ // memory to avoid acquiring the lock. While a transaction is in
+ // progress, the lock appears to be unlocked. This isn't a problem for
+ // other threads since the transactional memory will abort if a conflict
+ // is detected, however no abort is generated if re-locking from the
+ // same thread.
+ //
+ // Since locking the same mutex twice will result in two aliasing &mut
+ // references, we instead create the mutex with type
+ // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
+ // re-lock it from the same thread, thus avoiding undefined behavior.
+ //
+ // We can't do anything for StaticMutex, but that type is deprecated
+ // anyways.
+ let mut attr: libc::pthread_mutexattr_t = mem::uninitialized();
+ let r = libc::pthread_mutexattr_init(&mut attr);
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL);
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_mutex_init(self.inner.get(), &attr);
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_mutexattr_destroy(&mut attr);
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
pub unsafe fn lock(&self) {
let r = libc::pthread_mutex_lock(self.inner.get());
debug_assert_eq!(r, 0);
cwd: Option<CString>,
uid: Option<uid_t>,
gid: Option<gid_t>,
- session_leader: bool,
saw_nul: bool,
closures: Vec<Box<FnMut() -> io::Result<()> + Send + Sync>>,
stdin: Option<Stdio>,
cwd: None,
uid: None,
gid: None,
- session_leader: false,
saw_nul: saw_nul,
closures: Vec::new(),
stdin: None,
pub fn gid(&mut self, id: gid_t) {
self.gid = Some(id);
}
- pub fn session_leader(&mut self, session_leader: bool) {
- self.session_leader = session_leader;
- }
pub fn before_exec(&mut self,
f: Box<FnMut() -> io::Result<()> + Send + Sync>) {
t!(cvt(libc::setuid(u as uid_t)));
}
- if self.session_leader {
- // Don't check the error of setsid because it fails if we're the
- // process leader already. We just forked so it shouldn't return
- // error, but ignore it anyway.
- let _ = libc::setsid();
- }
if let Some(ref cwd) = self.cwd {
t!(cvt(libc::chdir(cwd.as_ptr())));
}
use libc;
use cell::UnsafeCell;
+use sync::atomic::{AtomicUsize, Ordering};
-pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
+pub struct RWLock {
+ inner: UnsafeCell<libc::pthread_rwlock_t>,
+ write_locked: UnsafeCell<bool>,
+ num_readers: AtomicUsize,
+}
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
pub const fn new() -> RWLock {
- RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
+ RWLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ }
}
#[inline]
pub unsafe fn read(&self) {
//
// We roughly maintain the deadlocking behavior by panicking to ensure
// that this lock acquisition does not succeed.
- if r == libc::EDEADLK {
+ //
+ // We also check whether this lock is already write locked. This
+ // is only possible if it was write locked by the current thread and
+ // the implementation allows recursive locking. The POSIX standard
+ // doesn't require recursively locking a rwlock to deadlock, but we can't
+ // allow that because it could lead to aliasing issues.
+ if r == libc::EDEADLK || *self.write_locked.get() {
+ if r == 0 {
+ self.raw_unlock();
+ }
panic!("rwlock read lock would result in deadlock");
} else {
debug_assert_eq!(r, 0);
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
}
}
#[inline]
pub unsafe fn try_read(&self) -> bool {
- libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
+ let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() {
+ self.raw_unlock();
+ false
+ } else {
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ true
+ }
+ } else {
+ false
+ }
}
#[inline]
pub unsafe fn write(&self) {
let r = libc::pthread_rwlock_wrlock(self.inner.get());
- // see comments above for why we check for EDEADLK
- if r == libc::EDEADLK {
+ // See comments above for why we check for EDEADLK and write_locked. We
+ // also need to check that num_readers is 0.
+ if r == libc::EDEADLK || *self.write_locked.get() ||
+ self.num_readers.load(Ordering::Relaxed) != 0 {
+ if r == 0 {
+ self.raw_unlock();
+ }
panic!("rwlock write lock would result in deadlock");
} else {
debug_assert_eq!(r, 0);
}
+ *self.write_locked.get() = true;
}
#[inline]
pub unsafe fn try_write(&self) -> bool {
- libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
+ let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ self.raw_unlock();
+ false
+ } else {
+ *self.write_locked.get() = true;
+ true
+ }
+ } else {
+ false
+ }
}
#[inline]
- pub unsafe fn read_unlock(&self) {
+ unsafe fn raw_unlock(&self) {
let r = libc::pthread_rwlock_unlock(self.inner.get());
debug_assert_eq!(r, 0);
}
#[inline]
- pub unsafe fn write_unlock(&self) { self.read_unlock() }
+ pub unsafe fn read_unlock(&self) {
+ debug_assert!(!*self.write_locked.get());
+ self.num_readers.fetch_sub(1, Ordering::Relaxed);
+ self.raw_unlock();
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*self.write_locked.get());
+ *self.write_locked.get() = false;
+ self.raw_unlock();
+ }
#[inline]
pub unsafe fn destroy(&self) {
let r = libc::pthread_rwlock_destroy(self.inner.get());
held: UnsafeCell::new(false),
}
}
+ #[inline]
+ pub unsafe fn init(&mut self) {}
pub unsafe fn lock(&self) {
match kind() {
Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
////////////////////////////////////////////////////////////////////////////////
#[macro_use] mod local;
-#[macro_use] mod scoped_tls;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::local::{LocalKey, LocalKeyState};
-#[unstable(feature = "scoped_tls",
- reason = "scoped TLS has yet to have wide enough use to fully \
- consider stabilizing its interface",
- issue = "27715")]
-#[allow(deprecated)]
-pub use self::scoped_tls::ScopedKey;
-
#[unstable(feature = "libstd_thread_internals", issue = "0")]
#[cfg(target_thread_local)]
#[doc(hidden)] pub use self::local::elf::Key as __ElfLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "0")]
#[doc(hidden)] pub use self::local::os::Key as __OsLocalKeyInner;
-#[unstable(feature = "libstd_thread_internals", issue = "0")]
-#[doc(hidden)] pub use self::scoped_tls::__KeyInner as __ScopedKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder
+++ /dev/null
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Scoped thread-local storage
-//!
-//! This module provides the ability to generate *scoped* thread-local
-//! variables. In this sense, scoped indicates that thread local storage
-//! actually stores a reference to a value, and this reference is only placed
-//! in storage for a scoped amount of time.
-//!
-//! There are no restrictions on what types can be placed into a scoped
-//! variable, but all scoped variables are initialized to the equivalent of
-//! null. Scoped thread local storage is useful when a value is present for a known
-//! period of time and it is not required to relinquish ownership of the
-//! contents.
-//!
-//! # Examples
-//!
-//! ```
-//! #![feature(scoped_tls)]
-//!
-//! scoped_thread_local!(static FOO: u32);
-//!
-//! // Initially each scoped slot is empty.
-//! assert!(!FOO.is_set());
-//!
-//! // When inserting a value, the value is only in place for the duration
-//! // of the closure specified.
-//! FOO.set(&1, || {
-//! FOO.with(|slot| {
-//! assert_eq!(*slot, 1);
-//! });
-//! });
-//! ```
-
-#![unstable(feature = "thread_local_internals", issue = "0")]
-#![allow(deprecated)]
-
-#[doc(hidden)]
-pub use self::imp::KeyInner as __KeyInner;
-
-/// Type representing a thread local storage key corresponding to a reference
-/// to the type parameter `T`.
-///
-/// Keys are statically allocated and can contain a reference to an instance of
-/// type `T` scoped to a particular lifetime. Keys provides two methods, `set`
-/// and `with`, both of which currently use closures to control the scope of
-/// their contents.
-#[unstable(feature = "scoped_tls",
- reason = "scoped TLS has yet to have wide enough use to fully consider \
- stabilizing its interface",
- issue = "27715")]
-#[rustc_deprecated(since = "1.8.0",
- reason = "hasn't proven itself over LocalKey")]
-pub struct ScopedKey<T:'static> { inner: fn() -> &'static imp::KeyInner<T> }
-
-/// Declare a new scoped thread local storage key.
-///
-/// This macro declares a `static` item on which methods are used to get and
-/// set the value stored within.
-///
-/// See [ScopedKey documentation](thread/struct.ScopedKey.html) for more
-/// information.
-#[unstable(feature = "thread_local_internals",
- reason = "should not be necessary",
- issue = "0")]
-#[rustc_deprecated(since = "1.8.0",
- reason = "hasn't proven itself over LocalKey")]
-#[macro_export]
-#[allow_internal_unstable]
-macro_rules! scoped_thread_local {
- (static $name:ident: $t:ty) => (
- static $name: $crate::thread::ScopedKey<$t> =
- __scoped_thread_local_inner!($t);
- );
- (pub static $name:ident: $t:ty) => (
- pub static $name: $crate::thread::ScopedKey<$t> =
- __scoped_thread_local_inner!($t);
- );
-}
-
-#[doc(hidden)]
-#[unstable(feature = "thread_local_internals",
- reason = "should not be necessary",
- issue = "0")]
-#[rustc_deprecated(since = "1.8.0",
- reason = "hasn't proven itself over LocalKey")]
-#[macro_export]
-#[allow_internal_unstable]
-macro_rules! __scoped_thread_local_inner {
- ($t:ty) => {{
- #[cfg_attr(target_thread_local, thread_local)]
- static _KEY: $crate::thread::__ScopedKeyInner<$t> =
- $crate::thread::__ScopedKeyInner::new();
- fn _getit() -> &'static $crate::thread::__ScopedKeyInner<$t> { &_KEY }
- $crate::thread::ScopedKey::new(_getit)
- }}
-}
-
-#[unstable(feature = "scoped_tls",
- reason = "scoped TLS has yet to have wide enough use to fully consider \
- stabilizing its interface",
- issue = "27715")]
-#[rustc_deprecated(since = "1.8.0",
- reason = "hasn't proven itself over LocalKey")]
-impl<T> ScopedKey<T> {
- #[doc(hidden)]
- pub const fn new(inner: fn() -> &'static imp::KeyInner<T>) -> ScopedKey<T> {
- ScopedKey { inner: inner }
- }
-
- /// Inserts a value into this scoped thread local storage slot for a
- /// duration of a closure.
- ///
- /// While `cb` is running, the value `t` will be returned by `get` unless
- /// this function is called recursively inside of `cb`.
- ///
- /// Upon return, this function will restore the previous value, if any
- /// was available.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(scoped_tls)]
- ///
- /// scoped_thread_local!(static FOO: u32);
- ///
- /// FOO.set(&100, || {
- /// let val = FOO.with(|v| *v);
- /// assert_eq!(val, 100);
- ///
- /// // set can be called recursively
- /// FOO.set(&101, || {
- /// // ...
- /// });
- ///
- /// // Recursive calls restore the previous value.
- /// let val = FOO.with(|v| *v);
- /// assert_eq!(val, 100);
- /// });
- /// ```
- pub fn set<R, F>(&'static self, t: &T, cb: F) -> R where
- F: FnOnce() -> R,
- {
- struct Reset<'a, T: 'a> {
- key: &'a imp::KeyInner<T>,
- val: *mut T,
- }
- impl<'a, T> Drop for Reset<'a, T> {
- fn drop(&mut self) {
- unsafe { self.key.set(self.val) }
- }
- }
-
- let inner = (self.inner)();
- let prev = unsafe {
- let prev = inner.get();
- inner.set(t as *const T as *mut T);
- prev
- };
-
- let _reset = Reset { key: inner, val: prev };
- cb()
- }
-
- /// Gets a value out of this scoped variable.
- ///
- /// This function takes a closure which receives the value of this
- /// variable.
- ///
- /// # Panics
- ///
- /// This function will panic if `set` has not previously been called.
- ///
- /// # Examples
- ///
- /// ```no_run
- /// #![feature(scoped_tls)]
- ///
- /// scoped_thread_local!(static FOO: u32);
- ///
- /// FOO.with(|slot| {
- /// // work with `slot`
- /// });
- /// ```
- pub fn with<R, F>(&'static self, cb: F) -> R where
- F: FnOnce(&T) -> R
- {
- unsafe {
- let ptr = (self.inner)().get();
- assert!(!ptr.is_null(), "cannot access a scoped thread local \
- variable without calling `set` first");
- cb(&*ptr)
- }
- }
-
- /// Test whether this TLS key has been `set` for the current thread.
- pub fn is_set(&'static self) -> bool {
- unsafe { !(self.inner)().get().is_null() }
- }
-}
-
-#[cfg(target_thread_local)]
-#[doc(hidden)]
-mod imp {
- use cell::Cell;
- use ptr;
-
- pub struct KeyInner<T> { inner: Cell<*mut T> }
-
- unsafe impl<T> ::marker::Sync for KeyInner<T> { }
-
- impl<T> KeyInner<T> {
- pub const fn new() -> KeyInner<T> {
- KeyInner { inner: Cell::new(ptr::null_mut()) }
- }
- pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
- pub unsafe fn get(&self) -> *mut T { self.inner.get() }
- }
-}
-
-#[cfg(not(target_thread_local))]
-#[doc(hidden)]
-mod imp {
- use cell::Cell;
- use marker;
- use sys_common::thread_local::StaticKey as OsStaticKey;
-
- pub struct KeyInner<T> {
- pub inner: OsStaticKey,
- pub marker: marker::PhantomData<Cell<T>>,
- }
-
- unsafe impl<T> marker::Sync for KeyInner<T> { }
-
- impl<T> KeyInner<T> {
- pub const fn new() -> KeyInner<T> {
- KeyInner {
- inner: OsStaticKey::new(None),
- marker: marker::PhantomData
- }
- }
- pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
- pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
- }
-}
-
-
-#[cfg(test)]
-mod tests {
- use cell::Cell;
-
- scoped_thread_local!(static FOO: u32);
-
- #[test]
- fn smoke() {
- scoped_thread_local!(static BAR: u32);
-
- assert!(!BAR.is_set());
- BAR.set(&1, || {
- assert!(BAR.is_set());
- BAR.with(|slot| {
- assert_eq!(*slot, 1);
- });
- });
- assert!(!BAR.is_set());
- }
-
- #[test]
- fn cell_allowed() {
- scoped_thread_local!(static BAR: Cell<u32>);
-
- BAR.set(&Cell::new(1), || {
- BAR.with(|slot| {
- assert_eq!(slot.get(), 1);
- });
- });
- }
-
- #[test]
- fn scope_item_allowed() {
- assert!(!FOO.is_set());
- FOO.set(&1, || {
- assert!(FOO.is_set());
- FOO.with(|slot| {
- assert_eq!(*slot, 1);
- });
- });
- assert!(!FOO.is_set());
- }
-}
self.0.sub_instant(&earlier.0)
}
- /// Deprecated, renamed to `duration_since`
- #[unstable(feature = "time2_old", issue = "29866")]
- #[rustc_deprecated(since = "1.8.0", reason = "renamed to duration_since")]
- pub fn duration_from_earlier(&self, earlier: Instant) -> Duration {
- self.0.sub_instant(&earlier.0)
- }
-
/// Returns the amount of time elapsed since this instant was created.
///
/// # Panics
self.0.sub_time(&earlier.0).map_err(SystemTimeError)
}
- /// Deprecated, renamed to `duration_since`
- #[unstable(feature = "time2_old", issue = "29866")]
- #[rustc_deprecated(since = "1.8.0", reason = "renamed to duration_since")]
- pub fn duration_from_earlier(&self, earlier: SystemTime)
- -> Result<Duration, SystemTimeError> {
- self.0.sub_time(&earlier.0).map_err(SystemTimeError)
- }
-
/// Returns the amount of time elapsed since this system time was created.
///
/// This function may fail as the underlying system clock is susceptible to
use fold;
use fold::*;
use util::move_map::MoveMap;
-use parse;
use parse::token::{fresh_mark, fresh_name, intern, keywords};
use ptr::P;
use util::small_vector::SmallVector;
noop_fold_tts(tts, &mut Marker{mark:m, expn_id: None})
}
-/// Check that there are no macro invocations left in the AST:
-pub fn check_for_macros(sess: &parse::ParseSess, krate: &ast::Crate) {
- visit::walk_crate(&mut MacroExterminator{sess:sess}, krate);
-}
-
-/// A visitor that ensures that no macro invocations remain in an AST.
-struct MacroExterminator<'a>{
- sess: &'a parse::ParseSess
-}
-
-impl<'a, 'v> Visitor<'v> for MacroExterminator<'a> {
- fn visit_mac(&mut self, mac: &ast::Mac) {
- self.sess.span_diagnostic.span_bug(mac.span,
- "macro exterminator: expected AST \
- with no macro invocations");
- }
-}
-
#[cfg(test)]
mod tests {
base::MacEager::expr(expanded)
}
-pub fn expand_quote_item<'cx>(cx: &mut ExtCtxt,
+pub fn expand_quote_item<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[TokenTree])
-> Box<base::MacResult+'cx> {
use syntax::parse::token::str_to_ident;
use syntax::ptr::P;
-pub fn expand_syntax_ext<'cx>(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
+pub fn expand_syntax_ext<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<base::MacResult+'cx> {
if !cx.ecfg.enable_concat_idents() {
feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
-Subproject commit 4638c60dedfa581fd5fa7c6420d8f32274c9ca0b
+Subproject commit a3736a0a1907cbc8bf619708738815a5fd789c80
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Irrefutable(i32);
+
+fn main() {
+ let irr = Irrefutable(0);
+ if let Irrefutable(x) = irr { //~ ERROR E0162
+ println!("{}", x);
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Foo { B(u32) }
+
+fn bar(foo: Foo) -> u32 {
+ match foo {
+ Foo::B { i } => i, //~ ERROR E0163
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Foo { B { i: u32 } }
+
+fn bar(foo: Foo) -> u32 {
+ match foo {
+ Foo::B(i) => i, //~ ERROR E0164
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Irrefutable(i32);
+
+fn main() {
+ let irr = Irrefutable(0);
+ while let Irrefutable(x) = irr { //~ ERROR E0165
+ // ...
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo() -> ! { return; } //~ ERROR E0166
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo(bar: i32+std::fmt::Display) {} //~ ERROR E0172
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {}
+
+struct Bar<'a> {
+ w: &'a Foo + Copy, //~ ERROR E0178
+ x: &'a Foo + 'a, //~ ERROR E0178
+ y: &'a mut Foo + 'a, //~ ERROR E0178
+ z: fn() -> Foo + 'a, //~ ERROR E0178
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[derive(Copy)] //~ ERROR E0184
+struct Foo;
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo(&self) {} //~ ERROR E0185
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {
+ fn foo(&self);
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo() {} //~ ERROR E0186
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait; //~ ERROR E0191
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(optin_builtin_traits)]
+
+trait Trait {
+ type Bar;
+}
+
+struct Foo;
+
+impl !Trait for Foo { } //~ ERROR E0192
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo<T> {
+ fn do_something(&self) -> T;
+ fn do_something_else<T: Clone>(&self, bar: T); //~ ERROR E0194
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Trait {
+ fn bar<'a,'b:'a>(x: &'a str, y: &'b str);
+}
+
+struct Foo;
+
+impl Trait for Foo {
+ fn bar<'a,'b>(x: &'a str, y: &'b str) { //~ ERROR E0195
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo;
+
+unsafe impl Foo { } //~ ERROR E0197
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(optin_builtin_traits)]
+
+struct Foo;
+
+unsafe impl !Clone for Foo { } //~ ERROR E0199
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo;
+
+unsafe trait Bar { }
+
+impl Bar for Foo { } //~ ERROR E0200
+
+fn main() {
+}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(warnings)]
+
+pub type ParseResult<T> = Result<T, ()>;
+
+pub enum Item<'a> { Literal(&'a str),
+ }
+
+pub fn colon_or_space(s: &str) -> ParseResult<&str> {
+ unimplemented!()
+}
+
+pub fn timezone_offset_zulu<F>(s: &str, colon: F) -> ParseResult<(&str, i32)>
+ where F: FnMut(&str) -> ParseResult<&str> {
+ unimplemented!()
+}
+
+pub fn parse<'a, I>(mut s: &str, items: I) -> ParseResult<()>
+ where I: Iterator<Item=Item<'a>> {
+ macro_rules! try_consume {
+ ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v })
+ }
+ let offset = try_consume!(timezone_offset_zulu(s.trim_left(), colon_or_space));
+ let offset = try_consume!(timezone_offset_zulu(s.trim_left(), colon_or_space));
+ Ok(())
+}
+
+#[rustc_error]
+fn main() { } //~ ERROR compilation successful
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_attrs)]
+#![allow(warnings)]
+
+// Check that you are allowed to implement using elision but write
+// trait without elision (a bug in this cropped up during
+// bootstrapping, so this is a regression test).
+
+pub struct SplitWhitespace<'a> {
+ x: &'a u8
+}
+
+pub trait UnicodeStr {
+ fn split_whitespace<'a>(&'a self) -> SplitWhitespace<'a>;
+}
+
+impl UnicodeStr for str {
+ #[inline]
+ fn split_whitespace(&self) -> SplitWhitespace {
+ unimplemented!()
+ }
+}
+
+#[rustc_error]
+fn main() { } //~ ERROR compilation successful
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+#![feature(rustc_attrs)]
+
+// Test for projection cache. We should be able to project distinct
+// lifetimes from `foo` as we reinstantiate it multiple times, but not
+// if we do it just once. In this variant, the region `'a` is used in
+// a contravariant position, which affects the results.
+
+// revisions: ok oneuse transmute krisskross
+
+#![allow(dead_code, unused_variables)]
+
+fn foo<'a>() -> &'a u32 { loop { } }
+
+fn bar<T>(t: T, x: T::Output) -> T::Output
+ where T: FnOnce<()>
+{
+ t()
+}
+
+#[cfg(ok)] // two instantiations: OK
+fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
+ let a = bar(foo, x);
+ let b = bar(foo, y);
+ (a, b)
+}
+
+#[cfg(oneuse)] // one instantiation: OK (surprisingly)
+fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
+ let f /* : fn() -> &'static u32 */ = foo; // <-- inferred type annotated
+ let a = bar(f, x); // this is considered ok because fn args are contravariant...
+ let b = bar(f, y); // ...and hence we infer T to distinct values in each call.
+ (a, b)
+}
+
+// FIXME(#32330)
+//#[cfg(transmute)] // one instantiations: BAD
+//fn baz<'a,'b>(x: &'a u32) -> &'static u32 {
+// bar(foo, x) //[transmute] ERROR E0495
+//}
+
+// FIXME(#32330)
+//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD
+//fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
+// let a = bar(foo, y); //[krisskross] ERROR E0495
+// let b = bar(foo, x); //[krisskross] ERROR E0495
+// (a, b)
+//}
+
+#[rustc_error]
+fn main() { }
+//[ok]~^ ERROR compilation successful
+//[oneuse]~^^ ERROR compilation successful
+//[transmute]~^^^ ERROR compilation successful
+//[krisskross]~^^^^ ERROR compilation successful
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+#![feature(rustc_attrs)]
+
+// Test for projection cache. We should be able to project distinct
+// lifetimes from `foo` as we reinstantiate it multiple times, but not
+// if we do it just once. In this variant, the region `'a` is used in
+// an invariant position, which affects the results.
+
+// revisions: ok oneuse transmute krisskross
+
+#![allow(dead_code, unused_variables)]
+
+use std::marker::PhantomData;
+
+struct Type<'a> {
+ // Invariant
+ data: PhantomData<fn(&'a u32) -> &'a u32>
+}
+
+fn foo<'a>() -> Type<'a> { loop { } }
+
+fn bar<T>(t: T, x: T::Output) -> T::Output
+ where T: FnOnce<()>
+{
+ t()
+}
+
+#[cfg(ok)] // two instantiations: OK
+fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) {
+ let a = bar(foo, x);
+ let b = bar(foo, y);
+ (a, b)
+}
+
+// FIXME(#32330)
+//#[cfg(oneuse)] // one instantiation: BAD
+//fn baz<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) {
+// let f = foo; // <-- No consistent type can be inferred for `f` here.
+// let a = bar(f, x); //[oneuse] ERROR E0495
+// let b = bar(f, y);
+// (a, b)
+//}
+
+// FIXME(#32330)
+//#[cfg(transmute)] // one instantiations: BAD
+//fn baz<'a,'b>(x: Type<'a>) -> Type<'static> {
+// // Cannot instantiate `foo` with any lifetime other than `'a`,
+// // since it is provided as input.
+//
+// bar(foo, x) //[transmute] ERROR E0495
+//}
+
+// FIXME(#32330)
+//#[cfg(krisskross)] // two instantiations, mixing and matching: BAD
+//fn transmute<'a,'b>(x: Type<'a>, y: Type<'b>) -> (Type<'a>, Type<'b>) {
+// let a = bar(foo, y); //[krisskross] ERROR E0495
+// let b = bar(foo, x); //[krisskross] ERROR E0495
+// (a, b)
+//}
+
+#[rustc_error]
+fn main() { }
+//[ok]~^ ERROR compilation successful
+//[oneuse]~^^ ERROR compilation successful
+//[transmute]~^^^ ERROR compilation successful
+//[krisskross]~^^^^ ERROR compilation successful
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test was derived from the wasm and parsell crates. They
+// stopped compiling when #32330 is fixed.
+
+#![allow(dead_code, unused_variables)]
+#![deny(hr_lifetime_in_assoc_type)]
+#![feature(unboxed_closures)]
+
+use std::str::Chars;
+
+pub trait HasOutput<Ch, Str> {
+ type Output;
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)]
+pub enum Token<'a> {
+ Begin(&'a str)
+}
+
+fn mk_unexpected_char_err<'a>() -> Option<&'a i32> {
+ unimplemented!()
+}
+
+fn foo<'a>(data: &mut Chars<'a>) {
+ bar(mk_unexpected_char_err)
+ //~^ ERROR lifetime parameter `'a` declared on fn `mk_unexpected_char_err`
+ //~| WARNING hard error in a future release
+}
+
+fn bar<F>(t: F)
+ // No type can satisfy this requirement, since `'a` does not
+ // appear in any of the input types:
+ where F: for<'a> Fn() -> Option<&'a i32>
+ //~^ ERROR associated type `Output` references lifetime `'a`, which does not
+ //~| WARNING hard error in a future release
+{
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Targeted tests for the higher-ranked subtyping code.
+
+#![feature(rustc_attrs)]
+#![allow(dead_code)]
+
+// revisions: bound_a_vs_bound_a
+// revisions: bound_a_vs_bound_b
+// revisions: bound_inv_a_vs_bound_inv_b
+// revisions: bound_co_a_vs_bound_co_b
+// revisions: bound_a_vs_free_x
+// revisions: free_x_vs_free_x
+// revisions: free_x_vs_free_y
+// revisions: free_inv_x_vs_free_inv_y
+// revisions: bound_a_b_vs_bound_a
+// revisions: bound_co_a_b_vs_bound_co_a
+// revisions: bound_contra_a_contra_b_ret_co_a
+// revisions: bound_co_a_co_b_ret_contra_a
+// revisions: bound_inv_a_b_vs_bound_inv_a
+// revisions: bound_a_b_ret_a_vs_bound_a_ret_a
+
+fn gimme<T>(_: Option<T>) { }
+
+struct Inv<'a> { x: *mut &'a u32 }
+
+struct Co<'a> { x: fn(&'a u32) }
+
+struct Contra<'a> { x: &'a u32 }
+
+macro_rules! check {
+ ($rev:ident: ($t1:ty, $t2:ty)) => {
+ #[cfg($rev)]
+ fn subtype<'x,'y:'x,'z:'y>() {
+ gimme::<$t2>(None::<$t1>);
+ //[free_inv_x_vs_free_inv_y]~^ ERROR mismatched types
+ }
+
+ #[cfg($rev)]
+ fn supertype<'x,'y:'x,'z:'y>() {
+ gimme::<$t1>(None::<$t2>);
+ //[bound_a_vs_free_x]~^ ERROR mismatched types
+ //[free_x_vs_free_y]~^^ ERROR mismatched types
+ //[bound_inv_a_b_vs_bound_inv_a]~^^^ ERROR mismatched types
+ //[bound_a_b_ret_a_vs_bound_a_ret_a]~^^^^ ERROR mismatched types
+ //[free_inv_x_vs_free_inv_y]~^^^^^ ERROR mismatched types
+ //[bound_a_b_vs_bound_a]~^^^^^^ ERROR mismatched types
+ //[bound_co_a_b_vs_bound_co_a]~^^^^^^^ ERROR mismatched types
+ //[bound_contra_a_contra_b_ret_co_a]~^^^^^^^^ ERROR mismatched types
+ //[bound_co_a_co_b_ret_contra_a]~^^^^^^^^^ ERROR mismatched types
+ }
+ }
+}
+
+// If both have bound regions, they are equivalent, regardless of
+// variant.
+check! { bound_a_vs_bound_a: (for<'a> fn(&'a u32),
+ for<'a> fn(&'a u32)) }
+check! { bound_a_vs_bound_b: (for<'a> fn(&'a u32),
+ for<'b> fn(&'b u32)) }
+check! { bound_inv_a_vs_bound_inv_b: (for<'a> fn(Inv<'a>),
+ for<'b> fn(Inv<'b>)) }
+check! { bound_co_a_vs_bound_co_b: (for<'a> fn(Co<'a>),
+ for<'b> fn(Co<'b>)) }
+
+// Bound is a subtype of free.
+check! { bound_a_vs_free_x: (for<'a> fn(&'a u32),
+ fn(&'x u32)) }
+
+// Two free regions are relatable if subtyping holds.
+check! { free_x_vs_free_x: (fn(&'x u32),
+ fn(&'x u32)) }
+check! { free_x_vs_free_y: (fn(&'x u32),
+ fn(&'y u32)) }
+check! { free_inv_x_vs_free_inv_y: (fn(Inv<'x>),
+ fn(Inv<'y>)) }
+
+// Somewhat surprisingly, a fn taking two distinct bound lifetimes and
+// a fn taking one bound lifetime can be interchangeable, but only if
+// we are co- or contra-variant with respect to both lifetimes.
+//
+// The reason is:
+// - if we are covariant, then 'a and 'b can be set to the call-site
+// intersection;
+// - if we are contravariant, then 'a can be inferred to 'static.
+//
+// FIXME(#32330) this is true, but we are not currently impl'ing this
+// full semantics
+check! { bound_a_b_vs_bound_a: (for<'a,'b> fn(&'a u32, &'b u32),
+ for<'a> fn(&'a u32, &'a u32)) }
+check! { bound_co_a_b_vs_bound_co_a: (for<'a,'b> fn(Co<'a>, Co<'b>),
+ for<'a> fn(Co<'a>, Co<'a>)) }
+check! { bound_contra_a_contra_b_ret_co_a: (for<'a,'b> fn(Contra<'a>, Contra<'b>) -> Co<'a>,
+ for<'a> fn(Contra<'a>, Contra<'a>) -> Co<'a>) }
+check! { bound_co_a_co_b_ret_contra_a: (for<'a,'b> fn(Co<'a>, Co<'b>) -> Contra<'a>,
+ for<'a> fn(Co<'a>, Co<'a>) -> Contra<'a>) }
+
+// If we make those lifetimes invariant, then the two types are not interchangeable.
+check! { bound_inv_a_b_vs_bound_inv_a: (for<'a,'b> fn(Inv<'a>, Inv<'b>),
+ for<'a> fn(Inv<'a>, Inv<'a>)) }
+check! { bound_a_b_ret_a_vs_bound_a_ret_a: (for<'a,'b> fn(&'a u32, &'b u32) -> &'a u32,
+ for<'a> fn(&'a u32, &'a u32) -> &'a u32) }
+
+#[rustc_error]
+fn main() {
+//[bound_a_vs_bound_a]~^ ERROR compilation successful
+//[bound_a_vs_bound_b]~^^ ERROR compilation successful
+//[bound_inv_a_vs_bound_inv_b]~^^^ ERROR compilation successful
+//[bound_co_a_vs_bound_co_b]~^^^^ ERROR compilation successful
+//[free_x_vs_free_x]~^^^^^ ERROR compilation successful
+}
impl<'a> NoLifetime for Foo<'a> {
fn get<'p, T : Test<'a>>(&self) -> T {
-//~^ ERROR lifetime parameters or bounds on method `get` do not match the trait declaration
+//~^ ERROR E0195
return *self as T;
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::raw::Slice;
+struct Slice<T> {
+ data: *const T,
+ len: usize,
+}
fn main() {
let Slice { data: data, len: len } = "foo";
//~^ ERROR mismatched types
//~| expected type `&str`
- //~| found type `std::raw::Slice<_>`
- //~| expected &-ptr, found struct `std::raw::Slice`
+ //~| found type `Slice<_>`
+ //~| expected &-ptr, found struct `Slice`
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::raw::Slice;
+struct Slice<T> {
+ data: *const T,
+ len: usize,
+}
fn main() {
match () {
Slice { data: data, len: len } => (),
//~^ ERROR mismatched types
//~| expected type `()`
- //~| found type `std::raw::Slice<_>`
- //~| expected (), found struct `std::raw::Slice`
+ //~| found type `Slice<_>`
+ //~| expected (), found struct `Slice`
_ => unreachable!()
}
}
// Not obvious, but there is an implicit lifetime here -------^
//~^^ ERROR cannot infer
//~| ERROR cannot infer
- //~| ERROR cannot infer
//
// The fact that `Publisher` is using an implicit lifetime is
// what was causing the debruijn accounting to be off, so
fn main() {
self += 1;
//~^ ERROR: unresolved name `self`
- //~| HELP: Module
+ //~| HELP: module `self`
// it's a bug if this suggests a missing `self` as we're not in a method
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(reflect_marker)]
+
+use std::marker::Reflect;
+use std::any::Any;
+
+struct Foo;
+
+trait Bar {}
+
+impl Bar for Foo {}
+
+fn main() {
+ let any: &Any = &Bar; //~ ERROR E0425
+ //~| HELP trait `Bar`
+ if any.is::<u32>() { println!("u32"); }
+}
struct Bar<'x, 'y, 'z> { bar: &'y i32, baz: i32, marker: PhantomData<(&'x(),&'y(),&'z())> }
fn bar1<'a>(x: &Bar) -> (&'a i32, &'a i32, &'a i32) {
- //~^ HELP: consider using an explicit lifetime parameter as shown: fn bar1<'a>(x: &'a Bar) -> (&'a i32, &'a i32, &'a i32)
+ //~^ HELP consider using an explicit lifetime parameter as shown: fn bar1<'b, 'c, 'a>(x: &'a Bar<'b, 'a, 'c>) -> (&'a i32, &'a i32, &'a i32)
(x.bar, &x.baz, &x.baz)
//~^ ERROR E0312
//~| ERROR cannot infer
impl<'a> Baz<'a> {
fn baz2<'b>(&self, x: &isize) -> (&'b isize, &'b isize) {
- //~^ HELP consider using an explicit lifetime parameter as shown: fn baz2<'b>(&self, x: &'b isize) -> (&'a isize, &'a isize)
+ //~^ HELP consider using an explicit lifetime parameter as shown: fn baz2<'b>(&self, x: &'a isize) -> (&'a isize, &'a isize)
(self.bar, x) //~ ERROR E0312
//~^ ERROR E0312
}
// except according to those terms.
#![allow(dead_code)]
-#![feature(recover)]
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
use std::rc::Rc;
use std::cell::RefCell;
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
fn main() {
assert::<Rc<RefCell<i32>>>();
// except according to those terms.
#![allow(dead_code)]
-#![feature(recover)]
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
use std::sync::Arc;
use std::cell::RefCell;
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
fn main() {
assert::<Arc<RefCell<i32>>>();
// except according to those terms.
#![allow(dead_code)]
-#![feature(recover)]
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
use std::cell::RefCell;
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
fn main() {
assert::<&RefCell<i32>>();
// except according to those terms.
#![allow(dead_code)]
-#![feature(recover)]
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
use std::cell::UnsafeCell;
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
fn main() {
assert::<*const UnsafeCell<i32>>(); //~ ERROR E0277
// except according to those terms.
#![allow(dead_code)]
-#![feature(recover)]
-use std::panic::RecoverSafe;
+use std::panic::UnwindSafe;
use std::cell::RefCell;
-fn assert<T: RecoverSafe + ?Sized>() {}
+fn assert<T: UnwindSafe + ?Sized>() {}
fn main() {
assert::<*mut RefCell<i32>>();
fn make_object1<A:SomeTrait>(v: A) -> Box<SomeTrait+'static> {
box v as Box<SomeTrait+'static>
//~^ ERROR the parameter type `A` may not live long enough
- //~^^ ERROR the parameter type `A` may not live long enough
+ //~| ERROR the parameter type `A` may not live long enough
}
fn make_object2<'a,A:SomeTrait+'a>(v: A) -> Box<SomeTrait+'a> {
fn make_object3<'a,'b,A:SomeTrait+'a>(v: A) -> Box<SomeTrait+'b> {
box v as Box<SomeTrait+'b>
//~^ ERROR the parameter type `A` may not live long enough
- //~^^ ERROR the parameter type `A` may not live long enough
+ //~| ERROR the parameter type `A` may not live long enough
}
fn main() { }
impl<'a> Box<'a> {
fn or<'b,G:GetRef<'b>>(&self, g2: G) -> &'a isize {
g2.get()
- //~^ ERROR mismatched types
- //~| expected type `&'a isize`
- //~| found type `&'b isize`
- //~| lifetime mismatch
-
+ //~^ ERROR E0312
}
}
fn get<'a,'b,G:GetRef<'a, isize>>(g1: G, b: &'b isize) -> &'b isize {
g1.get()
- //~^ ERROR mismatched types
+ //~^ ERROR E0312
}
fn main() {
struct Invariant<'a> {
- f: Box<for<'b> FnOnce() -> &'b mut &'a isize + 'static>,
+ f: Box<FnOnce() -> *mut &'a isize + 'static>,
}
fn to_same_lifetime<'r>(b_isize: Invariant<'r>) {
fn h1() -> i32 {
a.I
//~^ ERROR E0425
- //~| HELP To reference an item from the `a` module, use `a::I`
+ //~| HELP to reference an item from the `a` module, use `a::I`
}
fn h2() -> i32 {
a.g()
//~^ ERROR E0425
- //~| HELP To call a function from the `a` module, use `a::g(..)`
+ //~| HELP to call a function from the `a` module, use `a::g(..)`
}
fn h3() -> i32 {
a.b.J
//~^ ERROR E0425
- //~| HELP To reference an item from the `a` module, use `a::b`
+ //~| HELP to reference an item from the `a` module, use `a::b`
}
fn h4() -> i32 {
a::b.J
//~^ ERROR E0425
- //~| HELP To reference an item from the `a::b` module, use `a::b::J`
+ //~| HELP to reference an item from the `a::b` module, use `a::b::J`
}
fn h5() {
a.b.f();
//~^ ERROR E0425
- //~| HELP To reference an item from the `a` module, use `a::b`
+ //~| HELP to reference an item from the `a` module, use `a::b`
let v = Vec::new();
v.push(a::b);
//~^ ERROR E0425
- //~| HELP Module `a::b` cannot be used as an expression
+ //~| HELP module `a::b` cannot be used as an expression
}
fn h6() -> i32 {
a::b.f()
//~^ ERROR E0425
- //~| HELP To call a function from the `a::b` module, use `a::b::f(..)`
+ //~| HELP to call a function from the `a::b` module, use `a::b::f(..)`
}
fn h7() {
a::b
//~^ ERROR E0425
- //~| HELP Module `a::b` cannot be used as an expression
+ //~| HELP module `a::b` cannot be used as an expression
}
fn h8() -> i32 {
a::b()
//~^ ERROR E0425
- //~| HELP Module `a::b` cannot be used as an expression
+ //~| HELP module `a::b` cannot be used as an expression
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test incremental compilation tracking where we add a field to a
+// struct in between revisions (hashing should be stable).
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+pub struct X {
+ pub x: u32,
+
+ #[cfg(rpass2)]
+ pub x2: u32,
+}
+
+pub struct EmbedX {
+ x: X
+}
+
+pub struct Y {
+ pub y: char
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_X(x: X) -> u32 {
+ x.x as u32
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_EmbedX(embed: EmbedX) -> u32 {
+ embed.x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test incremental compilation tracking where we change field names
+// in between revisions (hashing should be stable).
+
+// revisions:rpass1 cfail2
+
+#![feature(rustc_attrs)]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32
+}
+
+#[cfg(cfail2)]
+pub struct X {
+ pub y: u32
+}
+
+pub struct EmbedX {
+ x: X
+}
+
+pub struct Y {
+ pub y: char
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="cfail2")]
+pub fn use_X() -> u32 {
+ let x: X = X { x: 22 };
+ //[cfail2]~^ ERROR structure `X` has no field named `x`
+ x.x as u32
+ //[cfail2]~^ ERROR attempted access of field `x`
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="cfail2")]
+pub fn use_EmbedX(embed: EmbedX) -> u32 {
+ embed.x.x as u32
+ //[cfail2]~^ ERROR attempted access of field `x`
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="cfail2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test incremental compilation tracking where we change the type of a
+// struct field in between revisions (hashing should be stable).
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32
+}
+
+#[cfg(rpass2)]
+pub struct X {
+ pub x: i32
+}
+
+pub struct EmbedX {
+ x: X
+}
+
+pub struct Y {
+ pub y: char
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_X() -> u32 {
+ let x: X = X { x: 22 };
+ x.x as u32
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_EmbedX(x: EmbedX) -> u32 {
+ let x: X = X { x: 22 };
+ x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="rlib"]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32
+}
+
+#[cfg(rpass2)]
+pub struct X {
+ pub x: i32
+}
+
+pub struct EmbedX {
+ pub x: X
+}
+
+pub struct Y {
+ pub y: char
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:a.rs
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+extern crate a;
+
+use a::*;
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_X() -> u32 {
+ let x: X = X { x: 22 };
+ x.x as u32
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_EmbedX(embed: EmbedX) -> u32 {
+ embed.x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test incremental compilation tracking where we change nothing
+// in between revisions (hashing should be stable).
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32
+}
+
+#[cfg(rpass2)]
+pub struct X {
+ pub x: u32
+}
+
+pub struct EmbedX {
+ x: X
+}
+
+pub struct Y {
+ pub y: char
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_X() -> u32 {
+ let x: X = X { x: 22 };
+ x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_EmbedX(x: EmbedX) -> u32 {
+ let x: X = X { x: 22 };
+ x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test incremental compilation tracking where we remove a field from a
+// struct in between revisions (hashing should be stable).
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32,
+ pub x2: u32,
+}
+
+#[cfg(rpass2)]
+pub struct X {
+ pub x: u32,
+}
+
+pub struct EmbedX {
+ x: X
+}
+
+pub struct Y {
+ pub y: char
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_X(x: X) -> u32 {
+ x.x as u32
+}
+
+#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_EmbedX(embed: EmbedX) -> u32 {
+ embed.x.x as u32
+}
+
+#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn use_Y() {
+ let x: Y = Y { y: 'c' };
+}
+
+pub fn main() { }
#[cfg(rpass2)]
pub type X = i32;
+// this version doesn't actually change anything:
+#[cfg(rpass3)]
+pub type X = i32;
+
pub type Y = char;
// except according to those terms.
// aux-build:a.rs
-// revisions:rpass1 rpass2
+// revisions:rpass1 rpass2 rpass3
#![feature(rustc_attrs)]
extern crate a;
#[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
+#[rustc_clean(label="TypeckItemBody", cfg="rpass3")]
pub fn use_X() -> u32 {
let x: a::X = 22;
x as u32
}
#[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+#[rustc_clean(label="TypeckItemBody", cfg="rpass3")]
pub fn use_Y() {
let x: a::Y = 'c';
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:thread '<main>' panicked at
+// error-pattern:thread 'main' panicked at
fn main() {
panic!()
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
+// error-pattern:thread 'main' panicked at 'arithmetic operation overflowed'
// compile-flags: -C debug-assertions
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
// This function is checking that our automatic truncation does not
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
+// error-pattern:thread 'main' panicked at 'arithmetic operation overflowed'
// compile-flags: -C debug-assertions
#![feature(rustc_attrs)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'attempted to negate with overflow'
+// error-pattern:thread 'main' panicked at 'attempted to negate with overflow'
// compile-flags: -C debug-assertions
#![feature(rustc_attrs)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
+// error-pattern:thread 'main' panicked at 'arithmetic operation overflowed'
// compile-flags: -C debug-assertions
fn main() {
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
// This function is checking that our (type-based) automatic
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'shift operation overflowed'
+// error-pattern:thread 'main' panicked at 'shift operation overflowed'
// compile-flags: -C debug-assertions
#![warn(exceeding_bitshifts)]
// ignore-pretty : (#23623) problems when ending with // comments
-// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
+// error-pattern:thread 'main' panicked at 'arithmetic operation overflowed'
// compile-flags: -C debug-assertions
#![feature(rustc_attrs)]
// error-pattern:greetings from the panic handler
-#![feature(std_panic, panic_handler)]
+#![feature(panic_handler)]
+
use std::panic;
use std::io::{self, Write};
fn main() {
- panic::set_handler(|i| {
+ panic::set_hook(Box::new(|i| {
write!(io::stderr(), "greetings from the panic handler");
- });
+ }));
panic!("foobar");
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:thread '<main>' panicked at 'foobar'
+// error-pattern:thread 'main' panicked at 'foobar'
+
+#![feature(panic_handler)]
-#![feature(std_panic, panic_handler)]
use std::panic;
use std::io::{self, Write};
fn main() {
- panic::set_handler(|i| {
+ panic::set_hook(Box::new(|i| {
write!(io::stderr(), "greetings from the panic handler");
- });
- panic::take_handler();
+ }));
+ panic::take_hook();
panic!("foobar");
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:thread '<main>' panicked at 'foobar'
+// error-pattern:thread 'main' panicked at 'foobar'
+
+#![feature(panic_handler)]
-#![feature(std_panic, panic_handler)]
use std::panic;
fn main() {
- panic::take_handler();
+ panic::take_hook();
panic!("foobar");
}
+++ /dev/null
--include ../tools.mk
-
-all:
- $(RUSTC) -o $(TMPDIR)/input.dd -Z no-analysis --emit dep-info input.rs
- sed -i'.bak' 's/^.*input.dd/input.dd/g' $(TMPDIR)/input.dd
- diff -u $(TMPDIR)/input.dd input.dd
+++ /dev/null
-input.dd: input.rs
-
-input.rs:
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Tests that dep info can be emitted without resolving external crates.
-extern crate not_there;
-
-fn main() {}
let krate = panictry!(driver::phase_1_parse_input(&sess, cfg, &input));
- let krate = driver::phase_2_configure_and_expand(&sess, &cstore, krate, &id, None)
- .expect("phase_2 returned `None`");
-
- let krate = driver::assign_node_ids(&sess, krate);
- let mut defs = ast_map::collect_definitions(&krate);
- read_local_crates(&sess, &cstore, &defs, &krate, &id, &dep_graph);
- let (analysis, resolutions, mut hir_forest) = {
- driver::lower_and_resolve(&sess, &id, &mut defs, &krate,
- &sess.dep_graph, MakeGlobMap::No)
+ let driver::ExpansionResult { defs, analysis, resolutions, mut hir_forest, .. } = {
+ driver::phase_2_configure_and_expand(&sess, &cstore, krate, &id, None, MakeGlobMap::No)
+ .expect("phase_2 returned `None`")
};
let arenas = ty::CtxtArenas::new();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(recover, rand, std_panic)]
+#![feature(rand, std_panic)]
use std::__rand::{thread_rng, Rng};
-use std::panic::{self, AssertRecoverSafe};
+use std::panic::{self, AssertUnwindSafe};
use std::collections::BinaryHeap;
use std::cmp;
{
// push the panicking item to the heap and catch the panic
let thread_result = {
- let mut heap_ref = AssertRecoverSafe(&mut heap);
- panic::recover(move || {
+ let mut heap_ref = AssertUnwindSafe(&mut heap);
+ panic::catch_unwind(move || {
heap_ref.push(panic_item);
})
};
fn foo(&self) { }
}
-impl Contravariant for for<'a,'b> fn(&'a u8, &'b u8) {
+impl Contravariant for for<'a,'b> fn(&'a u8, &'b u8) -> &'a u8 {
}
-impl Contravariant for for<'a> fn(&'a u8, &'a u8) {
+impl Contravariant for for<'a> fn(&'a u8, &'a u8) -> &'a u8 {
}
///////////////////////////////////////////////////////////////////////////
fn foo(&self) { }
}
-impl Covariant for for<'a,'b> fn(&'a u8, &'b u8) {
+impl Covariant for for<'a,'b> fn(&'a u8, &'b u8) -> &'a u8 {
}
-impl Covariant for for<'a> fn(&'a u8, &'a u8) {
+impl Covariant for for<'a> fn(&'a u8, &'a u8) -> &'a u8 {
}
///////////////////////////////////////////////////////////////////////////
fn foo(&self) { }
}
-impl Invariant for for<'a,'b> fn(&'a u8, &'b u8) {
+impl Invariant for for<'a,'b> fn(&'a u8, &'b u8) -> &'a u8 {
}
-impl Invariant for for<'a> fn(&'a u8, &'a u8) {
+impl Invariant for for<'a> fn(&'a u8, &'a u8) -> &'a u8 {
}
fn main() { }
}
impl<'a> Named for S<'a> {
- fn new<'b>(name: &'static str) -> S<'b> {
+ fn new(name: &'static str) -> S<'a> {
S { name: name, mark: Cell::new(0), next: Cell::new(None) }
}
fn name(&self) -> &str { self.name }
}
impl<'a> Named for S2<'a> {
- fn new<'b>(name: &'static str) -> S2<'b> {
+ fn new(name: &'static str) -> S2<'a> {
S2 { name: name, mark: Cell::new(0), next: Cell::new((None, None)) }
}
fn name(&self) -> &str { self.name }
}
impl<'a> Named for V<'a> {
- fn new<'b>(name: &'static str) -> V<'b> {
+ fn new(name: &'static str) -> V<'a> {
V { name: name,
mark: Cell::new(0),
contents: vec![Cell::new(None), Cell::new(None)]
}
impl<'a> Named for H<'a> {
- fn new<'b>(name: &'static str) -> H<'b> {
+ fn new(name: &'static str) -> H<'a> {
H { name: name, mark: Cell::new(0), next: Cell::new(None) }
}
fn name(&self) -> &str { self.name }
}
impl<'a> Named for HM<'a> {
- fn new<'b>(name: &'static str) -> HM<'b> {
+ fn new(name: &'static str) -> HM<'a> {
HM { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for VD<'a> {
- fn new<'b>(name: &'static str) -> VD<'b> {
+ fn new(name: &'static str) -> VD<'a> {
VD { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for VM<'a> {
- fn new<'b>(name: &'static str) -> VM<'b> {
+ fn new(name: &'static str) -> VM<'a> {
VM { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for LL<'a> {
- fn new<'b>(name: &'static str) -> LL<'b> {
+ fn new(name: &'static str) -> LL<'a> {
LL { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for BH<'a> {
- fn new<'b>(name: &'static str) -> BH<'b> {
+ fn new(name: &'static str) -> BH<'a> {
BH { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for BTM<'a> {
- fn new<'b>(name: &'static str) -> BTM<'b> {
+ fn new(name: &'static str) -> BTM<'a> {
BTM { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
}
impl<'a> Named for BTS<'a> {
- fn new<'b>(name: &'static str) -> BTS<'b> {
+ fn new(name: &'static str) -> BTS<'a> {
BTS { name: name,
mark: Cell::new(0),
contents: Cell::new(None)
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Issue #33540
+// We previously used to generate a 3-armed boolean `SwitchInt` in the
+// MIR of the function `foo` below. #33583 changed rustc to
+// generate an `If` terminator instead. This test is to just ensure
+// sanity in that we generate an if-else chain giving the correct
+// results.
+
+#![feature(rustc_attrs)]
+
+#[rustc_mir]
+fn foo(x: bool, y: bool) -> u32 {
+ match (x, y) {
+ (false, _) => 0,
+ (_, false) => 1,
+ (true, true) => 2
+ }
+}
+
+fn main() {
+ assert_eq!(foo(false, true), 0);
+ assert_eq!(foo(false, false), 0);
+ assert_eq!(foo(true, false), 1);
+ assert_eq!(foo(true, true), 2);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::process::{Command, Stdio};
+use std::env;
+use std::sync::{Mutex, RwLock};
+use std::time::Duration;
+use std::thread;
+
+fn test_mutex() {
+ let m = Mutex::new(0);
+ let _g = m.lock().unwrap();
+ let _g2 = m.lock().unwrap();
+}
+
+fn test_try_mutex() {
+ let m = Mutex::new(0);
+ let _g = m.lock().unwrap();
+ let _g2 = m.try_lock().unwrap();
+}
+
+fn test_rwlock_ww() {
+ let m = RwLock::new(0);
+ let _g = m.write().unwrap();
+ let _g2 = m.write().unwrap();
+}
+
+fn test_try_rwlock_ww() {
+ let m = RwLock::new(0);
+ let _g = m.write().unwrap();
+ let _g2 = m.try_write().unwrap();
+}
+
+fn test_rwlock_rw() {
+ let m = RwLock::new(0);
+ let _g = m.read().unwrap();
+ let _g2 = m.write().unwrap();
+}
+
+fn test_try_rwlock_rw() {
+ let m = RwLock::new(0);
+ let _g = m.read().unwrap();
+ let _g2 = m.try_write().unwrap();
+}
+
+fn test_rwlock_wr() {
+ let m = RwLock::new(0);
+ let _g = m.write().unwrap();
+ let _g2 = m.read().unwrap();
+}
+
+fn test_try_rwlock_wr() {
+ let m = RwLock::new(0);
+ let _g = m.write().unwrap();
+ let _g2 = m.try_read().unwrap();
+}
+
+fn main() {
+ let args: Vec<String> = env::args().collect();
+ if args.len() > 1 {
+ match &*args[1] {
+ "mutex" => test_mutex(),
+ "try_mutex" => test_try_mutex(),
+ "rwlock_ww" => test_rwlock_ww(),
+ "try_rwlock_ww" => test_try_rwlock_ww(),
+ "rwlock_rw" => test_rwlock_rw(),
+ "try_rwlock_rw" => test_try_rwlock_rw(),
+ "rwlock_wr" => test_rwlock_wr(),
+ "try_rwlock_wr" => test_try_rwlock_wr(),
+ _ => unreachable!(),
+ }
+ // If we reach this point then the test failed
+ println!("TEST FAILED: {}", args[1]);
+ } else {
+ let mut v = vec![];
+ v.push(Command::new(&args[0]).arg("mutex").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("try_mutex").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("rwlock_ww").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("try_rwlock_ww").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("rwlock_rw").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("try_rwlock_rw").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("rwlock_wr").stderr(Stdio::null()).spawn().unwrap());
+ v.push(Command::new(&args[0]).arg("try_rwlock_wr").stderr(Stdio::null()).spawn().unwrap());
+
+ thread::sleep(Duration::new(1, 0));
+
+ // Make sure all subprocesses either panicked or were killed because they deadlocked
+ for mut c in v {
+ c.kill().ok();
+ assert!(!c.wait().unwrap().success());
+ }
+ }
+}
assert_eq!(it.next().map(|l| l.starts_with("thread '<unnamed>' panicked at")), Some(true));
assert_eq!(it.next(), Some("note: Run with `RUST_BACKTRACE=1` for a backtrace."));
- assert_eq!(it.next().map(|l| l.starts_with("thread '<main>' panicked at")), Some(true));
+ assert_eq!(it.next().map(|l| l.starts_with("thread 'main' panicked at")), Some(true));
assert_eq!(it.next(), None);
}
// ignore-emscripten no threads support
-#![feature(std_panic, recover, panic_propagate, panic_handler, const_fn)]
+#![feature(panic_handler)]
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::panic;
use std::thread;
-static A: AtomicUsize = AtomicUsize::new(0);
+static A: AtomicUsize = ATOMIC_USIZE_INIT;
fn main() {
- panic::set_handler(|_| {
+ panic::set_hook(Box::new(|_| {
A.fetch_add(1, Ordering::SeqCst);
- });
+ }));
let result = thread::spawn(|| {
- let result = panic::recover(|| {
+ let result = panic::catch_unwind(|| {
panic!("hi there");
});
- panic::propagate(result.unwrap_err());
+ panic::resume_unwind(result.unwrap_err());
}).join();
let msg = *result.unwrap_err().downcast::<&'static str>().unwrap();
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #31849: the problem here was actually a performance
+// cliff, but I'm adding the test for reference.
+
+pub trait Upcast<T> {
+ fn upcast(self) -> T;
+}
+
+impl<S1, S2, T1, T2> Upcast<(T1, T2)> for (S1,S2)
+ where S1: Upcast<T1>,
+ S2: Upcast<T2>,
+{
+ fn upcast(self) -> (T1, T2) { (self.0.upcast(), self.1.upcast()) }
+}
+
+impl Upcast<()> for ()
+{
+ fn upcast(self) -> () { () }
+}
+
+pub trait ToStatic {
+ type Static: 'static;
+ fn to_static(self) -> Self::Static where Self: Sized;
+}
+
+impl<T, U> ToStatic for (T, U)
+ where T: ToStatic,
+ U: ToStatic
+{
+ type Static = (T::Static, U::Static);
+ fn to_static(self) -> Self::Static { (self.0.to_static(), self.1.to_static()) }
+}
+
+impl ToStatic for ()
+{
+ type Static = ();
+ fn to_static(self) -> () { () }
+}
+
+
+trait Factory {
+ type Output;
+ fn build(&self) -> Self::Output;
+}
+
+impl<S,T> Factory for (S, T)
+ where S: Factory,
+ T: Factory,
+ S::Output: ToStatic,
+ <S::Output as ToStatic>::Static: Upcast<S::Output>,
+{
+ type Output = (S::Output, T::Output);
+ fn build(&self) -> Self::Output { (self.0.build().to_static().upcast(), self.1.build()) }
+}
+
+impl Factory for () {
+ type Output = ();
+ fn build(&self) -> Self::Output { () }
+}
+
+fn main() {
+ // More parens, more time.
+ let it = ((((((((((),()),()),()),()),()),()),()),()),());
+ it.build();
+}
+
// aux-build:reachable-unnameable-items.rs
-#![feature(recover)]
-
extern crate reachable_unnameable_items;
use reachable_unnameable_items::*;
let none = None;
function_accepting_unnameable_type(none);
- let _guard = std::panic::recover(|| none.unwrap().method_of_unnameable_type3());
+ let _guard = std::panic::catch_unwind(|| none.unwrap().method_of_unnameable_type3());
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(std_panic, recover, start)]
+#![feature(start)]
use std::ffi::CStr;
use std::process::{Command, Output};
match **argv.offset(1) as char {
'1' => {}
'2' => println!("foo"),
- '3' => assert!(panic::recover(|| {}).is_ok()),
- '4' => assert!(panic::recover(|| panic!()).is_err()),
+ '3' => assert!(panic::catch_unwind(|| {}).is_ok()),
+ '4' => assert!(panic::catch_unwind(|| panic!()).is_err()),
'5' => assert!(Command::new("test").spawn().is_err()),
_ => panic!()
}
assert_eq!(s.chars().count(), 4);
assert_eq!(schs.len(), 4);
assert_eq!(schs.iter().cloned().collect::<String>(), s);
- assert_eq!(s.char_at(0), 'e');
- assert_eq!(s.char_at(1), 'é');
assert!((str::from_utf8(s.as_bytes()).is_ok()));
// invalid prefix
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-msvc -- sprintf isn't a symbol in msvcrt? maybe a #define?
-
-#![feature(libc, std_misc)]
-
-extern crate libc;
-
-use std::ffi::{CStr, CString};
-use libc::{c_char, c_int};
-
-
+#[link(name = "rust_test_helpers")]
extern {
- fn sprintf(s: *mut c_char, format: *const c_char, ...) -> c_int;
-}
-
-unsafe fn check<T, F>(expected: &str, f: F) where F: FnOnce(*mut c_char) -> T {
- let mut x = [0 as c_char; 50];
- f(&mut x[0] as *mut c_char);
- assert_eq!(expected.as_bytes(), CStr::from_ptr(x.as_ptr()).to_bytes());
+ fn rust_interesting_average(_: u64, ...) -> f64;
}
pub fn main() {
-
+ // Call without variadic arguments
unsafe {
- // Call with just the named parameter
- let c = CString::new(&b"Hello World\n"[..]).unwrap();
- check("Hello World\n", |s| sprintf(s, c.as_ptr()));
-
- // Call with variable number of arguments
- let c = CString::new(&b"%d %f %c %s\n"[..]).unwrap();
- check("42 42.500000 a %d %f %c %s\n\n", |s| {
- sprintf(s, c.as_ptr(), 42, 42.5f64, 'a' as c_int, c.as_ptr());
- });
+ assert!(rust_interesting_average(0).is_nan());
+ }
- // Make a function pointer
- let x: unsafe extern fn(*mut c_char, *const c_char, ...) -> c_int = sprintf;
+ // Call with direct arguments
+ unsafe {
+ assert_eq!(rust_interesting_average(1, 10i64, 10.0f64) as i64, 20);
+ }
- // A function that takes a function pointer
- unsafe fn call(fp: unsafe extern fn(*mut c_char, *const c_char, ...) -> c_int) {
- // Call with just the named parameter
- let c = CString::new(&b"Hello World\n"[..]).unwrap();
- check("Hello World\n", |s| fp(s, c.as_ptr()));
+ // Call with named arguments, variable number of them
+ let (x1, x2, x3, x4) = (10i64, 10.0f64, 20i64, 20.0f64);
+ unsafe {
+ assert_eq!(rust_interesting_average(2, x1, x2, x3, x4) as i64, 30);
+ }
- // Call with variable number of arguments
- let c = CString::new(&b"%d %f %c %s\n"[..]).unwrap();
- check("42 42.500000 a %d %f %c %s\n\n", |s| {
- fp(s, c.as_ptr(), 42, 42.5f64, 'a' as c_int, c.as_ptr());
- });
- }
+ // A function that takes a function pointer
+ unsafe fn call(fp: unsafe extern fn(u64, ...) -> f64) {
+ let (x1, x2, x3, x4) = (10i64, 10.0f64, 20i64, 20.0f64);
+ assert_eq!(fp(2, x1, x2, x3, x4) as i64, 30);
+ }
- // Pass sprintf directly
- call(sprintf);
+ unsafe {
+ call(rust_interesting_average);
- // Pass sprintf indirectly
+ // Make a function pointer, pass indirectly
+ let x: unsafe extern fn(u64, ...) -> f64 = rust_interesting_average;
call(x);
}
-
}
Test {
name: "cargo",
repo: "https://github.com/rust-lang/cargo",
- sha: "26288f799427f9cc6e8bdddd782a17a8156ebc64",
+ sha: "7d79da08238e3d47e0bc4406155bdcc45ccb8c82",
lock: None,
},
Test {