+Version 1.29.0 (2018-09-13)
+===========================
+
+Compiler
+--------
+- [Bumped minimum LLVM version to 5.0.][51899]
+- [Added `powerpc64le-unknown-linux-musl` target.][51619]
+- [Added `aarch64-unknown-hermit` and `x86_64-unknown-hermit` targets.][52861]
+
+Libraries
+---------
+- [`Once::call_once` now no longer requires `Once` to be `'static`.][52239]
+- [`BuildHasherDefault` now implements `PartialEq` and `Eq`.][52402]
+- [`Box<CStr>`, `Box<OsStr>`, and `Box<Path>` now implement `Clone`.][51912]
+- [Implemented `PartialEq<&str>` for `OsString` and `PartialEq<OsString>`
+ for `&str`.][51178]
+- [`Cell<T>` now allows `T` to be unsized.][50494]
+- [`SocketAddr` is now stable on Redox.][52656]
+
+Stabilized APIs
+---------------
+- [`Arc::downcast`]
+- [`Iterator::flatten`]
+- [`Rc::downcast`]
+
+Cargo
+-----
+- [Cargo can silently fix some bad lockfiles.][cargo/5831] You can use
+ `--locked` to disable this behaviour.
+- [`cargo-install` will now allow you to cross compile an install
+  using `--target`.][cargo/5614]
+- [Added the `cargo-fix` subcommand to automatically move project code from
+ 2015 edition to 2018.][cargo/5723]
+
+Misc
+----
+- [`rustdoc` now has the `--cap-lints` option which demotes all lints above
+ the specified level to that level.][52354] For example `--cap-lints warn`
+ will demote `deny` and `forbid` lints to `warn`.
+- [`rustc` and `rustdoc` will now have the exit code of `1` if compilation
+ fails, and `101` if there is a panic.][52197]
+- [A preview of clippy has been made available through rustup.][51122]
+ You can install the preview with `rustup component add clippy-preview`.
+
+Compatibility Notes
+-------------------
+- [`str::{slice_unchecked, slice_unchecked_mut}` are now deprecated.][51807]
+ Use `str::get_unchecked(begin..end)` instead.
+- [`std::env::home_dir` is now deprecated for its unintuitive behaviour.][51656]
+ Consider using the `home_dir` function from
+ https://crates.io/crates/dirs instead.
+- [`rustc` will no longer silently ignore invalid data in target spec.][52330]
+
+[52861]: https://github.com/rust-lang/rust/pull/52861/
+[52656]: https://github.com/rust-lang/rust/pull/52656/
+[52239]: https://github.com/rust-lang/rust/pull/52239/
+[52330]: https://github.com/rust-lang/rust/pull/52330/
+[52354]: https://github.com/rust-lang/rust/pull/52354/
+[52402]: https://github.com/rust-lang/rust/pull/52402/
+[52103]: https://github.com/rust-lang/rust/pull/52103/
+[52197]: https://github.com/rust-lang/rust/pull/52197/
+[51807]: https://github.com/rust-lang/rust/pull/51807/
+[51899]: https://github.com/rust-lang/rust/pull/51899/
+[51912]: https://github.com/rust-lang/rust/pull/51912/
+[51511]: https://github.com/rust-lang/rust/pull/51511/
+[51619]: https://github.com/rust-lang/rust/pull/51619/
+[51656]: https://github.com/rust-lang/rust/pull/51656/
+[51178]: https://github.com/rust-lang/rust/pull/51178/
+[51122]: https://github.com/rust-lang/rust/pull/51122
+[50494]: https://github.com/rust-lang/rust/pull/50494/
+[cargo/5614]: https://github.com/rust-lang/cargo/pull/5614/
+[cargo/5723]: https://github.com/rust-lang/cargo/pull/5723/
+[cargo/5831]: https://github.com/rust-lang/cargo/pull/5831/
+[`Arc::downcast`]: https://doc.rust-lang.org/std/sync/struct.Arc.html#method.downcast
+[`Iterator::flatten`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.flatten
+[`Rc::downcast`]: https://doc.rust-lang.org/std/rc/struct.Rc.html#method.downcast
+
+
Version 1.28.0 (2018-08-02)
===========================
"crossbeam-utils 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
"serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "rls-data"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "rls-rustc"
version = "0.5.0"
"rustc_target 0.0.0",
"scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
"tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
"rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_cratesio_shim 0.0.0",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
version = "0.0.0"
dependencies = [
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-data 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_data_structures 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
[[package]]
name = "serialize"
version = "0.0.0"
+dependencies = [
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "shell-escape"
[[package]]
name = "smallvec"
-version = "0.6.3"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_target 0.0.0",
"scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax_pos 0.0.0",
]
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
"checksum rls-analysis 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96f84d303dcbe1c1bdd41b10867d3399c38fbdac32c4e3645cdb6dbd7f82db1d"
"checksum rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e4a9cc2545ccb7e05b355bfe047b8039a6ec12270d5f3c996b766b340a50f7d2"
"checksum rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd20763e1c60ae8945384c8a8fa4ac44f8afa7b0a817511f5e8927e5d24f988"
+"checksum rls-data 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f81e838ecff6830ed33c2907fd236f38d441c206e983a2aa29fbce99295fab9"
"checksum rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9dba7390427aefa953608429701e3665192ca810ba8ae09301e001b7c7bed0"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
"checksum rls-vfs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ecbc8541b4c341d6271eae10f869dd9d36db871afe184f5b6f9bffbd6ed0373f"
"checksum shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "170a13e64f2a51b77a45702ba77287f5c6829375b04a69cf2222acd17d0cfab9"
"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
"checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537"
-"checksum smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "26df3bb03ca5eac2e64192b723d51f56c1b1e0860e7c766281f4598f181acdc8"
+"checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
"checksum socket2 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "962a516af4d3a7c272cb3a1d50a8cc4e5b41802e4ad54cfb7bee8ba61d37d703"
"checksum stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffbc596e092fe5f598b12ef46cc03754085ac2f4d8c739ad61c4ae266cc3b3fa"
"checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423"
export CFLAGS="-fPIC $CFLAGS"
-# FIXME: remove the patch when upate to 1.1.20
+# FIXME: remove the patch when updating to 1.1.20
MUSL=musl-1.1.19
# may have been downloaded in a previous run
-Subproject commit 88cdde350fd3a90c93f3bac8b4f168f105d28060
+Subproject commit 16c9dee7666c2b2766fd98d89003e028679d1207
# Use Rust
-Once you've gotten familliar with the language, these resources can help you
+Once you've gotten familiar with the language, these resources can help you
when you're actually using it day-to-day.
## The Standard Library
-Subproject commit 790e96b87f4b5817cac310e73a524d25c3d076d8
+Subproject commit ae42ad7aa4d7907cca941371c9eee8de8c2ee40d
This flag lets you control how many threads are used when doing
code generation.
-Increasing paralellism may speed up compile times, but may also
+Increasing parallelism may speed up compile times, but may also
produce slower code.
## remark
pub struct S(u8);
fn f() {
- // this is trying to use S from the 'use' line, but becuase the `u8` is
+ // this is trying to use S from the 'use' line, but because the `u8` is
// not pub, it is private
::S;
}
## mutable-transmutes
-This lint catches transmuting from `&T` to `&mut T` becuase it is undefined
+This lint catches transmuting from `&T` to `&mut T` because it is undefined
behavior. Some example code that triggers this lint:
```rust,ignore
# Unstable features
-Rustdoc is under active developement, and like the Rust compiler, some features are only available
+Rustdoc is under active development, and like the Rust compiler, some features are only available
on the nightly releases. Some of these are new and need some more testing before they're able to get
released to the world at large, and some of them are tied to features in the Rust compiler that are
themselves unstable. Several features here require a matching `#![feature(...)]` attribute to
+++ /dev/null
-# `catch_expr`
-
-The tracking issue for this feature is: [#31436]
-
-[#31436]: https://github.com/rust-lang/rust/issues/31436
-
-------------------------
-
-The `catch_expr` feature adds support for a `catch` expression. The `catch`
-expression creates a new scope one can use the `?` operator in.
-
-```rust
-#![feature(catch_expr)]
-
-use std::num::ParseIntError;
-
-let result: Result<i32, ParseIntError> = do catch {
- "1".parse::<i32>()?
- + "2".parse::<i32>()?
- + "3".parse::<i32>()?
-};
-assert_eq!(result, Ok(6));
-
-let result: Result<i32, ParseIntError> = do catch {
- "1".parse::<i32>()?
- + "foo".parse::<i32>()?
- + "3".parse::<i32>()?
-};
-assert!(result.is_err());
-```
------------------------
The `infer_outlives_requirements` feature indicates that certain
-outlives requirements can be infered by the compiler rather than
+outlives requirements can be inferred by the compiler rather than
stating them explicitly.
For example, currently generic struct definitions that contain
references, require where-clauses of the form T: 'a. By using
-this feature the outlives predicates will be infered, although
+this feature the outlives predicates will be inferred, although
they may still be written explicitly.
```rust,ignore (pseudo-Rust)
------------------------
The `infer_static_outlives_requirements` feature indicates that certain
-`'static` outlives requirements can be infered by the compiler rather than
+`'static` outlives requirements can be inferred by the compiler rather than
stating them explicitly.
Note: It is an accompanying feature to `infer_outlives_requirements`,
For example, currently generic struct definitions that contain
references, require where-clauses of the form T: 'static. By using
-this feature the outlives predicates will be infered, although
+this feature the outlives predicates will be inferred, although
they may still be written explicitly.
```rust,ignore (pseudo-Rust)
+++ /dev/null
-# `macro_vis_matcher`
-
-The tracking issue for this feature is: [#41022]
-
-With this feature gate enabled, the [list of fragment specifiers][frags] gains one more entry:
-
-* `vis`: a visibility qualifier. Examples: nothing (default visibility); `pub`; `pub(crate)`.
-
-A `vis` variable may be followed by a comma, ident, type, or path.
-
-[#41022]: https://github.com/rust-lang/rust/issues/41022
-[frags]: ../book/first-edition/macros.html#syntactic-requirements
-
-------------------------
```rust,ignore
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
#![feature(macro_at_most_once_rep)]
extern crate syntax;
+++ /dev/null
-# `tool_attributes`
-
-The tracking issue for this feature is: [#44690]
-
-[#44690]: https://github.com/rust-lang/rust/issues/44690
-
-------------------------
-
-Tool attributes let you use scoped attributes to control the behavior
-of certain tools.
-
-Currently tool names which can be appear in scoped attributes are restricted to
-`clippy` and `rustfmt`.
-
-## An example
-
-```rust
-#![feature(tool_attributes)]
-
-#[rustfmt::skip]
-fn foo() { println!("hello, world"); }
-
-fn main() {
- foo();
-}
-```
--- /dev/null
+# `try_blocks`
+
+The tracking issue for this feature is: [#31436]
+
+[#31436]: https://github.com/rust-lang/rust/issues/31436
+
+------------------------
+
+The `try_blocks` feature adds support for `try` blocks. A `try`
+block creates a new scope one can use the `?` operator in.
+
+```rust,ignore
+// This code needs the 2018 edition
+
+#![feature(try_blocks)]
+
+use std::num::ParseIntError;
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "2".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert_eq!(result, Ok(6));
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "foo".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert!(result.is_err());
+```
# Set the environment variable `RUST_GDB` to overwrite the call to a
# different/specific command (defaults to `gdb`).
RUST_GDB="${RUST_GDB:-gdb}"
-PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" ${RUST_GDB} \
+PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" exec ${RUST_GDB} \
--directory="$GDB_PYTHON_MODULE_DIRECTORY" \
-iex "add-auto-load-safe-path $GDB_PYTHON_MODULE_DIRECTORY" \
"$@"
echo "***"
fi
-# Create a tempfile containing the LLDB script we want to execute on startup
-TMPFILE=`mktemp /tmp/rust-lldb-commands.XXXXXX`
-
-# Make sure to delete the tempfile no matter what
-trap "rm -f $TMPFILE; exit" INT TERM EXIT
-
# Find out where to look for the pretty printer Python module
RUSTC_SYSROOT=`rustc --print sysroot`
-# Write the LLDB script to the tempfile
-echo "command script import \"$RUSTC_SYSROOT/lib/rustlib/etc/lldb_rust_formatters.py\"" >> $TMPFILE
-echo "type summary add --no-value --python-function lldb_rust_formatters.print_val -x \".*\" --category Rust" >> $TMPFILE
-echo "type category enable Rust" >> $TMPFILE
+# Prepare commands that will be loaded before any file on the command line has been loaded
+script_import="command script import \"$RUSTC_SYSROOT/lib/rustlib/etc/lldb_rust_formatters.py\""
+category_definition="type summary add --no-value --python-function lldb_rust_formatters.print_val -x \".*\" --category Rust"
+category_enable="type category enable Rust"
-# Call LLDB with the script added to the argument list
-lldb --source-before-file="$TMPFILE" "$@"
+# Call LLDB with the commands added to the argument list
+exec lldb --one-line-before-file="$script_import" \
+ --one-line-before-file="$category_definition" \
+ --one-line-before-file="$category_enable" \
+ "$@"
.unwrap_or_else(|_| handle_alloc_error(layout));
let mut i = ptr.cast::<u8>().as_ptr();
- let end = i.offset(layout.size() as isize);
+ let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
Box(Unique::new_unchecked(raw))
}
- /// Consumes the `Box`, returning the wrapped raw pointer.
+ /// Consumes the `Box`, returning a wrapped raw pointer.
+ ///
+ /// The pointer will be properly aligned and non-null.
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Box`. In particular, the
impl<T> Drop for BoxBuilder<T> {
fn drop(&mut self) {
let mut data = self.data.ptr();
- let max = unsafe { data.offset(self.len as isize) };
+ let max = unsafe { data.add(self.len) };
while data != max {
unsafe {
let new_len = self.node.len() - self.idx - 1;
ptr::copy_nonoverlapping(
- self.node.keys().as_ptr().offset(self.idx as isize + 1),
+ self.node.keys().as_ptr().add(self.idx + 1),
new_node.keys.as_mut_ptr(),
new_len
);
ptr::copy_nonoverlapping(
- self.node.vals().as_ptr().offset(self.idx as isize + 1),
+ self.node.vals().as_ptr().add(self.idx + 1),
new_node.vals.as_mut_ptr(),
new_len
);
let new_len = self.node.len() - self.idx - 1;
ptr::copy_nonoverlapping(
- self.node.keys().as_ptr().offset(self.idx as isize + 1),
+ self.node.keys().as_ptr().add(self.idx + 1),
new_node.data.keys.as_mut_ptr(),
new_len
);
ptr::copy_nonoverlapping(
- self.node.vals().as_ptr().offset(self.idx as isize + 1),
+ self.node.vals().as_ptr().add(self.idx + 1),
new_node.data.vals.as_mut_ptr(),
new_len
);
ptr::copy_nonoverlapping(
- self.node.as_internal().edges.as_ptr().offset(self.idx as isize + 1),
+ self.node.as_internal().edges.as_ptr().add(self.idx + 1),
new_node.edges.as_mut_ptr(),
new_len + 1
);
slice_remove(self.node.keys_mut(), self.idx));
ptr::copy_nonoverlapping(
right_node.keys().as_ptr(),
- left_node.keys_mut().as_mut_ptr().offset(left_len as isize + 1),
+ left_node.keys_mut().as_mut_ptr().add(left_len + 1),
right_len
);
ptr::write(left_node.vals_mut().get_unchecked_mut(left_len),
slice_remove(self.node.vals_mut(), self.idx));
ptr::copy_nonoverlapping(
right_node.vals().as_ptr(),
- left_node.vals_mut().as_mut_ptr().offset(left_len as isize + 1),
+ left_node.vals_mut().as_mut_ptr().add(left_len + 1),
right_len
);
.as_internal_mut()
.edges
.as_mut_ptr()
- .offset(left_len as isize + 1),
+ .add(left_len + 1),
right_len + 1
);
// Make room for stolen elements in the right child.
ptr::copy(right_kv.0,
- right_kv.0.offset(count as isize),
+ right_kv.0.add(count),
right_len);
ptr::copy(right_kv.1,
- right_kv.1.offset(count as isize),
+ right_kv.1.add(count),
right_len);
// Move elements from the left child to the right one.
// Make room for stolen edges.
let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
ptr::copy(right_edges,
- right_edges.offset(count as isize),
+ right_edges.add(count),
right_len + 1);
right.correct_childrens_parent_links(count, count + right_len + 1);
move_kv(right_kv, count - 1, parent_kv, 0, 1);
// Fix right indexing
- ptr::copy(right_kv.0.offset(count as isize),
+ ptr::copy(right_kv.0.add(count),
right_kv.0,
new_right_len);
- ptr::copy(right_kv.1.offset(count as isize),
+ ptr::copy(right_kv.1.add(count),
right_kv.1,
new_right_len);
}
// Fix right indexing.
let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
- ptr::copy(right_edges.offset(count as isize),
+ ptr::copy(right_edges.add(count),
right_edges,
new_right_len + 1);
right.correct_childrens_parent_links(0, new_right_len + 1);
dest: (*mut K, *mut V), dest_offset: usize,
count: usize)
{
- ptr::copy_nonoverlapping(source.0.offset(source_offset as isize),
- dest.0.offset(dest_offset as isize),
+ ptr::copy_nonoverlapping(source.0.add(source_offset),
+ dest.0.add(dest_offset),
count);
- ptr::copy_nonoverlapping(source.1.offset(source_offset as isize),
- dest.1.offset(dest_offset as isize),
+ ptr::copy_nonoverlapping(source.1.add(source_offset),
+ dest.1.add(dest_offset),
count);
}
{
let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
- ptr::copy_nonoverlapping(source_ptr.offset(source_offset as isize),
- dest_ptr.offset(dest_offset as isize),
+ ptr::copy_nonoverlapping(source_ptr.add(source_offset),
+ dest_ptr.add(dest_offset),
count);
dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
}
unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
ptr::copy(
- slice.as_ptr().offset(idx as isize),
- slice.as_mut_ptr().offset(idx as isize + 1),
+ slice.as_ptr().add(idx),
+ slice.as_mut_ptr().add(idx + 1),
slice.len() - idx
);
ptr::write(slice.get_unchecked_mut(idx), val);
unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
let ret = ptr::read(slice.get_unchecked(idx));
ptr::copy(
- slice.as_ptr().offset(idx as isize + 1),
- slice.as_mut_ptr().offset(idx as isize),
+ slice.as_ptr().add(idx + 1),
+ slice.as_mut_ptr().add(idx),
slice.len() - idx - 1
);
ret
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
- ptr::read(self.ptr().offset(off as isize))
+ ptr::read(self.ptr().add(off))
}
/// Writes an element into the buffer, moving it.
#[inline]
unsafe fn buffer_write(&mut self, off: usize, value: T) {
- ptr::write(self.ptr().offset(off as isize), value);
+ ptr::write(self.ptr().add(off), value);
}
/// Returns `true` if and only if the buffer is at full capacity.
src,
len,
self.cap());
- ptr::copy(self.ptr().offset(src as isize),
- self.ptr().offset(dst as isize),
+ ptr::copy(self.ptr().add(src),
+ self.ptr().add(dst),
len);
}
src,
len,
self.cap());
- ptr::copy_nonoverlapping(self.ptr().offset(src as isize),
- self.ptr().offset(dst as isize),
+ ptr::copy_nonoverlapping(self.ptr().add(src),
+ self.ptr().add(dst),
len);
}
- /// Returns a pair of slices which contain the contents of the buffer not used by the VecDeque.
- #[inline]
- unsafe fn unused_as_mut_slices<'a>(&'a mut self) -> (&'a mut [T], &'a mut [T]) {
- let head = self.head;
- let tail = self.tail;
- let buf = self.buffer_as_mut_slice();
- if head != tail {
- // In buf, head..tail contains the VecDeque and tail..head is unused.
- // So calling `ring_slices` with tail and head swapped returns unused slices.
- RingSlices::ring_slices(buf, tail, head)
- } else {
- // Swapping doesn't help when head == tail.
- let (before, after) = buf.split_at_mut(head);
- (after, before)
- }
- }
-
/// Copies a potentially wrapping block of memory len long from src to dest.
/// (abs(dst - src) + len) must be no larger than cap() (There must be at
/// most one continuous overlapping region between src and dest).
pub fn get(&self, index: usize) -> Option<&T> {
if index < self.len() {
let idx = self.wrap_add(self.tail, index);
- unsafe { Some(&*self.ptr().offset(idx as isize)) }
+ unsafe { Some(&*self.ptr().add(idx)) }
} else {
None
}
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
if index < self.len() {
let idx = self.wrap_add(self.tail, index);
- unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
+ unsafe { Some(&mut *self.ptr().add(idx)) }
} else {
None
}
let ri = self.wrap_add(self.tail, i);
let rj = self.wrap_add(self.tail, j);
unsafe {
- ptr::swap(self.ptr().offset(ri as isize),
- self.ptr().offset(rj as isize))
+ ptr::swap(self.ptr().add(ri),
+ self.ptr().add(rj))
}
}
// `at` lies in the first half.
let amount_in_first = first_len - at;
- ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
+ ptr::copy_nonoverlapping(first_half.as_ptr().add(at),
other.ptr(),
amount_in_first);
// just take all of the second half.
ptr::copy_nonoverlapping(second_half.as_ptr(),
- other.ptr().offset(amount_in_first as isize),
+ other.ptr().add(amount_in_first),
second_len);
} else {
// `at` lies in the second half, need to factor in the elements we skipped
// in the first half.
let offset = at - first_len;
let amount_in_second = second_len - offset;
- ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
+ ptr::copy_nonoverlapping(second_half.as_ptr().add(offset),
other.ptr(),
amount_in_second);
}
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
- // Copies all values from `src_slice` to the start of `dst_slice`.
- unsafe fn copy_whole_slice<T>(src_slice: &[T], dst_slice: &mut [T]) {
- let len = src_slice.len();
- ptr::copy_nonoverlapping(src_slice.as_ptr(), dst_slice[..len].as_mut_ptr(), len);
- }
-
- let src_total = other.len();
-
- // Guarantees there is space in `self` for `other`.
- self.reserve(src_total);
-
- self.head = {
- let original_head = self.head;
-
- // The goal is to copy all values from `other` into `self`. To avoid any
- // mismatch, all valid values in `other` are retrieved...
- let (src_high, src_low) = other.as_slices();
- // and unoccupied parts of self are retrieved.
- let (dst_high, dst_low) = unsafe { self.unused_as_mut_slices() };
-
- // Then all that is needed is to copy all values from
- // src (src_high and src_low) to dst (dst_high and dst_low).
- //
- // other [o o o . . . . . o o o o]
- // [5 6 7] [1 2 3 4]
- // src_low src_high
- //
- // self [. . . . . . o o o o . .]
- // [3 4 5 6 7 .] [1 2]
- // dst_low dst_high
- //
- // Values are not copied one by one but as slices in `copy_whole_slice`.
- // What slices are used depends on various properties of src and dst.
- // There are 6 cases in total:
- // 1. `src` is contiguous and fits in dst_high
- // 2. `src` is contiguous and does not fit in dst_high
- // 3. `src` is discontiguous and fits in dst_high
- // 4. `src` is discontiguous and does not fit in dst_high
- // + src_high is smaller than dst_high
- // 5. `src` is discontiguous and does not fit in dst_high
- // + dst_high is smaller than src_high
- // 6. `src` is discontiguous and does not fit in dst_high
- // + dst_high is the same size as src_high
- let src_contiguous = src_low.is_empty();
- let dst_high_fits_src = dst_high.len() >= src_total;
- match (src_contiguous, dst_high_fits_src) {
- (true, true) => {
- // 1.
- // other [. . . o o o . . . . . .]
- // [] [1 1 1]
- //
- // self [. o o o o o . . . . . .]
- // [.] [1 1 1 . . .]
-
- unsafe {
- copy_whole_slice(src_high, dst_high);
- }
- original_head + src_total
- }
- (true, false) => {
- // 2.
- // other [. . . o o o o o . . . .]
- // [] [1 1 2 2 2]
- //
- // self [. . . . . . . o o o . .]
- // [2 2 2 . . . .] [1 1]
-
- let (src_1, src_2) = src_high.split_at(dst_high.len());
- unsafe {
- copy_whole_slice(src_1, dst_high);
- copy_whole_slice(src_2, dst_low);
- }
- src_total - dst_high.len()
- }
- (false, true) => {
- // 3.
- // other [o o . . . . . . . o o o]
- // [2 2] [1 1 1]
- //
- // self [. o o . . . . . . . . .]
- // [.] [1 1 1 2 2 . . . .]
-
- let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len());
- unsafe {
- copy_whole_slice(src_high, dst_1);
- copy_whole_slice(src_low, dst_2);
- }
- original_head + src_total
- }
- (false, false) => {
- if src_high.len() < dst_high.len() {
- // 4.
- // other [o o o . . . . . . o o o]
- // [2 3 3] [1 1 1]
- //
- // self [. . . . . . o o . . . .]
- // [3 3 . . . .] [1 1 1 2]
-
- let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len());
- let (src_2, src_3) = src_low.split_at(dst_2.len());
- unsafe {
- copy_whole_slice(src_high, dst_1);
- copy_whole_slice(src_2, dst_2);
- copy_whole_slice(src_3, dst_low);
- }
- src_3.len()
- } else if src_high.len() > dst_high.len() {
- // 5.
- // other [o o o . . . . . o o o o]
- // [3 3 3] [1 1 2 2]
- //
- // self [. . . . . . o o o o . .]
- // [2 2 3 3 3 .] [1 1]
-
- let (src_1, src_2) = src_high.split_at(dst_high.len());
- let (dst_2, dst_3) = dst_low.split_at_mut(src_2.len());
- unsafe {
- copy_whole_slice(src_1, dst_high);
- copy_whole_slice(src_2, dst_2);
- copy_whole_slice(src_low, dst_3);
- }
- dst_2.len() + src_low.len()
- } else {
- // 6.
- // other [o o . . . . . . . o o o]
- // [2 2] [1 1 1]
- //
- // self [. . . . . . . o o . . .]
- // [2 2 . . . . .] [1 1 1]
-
- unsafe {
- copy_whole_slice(src_high, dst_high);
- copy_whole_slice(src_low, dst_low);
- }
- src_low.len()
- }
- }
- }
- };
-
- // Some values now exist in both `other` and `self` but are made inaccessible in `other`.
- other.tail = other.head;
+ // naive impl
+ self.extend(other.drain(..));
}
/// Retains only the elements specified by the predicate.
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
f.debug_tuple("Iter")
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
- .finish()
+ .field(&front)
+ .field(&back)
+ .finish()
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(&*self.ring, self.head, self.tail);
f.debug_tuple("IterMut")
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
- .finish()
+ .field(&front)
+ .field(&back)
+ .finish()
}
}
// Need to move the ring to the front of the buffer, as vec will expect this.
if other.is_contiguous() {
- ptr::copy(buf.offset(tail as isize), buf, len);
+ ptr::copy(buf.add(tail), buf, len);
} else {
if (tail - head) >= cmp::min(cap - tail, head) {
// There is enough free space in the centre for the shortest block so we can
// do this in at most three copy moves.
if (cap - tail) > head {
// right hand block is the long one; move that enough for the left
- ptr::copy(buf.offset(tail as isize),
- buf.offset((tail - head) as isize),
+ ptr::copy(buf.add(tail),
+ buf.add(tail - head),
cap - tail);
// copy left in the end
- ptr::copy(buf, buf.offset((cap - head) as isize), head);
+ ptr::copy(buf, buf.add(cap - head), head);
// shift the new thing to the start
- ptr::copy(buf.offset((tail - head) as isize), buf, len);
+ ptr::copy(buf.add(tail - head), buf, len);
} else {
// left hand block is the long one, we can do it in two!
- ptr::copy(buf, buf.offset((cap - tail) as isize), head);
- ptr::copy(buf.offset(tail as isize), buf, cap - tail);
+ ptr::copy(buf, buf.add(cap - tail), head);
+ ptr::copy(buf.add(tail), buf, cap - tail);
}
} else {
// Need to use N swaps to move the ring
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
let src: isize = (right_edge + right_offset) as isize;
- ptr::swap(buf.offset(i as isize), buf.offset(src));
+ ptr::swap(buf.add(i), buf.offset(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;
}
}
+ #[test]
+ fn issue_53529() {
+ use boxed::Box;
+
+ let mut dst = VecDeque::new();
+ dst.push_front(Box::new(1));
+ dst.push_front(Box::new(2));
+ assert_eq!(*dst.pop_back().unwrap(), 1);
+
+ let mut src = VecDeque::new();
+ src.push_front(Box::new(2));
+ dst.append(&mut src);
+ for a in dst {
+ assert_eq!(*a, 2);
+ }
+ }
+
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![unstable(feature = "raw_vec_internals", reason = "implemention detail", issue = "0")]
+#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "0")]
#![doc(hidden)]
use core::cmp;
/// // double would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// unsafe {
- /// ptr::write(self.buf.ptr().offset(self.len as isize), elem);
+ /// ptr::write(self.buf.ptr().add(self.len), elem);
/// }
/// self.len += 1;
/// }
/// // `isize::MAX` so this is safe to do unchecked now.
/// for x in elems {
/// unsafe {
- /// ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
+ /// ptr::write(self.buf.ptr().add(self.len), x.clone());
/// }
/// self.len += 1;
/// }
};
for (i, item) in v.iter().enumerate() {
- ptr::write(elems.offset(i as isize), item.clone());
+ ptr::write(elems.add(i), item.clone());
guard.n_elems += 1;
}
#[inline]
fn inc_strong(&self) {
- self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+ // We want to abort on overflow instead of dropping the value.
+ // The reference count will never be zero when this is called;
+ // nevertheless, we insert an abort here to hint LLVM at
+ // an otherwise missed optimization.
+ if self.strong() == 0 || self.strong() == usize::max_value() {
+ unsafe { abort(); }
+ }
+ self.inner().strong.set(self.strong() + 1);
}
#[inline]
#[inline]
fn inc_weak(&self) {
- self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+ // We want to abort on overflow instead of dropping the value.
+ // The reference count will never be zero when this is called;
+ // nevertheless, we insert an abort here to hint LLVM at
+ // an otherwise missed optimization.
+ if self.weak() == 0 || self.weak() == usize::max_value() {
+ unsafe { abort(); }
+ }
+ self.inner().weak.set(self.weak() + 1);
}
#[inline]
{
let len = v.len();
let v = v.as_mut_ptr();
- let v_mid = v.offset(mid as isize);
- let v_end = v.offset(len as isize);
+ let v_mid = v.add(mid);
+ let v_end = v.add(len);
// The merge process first copies the shorter run into `buf`. Then it traces the newly copied
// run and the longer run forwards (or backwards), comparing their next unconsumed elements and
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole {
start: buf,
- end: buf.offset(mid as isize),
+ end: buf.add(mid),
dest: v,
};
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole {
start: buf,
- end: buf.offset((len - mid) as isize),
+ end: buf.add(len - mid),
dest: v_mid,
};
unsafe { String::from_utf8_unchecked(slice.into_vec()) }
}
- /// Create a [`String`] by repeating a string `n` times.
+ /// Creates a new [`String`] by repeating a string `n` times.
///
/// [`String`]: string/struct.String.html
///
self.vec
}
- /// Extracts a string slice containing the entire string.
+ /// Extracts a string slice containing the entire `String`.
///
/// # Examples
///
let next = idx + ch.len_utf8();
let len = self.len();
unsafe {
- ptr::copy(self.vec.as_ptr().offset(next as isize),
- self.vec.as_mut_ptr().offset(idx as isize),
+ ptr::copy(self.vec.as_ptr().add(next),
+ self.vec.as_mut_ptr().add(idx),
len - next);
self.vec.set_len(len - (next - idx));
}
del_bytes += ch_len;
} else if del_bytes > 0 {
unsafe {
- ptr::copy(self.vec.as_ptr().offset(idx as isize),
- self.vec.as_mut_ptr().offset((idx - del_bytes) as isize),
+ ptr::copy(self.vec.as_ptr().add(idx),
+ self.vec.as_mut_ptr().add(idx - del_bytes),
ch_len);
}
}
let amt = bytes.len();
self.vec.reserve(amt);
- ptr::copy(self.vec.as_ptr().offset(idx as isize),
- self.vec.as_mut_ptr().offset((idx + amt) as isize),
+ ptr::copy(self.vec.as_ptr().add(idx),
+ self.vec.as_mut_ptr().add(idx + amt),
len - idx);
ptr::copy(bytes.as_ptr(),
- self.vec.as_mut_ptr().offset(idx as isize),
+ self.vec.as_mut_ptr().add(idx),
amt);
self.vec.set_len(len + amt);
}
self.vec.clear()
}
- /// Creates a draining iterator that removes the specified range in the string
- /// and yields the removed chars.
+ /// Creates a draining iterator that removes the specified range in the `String`
+ /// and yields the removed `chars`.
///
/// Note: The element range is removed even if the iterator is not
/// consumed until the end.
};
for (i, item) in v.iter().enumerate() {
- ptr::write(elems.offset(i as isize), item.clone());
+ ptr::write(elems.add(i), item.clone());
guard.n_elems += 1;
}
pub fn truncate(&mut self, len: usize) {
let current_len = self.len;
unsafe {
- let mut ptr = self.as_mut_ptr().offset(self.len as isize);
+ let mut ptr = self.as_mut_ptr().add(self.len);
// Set the final length at the end, keeping in mind that
// dropping an element might panic. Works around a missed
// optimization, as seen in the following issue:
// infallible
// The spot to put the new value
{
- let p = self.as_mut_ptr().offset(index as isize);
+ let p = self.as_mut_ptr().add(index);
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
ptr::copy(p, p.offset(1), len - index);
let ret;
{
// the place we are taking from.
- let ptr = self.as_mut_ptr().offset(index as isize);
+ let ptr = self.as_mut_ptr().add(index);
// copy it out, unsafely having a copy of the value on
// the stack and in the vector at the same time.
ret = ptr::read(ptr);
let mut w: usize = 1;
while r < ln {
- let p_r = p.offset(r as isize);
- let p_wm1 = p.offset((w - 1) as isize);
+ let p_r = p.add(r);
+ let p_wm1 = p.add(w - 1);
if !same_bucket(&mut *p_r, &mut *p_wm1) {
if r != w {
let p_w = p_wm1.offset(1);
self.reserve(1);
}
unsafe {
- let end = self.as_mut_ptr().offset(self.len as isize);
+ let end = self.as_mut_ptr().add(self.len);
ptr::write(end, value);
self.len += 1;
}
self.set_len(start);
// Use the borrow in the IterMut to indicate borrowing behavior of the
// whole Drain iterator (like &mut T).
- let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().offset(start as isize),
+ let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start),
end - start);
Drain {
tail_start: end,
self.set_len(at);
other.set_len(other_len);
- ptr::copy_nonoverlapping(self.as_ptr().offset(at as isize),
+ ptr::copy_nonoverlapping(self.as_ptr().add(at),
other.as_mut_ptr(),
other.len());
}
self.reserve(n);
unsafe {
- let mut ptr = self.as_mut_ptr().offset(self.len() as isize);
+ let mut ptr = self.as_mut_ptr().add(self.len());
// Use SetLenOnDrop to work around bug where compiler
// may not realize the store through `ptr` through self.set_len()
// don't alias.
let end = if mem::size_of::<T>() == 0 {
arith_offset(begin as *const i8, self.len() as isize) as *const T
} else {
- begin.offset(self.len() as isize) as *const T
+ begin.add(self.len()) as *const T
};
let cap = self.buf.cap();
mem::forget(self);
if let Some(additional) = high {
self.reserve(additional);
unsafe {
- let mut ptr = self.as_mut_ptr().offset(self.len() as isize);
+ let mut ptr = self.as_mut_ptr().add(self.len());
let mut local_len = SetLenOnDrop::new(&mut self.len);
for element in iterator {
ptr::write(ptr, element);
let start = source_vec.len();
let tail = self.tail_start;
if tail != start {
- let src = source_vec.as_ptr().offset(tail as isize);
- let dst = source_vec.as_mut_ptr().offset(start as isize);
+ let src = source_vec.as_ptr().add(tail);
+ let dst = source_vec.as_mut_ptr().add(start);
ptr::copy(src, dst, self.tail_len);
}
source_vec.set_len(start + self.tail_len);
let range_start = vec.len;
let range_end = self.tail_start;
let range_slice = slice::from_raw_parts_mut(
- vec.as_mut_ptr().offset(range_start as isize),
+ vec.as_mut_ptr().add(range_start),
range_end - range_start);
for place in range_slice {
vec.buf.reserve(used_capacity, extra_capacity);
let new_tail_start = self.tail_start + extra_capacity;
- let src = vec.as_ptr().offset(self.tail_start as isize);
- let dst = vec.as_mut_ptr().offset(new_tail_start as isize);
+ let src = vec.as_ptr().add(self.tail_start);
+ let dst = vec.as_mut_ptr().add(new_tail_start);
ptr::copy(src, dst, self.tail_len);
self.tail_start = new_tail_start;
}
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
- let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
*get_header(aligned) = Header(ptr);
aligned
}
// A pointer as large as possible for zero-sized elements.
!0 as *mut T
} else {
- self.start().offset(self.storage.cap() as isize)
+ self.start().add(self.storage.cap())
}
}
}
unsafe {
let start_ptr = self.ptr.get();
let arena_slice = slice::from_raw_parts_mut(start_ptr, slice.len());
- self.ptr.set(start_ptr.offset(arena_slice.len() as isize));
+ self.ptr.set(start_ptr.add(arena_slice.len()));
arena_slice.copy_from_slice(slice);
arena_slice
}
/// - The `Future` trait is currently not object safe: The `Future::poll`
/// method makes use of the arbitrary self types feature and traits in which
/// this feature is used are currently not object safe due to current compiler
-/// limitations. (See tracking issue for arbitray self types for more
+/// limitations. (See tracking issue for arbitrary self types for more
/// information #44874)
pub struct LocalFutureObj<'a, T> {
ptr: *mut (),
/// - The `Future` trait is currently not object safe: The `Future::poll`
/// method makes use of the arbitrary self types feature and traits in which
/// this feature is used are currently not object safe due to current compiler
-/// limitations. (See tracking issue for arbitray self types for more
+/// limitations. (See tracking issue for arbitrary self types for more
/// information #44874)
pub struct FutureObj<'a, T>(LocalFutureObj<'a, T>);
/// // treat it as "dead", and therefore, you only have two real
/// // mutable slices.
/// (slice::from_raw_parts_mut(ptr, mid),
- /// slice::from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
+ /// slice::from_raw_parts_mut(ptr.add(mid), len - mid))
/// }
/// }
/// ```
/// let ptr = vec.as_ptr();
/// Slice {
/// start: ptr,
-/// end: unsafe { ptr.offset(vec.len() as isize) },
+/// end: unsafe { ptr.add(vec.len()) },
/// phantom: PhantomData,
/// }
/// }
unsafe impl<'a, T: ?Sized> Freeze for &'a T {}
unsafe impl<'a, T: ?Sized> Freeze for &'a mut T {}
-/// Types which can be moved out of a `PinMut`.
+/// Types which can be safely moved after being pinned.
///
-/// The `Unpin` trait is used to control the behavior of the [`PinMut`] type. If a
-/// type implements `Unpin`, it is safe to move a value of that type out of the
-/// `PinMut` pointer.
+/// Since Rust itself has no notion of immovable types, and will consider moves to always be safe,
+/// this trait cannot prevent types from moving by itself.
+///
+/// Instead it can be used to prevent moves through the type system,
+/// by controlling the behavior of special pointer types like [`PinMut`],
+/// which "pin" the type in place by not allowing it to be moved out of them.
+///
+/// Implementing this trait lifts the restrictions of pinning off a type,
+/// which then allows it to move out with functions such as [`replace`].
+///
+/// So this, for example, can only be done on types implementing `Unpin`:
+///
+/// ```rust
+/// #![feature(pin)]
+/// use std::mem::{PinMut, replace};
+///
+/// let mut string = "this".to_string();
+/// let mut pinned_string = PinMut::new(&mut string);
+///
+/// // dereferencing the pointer mutably is only possible because String implements Unpin
+/// replace(&mut *pinned_string, "other".to_string());
+/// ```
///
/// This trait is automatically implemented for almost every type.
///
/// [`PinMut`]: ../mem/struct.PinMut.html
+/// [`replace`]: ../mem/fn.replace.html
#[unstable(feature = "pin", issue = "49150")]
pub auto trait Unpin {}
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::transmute;
-/// Leaks a value: takes ownership and "forgets" about the value **without running
-/// its destructor**.
+/// Takes ownership and "forgets" about the value **without running its destructor**.
///
/// Any resources the value manages, such as heap memory or a file handle, will linger
-/// forever in an unreachable state.
+/// forever in an unreachable state. However, it does not guarantee that pointers
+/// to this memory will remain valid.
///
-/// If you want to dispose of a value properly, running its destructor, see
+/// * If you want to leak memory, see [`Box::leak`][leak].
+/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`][into_raw].
+/// * If you want to dispose of a value properly, running its destructor, see
/// [`mem::drop`][drop].
///
/// # Safety
///
/// # Examples
///
-/// Leak some heap memory by never deallocating it:
-///
-/// ```
-/// use std::mem;
-///
-/// let heap_memory = Box::new(3);
-/// mem::forget(heap_memory);
-/// ```
-///
/// Leak an I/O object, never closing the file:
///
/// ```no_run
/// }
/// ```
///
-/// ## Use case 3
-///
-/// You are transferring ownership across a [FFI] boundary to code written in
-/// another language. You need to `forget` the value on the Rust side because Rust
-/// code is no longer responsible for it.
-///
-/// ```no_run
-/// use std::mem;
-///
-/// extern "C" {
-/// fn my_c_function(x: *const u32);
-/// }
-///
-/// let x: Box<u32> = Box::new(3);
-///
-/// // Transfer ownership into C code.
-/// unsafe {
-/// my_c_function(&*x);
-/// }
-/// mem::forget(x);
-/// ```
-///
-/// In this case, C code must call back into Rust to free the object. Calling C's `free`
-/// function on a [`Box`][box] is *not* safe! Also, `Box` provides an [`into_raw`][into_raw]
-/// method which is the preferred way to do this in practice.
-///
/// [drop]: fn.drop.html
/// [uninit]: fn.uninitialized.html
/// [clone]: ../clone/trait.Clone.html
/// [swap]: fn.swap.html
/// [FFI]: ../../book/first-edition/ffi.html
/// [box]: ../../std/boxed/struct.Box.html
+/// [leak]: ../../std/boxed/struct.Box.html#method.leak
/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw
/// [ub]: ../../reference/behavior-considered-undefined.html
#[inline]
}
}
+macro_rules! doc_comment {
+ ($x:expr, $($tt:tt)*) => {
+ #[doc = $x]
+ $($tt)*
+ };
+}
+
macro_rules! nonzero_integers {
( $( $Ty: ident($Int: ty); )+ ) => {
$(
- /// An integer that is known not to equal zero.
- ///
- /// This enables some memory layout optimization.
- /// For example, `Option<NonZeroU32>` is the same size as `u32`:
- ///
- /// ```rust
- /// use std::mem::size_of;
- /// assert_eq!(size_of::<Option<std::num::NonZeroU32>>(), size_of::<u32>());
- /// ```
- #[stable(feature = "nonzero", since = "1.28.0")]
- #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
- #[repr(transparent)]
- pub struct $Ty(NonZero<$Int>);
+ doc_comment! {
+ concat!("An integer that is known not to equal zero.
+
+This enables some memory layout optimization.
+For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:
+
+```rust
+use std::mem::size_of;
+assert_eq!(size_of::<Option<std::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int),
+">());
+```"),
+ #[stable(feature = "nonzero", since = "1.28.0")]
+ #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+ #[repr(transparent)]
+ pub struct $Ty(NonZero<$Int>);
+ }
impl $Ty {
/// Create a non-zero without checking the value.
pub mod bignum;
pub mod diy_float;
-macro_rules! doc_comment {
- ($x:expr, $($tt:tt)*) => {
- #[doc = $x]
- $($tt)*
- };
-}
-
mod wrapping;
// `Int` + `SignedInt` implemented for signed integers
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(Args="()", note="wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}"),
+ message="expected a `{Fn}<{Args}>` closure, found `{Self}`",
+ label="expected an `Fn<{Args}>` closure, found `{Self}`",
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
pub trait Fn<Args> : FnMut<Args> {
/// Performs the call operation.
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(Args="()", note="wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}"),
+ message="expected a `{FnMut}<{Args}>` closure, found `{Self}`",
+ label="expected an `FnMut<{Args}>` closure, found `{Self}`",
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
pub trait FnMut<Args> : FnOnce<Args> {
/// Performs the call operation.
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(Args="()", note="wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}"),
+ message="expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
+ label="expected an `FnOnce<{Args}>` closure, found `{Self}`",
+)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
pub trait FnOnce<Args> {
/// The returned type after the call operator is used.
// Declaring `t` here avoids aligning the stack when this loop is unused
let mut t: Block = mem::uninitialized();
let t = &mut t as *mut _ as *mut u8;
- let x = x.offset(i as isize);
- let y = y.offset(i as isize);
+ let x = x.add(i);
+ let y = y.add(i);
// Swap a block of bytes of x & y, using t as a temporary buffer
// This should be optimized into efficient SIMD operations where available
let rem = len - i;
let t = &mut t as *mut _ as *mut u8;
- let x = x.offset(i as isize);
- let y = y.offset(i as isize);
+ let x = x.add(i);
+ let y = y.add(i);
copy_nonoverlapping(x, t, rem);
copy_nonoverlapping(y, x, rem);
/// }
/// }
/// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *const u8 = &10u8 as *const u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
- /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
+ /// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// let ptr = &x[n] as *const u8;
/// let offset = ptr.align_offset(align_of::<u16>());
/// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
+ /// let u16_ptr = ptr.add(offset) as *const u16;
/// assert_ne!(*u16_ptr, 500);
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
/// }
/// }
/// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
- /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
+ /// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// let ptr = &x[n] as *const u8;
/// let offset = ptr.align_offset(align_of::<u16>());
/// if offset < x.len() - n - 1 {
- /// let u16_ptr = ptr.offset(offset as isize) as *const u16;
+ /// let u16_ptr = ptr.add(offset) as *const u16;
/// assert_ne!(*u16_ptr, 500);
/// } else {
/// // while the pointer can be aligned via `offset`, it would point
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
-/// than trying to adapt this to accomodate that change.
+/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
#[lang="align_offset"]
if len >= 2 * usize_bytes {
while offset <= len - 2 * usize_bytes {
unsafe {
- let u = *(ptr.offset(offset as isize) as *const usize);
- let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);
+ let u = *(ptr.add(offset) as *const usize);
+ let v = *(ptr.add(offset + usize_bytes) as *const usize);
// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
///
/// unsafe {
/// for i in 0..x.len() {
- /// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize));
+ /// assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
/// }
/// }
/// ```
///
/// unsafe {
/// for i in 0..x.len() {
- /// *x_ptr.offset(i as isize) += 2;
+ /// *x_ptr.add(i) += 2;
/// }
/// }
/// assert_eq!(x, &[3, 4, 6]);
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
- (ptr as *const u8).wrapping_offset(self.len() as isize) as *const T
+ (ptr as *const u8).wrapping_add(self.len()) as *const T
} else {
- ptr.offset(self.len() as isize)
+ ptr.add(self.len())
};
Iter {
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
- (ptr as *mut u8).wrapping_offset(self.len() as isize) as *mut T
+ (ptr as *mut u8).wrapping_add(self.len()) as *mut T
} else {
- ptr.offset(self.len() as isize)
+ ptr.add(self.len())
};
IterMut {
assert!(mid <= len);
(from_raw_parts_mut(ptr, mid),
- from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
+ from_raw_parts_mut(ptr.add(mid), len - mid))
}
}
unsafe {
let p = self.as_mut_ptr();
- rotate::ptr_rotate(mid, p.offset(mid as isize), k);
+ rotate::ptr_rotate(mid, p.add(mid), k);
}
}
unsafe {
let p = self.as_mut_ptr();
- rotate::ptr_rotate(mid, p.offset(mid as isize), k);
+ rotate::ptr_rotate(mid, p.add(mid), k);
}
}
}
}
- /// Function to calculate lenghts of the middle and trailing slice for `align_to{,_mut}`.
+ /// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
fn align_to_offsets<U>(&self) -> (usize, usize) {
// What we are going to do about `rest` is figure out what multiple of `U`s we can put in a
// lowest number of `T`s. And how many `T`s we need for each such "multiple".
(us_len, ts_len)
}
- /// Transmute the slice to a slice of another type, ensuring aligment of the types is
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
let (us_len, ts_len) = rest.align_to_offsets::<U>();
(left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
- from_raw_parts(rest.as_ptr().offset((rest.len() - ts_len) as isize), ts_len))
+ from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len))
}
}
- /// Transmute the slice to a slice of another type, ensuring aligment of the types is
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
let mut_ptr = rest.as_mut_ptr();
(left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
- from_raw_parts_mut(mut_ptr.offset((rest.len() - ts_len) as isize), ts_len))
+ from_raw_parts_mut(mut_ptr.add(rest.len() - ts_len), ts_len))
}
}
}
#[inline]
unsafe fn get_unchecked(self, slice: &[T]) -> &T {
- &*slice.as_ptr().offset(self as isize)
+ &*slice.as_ptr().add(self)
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut T {
- &mut *slice.as_mut_ptr().offset(self as isize)
+ &mut *slice.as_mut_ptr().add(self)
}
#[inline]
#[inline]
unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
- from_raw_parts(slice.as_ptr().offset(self.start as isize), self.end - self.start)
+ from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
- from_raw_parts_mut(slice.as_mut_ptr().offset(self.start as isize), self.end - self.start)
+ from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
}
#[inline]
}
// We are in bounds. `offset` does the right thing even for ZSTs.
unsafe {
- let elem = Some(& $( $mut_ )* *self.ptr.offset(n as isize));
+ let elem = Some(& $( $mut_ )* *self.ptr.add(n));
self.post_inc_start((n as isize).wrapping_add(1));
elem
}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
- from_raw_parts(self.v.as_ptr().offset(i as isize), self.size)
+ from_raw_parts(self.v.as_ptr().add(i), self.size)
}
fn may_have_side_effect() -> bool { false }
}
None => self.v.len(),
Some(end) => cmp::min(end, self.v.len()),
};
- from_raw_parts(self.v.as_ptr().offset(start as isize), end - start)
+ from_raw_parts(self.v.as_ptr().add(start), end - start)
}
fn may_have_side_effect() -> bool { false }
}
None => self.v.len(),
Some(end) => cmp::min(end, self.v.len()),
};
- from_raw_parts_mut(self.v.as_mut_ptr().offset(start as isize), end - start)
+ from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
}
fn may_have_side_effect() -> bool { false }
}
unsafe impl<'a, T> TrustedRandomAccess for ExactChunks<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
let start = i * self.chunk_size;
- from_raw_parts(self.v.as_ptr().offset(start as isize), self.chunk_size)
+ from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
}
fn may_have_side_effect() -> bool { false }
}
unsafe impl<'a, T> TrustedRandomAccess for ExactChunksMut<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let start = i * self.chunk_size;
- from_raw_parts_mut(self.v.as_mut_ptr().offset(start as isize), self.chunk_size)
+ from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
}
fn may_have_side_effect() -> bool { false }
}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
- &*self.ptr.offset(i as isize)
+ &*self.ptr.add(i)
}
fn may_have_side_effect() -> bool { false }
}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
- &mut *self.ptr.offset(i as isize)
+ &mut *self.ptr.add(i)
}
fn may_have_side_effect() -> bool { false }
}
}
ptr::swap_nonoverlapping(
- mid.offset(-(left as isize)),
- mid.offset((right-delta) as isize),
+ mid.sub(left),
+ mid.add(right - delta),
delta);
if left <= right {
let rawarray = RawArray::new();
let buf = rawarray.ptr();
- let dim = mid.offset(-(left as isize)).offset(right as isize);
+ let dim = mid.sub(left).add(right);
if left <= right {
- ptr::copy_nonoverlapping(mid.offset(-(left as isize)), buf, left);
- ptr::copy(mid, mid.offset(-(left as isize)), right);
+ ptr::copy_nonoverlapping(mid.sub(left), buf, left);
+ ptr::copy(mid, mid.sub(left), right);
ptr::copy_nonoverlapping(buf, dim, left);
}
else {
ptr::copy_nonoverlapping(mid, buf, right);
- ptr::copy(mid.offset(-(left as isize)), dim, left);
- ptr::copy_nonoverlapping(buf, mid.offset(-(left as isize)), right);
+ ptr::copy(mid.sub(left), dim, left);
+ ptr::copy_nonoverlapping(buf, mid.sub(left), right);
}
}
// 3. `end` - End pointer into the `offsets` array.
// 4. `offsets` - Indices of out-of-order elements within the block.
- // The current block on the left side (from `l` to `l.offset(block_l)`).
+ // The current block on the left side (from `l` to `l.add(block_l)`).
let mut l = v.as_mut_ptr();
let mut block_l = BLOCK;
let mut start_l = ptr::null_mut();
let mut end_l = ptr::null_mut();
let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
- // The current block on the right side (from `r.offset(-block_r)` to `r`).
- let mut r = unsafe { l.offset(v.len() as isize) };
+ // The current block on the right side (from `r.sub(block_r)` to `r`).
+ let mut r = unsafe { l.add(v.len()) };
let mut block_r = BLOCK;
let mut start_r = ptr::null_mut();
let mut end_r = ptr::null_mut();
let ptr = v.as_ptr();
let align = unsafe {
// the offset is safe, because `index` is guaranteed inbounds
- ptr.offset(index as isize).align_offset(usize_bytes)
+ ptr.add(index).align_offset(usize_bytes)
};
if align == 0 {
while index < blocks_end {
unsafe {
- let block = ptr.offset(index as isize) as *const usize;
+ let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.offset(1));
}
#[inline]
unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
- let ptr = slice.as_ptr().offset(self.start as isize);
+ let ptr = slice.as_ptr().add(self.start);
let len = self.end - self.start;
super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
- let ptr = slice.as_ptr().offset(self.start as isize);
+ let ptr = slice.as_ptr().add(self.start);
let len = self.end - self.start;
super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len))
}
}
#[inline]
unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
- let ptr = slice.as_ptr().offset(self.start as isize);
+ let ptr = slice.as_ptr().add(self.start);
let len = slice.len() - self.start;
super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
- let ptr = slice.as_ptr().offset(self.start as isize);
+ let ptr = slice.as_ptr().add(self.start);
let len = slice.len() - self.start;
super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len))
}
unsafe {
(from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)),
from_utf8_unchecked_mut(slice::from_raw_parts_mut(
- ptr.offset(mid as isize),
+ ptr.add(mid),
len - mid
)))
}
style: Option<usize>,
/// How many newlines have been seen in the string so far, to adjust the error spans
seen_newlines: usize,
- /// Start and end byte offset of every successfuly parsed argument
+ /// Start and end byte offset of every successfully parsed argument
pub arg_places: Vec<(usize, usize)>,
}
// telling the backend to generate "misalignment-safe" code.
pub unsafe fn read<T: Copy>(&mut self) -> T {
let Unaligned(result) = *(self.ptr as *const Unaligned<T>);
- self.ptr = self.ptr.offset(mem::size_of::<T>() as isize);
+ self.ptr = self.ptr.add(mem::size_of::<T>());
result
}
#[repr(C)]
pub struct _ThrowInfo {
- pub attribues: c_uint,
+ pub attributes: c_uint,
pub pnfnUnwind: imp::ptr_t,
pub pForwardCompat: imp::ptr_t,
pub pCatchableTypeArray: imp::ptr_t,
}
static mut THROW_INFO: _ThrowInfo = _ThrowInfo {
- attribues: 0,
+ attributes: 0,
pnfnUnwind: ptr!(0),
pForwardCompat: ptr!(0),
pCatchableTypeArray: ptr!(0),
//!
//! This library, provided by the standard distribution, provides the types
//! consumed in the interfaces of procedurally defined macro definitions such as
-//! function-like macros `#[proc_macro]`, macro attribures `#[proc_macro_attribute]` and
+//! function-like macros `#[proc_macro]`, macro attributes `#[proc_macro_attribute]` and
//! custom derive attributes`#[proc_macro_derive]`.
//!
//! Note that this crate is intentionally bare-bones currently.
byteorder = { version = "1.1", features = ["i128"]}
chalk-engine = { version = "0.7.0", default-features=false }
rustc_fs_util = { path = "../librustc_fs_util" }
+smallvec = { version = "0.6.5", features = ["union"] }
# Note that these dependencies are a lie, they're just here to get linkage to
# work.
// queries). Making them anonymous avoids hashing the result, which
// may save a bit of time.
[anon] EraseRegionsTy { ty: Ty<'tcx> },
- [anon] ConstValueToAllocation { val: &'tcx ty::Const<'tcx> },
+ [anon] ConstToAllocation { val: &'tcx ty::Const<'tcx> },
[input] Freevars(DefId),
[input] MaybeUnusedTraitImport(DefId),
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::{Lrc, Lock};
use std::env;
use std::hash::Hash;
} = task {
debug_assert_eq!(node, key);
let krate_idx = self.node_to_node_index[&DepNode::new_no_params(DepKind::Krate)];
- self.alloc_node(node, SmallVec::one(krate_idx))
+ self.alloc_node(node, smallvec![krate_idx])
} else {
bug!("complete_eval_always_task() - Expected eval always task to be popped");
}
Existential(DefId),
/// `type Foo = Bar;`
TyAlias(DefId),
- TyForeign(DefId),
+ ForeignTy(DefId),
TraitAlias(DefId),
AssociatedTy(DefId),
/// `existential type Foo: Bar;`
Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) |
Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) |
Def::AssociatedConst(id) | Def::Macro(id, ..) |
- Def::Existential(id) | Def::AssociatedExistential(id) | Def::TyForeign(id) => {
+ Def::Existential(id) | Def::AssociatedExistential(id) | Def::ForeignTy(id) => {
id
}
Def::StructCtor(.., CtorKind::Fictive) => bug!("impossible struct constructor"),
Def::Union(..) => "union",
Def::Trait(..) => "trait",
- Def::TyForeign(..) => "foreign type",
+ Def::ForeignTy(..) => "foreign type",
Def::Method(..) => "method",
Def::Const(..) => "constant",
Def::AssociatedConst(..) => "associated constant",
}
ImplTraitContext::Universal(in_band_ty_params) => {
self.lower_node_id(def_node_id);
- // Add a definition for the in-band TyParam
+ // Add a definition for the in-band Param
let def_index = self
.resolver
.definitions()
fn lower_item_id(&mut self, i: &Item) -> OneVector<hir::ItemId> {
match i.node {
ItemKind::Use(ref use_tree) => {
- let mut vec = OneVector::one(hir::ItemId { id: i.id });
+ let mut vec = smallvec![hir::ItemId { id: i.id }];
self.lower_item_id_use_tree(use_tree, i.id, &mut vec);
vec
}
ItemKind::MacroDef(..) => OneVector::new(),
ItemKind::Fn(ref decl, ref header, ..) => {
- let mut ids = OneVector::one(hir::ItemId { id: i.id });
+ let mut ids = smallvec![hir::ItemId { id: i.id }];
self.lower_impl_trait_ids(decl, header, &mut ids);
ids
},
ItemKind::Impl(.., None, _, ref items) => {
- let mut ids = OneVector::one(hir::ItemId { id: i.id });
+ let mut ids = smallvec![hir::ItemId { id: i.id }];
for item in items {
if let ImplItemKind::Method(ref sig, _) = item.node {
self.lower_impl_trait_ids(&sig.decl, &sig.header, &mut ids);
}
ids
},
- _ => OneVector::one(hir::ItemId { id: i.id }),
+ _ => smallvec![hir::ItemId { id: i.id }],
}
}
hir::LoopSource::Loop,
)
}),
- ExprKind::Catch(ref body) => {
+ ExprKind::TryBlock(ref body) => {
self.with_catch_scope(body.id, |this| {
let unstable_span =
- this.allow_internal_unstable(CompilerDesugaringKind::Catch, body.span);
+ this.allow_internal_unstable(CompilerDesugaringKind::TryBlock, body.span);
let mut block = this.lower_block(body, true).into_inner();
let tail = block.expr.take().map_or_else(
|| {
}
fn lower_stmt(&mut self, s: &Stmt) -> OneVector<hir::Stmt> {
- OneVector::one(match s.node {
+ smallvec![match s.node {
StmtKind::Local(ref l) => Spanned {
node: hir::StmtKind::Decl(
P(Spanned {
span: s.span,
},
StmtKind::Mac(..) => panic!("Shouldn't exist here"),
- })
+ }]
}
fn lower_capture_clause(&mut self, c: CaptureBy) -> hir::CaptureClause {
match item.node {
ForeignItemKind::Fn(..) => Some(Def::Fn(def_id)),
ForeignItemKind::Static(_, m) => Some(Def::Static(def_id, m)),
- ForeignItemKind::Type => Some(Def::TyForeign(def_id)),
+ ForeignItemKind::Type => Some(Def::ForeignTy(def_id)),
}
}
NodeTraitItem(item) => {
GenericArg::Type(t) => t.span,
}
}
+
+ pub fn id(&self) -> NodeId {
+ match self {
+ GenericArg::Lifetime(l) => l.id,
+ GenericArg::Type(t) => t.id,
+ }
+ }
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
}
bug!("GenericArgs::inputs: not a `Fn(T) -> U`");
}
+
+ pub fn own_counts(&self) -> GenericParamCount {
+ // We could cache this as a property of `GenericParamCount`, but
+ // the aim is to refactor this away entirely eventually and the
+ // presence of this method will be a constant reminder.
+ let mut own_counts: GenericParamCount = Default::default();
+
+ for arg in &self.args {
+ match arg {
+ GenericArg::Lifetime(_) => own_counts.lifetimes += 1,
+ GenericArg::Type(_) => own_counts.types += 1,
+ };
+ }
+
+ own_counts
+ }
}
/// A modifier on a bound, currently this is only used for `?Sized`, where the
pub kind: GenericParamKind,
}
+#[derive(Default)]
pub struct GenericParamCount {
pub lifetimes: usize,
pub types: usize,
// We could cache this as a property of `GenericParamCount`, but
// the aim is to refactor this away entirely eventually and the
// presence of this method will be a constant reminder.
- let mut own_counts = GenericParamCount {
- lifetimes: 0,
- types: 0,
- };
+ let mut own_counts: GenericParamCount = Default::default();
for param in &self.params {
match param.kind {
/// Not represented directly in the AST, referred to by name through a ty_path.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum PrimTy {
- TyInt(IntTy),
- TyUint(UintTy),
- TyFloat(FloatTy),
- TyStr,
- TyBool,
- TyChar,
+ Int(IntTy),
+ Uint(UintTy),
+ Float(FloatTy),
+ Str,
+ Bool,
+ Char,
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
}
impl_stable_hash_for!(enum hir::PrimTy {
- TyInt(int_ty),
- TyUint(uint_ty),
- TyFloat(float_ty),
- TyStr,
- TyBool,
- TyChar
+ Int(int_ty),
+ Uint(uint_ty),
+ Float(float_ty),
+ Str,
+ Bool,
+ Char
});
impl_stable_hash_for!(struct hir::BareFnTy {
PrimTy(prim_ty),
TyParam(def_id),
SelfTy(trait_def_id, impl_def_id),
- TyForeign(def_id),
+ ForeignTy(def_id),
Fn(def_id),
Const(def_id),
Static(def_id, is_mutbl),
QuestionMark,
ExistentialReturnType,
ForLoop,
- Catch
+ TryBlock
});
impl_stable_hash_for!(enum ::syntax_pos::FileName {
use mir;
impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>>
-for &'gcx ty::Slice<T>
+for &'gcx ty::List<T>
where T: HashStable<StableHashingContext<'a>> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
}
}
-impl<'a, 'gcx, T> ToStableHashKey<StableHashingContext<'a>> for &'gcx ty::Slice<T>
+impl<'a, 'gcx, T> ToStableHashKey<StableHashingContext<'a>> for &'gcx ty::List<T>
where T: HashStable<StableHashingContext<'a>>
{
type KeyType = Fingerprint;
Undef
});
-impl_stable_hash_for!(enum mir::interpret::Value {
- Scalar(v),
- ScalarPair(a, b),
- ByRef(ptr, align)
-});
-
impl_stable_hash_for!(struct mir::interpret::Pointer {
alloc_id,
offset
DeallocateNonBasePtr |
HeapAllocZeroBytes |
Unreachable |
- Panic |
ReadFromReturnPointer |
UnimplementedTraitSelection |
TypeckError |
GeneratorResumedAfterReturn |
GeneratorResumedAfterPanic |
InfiniteLoop => {}
+ Panic { ref msg, ref file, line, col } => {
+ msg.hash_stable(hcx, hasher);
+ file.hash_stable(hcx, hasher);
+ line.hash_stable(hcx, hasher);
+ col.hash_stable(hcx, hasher);
+ },
ReferencedConstant(ref err) => err.hash_stable(hcx, hasher),
MachineError(ref err) => err.hash_stable(hcx, hasher),
FunctionPointerTyMismatch(a, b) => {
});
impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
-for ty::TypeVariants<'gcx>
+for ty::TyKind<'gcx>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
- use ty::TypeVariants::*;
+ use ty::TyKind::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- TyBool |
- TyChar |
- TyStr |
- TyError |
- TyNever => {
+ Bool |
+ Char |
+ Str |
+ Error |
+ Never => {
// Nothing more to hash.
}
- TyInt(int_ty) => {
+ Int(int_ty) => {
int_ty.hash_stable(hcx, hasher);
}
- TyUint(uint_ty) => {
+ Uint(uint_ty) => {
uint_ty.hash_stable(hcx, hasher);
}
- TyFloat(float_ty) => {
+ Float(float_ty) => {
float_ty.hash_stable(hcx, hasher);
}
- TyAdt(adt_def, substs) => {
+ Adt(adt_def, substs) => {
adt_def.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyArray(inner_ty, len) => {
+ Array(inner_ty, len) => {
inner_ty.hash_stable(hcx, hasher);
len.hash_stable(hcx, hasher);
}
- TySlice(inner_ty) => {
+ Slice(inner_ty) => {
inner_ty.hash_stable(hcx, hasher);
}
- TyRawPtr(pointee_ty) => {
+ RawPtr(pointee_ty) => {
pointee_ty.hash_stable(hcx, hasher);
}
- TyRef(region, pointee_ty, mutbl) => {
+ Ref(region, pointee_ty, mutbl) => {
region.hash_stable(hcx, hasher);
pointee_ty.hash_stable(hcx, hasher);
mutbl.hash_stable(hcx, hasher);
}
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyFnPtr(ref sig) => {
+ FnPtr(ref sig) => {
sig.hash_stable(hcx, hasher);
}
- TyDynamic(ref existential_predicates, region) => {
+ Dynamic(ref existential_predicates, region) => {
existential_predicates.hash_stable(hcx, hasher);
region.hash_stable(hcx, hasher);
}
- TyClosure(def_id, closure_substs) => {
+ Closure(def_id, closure_substs) => {
def_id.hash_stable(hcx, hasher);
closure_substs.hash_stable(hcx, hasher);
}
- TyGenerator(def_id, generator_substs, movability) => {
+ Generator(def_id, generator_substs, movability) => {
def_id.hash_stable(hcx, hasher);
generator_substs.hash_stable(hcx, hasher);
movability.hash_stable(hcx, hasher);
}
- TyGeneratorWitness(types) => {
+ GeneratorWitness(types) => {
types.hash_stable(hcx, hasher)
}
- TyTuple(inner_tys) => {
+ Tuple(inner_tys) => {
inner_tys.hash_stable(hcx, hasher);
}
- TyProjection(ref projection_ty) => {
+ Projection(ref projection_ty) => {
projection_ty.hash_stable(hcx, hasher);
}
- TyAnon(def_id, substs) => {
+ Anon(def_id, substs) => {
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyParam(param_ty) => {
+ Param(param_ty) => {
param_ty.hash_stable(hcx, hasher);
}
- TyForeign(def_id) => {
+ Foreign(def_id) => {
def_id.hash_stable(hcx, hasher);
}
- TyInfer(infer_ty) => {
+ Infer(infer_ty) => {
infer_ty.hash_stable(hcx, hasher);
}
}
_hasher: &mut StableHasher<W>) {
// TyVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash a TyVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash a TyVid {:?}.", *self)
}
}
_hasher: &mut StableHasher<W>) {
// IntVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash an IntVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash an IntVid {:?}.", *self)
}
}
_hasher: &mut StableHasher<W>) {
// FloatVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash a FloatVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash a FloatVid {:?}.", *self)
}
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
// I am a horrible monster and I pray for death. When
// we encounter a closure here, it is always a closure
// from within the function that we are currently
tcx,
reg_op: |reg| reg,
fldop: |ty| {
- if let ty::TyAnon(def_id, substs) = ty.sty {
+ if let ty::Anon(def_id, substs) = ty.sty {
// Check that this is `impl Trait` type is
// declared by `parent_def_id` -- i.e., one whose
// value we are inferring. At present, this is
// ```
//
// Here, the return type of `foo` references a
- // `TyAnon` indeed, but not one whose value is
+ // `Anon` indeed, but not one whose value is
// presently being inferred. You can get into a
// similar situation with closure return types
// today:
let tcx = infcx.tcx;
debug!(
- "instantiate_anon_types: TyAnon(def_id={:?}, substs={:?})",
+ "instantiate_anon_types: Anon(def_id={:?}, substs={:?})",
def_id, substs
);
- // Use the same type variable if the exact same TyAnon appears more
+ // Use the same type variable if the exact same Anon appears more
// than once in the return type (e.g. if it's passed to a type alias).
if let Some(anon_defn) = self.anon_types.get(&def_id) {
return anon_defn.concrete_ty;
for predicate in bounds.predicates {
// Change the predicate to refer to the type variable,
- // which will be the concrete type, instead of the TyAnon.
+ // which will be the concrete type, instead of the Anon.
// This also instantiates nested `impl Trait`.
let predicate = self.instantiate_anon_types_in_map(&predicate);
use std::sync::atomic::Ordering;
use ty::fold::{TypeFoldable, TypeFolder};
use ty::subst::Kind;
-use ty::{self, CanonicalVar, Lift, Slice, Ty, TyCtxt, TypeFlags};
+use ty::{self, CanonicalVar, Lift, List, Ty, TyCtxt, TypeFlags};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Canonicalizes a query value `V`. When we canonicalize a query,
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t),
+ ty::Infer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t),
- ty::TyInfer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t),
+ ty::Infer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t),
- ty::TyInfer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t),
+ ty::Infer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t),
- ty::TyInfer(ty::FreshTy(_))
- | ty::TyInfer(ty::FreshIntTy(_))
- | ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::FreshTy(_))
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_)) => {
bug!("encountered a fresh type during canonicalization")
}
- ty::TyInfer(ty::CanonicalTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) => {
bug!("encountered a canonical type during canonicalization")
}
- ty::TyClosure(..)
- | ty::TyGenerator(..)
- | ty::TyGeneratorWitness(..)
- | ty::TyBool
- | ty::TyChar
- | ty::TyInt(..)
- | ty::TyUint(..)
- | ty::TyFloat(..)
- | ty::TyAdt(..)
- | ty::TyStr
- | ty::TyError
- | ty::TyArray(..)
- | ty::TySlice(..)
- | ty::TyRawPtr(..)
- | ty::TyRef(..)
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyDynamic(..)
- | ty::TyNever
- | ty::TyTuple(..)
- | ty::TyProjection(..)
- | ty::TyForeign(..)
- | ty::TyParam(..)
- | ty::TyAnon(..) => {
+ ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Error
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Dynamic(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::Projection(..)
+ | ty::Foreign(..)
+ | ty::Param(..)
+ | ty::Anon(..) => {
if t.flags.intersects(self.needs_canonical_flags) {
t.super_fold_with(self)
} else {
if !value.has_type_flags(needs_canonical_flags) {
let out_value = gcx.lift(value).unwrap();
let canon_value = Canonical {
- variables: Slice::empty(),
+ variables: List::empty(),
value: out_value,
};
return canon_value;
// avoid allocations in those cases. We also don't use `indices` to
// determine if a kind has been seen before until the limit of 8 has
// been exceeded, to also avoid allocations for `indices`.
- if var_values.is_array() {
+ if !var_values.spilled() {
// `var_values` is stack-allocated. `indices` isn't used yet. Do a
// direct linear search of `var_values`.
if let Some(idx) = var_values.iter().position(|&k| k == kind) {
// If `var_values` has become big enough to be heap-allocated,
// fill up `indices` to facilitate subsequent lookups.
- if !var_values.is_array() {
+ if var_values.spilled() {
assert!(indices.is_empty());
*indices =
var_values.iter()
use infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin};
use rustc_data_structures::indexed_vec::IndexVec;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::Lrc;
use serialize::UseSpecializedDecodable;
use std::ops::Index;
use syntax::source_map::Span;
use ty::fold::TypeFoldable;
use ty::subst::Kind;
-use ty::{self, CanonicalVar, Lift, Region, Slice, TyCtxt};
+use ty::{self, CanonicalVar, Lift, Region, List, TyCtxt};
mod canonicalizer;
mod substitute;
/// A "canonicalized" type `V` is one where all free inference
-/// variables have been rewriten to "canonical vars". These are
+/// variables have been rewritten to "canonical vars". These are
/// numbered starting from 0 in order of first appearance.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
pub struct Canonical<'gcx, V> {
pub value: V,
}
-pub type CanonicalVarInfos<'gcx> = &'gcx Slice<CanonicalVarInfo>;
+pub type CanonicalVarInfos<'gcx> = &'gcx List<CanonicalVarInfo>;
impl<'gcx> UseSpecializedDecodable for CanonicalVarInfos<'gcx> {}
fn fresh_inference_vars_for_canonical_vars(
&self,
span: Span,
- variables: &Slice<CanonicalVarInfo>,
+ variables: &List<CanonicalVarInfo>,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<CanonicalVar, Kind<'tcx>> = variables
.iter()
match result_value.unpack() {
UnpackedKind::Type(result_value) => {
// e.g., here `result_value` might be `?0` in the example above...
- if let ty::TyInfer(ty::InferTy::CanonicalTy(index)) = result_value.sty {
+ if let ty::Infer(ty::InferTy::CanonicalTy(index)) = result_value.sty {
// in which case we would set `canonical_vars[0]` to `Some(?U)`.
opt_values[index] = Some(*original_value);
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::InferTy::CanonicalTy(c)) => {
+ ty::Infer(ty::InferTy::CanonicalTy(c)) => {
match self.var_values.var_values[c].unpack() {
UnpackedKind::Type(ty) => ty,
r => bug!("{:?} is a type but value is {:?}", c, r),
match (&a.sty, &b.sty) {
// Relate integral variables to other types
- (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
+ (&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => {
self.int_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e))?;
Ok(a)
}
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Int(v)) => {
self.unify_integral_variable(a_is_expected, v_id, IntType(v))
}
- (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ (&ty::Int(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
}
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Uint(v)) => {
self.unify_integral_variable(a_is_expected, v_id, UintType(v))
}
- (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ (&ty::Uint(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
- (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
+ (&ty::Infer(ty::FloatVar(a_id)), &ty::Infer(ty::FloatVar(b_id))) => {
self.float_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
Ok(a)
}
- (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
+ (&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
self.unify_float_variable(a_is_expected, v_id, v)
}
- (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
+ (&ty::Float(v), &ty::Infer(ty::FloatVar(v_id))) => {
self.unify_float_variable(!a_is_expected, v_id, v)
}
// All other cases of inference are errors
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) => {
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) => {
Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
}
// subtyping. This is basically our "occurs check", preventing
// us from creating infinitely sized types.
match t.sty {
- ty::TyInfer(ty::TyVar(vid)) => {
+ ty::Infer(ty::TyVar(vid)) => {
let mut variables = self.infcx.type_variables.borrow_mut();
let vid = variables.root_var(vid);
let sub_vid = variables.sub_root_var(vid);
}
}
}
- ty::TyInfer(ty::IntVar(_)) |
- ty::TyInfer(ty::FloatVar(_)) => {
+ ty::Infer(ty::IntVar(_)) |
+ ty::Infer(ty::FloatVar(_)) => {
// No matter what mode we are in,
// integer/floating-point types must be equal to be
// relatable.
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
+ (&ty::Infer(TyVar(a_id)), &ty::Infer(TyVar(b_id))) => {
infcx.type_variables.borrow_mut().equate(a_id, b_id);
Ok(a)
}
- (&ty::TyInfer(TyVar(a_id)), _) => {
+ (&ty::Infer(TyVar(a_id)), _) => {
self.fields.instantiate(b, RelationDir::EqTo, a_id, self.a_is_expected)?;
Ok(a)
}
- (_, &ty::TyInfer(TyVar(b_id))) => {
+ (_, &ty::Infer(TyVar(b_id))) => {
self.fields.instantiate(a, RelationDir::EqTo, b_id, self.a_is_expected)?;
Ok(a)
}
use hir::def_id::DefId;
use middle::region;
use traits::{ObligationCause, ObligationCauseCode};
-use ty::{self, subst::Subst, Region, Ty, TyCtxt, TypeFoldable, TypeVariants};
+use ty::{self, subst::Subst, Region, Ty, TyCtxt, TypeFoldable, TyKind};
use ty::error::TypeError;
use syntax::ast::DUMMY_NODE_ID;
use syntax_pos::{Pos, Span};
// if they are both "path types", there's a chance of ambiguity
// due to different versions of the same crate
match (&exp_found.expected.sty, &exp_found.found.sty) {
- (&ty::TyAdt(exp_adt, _), &ty::TyAdt(found_adt, _)) => {
+ (&ty::Adt(exp_adt, _), &ty::Adt(found_adt, _)) => {
report_path_match(err, exp_adt.did, found_adt.did);
}
_ => (),
value.push_highlighted("<");
}
- // Output the lifetimes fot the first type
+ // Output the lifetimes for the first type
let lifetimes = sub.regions()
.map(|lifetime| {
let s = lifetime.to_string();
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
return Some(());
}
- if let &ty::TyAdt(def, _) = &ta.sty {
+ if let &ty::Adt(def, _) = &ta.sty {
let path_ = self.tcx.item_path_str(def.did.clone());
if path_ == other_path {
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
fn equals<'tcx>(a: &Ty<'tcx>, b: &Ty<'tcx>) -> bool {
match (&a.sty, &b.sty) {
(a, b) if *a == *b => true,
- (&ty::TyInt(_), &ty::TyInfer(ty::InferTy::IntVar(_)))
- | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInt(_))
- | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInfer(ty::InferTy::IntVar(_)))
- | (&ty::TyFloat(_), &ty::TyInfer(ty::InferTy::FloatVar(_)))
- | (&ty::TyInfer(ty::InferTy::FloatVar(_)), &ty::TyFloat(_))
+ (&ty::Int(_), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (&ty::Infer(ty::InferTy::IntVar(_)), &ty::Int(_))
+ | (&ty::Infer(ty::InferTy::IntVar(_)), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
+ | (&ty::Infer(ty::InferTy::FloatVar(_)), &ty::Float(_))
| (
- &ty::TyInfer(ty::InferTy::FloatVar(_)),
- &ty::TyInfer(ty::InferTy::FloatVar(_)),
+ &ty::Infer(ty::InferTy::FloatVar(_)),
+ &ty::Infer(ty::InferTy::FloatVar(_)),
) => true,
_ => false,
}
}
match (&t1.sty, &t2.sty) {
- (&ty::TyAdt(def1, sub1), &ty::TyAdt(def2, sub2)) => {
+ (&ty::Adt(def1, sub1), &ty::Adt(def2, sub2)) => {
let sub_no_defaults_1 = self.strip_generic_default_params(def1.did, sub1);
let sub_no_defaults_2 = self.strip_generic_default_params(def2.did, sub2);
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
}
// When finding T != &T, highlight only the borrow
- (&ty::TyRef(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => {
+ (&ty::Ref(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
values.1.push_normal(t2.to_string());
values
}
- (_, &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => {
+ (_, &ty::Ref(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
values.0.push_normal(t1.to_string());
push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
}
// When encountering &T != &mut T, highlight only the borrow
- (&ty::TyRef(r1, ref_ty1, mutbl1),
- &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&ref_ty1, &ref_ty2) => {
+ (&ty::Ref(r1, ref_ty1, mutbl1),
+ &ty::Ref(r2, ref_ty2, mutbl2)) if equals(&ref_ty1, &ref_ty2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
(_, false, _) => {
if let Some(exp_found) = exp_found {
let (def_id, ret_ty) = match exp_found.found.sty {
- TypeVariants::TyFnDef(def, _) => {
+ TyKind::FnDef(def, _) => {
(Some(def), Some(self.tcx.fn_sig(def).output()))
}
_ => (None, None),
};
let exp_is_struct = match exp_found.expected.sty {
- TypeVariants::TyAdt(def, _) => def.is_struct(),
+ TyKind::Adt(def, _) => def.is_struct(),
_ => false,
};
let type_param = generics.type_param(param, self.tcx);
let hir = &self.tcx.hir;
hir.as_local_node_id(type_param.def_id).map(|id| {
- // Get the `hir::TyParam` to verify whether it already has any bounds.
+ // Get the `hir::Param` to verify whether it already has any bounds.
// We do this to avoid suggesting code that ends up as `T: 'a'b`,
// instead we suggest `T: 'a + 'b` in that case.
let mut has_bounds = false;
s
};
let var_description = match var_origin {
- infer::MiscVariable(_) => "".to_string(),
+ infer::MiscVariable(_) => String::new(),
infer::PatternRegion(_) => " for pattern".to_string(),
infer::AddrOfRegion(_) => " for borrow expression".to_string(),
infer::Autoref(_) => " for autoref".to_string(),
use hir::intravisit::{self, Visitor, NestedVisitorMap};
use infer::InferCtxt;
use infer::type_variable::TypeVariableOrigin;
-use ty::{self, Ty, TyInfer, TyVar};
+use ty::{self, Ty, Infer, TyVar};
use syntax::source_map::CompilerDesugaringKind;
use syntax_pos::Span;
use errors::DiagnosticBuilder;
let ty = self.infcx.resolve_type_vars_if_possible(&ty);
ty.walk().any(|inner_ty| {
inner_ty == *self.target_ty || match (&inner_ty.sty, &self.target_ty.sty) {
- (&TyInfer(TyVar(a_vid)), &TyInfer(TyVar(b_vid))) => {
+ (&Infer(TyVar(a_vid)), &Infer(TyVar(b_vid))) => {
self.infcx
.type_variables
.borrow_mut()
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
pub fn extract_type_name(&self, ty: &'a Ty<'tcx>) -> String {
- if let ty::TyInfer(ty::TyVar(ty_vid)) = (*ty).sty {
+ if let ty::Infer(ty::TyVar(ty_vid)) = (*ty).sty {
let ty_vars = self.type_variables.borrow();
if let TypeVariableOrigin::TypeParameterDefinition(_, name) =
*ty_vars.var_origin(ty_vid) {
) -> Option<Span> {
let ret_ty = self.tcx.type_of(scope_def_id);
match ret_ty.sty {
- ty::TyFnDef(_, _) => {
+ ty::FnDef(_, _) => {
let sig = ret_ty.fn_sig(self.tcx);
let late_bound_regions = self.tcx
.collect_referenced_late_bound_regions(&sig.output());
) -> bool {
let ret_ty = self.tcx.type_of(scope_def_id);
match ret_ty.sty {
- ty::TyFnDef(_, _) => {
+ ty::FnDef(_, _) => {
let sig = ret_ty.fn_sig(self.tcx);
let output = self.tcx.erase_late_bound_regions(&sig.output());
return output.is_impl_trait();
let tcx = self.infcx.tcx;
match t.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
let opt_ty = self.infcx.type_variables.borrow_mut().probe(v).known();
self.freshen(
opt_ty,
ty::FreshTy)
}
- ty::TyInfer(ty::IntVar(v)) => {
+ ty::Infer(ty::IntVar(v)) => {
self.freshen(
self.infcx.int_unification_table.borrow_mut()
.probe_value(v)
ty::FreshIntTy)
}
- ty::TyInfer(ty::FloatVar(v)) => {
+ ty::Infer(ty::FloatVar(v)) => {
self.freshen(
self.infcx.float_unification_table.borrow_mut()
.probe_value(v)
ty::FreshFloatTy)
}
- ty::TyInfer(ty::FreshTy(c)) |
- ty::TyInfer(ty::FreshIntTy(c)) |
- ty::TyInfer(ty::FreshFloatTy(c)) => {
+ ty::Infer(ty::FreshTy(c)) |
+ ty::Infer(ty::FreshIntTy(c)) |
+ ty::Infer(ty::FreshFloatTy(c)) => {
if c >= self.freshen_count {
bug!("Encountered a freshend type with id {} \
but our counter is only at {}",
t
}
- ty::TyInfer(ty::CanonicalTy(..)) =>
+ ty::Infer(ty::CanonicalTy(..)) =>
bug!("encountered canonical ty during freshening"),
- ty::TyGenerator(..) |
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyAdt(..) |
- ty::TyStr |
- ty::TyError |
- ty::TyArray(..) |
- ty::TySlice(..) |
- ty::TyRawPtr(..) |
- ty::TyRef(..) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyDynamic(..) |
- ty::TyNever |
- ty::TyTuple(..) |
- ty::TyProjection(..) |
- ty::TyForeign(..) |
- ty::TyParam(..) |
- ty::TyClosure(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyAnon(..) => {
+ ty::Generator(..) |
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Adt(..) |
+ ty::Str |
+ ty::Error |
+ ty::Array(..) |
+ ty::Slice(..) |
+ ty::RawPtr(..) |
+ ty::Ref(..) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Dynamic(..) |
+ ty::Never |
+ ty::Tuple(..) |
+ ty::Projection(..) |
+ ty::Foreign(..) |
+ ty::Param(..) |
+ ty::Closure(..) |
+ ty::GeneratorWitness(..) |
+ ty::Anon(..) => {
t.super_fold_with(self)
}
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty::TyInfer(ty::InferTy::TyVar(vid)) => {
+ ty::Infer(ty::InferTy::TyVar(vid)) => {
match self.type_variables.get(&vid) {
None => {
// This variable was created before the
* we're not careful, it will succeed.
*
* The reason is that when we walk through the subtyping
- * algorith, we begin by replacing `'a` with a skolemized
+ * algorithm, we begin by replacing `'a` with a skolemized
* variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
* can be made true by unifying `_#0t` with `&'1 int`. In the
* process, we create a fresh variable for the skolemized
// is (e.g.) `Box<i32>`. A more obvious solution might be to
// iterate on the subtype obligations that are returned, but I
// think this suffices. -nmatsakis
- (&ty::TyInfer(TyVar(..)), _) => {
+ (&ty::Infer(TyVar(..)), _) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, b, a)?;
Ok(v)
}
- (_, &ty::TyInfer(TyVar(..))) => {
+ (_, &ty::Infer(TyVar(..))) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, a, b)?;
Ok(v)
pub fn type_var_diverges(&'a self, ty: Ty) -> bool {
match ty.sty {
- ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
+ ty::Infer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
_ => false
}
}
use ty::error::UnconstrainedNumeric::Neither;
use ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
match ty.sty {
- ty::TyInfer(ty::IntVar(vid)) => {
+ ty::Infer(ty::IntVar(vid)) => {
if self.int_unification_table.borrow_mut().probe_value(vid).is_some() {
Neither
} else {
UnconstrainedInt
}
},
- ty::TyInfer(ty::FloatVar(vid)) => {
+ ty::Infer(ty::FloatVar(vid)) => {
if self.float_unification_table.borrow_mut().probe_value(vid).is_some() {
Neither
} else {
pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> {
match typ.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
// Not entirely obvious: if `typ` is a type variable,
// it can be resolved to an int/float variable, which
// can then be recursively resolved, hence the
.unwrap_or(typ)
}
- ty::TyInfer(ty::IntVar(v)) => {
+ ty::Infer(ty::IntVar(v)) => {
self.int_unification_table
.borrow_mut()
.probe_value(v)
.unwrap_or(typ)
}
- ty::TyInfer(ty::FloatVar(v)) => {
+ ty::Infer(ty::FloatVar(v)) => {
self.float_unification_table
.borrow_mut()
.probe_value(v)
}
// [Note-Type-error-reporting]
- // An invariant is that anytime the expected or actual type is TyError (the special
+ // An invariant is that anytime the expected or actual type is Error (the special
// error type, meaning that an error occurred when typechecking this expression),
// this is a derived error. The error cascaded from another error (that was already
// reported), so it's not useful to display it to the user.
// The following methods implement this logic.
- // They check if either the actual or expected type is TyError, and don't print the error
+ // They check if either the actual or expected type is Error, and don't print the error
// in this case. The typechecker should only ever report type errors involving mismatched
// types using one of these methods, and should not call span_err directly for such
// errors.
let actual_ty = self.resolve_type_vars_if_possible(&actual_ty);
debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty);
- // Don't report an error if actual type is TyError.
+ // Don't report an error if actual type is Error.
if actual_ty.references_error() {
return self.tcx.sess.diagnostic().struct_dummy();
}
fn type_bound(&self, ty: Ty<'tcx>) -> VerifyBound<'tcx> {
match ty.sty {
- ty::TyParam(p) => self.param_bound(p),
- ty::TyProjection(data) => {
+ ty::Param(p) => self.param_bound(p),
+ ty::Projection(data) => {
let declared_bounds = self.projection_declared_bounds(data);
self.projection_bound(declared_bounds, data)
}
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
let t = self.infcx.shallow_resolve(t);
if t.has_infer_types() {
- if let ty::TyInfer(_) = t.sty {
+ if let ty::Infer(_) = t.sty {
// Since we called `shallow_resolve` above, this must
// be an (as yet...) unresolved inference variable.
true
} else {
let t = self.infcx.shallow_resolve(t);
match t.sty {
- ty::TyInfer(ty::TyVar(vid)) => {
+ ty::Infer(ty::TyVar(vid)) => {
self.err = Some(FixupError::UnresolvedTy(vid));
self.tcx().types.err
}
- ty::TyInfer(ty::IntVar(vid)) => {
+ ty::Infer(ty::IntVar(vid)) => {
self.err = Some(FixupError::UnresolvedIntTy(vid));
self.tcx().types.err
}
- ty::TyInfer(ty::FloatVar(vid)) => {
+ ty::Infer(ty::FloatVar(vid)) => {
self.err = Some(FixupError::UnresolvedFloatTy(vid));
self.tcx().types.err
}
- ty::TyInfer(_) => {
+ ty::Infer(_) => {
bug!("Unexpected type in full type resolver: {:?}", t);
}
_ => {
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_vid)), &ty::TyInfer(TyVar(b_vid))) => {
+ (&ty::Infer(TyVar(a_vid)), &ty::Infer(TyVar(b_vid))) => {
// Shouldn't have any LBR here, so we can safely put
// this under a binder below without fear of accidental
// capture.
Ok(a)
}
- (&ty::TyInfer(TyVar(a_id)), _) => {
+ (&ty::Infer(TyVar(a_id)), _) => {
self.fields
.instantiate(b, RelationDir::SupertypeOf, a_id, !self.a_is_expected)?;
Ok(a)
}
- (_, &ty::TyInfer(TyVar(b_id))) => {
+ (_, &ty::Infer(TyVar(b_id))) => {
self.fields.instantiate(a, RelationDir::SubtypeOf, b_id, self.a_is_expected)?;
Ok(a)
}
- (&ty::TyError, _) | (_, &ty::TyError) => {
+ (&ty::Error, _) | (_, &ty::Error) => {
infcx.set_tainted_by_errors();
Ok(self.tcx().types.err)
}
/// instantiated. Otherwise, returns `t`.
pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
match self.probe(v) {
TypeVariableValue::Unknown { .. } => t,
TypeVariableValue::Known { value } => value,
#![feature(drain_filter)]
#![feature(iterator_find_map)]
#![cfg_attr(windows, feature(libc))]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(never_type)]
#![feature(exhaustive_patterns)]
#![feature(extern_types)]
#![feature(trace_macros)]
#![feature(trusted_len)]
#![feature(vec_remove_item)]
-#![feature(catch_expr)]
#![feature(step_trait)]
#![feature(integer_atomics)]
#![feature(test)]
extern crate byteorder;
extern crate backtrace;
+#[macro_use]
+extern crate smallvec;
+
// Note that librustc doesn't actually depend on these crates, see the note in
// `Cargo.toml` for this crate about why these are here.
#[allow(unused_extern_crates)]
// lonely orphan structs and enums looking for a better home
-#[derive(Clone, Debug, Copy)]
-pub struct LinkMeta {
- pub crate_hash: Svh,
-}
-
/// Where a crate came from on the local filesystem. One of these three options
/// must be non-None.
#[derive(PartialEq, Clone, Debug)]
// utility functions
fn encode_metadata<'a, 'tcx>(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- link_meta: &LinkMeta)
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> EncodedMetadata;
fn metadata_encoding_version(&self) -> &[u8];
}
fn handle_field_access(&mut self, lhs: &hir::Expr, node_id: ast::NodeId) {
match self.tables.expr_ty_adjusted(lhs).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
let index = self.tcx.field_index(node_id, self.tables);
self.insert_def_id(def.non_enum_variant().fields[index].did);
}
- ty::TyTuple(..) => {}
+ ty::Tuple(..) => {}
_ => span_bug!(lhs.span, "named field access on non-ADT"),
}
}
fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, def: Def,
pats: &[source_map::Spanned<hir::FieldPat>]) {
let variant = match self.tables.node_id_to_type(lhs.hir_id).sty {
- ty::TyAdt(adt, _) => adt.variant_of_def(def),
+ ty::Adt(adt, _) => adt.variant_of_def(def),
_ => span_bug!(lhs.span, "non-ADT in struct pattern")
};
for pat in pats {
self.handle_field_access(&lhs, expr.id);
}
hir::ExprKind::Struct(_, ref fields, _) => {
- if let ty::TypeVariants::TyAdt(ref adt, _) = self.tables.expr_ty(expr).sty {
+ if let ty::Adt(ref adt, _) = self.tables.expr_ty(expr).sty {
self.mark_as_used_if_union(adt, fields);
}
}
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
let expr_ty = return_if_err!(self.mc.expr_ty(expr));
- if let ty::TyRef(r, _, _) = expr_ty.sty {
+ if let ty::Ref(r, _, _) = expr_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
self.borrow_expr(&base, r, bk, AddrOf);
}
debug!("walk_callee: callee={:?} callee_ty={:?}",
callee, callee_ty);
match callee_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
self.consume_expr(callee);
}
- ty::TyError => { }
+ ty::Error => { }
_ => {
if let Some(def) = self.mc.tables.type_dependent_defs().get(call.hir_id) {
let def_id = def.def_id();
// Select just those fields of the `with`
// expression that will actually be used
match with_cmt.ty.sty {
- ty::TyAdt(adt, substs) if adt.is_struct() => {
+ ty::Adt(adt, substs) if adt.is_struct() => {
// Consume those fields of the with expression that are needed.
for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() {
let is_mentioned = fields.iter().any(|f| {
// It is also a borrow or copy/move of the value being matched.
match bm {
ty::BindByReference(m) => {
- if let ty::TyRef(r, _, _) = pat_ty.sty {
+ if let ty::Ref(r, _, _) = pat_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
delegate.borrow(pat.id, pat.span, &cmt_pat, r, bk, RefBinding);
}
ty: Ty<'tcx>)
-> Ty<'tcx> {
let (def, substs) = match ty.sty {
- ty::TyAdt(def, substs) => (def, substs),
+ ty::Adt(def, substs) => (def, substs),
_ => return ty
};
// Special-case transmutting from `typeof(function)` and
// `Option<typeof(function)>` to present a clearer error.
let from = unpack_option_like(self.tcx.global_tcx(), from);
- if let (&ty::TyFnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
if size_to == Pointer.size(self.tcx) {
struct_span_err!(self.tcx.sess, span, E0591,
"can't transmute zero-sized type")
PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn;
PanicInfoLangItem, "panic_info", panic_info;
PanicImplLangItem, "panic_impl", panic_impl;
+ // Libstd panic entry point. Necessary for const eval to be able to catch it
+ BeginPanicFnLangItem, "begin_panic", begin_panic_fn;
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
BoxFreeFnLangItem, "box_free", box_free_fn;
use ty::TyCtxt;
use syntax::symbol::Symbol;
use syntax::ast::{Attribute, MetaItem, MetaItemKind};
-use syntax_pos::{Span, DUMMY_SP};
+use syntax_pos::Span;
use hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use errors::DiagnosticId;
pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> LibFeatures {
let mut collector = LibFeatureCollector::new(tcx);
- for &cnum in tcx.crates().iter() {
- for &(feature, since) in tcx.defined_lib_features(cnum).iter() {
- collector.collect_feature(feature, since, DUMMY_SP);
- }
- }
intravisit::walk_crate(&mut collector, tcx.hir.krate());
collector.lib_features
}
fn resolve_field(&self, field_index: usize) -> Option<(&'tcx ty::AdtDef, &'tcx ty::FieldDef)>
{
let adt_def = match self.ty.sty {
- ty::TyAdt(def, _) => def,
- ty::TyTuple(..) => return None,
+ ty::Adt(def, _) => def,
+ ty::Tuple(..) => return None,
// closures get `Categorization::Upvar` rather than `Categorization::Interior`
_ => bug!("interior cmt {:?} is not an ADT", self)
};
// FnOnce | copied | upvar -> &'up bk
let kind = match self.node_ty(fn_hir_id)?.sty {
- ty::TyGenerator(..) => ty::ClosureKind::FnOnce,
- ty::TyClosure(closure_def_id, closure_substs) => {
+ ty::Generator(..) => ty::ClosureKind::FnOnce,
+ ty::Closure(closure_def_id, closure_substs) => {
match self.infcx {
// During upvar inference we may not know the
// closure kind, just use the LATTICE_BOTTOM value.
// that the above is actually immutable and
// has a ref type. However, nothing should
// actually look at the type, so we can get
- // away with stuffing a `TyError` in there
+ // away with stuffing an `Error` in there
// instead of bothering to construct a proper
// one.
let cmt_result = cmt_ {
// Always promote `[T; 0]` (even when e.g. borrowed mutably).
let promotable = match expr_ty.sty {
- ty::TyArray(_, len) if len.assert_usize(self.tcx) == Some(0) => true,
+ ty::Array(_, len) if len.assert_usize(self.tcx) == Some(0) => true,
_ => promotable,
};
let base_ty = self.expr_ty_adjusted(base)?;
let (region, mutbl) = match base_ty.sty {
- ty::TyRef(region, _, mutbl) => (region, mutbl),
+ ty::Ref(region, _, mutbl) => (region, mutbl),
_ => {
span_bug!(expr.span, "cat_overloaded_place: base is not a reference")
}
};
let ptr = match base_cmt.ty.sty {
- ty::TyAdt(def, ..) if def.is_box() => Unique,
- ty::TyRawPtr(ref mt) => UnsafePtr(mt.mutbl),
- ty::TyRef(r, _, mutbl) => {
+ ty::Adt(def, ..) if def.is_box() => Unique,
+ ty::RawPtr(ref mt) => UnsafePtr(mt.mutbl),
+ ty::Ref(r, _, mutbl) => {
let bk = ty::BorrowKind::from_mutbl(mutbl);
BorrowedPtr(bk, r)
}
}
Def::StructCtor(_, CtorKind::Fn) => {
match self.pat_ty_unadjusted(&pat)?.sty {
- ty::TyAdt(adt_def, _) => {
+ ty::Adt(adt_def, _) => {
(cmt, adt_def.non_enum_variant().fields.len())
}
ref ty => {
PatKind::Tuple(ref subpats, ddpos) => {
// (p1, ..., pN)
let expected_len = match self.pat_ty_unadjusted(&pat)?.sty {
- ty::TyTuple(ref tys) => tys.len(),
+ ty::Tuple(ref tys) => tys.len(),
ref ty => span_bug!(pat.span, "tuple pattern unexpected type {:?}", ty),
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) {
use hir::def::Def;
use hir::def_id::{DefId, CrateNum};
use rustc_data_structures::sync::Lrc;
-use ty::{self, TyCtxt, GenericParamDefKind};
+use ty::{self, TyCtxt};
use ty::query::Providers;
use middle::privacy;
use session::config;
use hir::itemlikevisit::ItemLikeVisitor;
use hir::intravisit;
-// Returns true if the given set of generics implies that the item it's
-// associated with must be inlined.
-fn generics_require_inlining(generics: &ty::Generics) -> bool {
- for param in &generics.params {
- match param.kind {
- GenericParamDefKind::Lifetime { .. } => {}
- GenericParamDefKind::Type { .. } => return true,
- }
- }
- false
-}
-
// Returns true if the given item must be inlined because it may be
// monomorphized or it was marked with `#[inline]`. This will only return
// true for functions.
hir::ItemKind::Impl(..) |
hir::ItemKind::Fn(..) => {
let generics = tcx.generics_of(tcx.hir.local_def_id(item.id));
- generics_require_inlining(generics)
+ generics.requires_monomorphization(tcx)
}
_ => false,
}
impl_src: DefId) -> bool {
let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id.owner_def_id());
let generics = tcx.generics_of(tcx.hir.local_def_id(impl_item.id));
- if codegen_fn_attrs.requests_inline() || generics_require_inlining(generics) {
+ if codegen_fn_attrs.requests_inline() || generics.requires_monomorphization(tcx) {
return true
}
if let Some(impl_node_id) = tcx.hir.as_local_node_id(impl_src) {
hir::ImplItemKind::Method(..) => {
let attrs = self.tcx.codegen_fn_attrs(def_id);
let generics = self.tcx.generics_of(def_id);
- if generics_require_inlining(&generics) ||
- attrs.requests_inline() {
+ if generics.requires_monomorphization(self.tcx) || attrs.requests_inline() {
true
} else {
let impl_did = self.tcx
match self.tcx.hir.expect_item(impl_node_id).node {
hir::ItemKind::Impl(..) => {
let generics = self.tcx.generics_of(impl_did);
- generics_require_inlining(&generics)
+ generics.requires_monomorphization(self.tcx)
}
_ => false
}
remaining_lib_features.remove(&Symbol::intern("libc"));
remaining_lib_features.remove(&Symbol::intern("test"));
- for (feature, stable) in tcx.lib_features().to_vec() {
- if let Some(since) = stable {
- if let Some(span) = remaining_lib_features.get(&feature) {
- // Warn if the user has enabled an already-stable lib feature.
- unnecessary_stable_feature_lint(tcx, *span, feature, since);
+ let check_features =
+ |remaining_lib_features: &mut FxHashMap<_, _>, defined_features: &Vec<_>| {
+ for &(feature, since) in defined_features {
+ if let Some(since) = since {
+ if let Some(span) = remaining_lib_features.get(&feature) {
+ // Warn if the user has enabled an already-stable lib feature.
+ unnecessary_stable_feature_lint(tcx, *span, feature, since);
+ }
+ }
+ remaining_lib_features.remove(&feature);
+ if remaining_lib_features.is_empty() {
+ break;
+ }
+ }
+ };
+
+ // We always collect the lib features declared in the current crate, even if there are
+ // no unknown features, because the collection also does feature attribute validation.
+ let local_defined_features = tcx.lib_features().to_vec();
+ if !remaining_lib_features.is_empty() {
+ check_features(&mut remaining_lib_features, &local_defined_features);
+
+ for &cnum in &*tcx.crates() {
+ if remaining_lib_features.is_empty() {
+ break;
}
+ check_features(&mut remaining_lib_features, &tcx.defined_lib_features(cnum));
}
- remaining_lib_features.remove(&feature);
}
for (feature, span) in remaining_lib_features {
use syntax_pos::Span;
use syntax::ast;
+use syntax::symbol::Symbol;
pub type ConstEvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, Lrc<ConstEvalErr<'tcx>>>;
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
Unreachable,
- Panic,
+ Panic {
+ msg: Symbol,
+ line: u32,
+ col: u32,
+ file: Symbol,
+ },
ReadFromReturnPointer,
PathNotFound(Vec<String>),
UnimplementedTraitSelection,
"tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
Unreachable =>
"entered unreachable code",
- Panic =>
+ Panic { .. } =>
"the evaluated program panicked",
ReadFromReturnPointer =>
"tried to read from the return pointer",
write!(f, "{}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()),
+ Panic { ref msg, line, col, ref file } =>
+ write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
_ => write!(f, "{}", self.description()),
}
}
FrameInfo, ConstEvalResult,
};
-pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef};
+pub use self::value::{Scalar, ConstValue, ScalarMaybeUndef};
use std::fmt;
use mir;
Pointer { alloc_id, offset }
}
- pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+ pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
Pointer::new(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
}
- pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
}
}
-pub fn write_target_int(
- endianness: layout::Endian,
- mut target: &mut [u8],
- data: i128,
-) -> Result<(), io::Error> {
- let len = target.len();
- match endianness {
- layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
- layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
- }
-}
-
pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
match endianness {
layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
}
}
+////////////////////////////////////////////////////////////////////////////////
+// Methods to facilitate working with signed integers stored in a u128
+////////////////////////////////////////////////////////////////////////////////
+
+pub fn sign_extend(value: u128, size: Size) -> u128 {
+ let size = size.bits();
+ // sign extend
+ let shift = 128 - size;
+ // shift the unsigned value to the left
+ // and back to the right as signed (essentially fills with FF on the left)
+ (((value << shift) as i128) >> shift) as u128
+}
+
+pub fn truncate(value: u128, size: Size) -> u128 {
+ let size = size.bits();
+ let shift = 128 - size;
+ // truncate (shift left to drop out leftover values, shift right to fill with zeroes)
+ (value << shift) >> shift
+}
+
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////
#![allow(unknown_lints)]
-use ty::layout::{Align, HasDataLayout, Size};
-use ty;
+use ty::layout::{HasDataLayout, Size};
use ty::subst::Substs;
use hir::def_id::DefId;
use super::{EvalResult, Pointer, PointerArithmetic, Allocation};
/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which
-/// matches Value's optimizations for easy conversions between these two types
+/// match the LocalValue optimizations for easy conversions between Value and ConstValue.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)]
pub enum ConstValue<'tcx> {
/// Never returned from the `const_eval` query, but the HIR contains these frequently in order
/// evaluation
Unevaluated(DefId, &'tcx Substs<'tcx>),
/// Used only for types with layout::abi::Scalar ABI and ZSTs
+ ///
+ /// Not using the enum `Value` to encode that this must not be `Undef`
Scalar(Scalar),
/// Used only for types with layout::abi::ScalarPair
///
}
impl<'tcx> ConstValue<'tcx> {
- #[inline]
- pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> {
- Ok(match val {
- Value::ByRef(..) => bug!(),
- Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b),
- Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?),
- })
- }
-
- #[inline]
- pub fn to_byval_value(&self) -> Option<Value> {
- match *self {
- ConstValue::Unevaluated(..) |
- ConstValue::ByRef(..) => None,
- ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b)),
- ConstValue::Scalar(val) => Some(Value::Scalar(val.into())),
- }
- }
-
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar> {
match *self {
}
#[inline]
- pub fn to_bits(&self, size: Size) -> Option<u128> {
+ pub fn try_to_bits(&self, size: Size) -> Option<u128> {
self.try_to_scalar()?.to_bits(size).ok()
}
#[inline]
- pub fn to_ptr(&self) -> Option<Pointer> {
+ pub fn try_to_ptr(&self) -> Option<Pointer> {
self.try_to_scalar()?.to_ptr().ok()
}
-}
-/// A `Value` represents a single self-contained Rust value.
-///
-/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitve
-/// value held directly, outside of any allocation (`Scalar`). For `ByRef`-values, we remember
-/// whether the pointer is supposed to be aligned or not (also see Place).
-///
-/// For optimization of a few very common cases, there is also a representation for a pair of
-/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
-/// operations and fat pointers. This idea was taken from rustc's codegen.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
-pub enum Value {
- ByRef(Scalar, Align),
- Scalar(ScalarMaybeUndef),
- ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef),
-}
-
-impl<'tcx> ty::TypeFoldable<'tcx> for Value {
- fn super_fold_with<'gcx: 'tcx, F: ty::fold::TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self {
- *self
+ pub fn new_slice(
+ val: Scalar,
+ len: u64,
+ cx: impl HasDataLayout
+ ) -> Self {
+ ConstValue::ScalarPair(val, Scalar::Bits {
+ bits: len as u128,
+ size: cx.data_layout().pointer_size.bytes() as u8,
+ }.into())
}
- fn super_visit_with<V: ty::fold::TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
- false
+
+ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
+ ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into())
}
}
impl<'tcx> Scalar {
- pub fn ptr_null<C: HasDataLayout>(cx: C) -> Self {
+ pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits {
bits: 0,
size: cx.data_layout().pointer_size.bytes() as u8,
}
}
- pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
- ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx)
- }
-
- pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
- ScalarMaybeUndef::Scalar(self).to_value_with_vtable(vtable)
+ pub fn zst() -> Self {
+ Scalar::Bits { bits: 0, size: 0 }
}
- pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> bool {
+ pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
}
}
- pub fn to_value(self) -> Value {
- Value::Scalar(ScalarMaybeUndef::Scalar(self))
+ pub fn from_bool(b: bool) -> Self {
+ Scalar::Bits { bits: b as u128, size: 1 }
+ }
+
+ pub fn from_char(c: char) -> Self {
+ Scalar::Bits { bits: c as u128, size: 4 }
+ }
+
+ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ match self {
+ Scalar::Bits { bits, size } => {
+ assert_eq!(target_size.bytes(), size as u64);
+ assert_ne!(size, 0, "to_bits cannot be used with zsts");
+ Ok(bits)
+ }
+ Scalar::Ptr(_) => err!(ReadPointerAsBytes),
+ }
+ }
+
+ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
+ match self {
+ Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
+ Scalar::Bits { .. } => err!(ReadBytesAsPointer),
+ Scalar::Ptr(p) => Ok(p),
+ }
+ }
+
+ pub fn is_bits(self) -> bool {
+ match self {
+ Scalar::Bits { .. } => true,
+ _ => false,
+ }
+ }
+
+ pub fn is_ptr(self) -> bool {
+ match self {
+ Scalar::Ptr(_) => true,
+ _ => false,
+ }
+ }
+
+ pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ match self {
+ Scalar::Bits { bits: 0, size: 1 } => Ok(false),
+ Scalar::Bits { bits: 1, size: 1 } => Ok(true),
+ _ => err!(InvalidBool),
+ }
}
}
impl From<Pointer> for Scalar {
+ #[inline(always)]
fn from(ptr: Pointer) -> Self {
Scalar::Ptr(ptr)
}
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
- /// Do not try to read less or more bytes that that
+ /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
}
impl From<Scalar> for ScalarMaybeUndef {
+ #[inline(always)]
fn from(s: Scalar) -> Self {
ScalarMaybeUndef::Scalar(s)
}
}
-impl ScalarMaybeUndef {
- pub fn unwrap_or_err(self) -> EvalResult<'static, Scalar> {
+impl<'tcx> ScalarMaybeUndef {
+ pub fn not_undef(self) -> EvalResult<'static, Scalar> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes),
}
}
- pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
- Value::ScalarPair(self, Scalar::Bits {
- bits: len as u128,
- size: cx.data_layout().pointer_size.bytes() as u8,
- }.into())
- }
-
- pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
- Value::ScalarPair(self, Scalar::Ptr(vtable).into())
- }
-
- pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
- match self {
- ScalarMaybeUndef::Scalar(scalar) => {
- scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar)
- },
- ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef)
- }
- }
-}
-
-impl<'tcx> Scalar {
- pub fn from_bool(b: bool) -> Self {
- Scalar::Bits { bits: b as u128, size: 1 }
- }
-
- pub fn from_char(c: char) -> Self {
- Scalar::Bits { bits: c as u128, size: 4 }
- }
-
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
- match self {
- Scalar::Bits { bits, size } => {
- assert_eq!(target_size.bytes(), size as u64);
- assert_ne!(size, 0, "to_bits cannot be used with zsts");
- Ok(bits)
- }
- Scalar::Ptr(_) => err!(ReadPointerAsBytes),
- }
- }
-
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
- match self {
- Scalar::Bits {..} => err!(ReadBytesAsPointer),
- Scalar::Ptr(p) => Ok(p),
- }
+ self.not_undef()?.to_ptr()
}
- pub fn is_bits(self) -> bool {
- match self {
- Scalar::Bits { .. } => true,
- _ => false,
- }
- }
-
- pub fn is_ptr(self) -> bool {
- match self {
- Scalar::Ptr(_) => true,
- _ => false,
- }
+ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ self.not_undef()?.to_bits(target_size)
}
pub fn to_bool(self) -> EvalResult<'tcx, bool> {
- match self {
- Scalar::Bits { bits: 0, size: 1 } => Ok(false),
- Scalar::Bits { bits: 1, size: 1 } => Ok(true),
- _ => err!(InvalidBool),
- }
+ self.not_undef()?.to_bool()
}
}
use hir::def_id::DefId;
use hir::{self, HirId, InlineAsm};
use middle::region;
-use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef};
+use mir::interpret::{EvalErrorKind, Scalar, ScalarMaybeUndef, ConstValue};
use mir::visit::MirVisitable;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::sync::ReadGuard;
use rustc_serialize as serialize;
/// Drop(P, goto BB1, unwind BB2)
/// }
/// BB1 {
- /// // P is now unitialized
+ /// // P is now uninitialized
/// P <- V
/// }
/// BB2 {
- /// // P is now unitialized -- its dtor panicked
+ /// // P is now uninitialized -- its dtor panicked
/// P <- V
/// }
/// ```
.iter()
.map(|&u| {
let mut s = String::new();
- print_miri_value(
- Scalar::Bits {
- bits: u,
- size: size.bytes() as u8,
- }.to_value(),
- switch_ty,
- &mut s,
- ).unwrap();
+ let c = ty::Const {
+ val: ConstValue::Scalar(Scalar::Bits {
+ bits: u,
+ size: size.bytes() as u8,
+ }.into()),
+ ty: switch_ty,
+ };
+ fmt_const_val(&mut s, &c).unwrap();
s.into()
})
.chain(iter::once(String::from("otherwise").into()))
region
} else {
// Do not even print 'static
- "".to_owned()
+ String::new()
};
write!(fmt, "&{}{}{:?}", region, kind_str, place)
}
}
/// Write a `ConstValue` in a way closer to the original source code than the `Debug` output.
-pub fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ty::Const) -> fmt::Result {
- if let Some(value) = const_val.to_byval_value() {
- print_miri_value(value, const_val.ty, fmt)
- } else {
- write!(fmt, "{:?}:{}", const_val.val, const_val.ty)
- }
-}
-
-pub fn print_miri_value<W: Write>(value: Value, ty: Ty, f: &mut W) -> fmt::Result {
- use ty::TypeVariants::*;
+pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const) -> fmt::Result {
+ use ty::TyKind::*;
+ let value = const_val.val;
+ let ty = const_val.ty;
// print some primitives
- if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value {
+ if let ConstValue::Scalar(Scalar::Bits { bits, .. }) = value {
match ty.sty {
- TyBool if bits == 0 => return write!(f, "false"),
- TyBool if bits == 1 => return write!(f, "true"),
- TyFloat(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)),
- TyFloat(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)),
- TyUint(ui) => return write!(f, "{:?}{}", bits, ui),
- TyInt(i) => {
+ Bool if bits == 0 => return write!(f, "false"),
+ Bool if bits == 1 => return write!(f, "true"),
+ Float(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)),
+ Float(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)),
+ Uint(ui) => return write!(f, "{:?}{}", bits, ui),
+ Int(i) => {
let bit_width = ty::tls::with(|tcx| {
let ty = tcx.lift_to_global(&ty).unwrap();
tcx.layout_of(ty::ParamEnv::empty().and(ty))
let shift = 128 - bit_width;
return write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i);
}
- TyChar => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()),
+ Char => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()),
_ => {},
}
}
// print function definitons
- if let TyFnDef(did, _) = ty.sty {
+ if let FnDef(did, _) = ty.sty {
return write!(f, "{}", item_path_str(did));
}
// print string literals
- if let Value::ScalarPair(ptr, len) = value {
- if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr {
+ if let ConstValue::ScalarPair(ptr, len) = value {
+ if let Scalar::Ptr(ptr) = ptr {
if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len {
- if let TyRef(_, &ty::TyS { sty: TyStr, .. }, _) = ty.sty {
+ if let Ref(_, &ty::TyS { sty: Str, .. }, _) = ty.sty {
return ty::tls::with(|tcx| {
let alloc = tcx.alloc_map.lock().get(ptr.alloc_id);
if let Some(interpret::AllocType::Memory(alloc)) = alloc {
let ty = self.to_ty(tcx);
PlaceTy::Ty {
ty: match ty.sty {
- ty::TyArray(inner, size) => {
+ ty::Array(inner, size) => {
let size = size.unwrap_usize(tcx);
let len = size - (from as u64) - (to as u64);
tcx.mk_array(inner, len)
}
- ty::TySlice(..) => ty,
+ ty::Slice(..) => ty,
_ => {
bug!("cannot subslice non-array type: `{:?}`", self)
}
}
ProjectionElem::Downcast(adt_def1, index) =>
match self.to_ty(tcx).sty {
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
assert!(adt_def.is_enum());
assert!(index < adt_def.variants.len());
assert_eq!(adt_def, adt_def1);
}
Rvalue::Discriminant(ref place) => {
let ty = place.ty(local_decls, tcx).to_ty(tcx);
- if let ty::TyAdt(adt_def, _) = ty.sty {
+ if let ty::Adt(adt_def, _) = ty.sty {
adt_def.repr.discr_type().to_ty(tcx)
} else {
// This can only be `0`, for now, so `u8` will suffice.
// (A, [C])]
//
// Now that the top of the stack has no successors we can traverse, each item will
- // be popped off during iteration until we get back to `A`. This yeilds [E, D, B].
+ // be popped off during iteration until we get back to `A`. This yields [E, D, B].
//
// When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
// since we've already visited `E`, that child isn't added to the stack. The last
"perform LLVM link-time optimizations"),
target_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
"select target processor (rustc --print target-cpus for details)"),
- target_feature: String = ("".to_string(), parse_string, [TRACKED],
+ target_feature: String = (String::new(), parse_string, [TRACKED],
"target specific attributes (rustc --print target-features for details)"),
passes: Vec<String> = (Vec::new(), parse_list, [TRACKED],
"a list of extra LLVM passes to run (space separated)"),
"choose the code model to use (rustc --print code-models for details)"),
metadata: Vec<String> = (Vec::new(), parse_list, [TRACKED],
"metadata to mangle symbol names with"),
- extra_filename: String = ("".to_string(), parse_string, [UNTRACKED],
+ extra_filename: String = (String::new(), parse_string, [UNTRACKED],
"extra data to put in each output filename"),
codegen_units: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
"divide crate into N units to optimize in parallel"),
};
if cg.target_feature == "help" {
prints.push(PrintRequest::TargetFeatures);
- cg.target_feature = "".to_string();
+ cg.target_feature = String::new();
}
if cg.relocation_model.as_ref().map_or(false, |s| s == "help") {
prints.push(PrintRequest::RelocationModels);
// The core logic responsible for computing the bounds for our synthesized impl.
//
// To calculate the bounds, we call SelectionContext.select in a loop. Like FulfillmentContext,
- // we recursively select the nested obligations of predicates we encounter. However, whenver we
+ // we recursively select the nested obligations of predicates we encounter. However, whenever we
// encounter an UnimplementedError involving a type parameter, we add it to our ParamEnv. Since
// our goal is to determine when a particular type implements an auto trait, Unimplemented
// errors tell us what conditions need to be met.
//
- // This method ends up working somewhat similary to FulfillmentContext, but with a few key
+ // This method ends up working somewhat similarly to FulfillmentContext, but with a few key
// differences. FulfillmentContext works under the assumption that it's dealing with concrete
// user code. Accordingly, it considers all possible ways that a Predicate could be met - which
// isn't always what we want for a synthesized impl. For example, given the predicate 'T:
// we'll pick up any nested bounds, without ever inferring that 'T: IntoIterator' needs to
// hold.
//
- // One additonal consideration is supertrait bounds. Normally, a ParamEnv is only ever
+ // One additional consideration is supertrait bounds. Normally, a ParamEnv is only ever
// constructed once for a given type. As part of the construction process, the ParamEnv will
// have any supertrait bounds normalized - e.g. if we have a type 'struct Foo<T: Copy>', the
// ParamEnv will contain 'T: Copy' and 'T: Clone', since 'Copy: Clone'. When we construct our
- // own ParamEnv, we need to do this outselves, through traits::elaborate_predicates, or else
+ // own ParamEnv, we need to do this ourselves, through traits::elaborate_predicates, or else
// SelectionContext will choke on the missing predicates. However, this should never show up in
// the final synthesized generics: we don't want our generated docs page to contain something
// like 'T: Copy + Clone', as that's redundant. Therefore, we keep track of a separate
}
return match substs.type_at(0).sty {
- ty::TyParam(_) => true,
- ty::TyProjection(p) => self.is_of_param(p.substs),
+ ty::Param(_) => true,
+ ty::Projection(p) => self.is_of_param(p.substs),
_ => false,
};
}
fn is_possibly_remote_type(ty: Ty, _in_crate: InCrate) -> bool {
match ty.sty {
- ty::TyProjection(..) | ty::TyParam(..) => true,
+ ty::Projection(..) | ty::Param(..) => true,
_ => false,
}
}
fn fundamental_ty(tcx: TyCtxt, ty: Ty) -> bool {
match ty.sty {
- ty::TyRef(..) => true,
- ty::TyAdt(def, _) => def.is_fundamental(),
- ty::TyDynamic(ref data, ..) => {
+ ty::Ref(..) => true,
+ ty::Adt(def, _) => def.is_fundamental(),
+ ty::Dynamic(ref data, ..) => {
data.principal().map_or(false, |p| tcx.has_attr(p.def_id(), "fundamental"))
}
_ => false
debug!("ty_is_local_constructor({:?})", ty);
match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyStr |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyArray(..) |
- ty::TySlice(..) |
- ty::TyRawPtr(..) |
- ty::TyRef(..) |
- ty::TyNever |
- ty::TyTuple(..) |
- ty::TyParam(..) |
- ty::TyProjection(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Str |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Array(..) |
+ ty::Slice(..) |
+ ty::RawPtr(..) |
+ ty::Ref(..) |
+ ty::Never |
+ ty::Tuple(..) |
+ ty::Param(..) |
+ ty::Projection(..) => {
false
}
- ty::TyInfer(..) => match in_crate {
+ ty::Infer(..) => match in_crate {
InCrate::Local => false,
// The inference variable might be unified with a local
// type in that remote crate.
InCrate::Remote => true,
},
- ty::TyAdt(def, _) => def_id_is_local(def.did, in_crate),
- ty::TyForeign(did) => def_id_is_local(did, in_crate),
+ ty::Adt(def, _) => def_id_is_local(def.did, in_crate),
+ ty::Foreign(did) => def_id_is_local(did, in_crate),
- ty::TyDynamic(ref tt, ..) => {
+ ty::Dynamic(ref tt, ..) => {
tt.principal().map_or(false, |p| {
def_id_is_local(p.def_id(), in_crate)
})
}
- ty::TyError => {
+ ty::Error => {
true
}
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyAnon(..) => {
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::GeneratorWitness(..) |
+ ty::Anon(..) => {
bug!("ty_is_local invoked on unexpected type: {:?}", ty)
}
}
/// if the type can be equated to any type.
fn type_category<'tcx>(t: Ty<'tcx>) -> Option<u32> {
match t.sty {
- ty::TyBool => Some(0),
- ty::TyChar => Some(1),
- ty::TyStr => Some(2),
- ty::TyInt(..) | ty::TyUint(..) | ty::TyInfer(ty::IntVar(..)) => Some(3),
- ty::TyFloat(..) | ty::TyInfer(ty::FloatVar(..)) => Some(4),
- ty::TyRef(..) | ty::TyRawPtr(..) => Some(5),
- ty::TyArray(..) | ty::TySlice(..) => Some(6),
- ty::TyFnDef(..) | ty::TyFnPtr(..) => Some(7),
- ty::TyDynamic(..) => Some(8),
- ty::TyClosure(..) => Some(9),
- ty::TyTuple(..) => Some(10),
- ty::TyProjection(..) => Some(11),
- ty::TyParam(..) => Some(12),
- ty::TyAnon(..) => Some(13),
- ty::TyNever => Some(14),
- ty::TyAdt(adt, ..) => match adt.adt_kind() {
+ ty::Bool => Some(0),
+ ty::Char => Some(1),
+ ty::Str => Some(2),
+ ty::Int(..) | ty::Uint(..) | ty::Infer(ty::IntVar(..)) => Some(3),
+ ty::Float(..) | ty::Infer(ty::FloatVar(..)) => Some(4),
+ ty::Ref(..) | ty::RawPtr(..) => Some(5),
+ ty::Array(..) | ty::Slice(..) => Some(6),
+ ty::FnDef(..) | ty::FnPtr(..) => Some(7),
+ ty::Dynamic(..) => Some(8),
+ ty::Closure(..) => Some(9),
+ ty::Tuple(..) => Some(10),
+ ty::Projection(..) => Some(11),
+ ty::Param(..) => Some(12),
+ ty::Anon(..) => Some(13),
+ ty::Never => Some(14),
+ ty::Adt(adt, ..) => match adt.adt_kind() {
AdtKind::Struct => Some(15),
AdtKind::Union => Some(16),
AdtKind::Enum => Some(17),
},
- ty::TyGenerator(..) => Some(18),
- ty::TyForeign(..) => Some(19),
- ty::TyGeneratorWitness(..) => Some(20),
- ty::TyInfer(..) | ty::TyError => None
+ ty::Generator(..) => Some(18),
+ ty::Foreign(..) => Some(19),
+ ty::GeneratorWitness(..) => Some(20),
+ ty::Infer(..) | ty::Error => None
}
}
match (type_category(a), type_category(b)) {
(Some(cat_a), Some(cat_b)) => match (&a.sty, &b.sty) {
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => def_a == def_b,
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => def_a == def_b,
_ => cat_a == cat_b
},
// infer and error can be equated to all types
if len > 5 {
format!("\nand {} others", len - 4)
} else {
- "".to_owned()
+ String::new()
}
));
}
}
// If this error is due to `!: Trait` not implemented but `(): Trait` is
- // implemented, and fallback has occured, then it could be due to a
+ // implemented, and fallback has occurred, then it could be due to a
// variable that used to fallback to `()` now falling back to `!`. Issue a
// note informing about the change in behaviour.
if trait_predicate.skip_binder().self_ty().is_never()
let found_trait_ty = found_trait_ref.self_ty();
let found_did = match found_trait_ty.sty {
- ty::TyClosure(did, _) |
- ty::TyForeign(did) |
- ty::TyFnDef(did, _) => Some(did),
- ty::TyAdt(def, _) => Some(def.did),
+ ty::Closure(did, _) |
+ ty::Foreign(did) |
+ ty::FnDef(did, _) => Some(did),
+ ty::Adt(def, _) => Some(def.did),
_ => None,
};
let found_span = found_did.and_then(|did| {
}).map(|sp| self.tcx.sess.source_map().def_span(sp)); // the sp could be an fn def
let found = match found_trait_ref.skip_binder().substs.type_at(1).sty {
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.map(|_| ArgKind::empty()).collect::<Vec<_>>(),
_ => vec![ArgKind::empty()],
};
let expected = match expected_trait_ref.skip_binder().substs.type_at(1).sty {
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.map(|t| match t.sty {
- ty::TypeVariants::TyTuple(ref tys) => ArgKind::Tuple(
+ ty::Tuple(ref tys) => ArgKind::Tuple(
Some(span),
tys.iter()
.map(|ty| ("_".to_owned(), ty.sty.to_string()))
let mut trait_type = trait_ref.self_ty();
for refs_remaining in 0..refs_number {
- if let ty::TypeVariants::TyRef(_, t_type, _) = trait_type.sty {
+ if let ty::Ref(_, t_type, _) = trait_type.sty {
trait_type = t_type;
let substs = self.tcx.mk_substs_trait(trait_type, &[]);
remove_refs);
err.span_suggestion_short_with_applicability(
- sp, &format_str, String::from(""), Applicability::MachineApplicable
+ sp, &format_str, String::new(), Applicability::MachineApplicable
);
break;
}
.collect::<Vec<String>>()
.join(", "))
} else {
- "".to_owned()
+ String::new()
},
);
err.span_suggestion_with_applicability(
fn build_fn_sig_string<'a, 'gcx, 'tcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: &ty::TraitRef<'tcx>) -> String {
let inputs = trait_ref.substs.type_at(1);
- let sig = if let ty::TyTuple(inputs) = inputs.sty {
+ let sig = if let ty::Tuple(inputs) = inputs.sty {
tcx.mk_fn_sig(
inputs.iter().map(|&x| x),
tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })),
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx }
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- if let ty::TyParam(ty::ParamTy {name, ..}) = ty.sty {
+ if let ty::Param(ty::ParamTy {name, ..}) = ty.sty {
let infcx = self.infcx;
self.var_map.entry(ty).or_insert_with(||
infcx.next_ty_var(
/// argument. This has no name (`_`) and no source spans..
pub fn from_expected_ty(t: Ty<'_>) -> ArgKind {
match t.sty {
- ty::TyTuple(ref tys) => ArgKind::Tuple(
+ ty::Tuple(ref tys) => ArgKind::Tuple(
None,
tys.iter()
.map(|ty| ("_".to_owned(), ty.sty.to_string()))
.map(|t| selcx.infcx().resolve_type_vars_if_possible(&t))
.filter(|t| t.has_infer_types())
.flat_map(|t| t.walk())
- .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
+ .filter(|t| match t.sty { ty::Infer(_) => true, _ => false })
.collect()
}
use middle::region;
use mir::interpret::ConstEvalErr;
use ty::subst::Substs;
-use ty::{self, AdtKind, Slice, Ty, TyCtxt, GenericParamDefKind, ToPredicate};
+use ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate};
use ty::error::{ExpectedFound, TypeError};
use ty::fold::{TypeFolder, TypeFoldable, TypeVisitor};
use infer::{InferCtxt};
CannotProve,
}
-pub type Goals<'tcx> = &'tcx Slice<Goal<'tcx>>;
+pub type Goals<'tcx> = &'tcx List<Goal<'tcx>>;
impl<'tcx> DomainGoal<'tcx> {
pub fn into_goal(self) -> Goal<'tcx> {
}
/// Multiple clauses.
-pub type Clauses<'tcx> = &'tcx Slice<Clause<'tcx>>;
+pub type Clauses<'tcx> = &'tcx List<Clause<'tcx>>;
/// A "program clause" has the form `D :- G1, ..., Gn`. It is saying
/// that the domain goal `D` is true if `G1...Gn` are provable. This
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
- ty::TyParam(ref param_ty) => {
+ ty::Param(ref param_ty) => {
if param_ty.is_self() {
error = true;
}
false // no contained types to walk
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
+ ty::Anon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
Reveal::UserFacing => ty,
}
}
- ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*)
+ ty::Projection(ref data) if !data.has_escaping_regions() => { // (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
/// return an associated obligation that, when fulfilled, will lead to
/// an error.
///
-/// Note that we used to return `TyError` here, but that was quite
+/// Note that we used to return `Error` here, but that was quite
/// dubious -- the premise was that an error would *eventually* be
/// reported, when the obligation was processed. But in general once
-/// you see a `TyError` you are supposed to be able to assume that an
+/// you see an `Error` you are supposed to be able to assume that an
/// error *has been* reported, so that you can take whatever heuristic
/// paths you want to take. To make things worse, it was possible for
/// cycles to arise, where you basically had a setup like `<MyType<$0>
let tcx = selcx.tcx();
// Check whether the self-type is itself a projection.
let (def_id, substs) = match obligation_trait_ref.self_ty().sty {
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
(data.trait_ref(tcx).def_id, data.substs)
}
- ty::TyAnon(def_id, substs) => (def_id, substs),
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Anon(def_id, substs) => (def_id, substs),
+ ty::Infer(ty::TyVar(_)) => {
// If the self-type is an inference variable, then it MAY wind up
// being a projected type, so induce an ambiguity.
candidate_set.mark_ambiguous();
debug!("confirm_object_candidate(object_ty={:?})",
object_ty);
let data = match object_ty.sty {
- ty::TyDynamic(ref data, ..) => data,
+ ty::Dynamic(ref data, ..) => data,
_ => {
span_bug!(
obligation.cause.span,
// This means that the impl is missing a definition for the
// associated type. This error will be reported by the type
// checker method `check_impl_items_against_trait`, so here we
- // just return TyError.
+ // just return Error.
debug!("confirm_impl_candidate: no associated type {:?} for {:?}",
assoc_ty.item.ident,
obligation.predicate);
use infer::at::At;
use infer::InferOk;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::iter::FromIterator;
use syntax::source_map::Span;
use ty::subst::Kind;
// Errors and ambiguity in dropck occur in two cases:
// - unresolved inference variables at the end of typeck
// - non well-formed types where projections cannot be resolved
- // Either of these should hvae created an error before.
+ // Either of these should have created an error before.
tcx.sess
.delay_span_bug(span, "dtorck encountered internal error");
return InferOk {
// None of these types have a destructor and hence they do not
// require anything in particular to outlive the dtor's
// execution.
- ty::TyInfer(ty::FreshIntTy(_))
- | ty::TyInfer(ty::FreshFloatTy(_))
- | ty::TyBool
- | ty::TyInt(_)
- | ty::TyUint(_)
- | ty::TyFloat(_)
- | ty::TyNever
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyChar
- | ty::TyGeneratorWitness(..)
- | ty::TyRawPtr(_)
- | ty::TyRef(..)
- | ty::TyStr
- | ty::TyForeign(..)
- | ty::TyError => true,
+ ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Bool
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Char
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Str
+ | ty::Foreign(..)
+ | ty::Error => true,
// [T; N] and [T] have same properties as T.
- ty::TyArray(ty, _) | ty::TySlice(ty) => trivial_dropck_outlives(tcx, ty),
+ ty::Array(ty, _) | ty::Slice(ty) => trivial_dropck_outlives(tcx, ty),
// (T1..Tn) and closures have same properties as T1..Tn --
// check if *any* of those are trivial.
- ty::TyTuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)),
- ty::TyClosure(def_id, ref substs) => substs
+ ty::Tuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)),
+ ty::Closure(def_id, ref substs) => substs
.upvar_tys(def_id, tcx)
.all(|t| trivial_dropck_outlives(tcx, t)),
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if Some(def.did) == tcx.lang_items().manually_drop() {
// `ManuallyDrop` never has a dtor.
true
}
// The following *might* require a destructor: it would deeper inspection to tell.
- ty::TyDynamic(..)
- | ty::TyProjection(..)
- | ty::TyParam(_)
- | ty::TyAnon(..)
- | ty::TyInfer(_)
- | ty::TyGenerator(..) => false,
+ ty::Dynamic(..)
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Anon(..)
+ | ty::Infer(_)
+ | ty::Generator(..) => false,
}
}
// except according to those terms.
use infer::InferCtxt;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::{EvaluationResult, PredicateObligation, SelectionContext,
TraitQueryMode, OverflowError};
use infer::at::At;
use infer::{InferCtxt, InferOk};
use mir::interpret::{ConstValue, GlobalId};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::project::Normalized;
use traits::{Obligation, ObligationCause, PredicateObligation, Reveal};
use ty::fold::{TypeFoldable, TypeFolder};
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => {
+ ty::Anon(def_id, substs) if !substs.has_escaping_regions() => {
// (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
let concrete_ty = generic_ty.subst(self.tcx(), substs);
self.anon_depth += 1;
if concrete_ty == ty {
- // The type in question can only be inferred in terms of itself. This
- // is likely a user code issue, not a compiler issue. Thus, we will
- // induce a cycle error by calling the parent query again on the type.
- //
- // FIXME: Perhaps a better solution would be to have fold_ty()
- // itself be a query. Then, a type fold cycle would be detected
- // and reported more naturally as part of the query system, rather
- // than forcing it here.
- //
- // FIXME: Need a better span than just one pointing to the type def.
- // Should point to a defining use of the type that results in this
- // un-normalizable state.
- if let Some(param_env_lifted) =
- self.tcx().lift_to_global(&self.param_env)
- {
- if let Some(ty_lifted) = self.tcx().lift_to_global(&concrete_ty) {
- let span = self.tcx().def_span(def_id);
- self.tcx()
- .global_tcx()
- .at(span)
- .normalize_ty_after_erasing_regions(
- param_env_lifted.and(ty_lifted),
- );
- self.tcx().sess.abort_if_errors();
- }
- }
- // If a cycle error can't be emitted, indicate a NoSolution error
- // and let the caller handle it.
- self.error = true;
- return concrete_ty;
+ bug!(
+ "infinite recursion generic_ty: {:#?}, substs: {:#?}, \
+ concrete_ty: {:#?}, ty: {:#?}",
+ generic_ty,
+ substs,
+ concrete_ty,
+ ty
+ );
}
let folded_ty = self.fold_ty(concrete_ty);
self.anon_depth -= 1;
}
}
- ty::TyProjection(ref data) if !data.has_escaping_regions() => {
+ ty::Projection(ref data) if !data.has_escaping_regions() => {
// (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
use infer::InferCtxt;
use syntax::ast;
use syntax::source_map::Span;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::{FulfillmentContext, ObligationCause, TraitEngine, TraitEngineExt};
use traits::query::NoSolution;
use ty::{self, Ty, TyCtxt};
use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryRegionConstraint,
QueryResult};
use infer::{InferCtxt, InferOk};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::fmt;
use std::rc::Rc;
use traits::query::Fallible;
&IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
- } else { "".to_string() };
+ } else { String::new() };
format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
}
&IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
- } else { "".to_string() };
+ } else { String::new() };
format!("upstream crates may add new impl of trait `{}`{} \
in future versions",
trait_desc, self_desc)
-> SelectionResult<'tcx, SelectionCandidate<'tcx>>
{
if stack.obligation.predicate.references_error() {
- // If we encounter a `TyError`, we generally prefer the
+ // If we encounter an `Error`, we generally prefer the
// most "optimistic" result in response -- that is, the
// one least likely to report downstream errors. But
// because this routine is shared by coherence and by
// before we go into the whole skolemization thing, just
// quickly check if the self-type is a projection at all.
match obligation.predicate.skip_binder().trait_ref.self_ty().sty {
- ty::TyProjection(_) | ty::TyAnon(..) => {}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Projection(_) | ty::Anon(..) => {}
+ ty::Infer(ty::TyVar(_)) => {
span_bug!(obligation.cause.span,
"Self=_ should have been handled by assemble_candidates");
}
skol_map);
let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty {
- ty::TyProjection(ref data) =>
+ ty::Projection(ref data) =>
(data.trait_ref(self.tcx()).def_id, data.substs),
- ty::TyAnon(def_id, substs) => (def_id, substs),
+ ty::Anon(def_id, substs) => (def_id, substs),
_ => {
span_bug!(
obligation.cause.span,
// type/region parameters
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
debug!("assemble_generator_candidates: self_ty={:?} obligation={:?}",
self_ty,
obligation);
candidates.vec.push(GeneratorCandidate);
Ok(())
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_generator_candidates: ambiguous self-type");
candidates.ambiguous = true;
return Ok(());
// touch bound regions, they just capture the in-scope
// type/region parameters
match obligation.self_ty().skip_binder().sty {
- ty::TyClosure(closure_def_id, closure_substs) => {
+ ty::Closure(closure_def_id, closure_substs) => {
debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}",
kind, obligation);
match self.infcx.closure_kind(closure_def_id, closure_substs) {
};
Ok(())
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
return Ok(());
// ok to skip binder because what we are inspecting doesn't involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_fn_pointer_candidates: ambiguous self-type");
candidates.ambiguous = true; // could wind up being a fn() type
}
// provide an impl, but only for suitable `fn` pointers
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
if let ty::FnSig {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
if self.tcx().trait_is_auto(def_id) {
match self_ty.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
// For object types, we don't know what the closed
// over types are. This means we conservatively
// say nothing; a candidate may be added by
// `assemble_candidates_from_object_ty`.
}
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
// Since the contents of foreign types is unknown,
// we don't add any `..` impl. Default traits could
// still be provided by a manual implementation for
// this trait and type.
}
- ty::TyParam(..) |
- ty::TyProjection(..) => {
+ ty::Param(..) |
+ ty::Projection(..) => {
// In these cases, we don't know what the actual
// type is. Therefore, we cannot break it down
// into its constituent types. So we don't
// for an example of a test case that exercises
// this path.
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// the auto impl might apply, we don't know
candidates.ambiguous = true;
}
// any LBR.
let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
let poly_trait_ref = match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
if data.auto_traits().any(|did| did == obligation.predicate.def_id()) {
debug!("assemble_candidates_from_object_ty: matched builtin bound, \
pushing candidate");
None => return,
}
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_candidates_from_object_ty: ambiguous");
candidates.ambiguous = true; // could wind up being an object type
return;
let may_apply = match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
- (&ty::TyDynamic(ref data_a, ..), &ty::TyDynamic(ref data_b, ..)) => {
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
// Upcasts permit two things:
//
// 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
}
// T -> Trait.
- (_, &ty::TyDynamic(..)) => true,
+ (_, &ty::Dynamic(..)) => true,
// Ambiguous handling is below T -> Trait, because inference
// variables can still implement Unsize<Trait> and nested
// obligations will have the final say (likely deferred).
- (&ty::TyInfer(ty::TyVar(_)), _) |
- (_, &ty::TyInfer(ty::TyVar(_))) => {
+ (&ty::Infer(ty::TyVar(_)), _) |
+ (_, &ty::Infer(ty::TyVar(_))) => {
debug!("assemble_candidates_for_unsizing: ambiguous");
candidates.ambiguous = true;
false
}
// [T; n] -> [T].
- (&ty::TyArray(..), &ty::TySlice(_)) => true,
+ (&ty::Array(..), &ty::Slice(_)) => true,
// Struct<T> -> Struct<U>.
- (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => {
+ (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
def_id_a == def_id_b
}
// (.., T) -> (.., U).
- (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
tys_a.len() == tys_b.len()
}
obligation.predicate.skip_binder().self_ty());
match self_ty.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
- ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) |
- ty::TyNever | ty::TyError => {
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) |
+ ty::Char | ty::Ref(..) | ty::Generator(..) |
+ ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) |
+ ty::Never | ty::Error => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
- ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) => None,
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
Where(ty::Binder::bind(tys.last().into_iter().cloned().collect()))
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder::bind(
))
}
- ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None,
- ty::TyInfer(ty::TyVar(_)) => Ambiguous,
+ ty::Projection(_) | ty::Param(_) | ty::Anon(..) => None,
+ ty::Infer(ty::TyVar(_)) => Ambiguous,
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}",
self_ty);
}
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match self_ty.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyError => {
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Error => {
Where(ty::Binder::dummy(Vec::new()))
}
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyChar | ty::TyRawPtr(..) | ty::TyNever |
- ty::TyRef(_, _, hir::MutImmutable) => {
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::Char | ty::RawPtr(..) | ty::Never |
+ ty::Ref(_, _, hir::MutImmutable) => {
// Implementations provided in libcore
None
}
- ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) |
- ty::TyGenerator(..) | ty::TyGeneratorWitness(..) | ty::TyForeign(..) |
- ty::TyRef(_, _, hir::MutMutable) => {
+ ty::Dynamic(..) | ty::Str | ty::Slice(..) |
+ ty::Generator(..) | ty::GeneratorWitness(..) | ty::Foreign(..) |
+ ty::Ref(_, _, hir::MutMutable) => {
None
}
- ty::TyArray(element_ty, _) => {
+ ty::Array(element_ty, _) => {
// (*) binder moved here
Where(ty::Binder::bind(vec![element_ty]))
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
// (*) binder moved here
Where(ty::Binder::bind(tys.to_vec()))
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let trait_id = obligation.predicate.def_id();
let is_copy_trait = Some(trait_id) == self.tcx().lang_items().copy_trait();
let is_clone_trait = Some(trait_id) == self.tcx().lang_items().clone_trait();
}
}
- ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => {
+ ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Anon(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}",
self_ty);
}
/// ```
fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
match t.sty {
- ty::TyUint(_) |
- ty::TyInt(_) |
- ty::TyBool |
- ty::TyFloat(_) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyStr |
- ty::TyError |
- ty::TyInfer(ty::IntVar(_)) |
- ty::TyInfer(ty::FloatVar(_)) |
- ty::TyNever |
- ty::TyChar => {
+ ty::Uint(_) |
+ ty::Int(_) |
+ ty::Bool |
+ ty::Float(_) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Str |
+ ty::Error |
+ ty::Infer(ty::IntVar(_)) |
+ ty::Infer(ty::FloatVar(_)) |
+ ty::Never |
+ ty::Char => {
Vec::new()
}
- ty::TyDynamic(..) |
- ty::TyParam(..) |
- ty::TyForeign(..) |
- ty::TyProjection(..) |
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::TyVar(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Dynamic(..) |
+ ty::Param(..) |
+ ty::Foreign(..) |
+ ty::Projection(..) |
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::TyVar(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble constituent types of unexpected type: {:?}",
t);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
- ty::TyRef(_, element_ty, _) => {
+ ty::RawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
+ ty::Ref(_, element_ty, _) => {
vec![element_ty]
},
- ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
+ ty::Array(element_ty, _) | ty::Slice(element_ty) => {
vec![element_ty]
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
tys.to_vec()
}
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
substs.upvar_tys(def_id, self.tcx()).collect()
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
let witness = substs.witness(def_id, self.tcx());
substs.upvar_tys(def_id, self.tcx()).chain(iter::once(witness)).collect()
}
- ty::TyGeneratorWitness(types) => {
+ ty::GeneratorWitness(types) => {
// This is sound because no regions in the witness can refer to
// the binder outside the witness. So we'll effectively reuse
// the implicit binder around the witness.
}
// for `PhantomData<T>`, we pass `T`
- ty::TyAdt(def, substs) if def.is_phantom_data() => {
+ ty::Adt(def, substs) if def.is_phantom_data() => {
substs.types().collect()
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
def.all_fields()
.map(|f| f.ty(self.tcx(), substs))
.collect()
}
- ty::TyAnon(def_id, substs) => {
+ ty::Anon(def_id, substs) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
// case that results. -nmatsakis
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let poly_trait_ref = match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
data.principal().unwrap().with_self_ty(self.tcx(), self_ty)
}
_ => {
// type/region parameters
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
let (generator_def_id, substs) = match self_ty.sty {
- ty::TyGenerator(id, substs, _) => (id, substs),
+ ty::Generator(id, substs, _) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation)
};
// type/region parameters
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.sty {
- ty::TyClosure(id, substs) => (id, substs),
+ ty::Closure(id, substs) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation)
};
let mut nested = vec![];
match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
- (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => {
+ (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See assemble_candidates_for_unsizing for more info.
let existential_predicates = data_a.map_bound(|data_a| {
let principal = data_a.principal();
}
// T -> Trait.
- (_, &ty::TyDynamic(ref data, r)) => {
+ (_, &ty::Dynamic(ref data, r)) => {
let mut object_dids =
data.auto_traits().chain(data.principal().map(|p| p.def_id()));
if let Some(did) = object_dids.find(|did| {
}
// [T; n] -> [T].
- (&ty::TyArray(a, _), &ty::TySlice(b)) => {
+ (&ty::Array(a, _), &ty::Slice(b)) => {
let InferOk { obligations, .. } =
self.infcx.at(&obligation.cause, obligation.param_env)
.eq(b, a)
}
// Struct<T> -> Struct<U>.
- (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => {
+ (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
let fields = def
.all_fields()
.map(|f| tcx.type_of(f.did))
let mut ty_params = BitArray::new(substs_a.types().count());
let mut found = false;
for ty in field.walk() {
- if let ty::TyParam(p) = ty.sty {
+ if let ty::Param(p) = ty.sty {
ty_params.insert(p.idx as usize);
found = true;
}
}
// Replace type parameters used in unsizing with
- // TyError and ensure they do not affect any other fields.
+ // Error and ensure they do not affect any other fields.
// This could be checked after type collection for any struct
// with a potentially unsized trailing field.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
}
// (.., T) -> (.., U).
- (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
assert_eq!(tys_a.len(), tys_b.len());
// The last field of the tuple has to exist.
.unwrap()
.subst(infcx.tcx, &source_substs);
- // translate the Self and TyParam parts of the substitution, since those
+ // translate the Self and Param parts of the substitution, since those
// vary across impls
let target_substs = match target_node {
specialization_graph::Node::Impl(target_impl) => {
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<traits::Goal<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<traits::Goal<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter()
.map(|t| t.fold_with(folder))
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<traits::Clause<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<traits::Clause<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter()
.map(|t| t.fold_with(folder))
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
- (_, &ty::TyInfer(ty::FreshTy(_))) |
- (_, &ty::TyInfer(ty::FreshIntTy(_))) |
- (_, &ty::TyInfer(ty::FreshFloatTy(_))) => {
+ (_, &ty::Infer(ty::FreshTy(_))) |
+ (_, &ty::Infer(ty::FreshIntTy(_))) |
+ (_, &ty::Infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) => {
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) => {
Err(TypeError::Sorts(relate::expected_found(self, &a, &b)))
}
- (&ty::TyError, _) | (_, &ty::TyError) => {
+ (&ty::Error, _) | (_, &ty::Error) => {
Ok(self.tcx().types.err)
}
impl<'tcx> CastTy<'tcx> {
pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
match t.sty {
- ty::TyBool => Some(CastTy::Int(IntTy::Bool)),
- ty::TyChar => Some(CastTy::Int(IntTy::Char)),
- ty::TyInt(_) => Some(CastTy::Int(IntTy::I)),
- ty::TyInfer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
- ty::TyInfer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
- ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))),
- ty::TyFloat(_) => Some(CastTy::Float),
- ty::TyAdt(d,_) if d.is_enum() && d.is_payloadfree() =>
+ ty::Bool => Some(CastTy::Int(IntTy::Bool)),
+ ty::Char => Some(CastTy::Int(IntTy::Char)),
+ ty::Int(_) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
+ ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))),
+ ty::Float(_) => Some(CastTy::Float),
+ ty::Adt(d,_) if d.is_enum() && d.is_payloadfree() =>
Some(CastTy::Int(IntTy::CEnum)),
- ty::TyRawPtr(mt) => Some(CastTy::Ptr(mt)),
- ty::TyRef(_, ty, mutbl) => Some(CastTy::RPtr(ty::TypeAndMut { ty, mutbl })),
- ty::TyFnPtr(..) => Some(CastTy::FnPtr),
+ ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
+ ty::Ref(_, ty, mutbl) => Some(CastTy::RPtr(ty::TypeAndMut { ty, mutbl })),
+ ty::FnPtr(..) => Some(CastTy::FnPtr),
_ => None,
}
}
}
impl<'tcx> EncodableWithShorthand for Ty<'tcx> {
- type Variant = ty::TypeVariants<'tcx>;
+ type Variant = ty::TyKind<'tcx>;
fn variant(&self) -> &Self::Variant {
&self.sty
}
})
} else {
let tcx = decoder.tcx();
- Ok(tcx.mk_ty(ty::TypeVariants::decode(decoder)?))
+ Ok(tcx.mk_ty(ty::TyKind::decode(decoder)?))
}
}
#[inline]
pub fn decode_ty_slice<'a, 'tcx, D>(decoder: &mut D)
- -> Result<&'tcx ty::Slice<Ty<'tcx>>, D::Error>
+ -> Result<&'tcx ty::List<Ty<'tcx>>, D::Error>
where D: TyDecoder<'a, 'tcx>,
'tcx: 'a,
{
#[inline]
pub fn decode_existential_predicate_slice<'a, 'tcx, D>(decoder: &mut D)
- -> Result<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, D::Error>
+ -> Result<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>, D::Error>
where D: TyDecoder<'a, 'tcx>,
'tcx: 'a,
{
}
}
- impl<$($typaram),*> SpecializedDecoder<&'tcx ty::Slice<ty::Ty<'tcx>>>
+ impl<$($typaram),*> SpecializedDecoder<&'tcx ty::List<ty::Ty<'tcx>>>
for $DecoderName<$($typaram),*> {
fn specialized_decode(&mut self)
- -> Result<&'tcx ty::Slice<ty::Ty<'tcx>>, Self::Error> {
+ -> Result<&'tcx ty::List<ty::Ty<'tcx>>, Self::Error> {
decode_ty_slice(self)
}
}
}
}
- impl<$($typaram),*> SpecializedDecoder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>
+ impl<$($typaram),*> SpecializedDecoder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>
for $DecoderName<$($typaram),*> {
fn specialized_decode(&mut self)
- -> Result<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, Self::Error> {
+ -> Result<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>, Self::Error> {
decode_existential_predicate_slice(self)
}
}
use ich::{StableHashingContext, NodeIdHashingMode};
use infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};
use infer::outlives::free_region_map::FreeRegionMap;
-use middle::cstore::{CrateStoreDyn, LinkMeta};
+use middle::cstore::CrateStoreDyn;
use middle::cstore::EncodedMetadata;
use middle::lang_items;
use middle::resolve_lifetime::{self, ObjectLifetimeDefault};
use traits;
use traits::{Clause, Clauses, Goal, Goals};
use ty::{self, Ty, TypeAndMut};
-use ty::{TyS, TypeVariants, Slice};
+use ty::{TyS, TyKind, List};
use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const};
use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate};
use ty::RegionKind;
use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
-use ty::TypeVariants::*;
+use ty::TyKind::*;
use ty::GenericParamDefKind;
use ty::layout::{LayoutDetails, TargetDataLayout};
use ty::query;
/// Specifically use a speedy hash algorithm for these hash sets,
/// they're accessed quite often.
type_: InternedSet<'tcx, TyS<'tcx>>,
- type_list: InternedSet<'tcx, Slice<Ty<'tcx>>>,
+ type_list: InternedSet<'tcx, List<Ty<'tcx>>>,
substs: InternedSet<'tcx, Substs<'tcx>>,
- canonical_var_infos: InternedSet<'tcx, Slice<CanonicalVarInfo>>,
+ canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo>>,
region: InternedSet<'tcx, RegionKind>,
- existential_predicates: InternedSet<'tcx, Slice<ExistentialPredicate<'tcx>>>,
- predicates: InternedSet<'tcx, Slice<Predicate<'tcx>>>,
+ existential_predicates: InternedSet<'tcx, List<ExistentialPredicate<'tcx>>>,
+ predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
const_: InternedSet<'tcx, Const<'tcx>>,
- clauses: InternedSet<'tcx, Slice<Clause<'tcx>>>,
- goals: InternedSet<'tcx, Slice<Goal<'tcx>>>,
+ clauses: InternedSet<'tcx, List<Clause<'tcx>>>,
+ goals: InternedSet<'tcx, List<Goal<'tcx>>>,
}
impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> {
fn intern_ty(
local: &CtxtInterners<'tcx>,
global: &CtxtInterners<'gcx>,
- st: TypeVariants<'tcx>
+ st: TyKind<'tcx>
) -> Ty<'tcx> {
let flags = super::flags::FlagComputation::for_sty(&st);
fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
// Ensure our type representation does not grow
#[cfg(target_pointer_width = "64")]
- assert!(mem::size_of::<ty::TypeVariants>() <= 24);
+ assert!(mem::size_of::<ty::TyKind>() <= 24);
#[cfg(target_pointer_width = "64")]
assert!(mem::size_of::<ty::TyS>() <= 32);
&*r
};
CommonTypes {
- bool: mk(TyBool),
- char: mk(TyChar),
- never: mk(TyNever),
- err: mk(TyError),
- isize: mk(TyInt(ast::IntTy::Isize)),
- i8: mk(TyInt(ast::IntTy::I8)),
- i16: mk(TyInt(ast::IntTy::I16)),
- i32: mk(TyInt(ast::IntTy::I32)),
- i64: mk(TyInt(ast::IntTy::I64)),
- i128: mk(TyInt(ast::IntTy::I128)),
- usize: mk(TyUint(ast::UintTy::Usize)),
- u8: mk(TyUint(ast::UintTy::U8)),
- u16: mk(TyUint(ast::UintTy::U16)),
- u32: mk(TyUint(ast::UintTy::U32)),
- u64: mk(TyUint(ast::UintTy::U64)),
- u128: mk(TyUint(ast::UintTy::U128)),
- f32: mk(TyFloat(ast::FloatTy::F32)),
- f64: mk(TyFloat(ast::FloatTy::F64)),
+ bool: mk(Bool),
+ char: mk(Char),
+ never: mk(Never),
+ err: mk(Error),
+ isize: mk(Int(ast::IntTy::Isize)),
+ i8: mk(Int(ast::IntTy::I8)),
+ i16: mk(Int(ast::IntTy::I16)),
+ i32: mk(Int(ast::IntTy::I32)),
+ i64: mk(Int(ast::IntTy::I64)),
+ i128: mk(Int(ast::IntTy::I128)),
+ usize: mk(Uint(ast::UintTy::Usize)),
+ u8: mk(Uint(ast::UintTy::U8)),
+ u16: mk(Uint(ast::UintTy::U16)),
+ u32: mk(Uint(ast::UintTy::U32)),
+ u64: mk(Uint(ast::UintTy::U64)),
+ u128: mk(Uint(ast::UintTy::U128)),
+ f32: mk(Float(ast::FloatTy::F32)),
+ f64: mk(Float(ast::FloatTy::F64)),
re_empty: mk_region(RegionKind::ReEmpty),
re_static: mk_region(RegionKind::ReStatic),
pub(crate) queries: query::Queries<'tcx>,
- // Records the free variables refrenced by every closure
+ // Records the free variables referenced by every closure
// expression. Do not track deps for this, just recompute it from
// scratch every time.
freevars: FxHashMap<DefId, Lrc<Vec<hir::Freevar>>>,
}
impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
- pub fn encode_metadata(self, link_meta: &LinkMeta)
+ pub fn encode_metadata(self)
-> EncodedMetadata
{
- self.cstore.encode_metadata(self, link_meta)
+ self.cstore.encode_metadata(self)
}
}
/// None is returned if the value or one of the components is not part
/// of the provided context.
/// For Ty, None can be returned if either the type interner doesn't
-/// contain the TypeVariants key or if the address of the interned
+/// contain the TyKind key or if the address of the interned
/// pointer differs. The latter case is possible if a primitive type,
/// e.g. `()` or `u8`, was interned in a different context.
pub trait Lift<'tcx>: fmt::Debug {
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Goal<'a>> {
- type Lifted = &'tcx Slice<Goal<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Goal<'a>> {
+ type Lifted = &'tcx List<Goal<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'tcx>,
- ) -> Option<&'tcx Slice<Goal<'tcx>>> {
+ ) -> Option<&'tcx List<Goal<'tcx>>> {
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Clause<'a>> {
- type Lifted = &'tcx Slice<Clause<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Clause<'a>> {
+ type Lifted = &'tcx List<Clause<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'tcx>,
- ) -> Option<&'tcx Slice<Clause<'tcx>>> {
+ ) -> Option<&'tcx List<Clause<'tcx>>> {
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
type Lifted = &'tcx Substs<'tcx>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(&self[..] as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Ty<'a>> {
- type Lifted = &'tcx Slice<Ty<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Ty<'a>> {
+ type Lifted = &'tcx List<Ty<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<Ty<'tcx>>> {
+ -> Option<&'tcx List<Ty<'tcx>>> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<ExistentialPredicate<'a>> {
- type Lifted = &'tcx Slice<ExistentialPredicate<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<ExistentialPredicate<'a>> {
+ type Lifted = &'tcx List<ExistentialPredicate<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<ExistentialPredicate<'tcx>>> {
+ -> Option<&'tcx List<ExistentialPredicate<'tcx>>> {
if self.is_empty() {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Predicate<'a>> {
- type Lifted = &'tcx Slice<Predicate<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Predicate<'a>> {
+ type Lifted = &'tcx List<Predicate<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<Predicate<'tcx>>> {
+ -> Option<&'tcx List<Predicate<'tcx>>> {
if self.is_empty() {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<CanonicalVarInfo> {
- type Lifted = &'tcx Slice<CanonicalVarInfo>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<CanonicalVarInfo> {
+ type Lifted = &'tcx List<CanonicalVarInfo>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
for &Interned(t) in tcx.interners.type_.borrow().iter() {
let variant = match t.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
- ty::TyFloat(..) | ty::TyStr | ty::TyNever => continue,
- ty::TyError => /* unimportant */ continue,
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(..) | ty::Str | ty::Never => continue,
+ ty::Error => /* unimportant */ continue,
$(ty::$variant(..) => &mut $variant,)*
};
let region = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
pub fn print_debug_stats(self) {
sty_debug_print!(
self,
- TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr,
- TyGenerator, TyGeneratorWitness, TyDynamic, TyClosure, TyTuple,
- TyParam, TyInfer, TyProjection, TyAnon, TyForeign);
+ Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr,
+ Generator, GeneratorWitness, Dynamic, Closure, Tuple,
+ Param, Infer, Projection, Anon, Foreign);
println!("Substs interner: #{}", self.interners.substs.borrow().len());
println!("Region interner: #{}", self.interners.region.borrow().len());
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<TypeVariants<'lcx>> for Interned<'tcx, TyS<'tcx>> {
- fn borrow<'a>(&'a self) -> &'a TypeVariants<'lcx> {
+impl<'tcx: 'lcx, 'lcx> Borrow<TyKind<'lcx>> for Interned<'tcx, TyS<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a TyKind<'lcx> {
&self.0.sty
}
}
-// NB: An Interned<Slice<T>> compares and hashes as its elements.
-impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, Slice<T>> {
- fn eq(&self, other: &Interned<'tcx, Slice<T>>) -> bool {
+// NB: An Interned<List<T>> compares and hashes as its elements.
+impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, List<T>> {
+ fn eq(&self, other: &Interned<'tcx, List<T>>) -> bool {
self.0[..] == other.0[..]
}
}
-impl<'tcx, T: Eq> Eq for Interned<'tcx, Slice<T>> {}
+impl<'tcx, T: Eq> Eq for Interned<'tcx, List<T>> {}
-impl<'tcx, T: Hash> Hash for Interned<'tcx, Slice<T>> {
+impl<'tcx, T: Hash> Hash for Interned<'tcx, List<T>> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0[..].hash(s)
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, Slice<Ty<'tcx>>> {
+impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, List<Ty<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] {
&self.0[..]
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, Slice<CanonicalVarInfo>> {
+impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, List<CanonicalVarInfo>> {
fn borrow<'a>(&'a self) -> &'a [CanonicalVarInfo] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[ExistentialPredicate<'lcx>]>
- for Interned<'tcx, Slice<ExistentialPredicate<'tcx>>> {
+ for Interned<'tcx, List<ExistentialPredicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [ExistentialPredicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Predicate<'lcx>]>
- for Interned<'tcx, Slice<Predicate<'tcx>>> {
+ for Interned<'tcx, List<Predicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Predicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Clause<'lcx>]>
-for Interned<'tcx, Slice<Clause<'tcx>>> {
+for Interned<'tcx, List<Clause<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Clause<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Goal<'lcx>]>
-for Interned<'tcx, Slice<Goal<'tcx>>> {
+for Interned<'tcx, List<Goal<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Goal<'lcx>] {
&self.0[..]
}
($($field:ident: $method:ident($ty:ident)),+) => (
$(intern_method!( 'tcx, $field: $method(
&[$ty<'tcx>],
- |a, v| Slice::from_arena(a, v),
+ |a, v| List::from_arena(a, v),
Deref::deref,
- |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+
+ |xs: &[$ty]| xs.iter().any(keep_local)) -> List<$ty<'tcx>>);)+
)
}
'tcx,
canonical_var_infos: _intern_canonical_var_infos(
&[CanonicalVarInfo],
- |a, v| Slice::from_arena(a, v),
+ |a, v| List::from_arena(a, v),
Deref::deref,
|_xs: &[CanonicalVarInfo]| -> bool { false }
- ) -> Slice<CanonicalVarInfo>
+ ) -> List<CanonicalVarInfo>
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
let converted_sig = sig.map_bound(|s| {
let params_iter = match s.inputs()[0].sty {
- ty::TyTuple(params) => {
+ ty::Tuple(params) => {
params.into_iter().cloned()
}
_ => bug!(),
self.mk_fn_ptr(converted_sig)
}
- pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
+ pub fn mk_ty(&self, st: TyKind<'tcx>) -> Ty<'tcx> {
CtxtInterners::intern_ty(&self.interners, &self.global_interners, st)
}
}
pub fn mk_str(self) -> Ty<'tcx> {
- self.mk_ty(TyStr)
+ self.mk_ty(Str)
}
pub fn mk_static_str(self) -> Ty<'tcx> {
pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// take a copy of substs so that we own the vectors inside
- self.mk_ty(TyAdt(def, substs))
+ self.mk_ty(Adt(def, substs))
}
pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
- self.mk_ty(TyForeign(def_id))
+ self.mk_ty(Foreign(def_id))
}
pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
}
}
});
- self.mk_ty(TyAdt(adt_def, substs))
+ self.mk_ty(Adt(adt_def, substs))
}
pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyRawPtr(tm))
+ self.mk_ty(RawPtr(tm))
}
pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyRef(r, tm.ty, tm.mutbl))
+ self.mk_ty(Ref(r, tm.ty, tm.mutbl))
}
pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
}
pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
- self.mk_ty(TyArray(ty, ty::Const::from_usize(self, n)))
+ self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
}
pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TySlice(ty))
+ self.mk_ty(Slice(ty))
}
pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
- self.mk_ty(TyTuple(self.intern_type_list(ts)))
+ self.mk_ty(Tuple(self.intern_type_list(ts)))
}
pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
- iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts))))
+ iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(ts))))
}
pub fn mk_nil(self) -> Ty<'tcx> {
}
pub fn mk_bool(self) -> Ty<'tcx> {
- self.mk_ty(TyBool)
+ self.mk_ty(Bool)
}
pub fn mk_fn_def(self, def_id: DefId,
substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnDef(def_id, substs))
+ self.mk_ty(FnDef(def_id, substs))
}
pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnPtr(fty))
+ self.mk_ty(FnPtr(fty))
}
pub fn mk_dynamic(
self,
- obj: ty::Binder<&'tcx Slice<ExistentialPredicate<'tcx>>>,
+ obj: ty::Binder<&'tcx List<ExistentialPredicate<'tcx>>>,
reg: ty::Region<'tcx>
) -> Ty<'tcx> {
- self.mk_ty(TyDynamic(obj, reg))
+ self.mk_ty(Dynamic(obj, reg))
}
pub fn mk_projection(self,
item_def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> Ty<'tcx> {
- self.mk_ty(TyProjection(ProjectionTy {
+ self.mk_ty(Projection(ProjectionTy {
item_def_id,
substs,
}))
pub fn mk_closure(self, closure_id: DefId, closure_substs: ClosureSubsts<'tcx>)
-> Ty<'tcx> {
- self.mk_ty(TyClosure(closure_id, closure_substs))
+ self.mk_ty(Closure(closure_id, closure_substs))
}
pub fn mk_generator(self,
generator_substs: GeneratorSubsts<'tcx>,
movability: hir::GeneratorMovability)
-> Ty<'tcx> {
- self.mk_ty(TyGenerator(id, generator_substs, movability))
+ self.mk_ty(Generator(id, generator_substs, movability))
}
- pub fn mk_generator_witness(self, types: ty::Binder<&'tcx Slice<Ty<'tcx>>>) -> Ty<'tcx> {
- self.mk_ty(TyGeneratorWitness(types))
+ pub fn mk_generator_witness(self, types: ty::Binder<&'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
+ self.mk_ty(GeneratorWitness(types))
}
pub fn mk_var(self, v: TyVid) -> Ty<'tcx> {
}
pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> {
- self.mk_ty(TyInfer(it))
+ self.mk_ty(Infer(it))
}
pub fn mk_ty_param(self,
index: u32,
name: InternedString) -> Ty<'tcx> {
- self.mk_ty(TyParam(ParamTy { idx: index, name: name }))
+ self.mk_ty(Param(ParamTy { idx: index, name: name }))
}
pub fn mk_self_type(self) -> Ty<'tcx> {
}
pub fn mk_anon(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyAnon(def_id, substs))
+ self.mk_ty(Anon(def_id, substs))
}
pub fn intern_existential_predicates(self, eps: &[ExistentialPredicate<'tcx>])
- -> &'tcx Slice<ExistentialPredicate<'tcx>> {
+ -> &'tcx List<ExistentialPredicate<'tcx>> {
assert!(!eps.is_empty());
assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater));
self._intern_existential_predicates(eps)
}
pub fn intern_predicates(self, preds: &[Predicate<'tcx>])
- -> &'tcx Slice<Predicate<'tcx>> {
+ -> &'tcx List<Predicate<'tcx>> {
// FIXME consider asking the input slice to be sorted to avoid
// re-interning permutations, in which case that would be asserted
// here.
if preds.len() == 0 {
// The macro-generated method below asserts we don't intern an empty slice.
- Slice::empty()
+ List::empty()
} else {
self._intern_predicates(preds)
}
}
- pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx Slice<Ty<'tcx>> {
+ pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_type_list(ts)
}
}
- pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx Slice<Kind<'tcx>> {
+ pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx List<Kind<'tcx>> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_substs(ts)
}
pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'gcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self.global_tcx()._intern_canonical_var_infos(ts)
}
pub fn intern_clauses(self, ts: &[Clause<'tcx>]) -> Clauses<'tcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_clauses(ts)
}
pub fn intern_goals(self, ts: &[Goal<'tcx>]) -> Goals<'tcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_goals(ts)
}
}
pub fn mk_existential_predicates<I: InternAs<[ExistentialPredicate<'tcx>],
- &'tcx Slice<ExistentialPredicate<'tcx>>>>(self, iter: I)
+ &'tcx List<ExistentialPredicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_existential_predicates(xs))
}
pub fn mk_predicates<I: InternAs<[Predicate<'tcx>],
- &'tcx Slice<Predicate<'tcx>>>>(self, iter: I)
+ &'tcx List<Predicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_predicates(xs))
}
pub fn mk_type_list<I: InternAs<[Ty<'tcx>],
- &'tcx Slice<Ty<'tcx>>>>(self, iter: I) -> I::Output {
+ &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_type_list(xs))
}
pub fn mk_substs<I: InternAs<[Kind<'tcx>],
- &'tcx Slice<Kind<'tcx>>>>(self, iter: I) -> I::Output {
+ &'tcx List<Kind<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_substs(xs))
}
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
- ExistentialMismatch(ExpectedFound<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>),
+ ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
OldStyleLUB(Box<TypeError<'tcx>>),
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String {
match self.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(),
- ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
+ ty::Bool | ty::Char | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string(),
+ ty::Tuple(ref tys) if tys.is_empty() => self.to_string(),
- ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)),
- ty::TyForeign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)),
- ty::TyArray(_, n) => {
+ ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)),
+ ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)),
+ ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n),
None => "array".to_string(),
}
}
- ty::TySlice(_) => "slice".to_string(),
- ty::TyRawPtr(_) => "*-ptr".to_string(),
- ty::TyRef(region, ty, mutbl) => {
+ ty::Slice(_) => "slice".to_string(),
+ ty::RawPtr(_) => "*-ptr".to_string(),
+ ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
format!("&{}", tymut_string)
}
}
- ty::TyFnDef(..) => "fn item".to_string(),
- ty::TyFnPtr(_) => "fn pointer".to_string(),
- ty::TyDynamic(ref inner, ..) => {
+ ty::FnDef(..) => "fn item".to_string(),
+ ty::FnPtr(_) => "fn pointer".to_string(),
+ ty::Dynamic(ref inner, ..) => {
inner.principal().map_or_else(|| "trait".to_string(),
|p| format!("trait {}", tcx.item_path_str(p.def_id())))
}
- ty::TyClosure(..) => "closure".to_string(),
- ty::TyGenerator(..) => "generator".to_string(),
- ty::TyGeneratorWitness(..) => "generator witness".to_string(),
- ty::TyTuple(..) => "tuple".to_string(),
- ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(),
- ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(),
- ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(),
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) => "skolemized type".to_string(),
- ty::TyInfer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
- ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
- ty::TyProjection(_) => "associated type".to_string(),
- ty::TyParam(ref p) => {
+ ty::Closure(..) => "closure".to_string(),
+ ty::Generator(..) => "generator".to_string(),
+ ty::GeneratorWitness(..) => "generator witness".to_string(),
+ ty::Tuple(..) => "tuple".to_string(),
+ ty::Infer(ty::TyVar(_)) => "inferred type".to_string(),
+ ty::Infer(ty::IntVar(_)) => "integral variable".to_string(),
+ ty::Infer(ty::FloatVar(_)) => "floating-point variable".to_string(),
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) => "skolemized type".to_string(),
+ ty::Infer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
+ ty::Infer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
+ ty::Projection(_) => "associated type".to_string(),
+ ty::Param(ref p) => {
if p.is_self() {
"Self".to_string()
} else {
"type parameter".to_string()
}
}
- ty::TyAnon(..) => "anonymized type".to_string(),
- ty::TyError => "type error".to_string(),
+ ty::Anon(..) => "anonymized type".to_string(),
+ ty::Error => "type error".to_string(),
}
}
}
db.help("consider boxing your closure and/or using it as a trait object");
}
match (&values.found.sty, &values.expected.sty) { // Issue #53280
- (ty::TyInfer(ty::IntVar(_)), ty::TyFloat(_)) => {
+ (ty::Infer(ty::IntVar(_)), ty::Float(_)) => {
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
-> Option<SimplifiedType>
{
match ty.sty {
- ty::TyBool => Some(BoolSimplifiedType),
- ty::TyChar => Some(CharSimplifiedType),
- ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)),
- ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)),
- ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)),
- ty::TyAdt(def, _) => Some(AdtSimplifiedType(def.did)),
- ty::TyStr => Some(StrSimplifiedType),
- ty::TyArray(..) | ty::TySlice(_) => Some(ArraySimplifiedType),
- ty::TyRawPtr(_) => Some(PtrSimplifiedType),
- ty::TyDynamic(ref trait_info, ..) => {
+ ty::Bool => Some(BoolSimplifiedType),
+ ty::Char => Some(CharSimplifiedType),
+ ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
+ ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
+ ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
+ ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)),
+ ty::Str => Some(StrSimplifiedType),
+ ty::Array(..) | ty::Slice(_) => Some(ArraySimplifiedType),
+ ty::RawPtr(_) => Some(PtrSimplifiedType),
+ ty::Dynamic(ref trait_info, ..) => {
trait_info.principal().map(|p| TraitSimplifiedType(p.def_id()))
}
- ty::TyRef(_, ty, _) => {
+ ty::Ref(_, ty, _) => {
// since we introduce auto-refs during method lookup, we
// just treat &T and T as equivalent from the point of
// view of possibly unifying
simplify_type(tcx, ty, can_simplify_params)
}
- ty::TyFnDef(def_id, _) |
- ty::TyClosure(def_id, _) => {
+ ty::FnDef(def_id, _) |
+ ty::Closure(def_id, _) => {
Some(ClosureSimplifiedType(def_id))
}
- ty::TyGenerator(def_id, _, _) => {
+ ty::Generator(def_id, _, _) => {
Some(GeneratorSimplifiedType(def_id))
}
- ty::TyGeneratorWitness(ref tys) => {
+ ty::GeneratorWitness(ref tys) => {
Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len()))
}
- ty::TyNever => Some(NeverSimplifiedType),
- ty::TyTuple(ref tys) => {
+ ty::Never => Some(NeverSimplifiedType),
+ ty::Tuple(ref tys) => {
Some(TupleSimplifiedType(tys.len()))
}
- ty::TyFnPtr(ref f) => {
+ ty::FnPtr(ref f) => {
Some(FunctionSimplifiedType(f.skip_binder().inputs().len()))
}
- ty::TyProjection(_) | ty::TyParam(_) => {
+ ty::Projection(_) | ty::Param(_) => {
if can_simplify_params {
// In normalized types, projections don't unify with
// anything. when lazy normalization happens, this
None
}
}
- ty::TyAnon(def_id, _) => {
+ ty::Anon(def_id, _) => {
Some(AnonSimplifiedType(def_id))
}
- ty::TyForeign(def_id) => {
+ ty::Foreign(def_id) => {
Some(ForeignSimplifiedType(def_id))
}
- ty::TyInfer(_) | ty::TyError => None,
+ ty::Infer(_) | ty::Error => None,
}
}
}
}
- pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation {
+ pub fn for_sty(st: &ty::TyKind) -> FlagComputation {
let mut result = FlagComputation::new();
result.add_sty(st);
result
}
}
- fn add_sty(&mut self, st: &ty::TypeVariants) {
+ fn add_sty(&mut self, st: &ty::TyKind) {
match st {
- &ty::TyBool |
- &ty::TyChar |
- &ty::TyInt(_) |
- &ty::TyFloat(_) |
- &ty::TyUint(_) |
- &ty::TyNever |
- &ty::TyStr |
- &ty::TyForeign(..) => {
+ &ty::Bool |
+ &ty::Char |
+ &ty::Int(_) |
+ &ty::Float(_) |
+ &ty::Uint(_) |
+ &ty::Never |
+ &ty::Str |
+ &ty::Foreign(..) => {
}
- // You might think that we could just return TyError for
- // any type containing TyError as a component, and get
+ // You might think that we could just return Error for
+ // any type containing Error as a component, and get
// rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with
// the exception of function types that return bot).
// But doing so caused sporadic memory corruption, and
// neither I (tjc) nor nmatsakis could figure out why,
// so we're doing it this way.
- &ty::TyError => {
+ &ty::Error => {
self.add_flags(TypeFlags::HAS_TY_ERR)
}
- &ty::TyParam(ref p) => {
+ &ty::Param(ref p) => {
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
if p.is_self() {
self.add_flags(TypeFlags::HAS_SELF);
}
}
- &ty::TyGenerator(_, ref substs, _) => {
+ &ty::Generator(_, ref substs, _) => {
self.add_flags(TypeFlags::HAS_TY_CLOSURE);
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
self.add_substs(&substs.substs);
}
- &ty::TyGeneratorWitness(ref ts) => {
+ &ty::GeneratorWitness(ref ts) => {
let mut computation = FlagComputation::new();
computation.add_tys(&ts.skip_binder()[..]);
self.add_bound_computation(&computation);
}
- &ty::TyClosure(_, ref substs) => {
+ &ty::Closure(_, ref substs) => {
self.add_flags(TypeFlags::HAS_TY_CLOSURE);
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
self.add_substs(&substs.substs);
}
- &ty::TyInfer(infer) => {
+ &ty::Infer(infer) => {
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); // it might, right?
self.add_flags(TypeFlags::HAS_TY_INFER);
match infer {
}
}
- &ty::TyAdt(_, substs) => {
+ &ty::Adt(_, substs) => {
self.add_substs(substs);
}
- &ty::TyProjection(ref data) => {
+ &ty::Projection(ref data) => {
// currently we can't normalize projections that
// include bound regions, so track those separately.
if !data.has_escaping_regions() {
self.add_projection_ty(data);
}
- &ty::TyAnon(_, substs) => {
+ &ty::Anon(_, substs) => {
self.add_flags(TypeFlags::HAS_PROJECTION);
self.add_substs(substs);
}
- &ty::TyDynamic(ref obj, r) => {
+ &ty::Dynamic(ref obj, r) => {
let mut computation = FlagComputation::new();
for predicate in obj.skip_binder().iter() {
match *predicate {
self.add_region(r);
}
- &ty::TyArray(tt, len) => {
+ &ty::Array(tt, len) => {
self.add_ty(tt);
self.add_const(len);
}
- &ty::TySlice(tt) => {
+ &ty::Slice(tt) => {
self.add_ty(tt)
}
- &ty::TyRawPtr(ref m) => {
+ &ty::RawPtr(ref m) => {
self.add_ty(m.ty);
}
- &ty::TyRef(r, ty, _) => {
+ &ty::Ref(r, ty, _) => {
self.add_region(r);
self.add_ty(ty);
}
- &ty::TyTuple(ref ts) => {
+ &ty::Tuple(ref ts) => {
self.add_tys(&ts[..]);
}
- &ty::TyFnDef(_, substs) => {
+ &ty::FnDef(_, substs) => {
self.add_substs(substs);
}
- &ty::TyFnPtr(f) => {
+ &ty::FnPtr(f) => {
self.add_fn_sig(f);
}
}
// in the normalized form
if self.just_constrained {
match t.sty {
- ty::TyProjection(..) | ty::TyAnon(..) => { return false; }
+ ty::Projection(..) | ty::Anon(..) => { return false; }
_ => { }
}
}
// except according to those terms.
use std::mem;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
- for id in ret.root_ids.drain(..) {
+ for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else {
old_ret.push(id);
}
}
- ret.root_ids.extend(old_ret.drain(..));
+ ret.root_ids.extend(old_ret.drain());
for id in next_forest.root_ids {
if ret.contains(tcx, id) {
}
mem::swap(&mut next_ret, &mut ret.root_ids);
- next_ret.drain(..);
+ next_ret.drain();
}
ret
}
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
- for id in ret.root_ids.drain(..) {
+ for id in ret.root_ids.drain() {
if !next_forest.contains(tcx, id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
- next_ret.drain(..);
+ next_ret.drain();
}
ret
}
use ty::{AdtDef, VariantDef, FieldDef, Ty, TyS};
use ty::{DefId, Substs};
use ty::{AdtKind, Visibility};
-use ty::TypeVariants::*;
+use ty::TyKind::*;
pub use self::def_id_forest::DefIdForest;
tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest
{
match self.sty {
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
{
let substs_set = visited.entry(def.did).or_default();
if !substs_set.insert(substs) {
ret
},
- TyNever => DefIdForest::full(tcx),
- TyTuple(ref tys) => {
+ Never => DefIdForest::full(tcx),
+ Tuple(ref tys) => {
DefIdForest::union(tcx, tys.iter().map(|ty| {
ty.uninhabited_from(visited, tcx)
}))
},
- TyArray(ty, len) => {
+ Array(ty, len) => {
match len.assert_usize(tcx) {
// If the array is definitely non-empty, it's uninhabited if
// the type of its elements is uninhabited.
_ => DefIdForest::empty()
}
}
- TyRef(_, ty, _) => {
+ Ref(_, ty, _) => {
ty.uninhabited_from(visited, tcx)
}
);
let def = match item_type.sty {
- ty::TyFnDef(..) if {
+ ty::FnDef(..) if {
let f = item_type.fn_sig(tcx);
f.abi() == Abi::RustIntrinsic ||
f.abi() == Abi::PlatformIntrinsic
// impl on `Foo`, but fallback to `<Foo>::bar` if self-type is
// anything other than a simple path.
match self_ty.sty {
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
if substs.types().next().is_none() { // ignore regions
self.push_item_path(buffer, adt_def.did);
} else {
}
}
- ty::TyForeign(did) => self.push_item_path(buffer, did),
+ ty::Foreign(did) => self.push_item_path(buffer, did),
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyStr => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Str => {
buffer.push(&self_ty.to_string());
}
/// decisions and we may want to adjust it later.
pub fn characteristic_def_id_of_type(ty: Ty) -> Option<DefId> {
match ty.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
+ ty::Adt(adt_def, _) => Some(adt_def.did),
- ty::TyDynamic(data, ..) => data.principal().map(|p| p.def_id()),
+ ty::Dynamic(data, ..) => data.principal().map(|p| p.def_id()),
- ty::TyArray(subty, _) |
- ty::TySlice(subty) => characteristic_def_id_of_type(subty),
+ ty::Array(subty, _) |
+ ty::Slice(subty) => characteristic_def_id_of_type(subty),
- ty::TyRawPtr(mt) => characteristic_def_id_of_type(mt.ty),
+ ty::RawPtr(mt) => characteristic_def_id_of_type(mt.ty),
- ty::TyRef(_, ty, _) => characteristic_def_id_of_type(ty),
+ ty::Ref(_, ty, _) => characteristic_def_id_of_type(ty),
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.filter_map(|ty| characteristic_def_id_of_type(ty))
.next(),
- ty::TyFnDef(def_id, _) |
- ty::TyClosure(def_id, _) |
- ty::TyGenerator(def_id, _, _) |
- ty::TyForeign(def_id) => Some(def_id),
-
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyStr |
- ty::TyFnPtr(_) |
- ty::TyProjection(_) |
- ty::TyParam(_) |
- ty::TyAnon(..) |
- ty::TyInfer(_) |
- ty::TyError |
- ty::TyGeneratorWitness(..) |
- ty::TyNever |
- ty::TyFloat(_) => None,
+ ty::FnDef(def_id, _) |
+ ty::Closure(def_id, _) |
+ ty::Generator(def_id, _, _) |
+ ty::Foreign(def_id) => Some(def_id),
+
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Str |
+ ty::FnPtr(_) |
+ ty::Projection(_) |
+ ty::Param(_) |
+ ty::Anon(..) |
+ ty::Infer(_) |
+ ty::Error |
+ ty::GeneratorWitness(..) |
+ ty::Never |
+ ty::Float(_) => None,
}
}
Ok(match ty.sty {
// Basic scalars.
- ty::TyBool => {
+ ty::Bool => {
tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I8, false),
valid_range: 0..=1
}))
}
- ty::TyChar => {
+ ty::Char => {
tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I32, false),
valid_range: 0..=0x10FFFF
}))
}
- ty::TyInt(ity) => {
+ ty::Int(ity) => {
scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
}
- ty::TyUint(ity) => {
+ ty::Uint(ity) => {
scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
}
- ty::TyFloat(fty) => scalar(Float(fty)),
- ty::TyFnPtr(_) => {
+ ty::Float(fty) => scalar(Float(fty)),
+ ty::FnPtr(_) => {
let mut ptr = scalar_unit(Pointer);
ptr.valid_range = 1..=*ptr.valid_range.end();
tcx.intern_layout(LayoutDetails::scalar(self, ptr))
}
// The never type.
- ty::TyNever => {
+ ty::Never => {
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Union(0),
}
// Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let mut data_ptr = scalar_unit(Pointer);
if !ty.is_unsafe_ptr() {
data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
let unsized_part = tcx.struct_tail(pointee);
let metadata = match unsized_part.sty {
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
scalar_unit(Int(dl.ptr_sized_integer(), false))
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer);
vtable.valid_range = 1..=*vtable.valid_range.end();
vtable
}
// Arrays and slices.
- ty::TyArray(element, mut count) => {
+ ty::Array(element, mut count) => {
if count.has_projections() {
count = tcx.normalize_erasing_regions(param_env, count);
if count.has_projections() {
size
})
}
- ty::TySlice(element) => {
+ ty::Slice(element) => {
let element = self.layout_of(element)?;
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
size: Size::ZERO
})
}
- ty::TyStr => {
+ ty::Str => {
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Array {
}
// Odd unit types.
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
}
- ty::TyDynamic(..) | ty::TyForeign(..) => {
+ ty::Dynamic(..) | ty::Foreign(..) => {
let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
StructKind::AlwaysSized)?;
match unit.abi {
}
// Tuples, generators and closures.
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
let tys = substs.field_tys(def_id, tcx);
univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
let tys = substs.upvar_tys(def_id, tcx);
univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
let kind = if tys.len() == 0 {
StructKind::AlwaysSized
} else {
}
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd() => {
+ ty::Adt(def, ..) if def.repr.simd() => {
let element = self.layout_of(ty.simd_type(tcx))?;
let count = ty.simd_size(tcx) as u64;
assert!(count > 0);
}
// ADTs.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// Cache the field layouts.
let variants = def.variants.iter().map(|v| {
v.fields.iter().map(|field| {
}
// Types with no meaningful known layout.
- ty::TyProjection(_) | ty::TyAnon(..) => {
+ ty::Projection(_) | ty::Anon(..) => {
let normalized = tcx.normalize_erasing_regions(param_env, ty);
if ty == normalized {
return Err(LayoutError::Unknown(ty));
}
tcx.layout_raw(param_env.and(normalized))?
}
- ty::TyGeneratorWitness(..) | ty::TyInfer(_) => {
+ ty::GeneratorWitness(..) | ty::Infer(_) => {
bug!("LayoutDetails::compute: unexpected type `{}`", ty)
}
- ty::TyParam(_) | ty::TyError => {
+ ty::Param(_) | ty::Error => {
return Err(LayoutError::Unknown(ty));
}
})
};
let adt_def = match layout.ty.sty {
- ty::TyAdt(ref adt_def, _) => {
+ ty::Adt(ref adt_def, _) => {
debug!("print-type-size t: `{:?}` process adt", layout.ty);
adt_def
}
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
debug!("print-type-size t: `{:?}` record closure", layout.ty);
record(DataTypeKind::Closure, false, None, vec![]);
return;
};
match ty.sty {
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let non_zero = !ty.is_unsafe_ptr();
let tail = tcx.struct_tail(pointee);
match tail.sty {
- ty::TyParam(_) | ty::TyProjection(_) => {
+ ty::Param(_) | ty::Projection(_) => {
debug_assert!(tail.has_param_types() || tail.has_self_ty());
Ok(SizeSkeleton::Pointer {
non_zero,
}
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// Only newtypes and enums w/ nullable pointer optimization.
if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
return Err(err);
}
}
- ty::TyProjection(_) | ty::TyAnon(..) => {
+ ty::Projection(_) | ty::Anon(..) => {
let normalized = tcx.normalize_erasing_regions(param_env, ty);
if ty == normalized {
Err(err)
});
let fields = match this.ty.sty {
- ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
+ ty::Adt(def, _) => def.variants[variant_index].fields.len(),
_ => bug!()
};
let tcx = cx.tcx();
fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
let tcx = cx.tcx();
cx.layout_of(match this.ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyFnPtr(_) |
- ty::TyNever |
- ty::TyFnDef(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyForeign(..) |
- ty::TyDynamic(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::FnPtr(_) |
+ ty::Never |
+ ty::FnDef(..) |
+ ty::GeneratorWitness(..) |
+ ty::Foreign(..) |
+ ty::Dynamic(..) => {
bug!("TyLayout::field_type({:?}): not applicable", this)
}
// Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
assert!(i < this.fields.count());
// Reuse the fat *T type as its own thin pointer data field.
}
match tcx.struct_tail(pointee).sty {
- ty::TySlice(_) |
- ty::TyStr => tcx.types.usize,
- ty::TyDynamic(data, _) => {
+ ty::Slice(_) |
+ ty::Str => tcx.types.usize,
+ ty::Dynamic(data, _) => {
let trait_def_id = data.principal().unwrap().def_id();
let num_fns: u64 = crate::traits::supertrait_def_ids(tcx, trait_def_id)
.map(|trait_def_id| {
}
// Arrays and slices.
- ty::TyArray(element, _) |
- ty::TySlice(element) => element,
- ty::TyStr => tcx.types.u8,
+ ty::Array(element, _) |
+ ty::Slice(element) => element,
+ ty::Str => tcx.types.u8,
// Tuples, generators and closures.
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
substs.upvar_tys(def_id, tcx).nth(i).unwrap()
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
substs.field_tys(def_id, tcx).nth(i).unwrap()
}
- ty::TyTuple(tys) => tys[i],
+ ty::Tuple(tys) => tys[i],
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd() => {
+ ty::Adt(def, ..) if def.repr.simd() => {
this.ty.simd_type(tcx)
}
// ADTs.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
match this.variants {
Variants::Single { index } => {
def.variants[index].fields[i].ty(tcx, substs)
}
}
- ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
- ty::TyInfer(_) | ty::TyError => {
+ ty::Projection(_) | ty::Anon(..) | ty::Param(_) |
+ ty::Infer(_) | ty::Error => {
bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
}
})
// Locals variables which live across yields are stored
// in the generator type as fields. These may be uninitialized
// so we don't look for niches there.
- if let ty::TyGenerator(..) = layout.ty.sty {
+ if let ty::Generator(..) = layout.ty.sty {
return Ok(None);
}
use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString};
use syntax_pos::{DUMMY_SP, Span};
-use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter;
+use smallvec;
use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult,
HashStable};
pub use self::sty::{FnSig, GenSig, PolyFnSig, PolyGenSig};
pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate};
pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut};
-pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef};
+pub use self::sty::{TraitRef, TyKind, PolyTraitRef};
pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef};
pub use self::sty::{ExistentialProjection, PolyExistentialProjection, Const};
pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
pub use self::sty::BoundRegion::*;
pub use self::sty::InferTy::*;
pub use self::sty::RegionKind::*;
-pub use self::sty::TypeVariants::*;
+pub use self::sty::TyKind::*;
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
const HAS_FREE_LOCAL_NAMES = 1 << 10;
// Present if the type belongs in a local type context.
- // Only set for TyInfer other than Fresh.
+ // Only set for Infer other than Fresh.
const KEEP_IN_LOCAL_TCX = 1 << 11;
// Is there a projection that does not involve a bound region?
}
pub struct TyS<'tcx> {
- pub sty: TypeVariants<'tcx>,
+ pub sty: TyKind<'tcx>,
pub flags: TypeFlags,
/// This is a kind of confusing thing: it stores the smallest
impl<'tcx> TyS<'tcx> {
pub fn is_primitive_ty(&self) -> bool {
match self.sty {
- TypeVariants::TyBool |
- TypeVariants::TyChar |
- TypeVariants::TyInt(_) |
- TypeVariants::TyUint(_) |
- TypeVariants::TyFloat(_) |
- TypeVariants::TyInfer(InferTy::IntVar(_)) |
- TypeVariants::TyInfer(InferTy::FloatVar(_)) |
- TypeVariants::TyInfer(InferTy::FreshIntTy(_)) |
- TypeVariants::TyInfer(InferTy::FreshFloatTy(_)) => true,
- TypeVariants::TyRef(_, x, _) => x.is_primitive_ty(),
+ TyKind::Bool |
+ TyKind::Char |
+ TyKind::Int(_) |
+ TyKind::Uint(_) |
+ TyKind::Float(_) |
+ TyKind::Infer(InferTy::IntVar(_)) |
+ TyKind::Infer(InferTy::FloatVar(_)) |
+ TyKind::Infer(InferTy::FreshIntTy(_)) |
+ TyKind::Infer(InferTy::FreshFloatTy(_)) => true,
+ TyKind::Ref(_, x, _) => x.is_primitive_ty(),
_ => false,
}
}
pub fn is_suggestable(&self) -> bool {
match self.sty {
- TypeVariants::TyAnon(..) |
- TypeVariants::TyFnDef(..) |
- TypeVariants::TyFnPtr(..) |
- TypeVariants::TyDynamic(..) |
- TypeVariants::TyClosure(..) |
- TypeVariants::TyInfer(..) |
- TypeVariants::TyProjection(..) => false,
+ TyKind::Anon(..) |
+ TyKind::FnDef(..) |
+ TyKind::FnPtr(..) |
+ TyKind::Dynamic(..) |
+ TyKind::Closure(..) |
+ TyKind::Infer(..) |
+ TyKind::Projection(..) => false,
_ => true,
}
}
pub type CanonicalTy<'gcx> = Canonical<'gcx, Ty<'gcx>>;
extern {
- /// A dummy type used to force Slice to by unsized without requiring fat pointers
- type OpaqueSliceContents;
+ /// A dummy type used to force List to be unsized without requiring fat pointers
+ type OpaqueListContents;
}
/// A wrapper for slices with the additional invariant
/// the same contents can exist in the same context.
/// This means we can use pointer for both
/// equality comparisons and hashing.
+/// Note: the name `Slice` is already taken by the `TyKind::Slice` variant.
#[repr(C)]
-pub struct Slice<T> {
+pub struct List<T> {
len: usize,
data: [T; 0],
- opaque: OpaqueSliceContents,
+ opaque: OpaqueListContents,
}
-unsafe impl<T: Sync> Sync for Slice<T> {}
+unsafe impl<T: Sync> Sync for List<T> {}
-impl<T: Copy> Slice<T> {
+impl<T: Copy> List<T> {
#[inline]
- fn from_arena<'tcx>(arena: &'tcx SyncDroplessArena, slice: &[T]) -> &'tcx Slice<T> {
+ fn from_arena<'tcx>(arena: &'tcx SyncDroplessArena, slice: &[T]) -> &'tcx List<T> {
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
assert!(slice.len() != 0);
size,
cmp::max(mem::align_of::<T>(), mem::align_of::<usize>()));
unsafe {
- let result = &mut *(mem.as_mut_ptr() as *mut Slice<T>);
+ let result = &mut *(mem.as_mut_ptr() as *mut List<T>);
// Write the length
result.len = slice.len();
}
}
-impl<T: fmt::Debug> fmt::Debug for Slice<T> {
+impl<T: fmt::Debug> fmt::Debug for List<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
-impl<T: Encodable> Encodable for Slice<T> {
+impl<T: Encodable> Encodable for List<T> {
#[inline]
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
(**self).encode(s)
}
}
-impl<T> Ord for Slice<T> where T: Ord {
- fn cmp(&self, other: &Slice<T>) -> Ordering {
+impl<T> Ord for List<T> where T: Ord {
+ fn cmp(&self, other: &List<T>) -> Ordering {
if self == other { Ordering::Equal } else {
<[T] as Ord>::cmp(&**self, &**other)
}
}
}
-impl<T> PartialOrd for Slice<T> where T: PartialOrd {
- fn partial_cmp(&self, other: &Slice<T>) -> Option<Ordering> {
+impl<T> PartialOrd for List<T> where T: PartialOrd {
+ fn partial_cmp(&self, other: &List<T>) -> Option<Ordering> {
if self == other { Some(Ordering::Equal) } else {
<[T] as PartialOrd>::partial_cmp(&**self, &**other)
}
}
}
-impl<T: PartialEq> PartialEq for Slice<T> {
+impl<T: PartialEq> PartialEq for List<T> {
#[inline]
- fn eq(&self, other: &Slice<T>) -> bool {
+ fn eq(&self, other: &List<T>) -> bool {
ptr::eq(self, other)
}
}
-impl<T: Eq> Eq for Slice<T> {}
+impl<T: Eq> Eq for List<T> {}
-impl<T> Hash for Slice<T> {
+impl<T> Hash for List<T> {
#[inline]
fn hash<H: Hasher>(&self, s: &mut H) {
- (self as *const Slice<T>).hash(s)
+ (self as *const List<T>).hash(s)
}
}
-impl<T> Deref for Slice<T> {
+impl<T> Deref for List<T> {
type Target = [T];
#[inline(always)]
fn deref(&self) -> &[T] {
}
}
-impl<'a, T> IntoIterator for &'a Slice<T> {
+impl<'a, T> IntoIterator for &'a List<T> {
type Item = &'a T;
type IntoIter = <&'a [T] as IntoIterator>::IntoIter;
#[inline(always)]
}
}
-impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice<Ty<'tcx>> {}
+impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<Ty<'tcx>> {}
-impl<T> Slice<T> {
+impl<T> List<T> {
#[inline(always)]
- pub fn empty<'a>() -> &'a Slice<T> {
+ pub fn empty<'a>() -> &'a List<T> {
#[repr(align(64), C)]
struct EmptySlice([u8; 64]);
static EMPTY_SLICE: EmptySlice = EmptySlice([0; 64]);
assert!(mem::align_of::<T>() <= 64);
unsafe {
- &*(&EMPTY_SLICE as *const _ as *const Slice<T>)
+ &*(&EMPTY_SLICE as *const _ as *const List<T>)
}
}
}
}
}
+#[derive(Default)]
pub struct GenericParamCount {
pub lifetimes: usize,
pub types: usize,
// We could cache this as a property of `GenericParamCount`, but
// the aim is to refactor this away entirely eventually and the
// presence of this method will be a constant reminder.
- let mut own_counts = GenericParamCount {
- lifetimes: 0,
- types: 0,
- };
+ let mut own_counts: GenericParamCount = Default::default();
for param in &self.params {
match param.kind {
GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
- GenericParamDefKind::Type {..} => own_counts.types += 1,
+ GenericParamDefKind::Type { .. } => own_counts.types += 1,
};
}
pub fn requires_monomorphization(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool {
for param in &self.params {
match param.kind {
- GenericParamDefKind::Type {..} => return true,
+ GenericParamDefKind::Type { .. } => return true,
GenericParamDefKind::Lifetime => {}
}
}
/// Creates a universe index from the given integer. Not to be
/// used lightly lest you pick a bad value. But sometimes we
- /// convert universe indicies into integers and back for various
+ /// convert universe indices into integers and back for various
/// reasons.
pub fn from_u32(index: u32) -> Self {
UniverseIndex(index)
/// Obligations that the caller must satisfy. This is basically
/// the set of bounds on the in-scope type parameters, translated
/// into Obligations, and elaborated and normalized.
- pub caller_bounds: &'tcx Slice<ty::Predicate<'tcx>>,
+ pub caller_bounds: &'tcx List<ty::Predicate<'tcx>>,
/// Typically, this is `Reveal::UserFacing`, but during codegen we
/// want `Reveal::All` -- note that this is always paired with an
/// Trait`) are left hidden, so this is suitable for ordinary
/// type-checking.
pub fn empty() -> Self {
- Self::new(ty::Slice::empty(), Reveal::UserFacing)
+ Self::new(List::empty(), Reveal::UserFacing)
}
/// Construct a trait environment with no where clauses in scope
/// NB. If you want to have predicates in scope, use `ParamEnv::new`,
/// or invoke `param_env.with_reveal_all()`.
pub fn reveal_all() -> Self {
- Self::new(ty::Slice::empty(), Reveal::All)
+ Self::new(List::empty(), Reveal::All)
}
/// Construct a trait environment with the given set of predicates.
- pub fn new(caller_bounds: &'tcx ty::Slice<ty::Predicate<'tcx>>,
+ pub fn new(caller_bounds: &'tcx List<ty::Predicate<'tcx>>,
reveal: Reveal)
-> Self {
ty::ParamEnv { caller_bounds, reveal }
/// Returns this same environment but with no caller bounds.
pub fn without_caller_bounds(self) -> Self {
- ty::ParamEnv { caller_bounds: ty::Slice::empty(), ..self }
+ ty::ParamEnv { caller_bounds: List::empty(), ..self }
}
/// Creates a suitable environment in which to perform trait
ty: Ty<'tcx>)
-> Vec<Ty<'tcx>> {
let result = match ty.sty {
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) |
- TyArray(..) | TyClosure(..) | TyGenerator(..) | TyNever => {
+ Bool | Char | Int(..) | Uint(..) | Float(..) |
+ RawPtr(..) | Ref(..) | FnDef(..) | FnPtr(_) |
+ Array(..) | Closure(..) | Generator(..) | Never => {
vec![]
}
- TyStr |
- TyDynamic(..) |
- TySlice(_) |
- TyForeign(..) |
- TyError |
- TyGeneratorWitness(..) => {
+ Str |
+ Dynamic(..) |
+ Slice(_) |
+ Foreign(..) |
+ Error |
+ GeneratorWitness(..) => {
// these are never sized - return the target type
vec![ty]
}
- TyTuple(ref tys) => {
+ Tuple(ref tys) => {
match tys.last() {
None => vec![],
Some(ty) => self.sized_constraint_for_ty(tcx, ty)
}
}
- TyAdt(adt, substs) => {
+ Adt(adt, substs) => {
// recursive case
let adt_tys = adt.sized_constraint(tcx);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}",
.collect()
}
- TyProjection(..) | TyAnon(..) => {
+ Projection(..) | Anon(..) => {
// must calculate explicitly.
// FIXME: consider special-casing always-Sized projections
vec![ty]
}
- TyParam(..) => {
+ Param(..) => {
// perf hack: if there is a `T: Sized` bound, then
// we know that `T` is Sized and do not need to check
// it on the impl.
}
}
- TyInfer(..) => {
+ Infer(..) => {
bug!("unexpected type `{:?}` in sized_constraint_for_ty",
ty)
}
/// Iterator that walks the immediate children of `self`. Hence
/// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
/// (but not `i32`, like `walk`).
- pub fn walk_shallow(&'tcx self) -> AccIntoIter<walk::TypeWalkerArray<'tcx>> {
+ pub fn walk_shallow(&'tcx self) -> smallvec::IntoIter<walk::TypeWalkerArray<'tcx>> {
walk::walk_shallow(self)
}
/// - a type parameter or projection whose Sizedness can't be known
/// - a tuple of type parameters or projections, if there are multiple
/// such.
-/// - a TyError, if a type contained itself. The representability
+/// - an `Error`, if a type contained itself. The representability
/// check should catch this case.
fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
// in the `subtys` iterator (e.g., when encountering a
// projection).
match ty.sty {
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
for upvar_ty in substs.upvar_tys(def_id, *self) {
self.compute_components(upvar_ty, out);
}
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
// Same as the closure case
for upvar_ty in substs.upvar_tys(def_id, *self) {
self.compute_components(upvar_ty, out);
}
// All regions are bound inside a witness
- ty::TyGeneratorWitness(..) => (),
+ ty::GeneratorWitness(..) => (),
// OutlivesTypeParameterEnv -- the actual checking that `X:'a`
// is implied by the environment is done in regionck.
- ty::TyParam(p) => {
+ ty::Param(p) => {
out.push(Component::Param(p));
}
// trait-ref. Therefore, if we see any higher-ranke regions,
// we simply fallback to the most restrictive rule, which
// requires that `Pi: 'a` for all `i`.
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
if !data.has_escaping_regions() {
// best case: no escaping regions, so push the
// projection and skip the subtree (thus generating no
// We assume that inference variables are fully resolved.
// So, if we encounter an inference variable, just record
// the unresolved variable as a component.
- ty::TyInfer(infer_ty) => {
+ ty::Infer(infer_ty) => {
out.push(Component::UnresolvedInferenceVariable(infer_ty));
}
// the type and then visits the types that are lexically
// contained within. (The comments refer to relevant rules
// from RFC1214.)
- ty::TyBool | // OutlivesScalar
- ty::TyChar | // OutlivesScalar
- ty::TyInt(..) | // OutlivesScalar
- ty::TyUint(..) | // OutlivesScalar
- ty::TyFloat(..) | // OutlivesScalar
- ty::TyNever | // ...
- ty::TyAdt(..) | // OutlivesNominalType
- ty::TyAnon(..) | // OutlivesNominalType (ish)
- ty::TyForeign(..) | // OutlivesNominalType
- ty::TyStr | // OutlivesScalar (ish)
- ty::TyArray(..) | // ...
- ty::TySlice(..) | // ...
- ty::TyRawPtr(..) | // ...
- ty::TyRef(..) | // OutlivesReference
- ty::TyTuple(..) | // ...
- ty::TyFnDef(..) | // OutlivesFunction (*)
- ty::TyFnPtr(_) | // OutlivesFunction (*)
- ty::TyDynamic(..) | // OutlivesObject, OutlivesFragment (*)
- ty::TyError => {
+ ty::Bool | // OutlivesScalar
+ ty::Char | // OutlivesScalar
+ ty::Int(..) | // OutlivesScalar
+ ty::Uint(..) | // OutlivesScalar
+ ty::Float(..) | // OutlivesScalar
+ ty::Never | // ...
+ ty::Adt(..) | // OutlivesNominalType
+ ty::Anon(..) | // OutlivesNominalType (ish)
+ ty::Foreign(..) | // OutlivesNominalType
+ ty::Str | // OutlivesScalar (ish)
+ ty::Array(..) | // ...
+ ty::Slice(..) | // ...
+ ty::RawPtr(..) | // ...
+ ty::Ref(..) | // OutlivesReference
+ ty::Tuple(..) | // ...
+ ty::FnDef(..) | // OutlivesFunction (*)
+ ty::FnPtr(_) | // OutlivesFunction (*)
+ ty::Dynamic(..) | // OutlivesObject, OutlivesFragment (*)
+ ty::Error => {
// (*) Bare functions and traits are both binders. In the
// RFC, this means we would add the bound regions to the
// "bound regions list". In our representation, no such
}
}
-impl<'tcx> QueryDescription<'tcx> for queries::const_value_to_allocation<'tcx> {
+impl<'tcx> QueryDescription<'tcx> for queries::const_to_allocation<'tcx> {
fn describe(_tcx: TyCtxt, val: &'tcx ty::Const<'tcx>) -> String {
- format!("converting value `{:?}` to an allocation", val)
+ format!("converting constant `{:?}` to an allocation", val)
}
}
}
}
- // Visit the explict waiters which use condvars and are resumable
+ // Visit the explicit waiters which use condvars and are resumable
for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
if let Some(ref waiter_query) = waiter.query {
if visit(waiter.span, waiter_query.clone()).is_some() {
use util::common::{ErrorReported};
use util::profiling::ProfileCategory::*;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_target::spec::PanicStrategy;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
/// Maps DefId's that have an associated Mir to the result
/// of the MIR qualify_consts pass. The actual meaning of
/// the value isn't known except to the pass itself.
- [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc<IdxSetBuf<mir::Local>>),
+ [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc<IdxSet<mir::Local>>),
/// Fetch the MIR for a given def-id right after it's built - this includes
/// unreachable code.
[] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
-> ConstEvalResult<'tcx>,
- /// Converts a constant value to an constant allocation
- [] fn const_value_to_allocation: const_value_to_allocation(
+ /// Converts a constant value to a constant allocation
+ [] fn const_to_allocation: const_to_allocation(
&'tcx ty::Const<'tcx>
) -> &'tcx Allocation,
},
DepConstructor::EraseRegionsTy { ty }
}
-fn const_value_to_allocation<'tcx>(
+fn const_to_allocation<'tcx>(
val: &'tcx ty::Const<'tcx>,
) -> DepConstructor<'tcx> {
- DepConstructor::ConstValueToAllocation { val }
+ DepConstructor::ConstToAllocation { val }
}
fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> {
DepKind::FulfillObligation |
DepKind::VtableMethods |
DepKind::EraseRegionsTy |
- DepKind::ConstValueToAllocation |
+ DepKind::ConstToAllocation |
DepKind::NormalizeProjectionTy |
DepKind::NormalizeTyAfterErasingRegions |
DepKind::ImpliedOutlivesBounds |
}
#[derive(Debug, Clone)]
-struct GeneratorWitness<'tcx>(&'tcx ty::Slice<Ty<'tcx>>);
+struct GeneratorWitness<'tcx>(&'tcx ty::List<Ty<'tcx>>);
TupleStructTypeFoldableImpl! {
impl<'tcx> TypeFoldable<'tcx> for GeneratorWitness<'tcx> {
let b_sty = &b.sty;
debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty);
match (a_sty, b_sty) {
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) =>
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) =>
{
// The caller should handle these cases!
bug!("var types encountered in super_relate_tys")
}
- (&ty::TyError, _) | (_, &ty::TyError) =>
+ (&ty::Error, _) | (_, &ty::Error) =>
{
Ok(tcx.types.err)
}
- (&ty::TyNever, _) |
- (&ty::TyChar, _) |
- (&ty::TyBool, _) |
- (&ty::TyInt(_), _) |
- (&ty::TyUint(_), _) |
- (&ty::TyFloat(_), _) |
- (&ty::TyStr, _)
+ (&ty::Never, _) |
+ (&ty::Char, _) |
+ (&ty::Bool, _) |
+ (&ty::Int(_), _) |
+ (&ty::Uint(_), _) |
+ (&ty::Float(_), _) |
+ (&ty::Str, _)
if a == b =>
{
Ok(a)
}
- (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p))
+ (&ty::Param(ref a_p), &ty::Param(ref b_p))
if a_p.idx == b_p.idx =>
{
Ok(a)
}
- (&ty::TyAdt(a_def, a_substs), &ty::TyAdt(b_def, b_substs))
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
if a_def == b_def =>
{
let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?;
Ok(tcx.mk_adt(a_def, substs))
}
- (&ty::TyForeign(a_id), &ty::TyForeign(b_id))
+ (&ty::Foreign(a_id), &ty::Foreign(b_id))
if a_id == b_id =>
{
Ok(tcx.mk_foreign(a_id))
}
- (&ty::TyDynamic(ref a_obj, ref a_region), &ty::TyDynamic(ref b_obj, ref b_region)) => {
+ (&ty::Dynamic(ref a_obj, ref a_region), &ty::Dynamic(ref b_obj, ref b_region)) => {
let region_bound = relation.with_cause(Cause::ExistentialRegionBound,
|relation| {
relation.relate_with_variance(
Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
}
- (&ty::TyGenerator(a_id, a_substs, movability),
- &ty::TyGenerator(b_id, b_substs, _))
+ (&ty::Generator(a_id, a_substs, movability),
+ &ty::Generator(b_id, b_substs, _))
if a_id == b_id =>
{
- // All TyGenerator types with the same id represent
+ // All Generator types with the same id represent
// the (anonymous) type of the same generator expression. So
// all of their regions should be equated.
let substs = relation.relate(&a_substs, &b_substs)?;
Ok(tcx.mk_generator(a_id, substs, movability))
}
- (&ty::TyGeneratorWitness(a_types), &ty::TyGeneratorWitness(b_types)) =>
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) =>
{
// Wrap our types with a temporary GeneratorWitness struct
            // inside the binder so we can relate them
Ok(tcx.mk_generator_witness(types))
}
- (&ty::TyClosure(a_id, a_substs),
- &ty::TyClosure(b_id, b_substs))
+ (&ty::Closure(a_id, a_substs),
+ &ty::Closure(b_id, b_substs))
if a_id == b_id =>
{
- // All TyClosure types with the same id represent
+ // All Closure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
let substs = relation.relate(&a_substs, &b_substs)?;
Ok(tcx.mk_closure(a_id, substs))
}
- (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) =>
+ (&ty::RawPtr(ref a_mt), &ty::RawPtr(ref b_mt)) =>
{
let mt = relation.relate(a_mt, b_mt)?;
Ok(tcx.mk_ptr(mt))
}
- (&ty::TyRef(a_r, a_ty, a_mutbl), &ty::TyRef(b_r, b_ty, b_mutbl)) =>
+ (&ty::Ref(a_r, a_ty, a_mutbl), &ty::Ref(b_r, b_ty, b_mutbl)) =>
{
let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?;
let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl };
Ok(tcx.mk_ref(r, mt))
}
- (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
+ (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) =>
{
let t = relation.relate(&a_t, &b_t)?;
assert_eq!(sz_a.ty, tcx.types.usize);
match (to_u64(sz_a), to_u64(sz_b)) {
(Ok(sz_a_u64), Ok(sz_b_u64)) => {
if sz_a_u64 == sz_b_u64 {
- Ok(tcx.mk_ty(ty::TyArray(t, sz_a)))
+ Ok(tcx.mk_ty(ty::Array(t, sz_a)))
} else {
Err(TypeError::FixedArraySize(
expected_found(relation, &sz_a_u64, &sz_b_u64)))
}
}
- // We reported an error or will ICE, so we can return TyError.
+ // We reported an error or will ICE, so we can return Error.
(Err(ErrorReported), _) | (_, Err(ErrorReported)) => {
Ok(tcx.types.err)
}
}
}
- (&ty::TySlice(a_t), &ty::TySlice(b_t)) =>
+ (&ty::Slice(a_t), &ty::Slice(b_t)) =>
{
let t = relation.relate(&a_t, &b_t)?;
Ok(tcx.mk_slice(t))
}
- (&ty::TyTuple(as_), &ty::TyTuple(bs)) =>
+ (&ty::Tuple(as_), &ty::Tuple(bs)) =>
{
if as_.len() == bs.len() {
Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)))?)
}
}
- (&ty::TyFnDef(a_def_id, a_substs), &ty::TyFnDef(b_def_id, b_substs))
+ (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
if a_def_id == b_def_id =>
{
let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
Ok(tcx.mk_fn_def(a_def_id, substs))
}
- (&ty::TyFnPtr(a_fty), &ty::TyFnPtr(b_fty)) =>
+ (&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) =>
{
let fty = relation.relate(&a_fty, &b_fty)?;
Ok(tcx.mk_fn_ptr(fty))
}
- (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) =>
+ (&ty::Projection(ref a_data), &ty::Projection(ref b_data)) =>
{
let projection_ty = relation.relate(a_data, b_data)?;
Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs))
}
- (&ty::TyAnon(a_def_id, a_substs), &ty::TyAnon(b_def_id, b_substs))
+ (&ty::Anon(a_def_id, a_substs), &ty::Anon(b_def_id, b_substs))
if a_def_id == b_def_id =>
{
let substs = relate_substs(relation, None, a_substs, b_substs)?;
}
}
-impl<'tcx> Relate<'tcx> for &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>> {
+impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
fn relate<'a, 'gcx, R>(relation: &mut R,
a: &Self,
b: &Self)
HeapAllocZeroBytes => HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(n) => HeapAllocNonPowerOfTwoAlignment(n),
Unreachable => Unreachable,
- Panic => Panic,
+ Panic { ref msg, ref file, line, col } => Panic {
+ msg: msg.clone(),
+ file: file.clone(),
+ line, col,
+ },
ReadFromReturnPointer => ReadFromReturnPointer,
PathNotFound(ref v) => PathNotFound(v.clone()),
UnimplementedTraitSelection => UnimplementedTraitSelection,
impl<'tcx> TypeFoldable<'tcx> for ty::ParamEnv<'tcx> { reveal, caller_bounds }
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|p| p.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_existential_predicates(&v)
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<Ty<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|t| t.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_type_list(&v)
impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let sty = match self.sty {
- ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)),
- ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz.fold_with(folder)),
- ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)),
- ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)),
- ty::TyDynamic(ref trait_ty, ref region) =>
- ty::TyDynamic(trait_ty.fold_with(folder), region.fold_with(folder)),
- ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)),
- ty::TyFnDef(def_id, substs) => {
- ty::TyFnDef(def_id, substs.fold_with(folder))
+ ty::RawPtr(tm) => ty::RawPtr(tm.fold_with(folder)),
+ ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)),
+ ty::Slice(typ) => ty::Slice(typ.fold_with(folder)),
+ ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)),
+ ty::Dynamic(ref trait_ty, ref region) =>
+ ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)),
+ ty::Tuple(ts) => ty::Tuple(ts.fold_with(folder)),
+ ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs.fold_with(folder))
}
- ty::TyFnPtr(f) => ty::TyFnPtr(f.fold_with(folder)),
- ty::TyRef(ref r, ty, mutbl) => {
- ty::TyRef(r.fold_with(folder), ty.fold_with(folder), mutbl)
+ ty::FnPtr(f) => ty::FnPtr(f.fold_with(folder)),
+ ty::Ref(ref r, ty, mutbl) => {
+ ty::Ref(r.fold_with(folder), ty.fold_with(folder), mutbl)
}
- ty::TyGenerator(did, substs, movability) => {
- ty::TyGenerator(
+ ty::Generator(did, substs, movability) => {
+ ty::Generator(
did,
substs.fold_with(folder),
movability)
}
- ty::TyGeneratorWitness(types) => ty::TyGeneratorWitness(types.fold_with(folder)),
- ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)),
- ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)),
- ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)),
- ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
- ty::TyParam(..) | ty::TyNever | ty::TyForeign(..) => return self
+ ty::GeneratorWitness(types) => ty::GeneratorWitness(types.fold_with(folder)),
+ ty::Closure(did, substs) => ty::Closure(did, substs.fold_with(folder)),
+ ty::Projection(ref data) => ty::Projection(data.fold_with(folder)),
+ ty::Anon(did, substs) => ty::Anon(did, substs.fold_with(folder)),
+ ty::Bool | ty::Char | ty::Str | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
+ ty::Param(..) | ty::Never | ty::Foreign(..) => return self
};
if self.sty == sty {
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match self.sty {
- ty::TyRawPtr(ref tm) => tm.visit_with(visitor),
- ty::TyArray(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
- ty::TySlice(typ) => typ.visit_with(visitor),
- ty::TyAdt(_, substs) => substs.visit_with(visitor),
- ty::TyDynamic(ref trait_ty, ref reg) =>
+ ty::RawPtr(ref tm) => tm.visit_with(visitor),
+ ty::Array(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
+ ty::Slice(typ) => typ.visit_with(visitor),
+ ty::Adt(_, substs) => substs.visit_with(visitor),
+ ty::Dynamic(ref trait_ty, ref reg) =>
trait_ty.visit_with(visitor) || reg.visit_with(visitor),
- ty::TyTuple(ts) => ts.visit_with(visitor),
- ty::TyFnDef(_, substs) => substs.visit_with(visitor),
- ty::TyFnPtr(ref f) => f.visit_with(visitor),
- ty::TyRef(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
- ty::TyGenerator(_did, ref substs, _) => {
+ ty::Tuple(ts) => ts.visit_with(visitor),
+ ty::FnDef(_, substs) => substs.visit_with(visitor),
+ ty::FnPtr(ref f) => f.visit_with(visitor),
+ ty::Ref(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
+ ty::Generator(_did, ref substs, _) => {
substs.visit_with(visitor)
}
- ty::TyGeneratorWitness(ref types) => types.visit_with(visitor),
- ty::TyClosure(_did, ref substs) => substs.visit_with(visitor),
- ty::TyProjection(ref data) => data.visit_with(visitor),
- ty::TyAnon(_, ref substs) => substs.visit_with(visitor),
- ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
- ty::TyParam(..) | ty::TyNever | ty::TyForeign(..) => false,
+ ty::GeneratorWitness(ref types) => types.visit_with(visitor),
+ ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+ ty::Projection(ref data) => data.visit_with(visitor),
+ ty::Anon(_, ref substs) => substs.visit_with(visitor),
+ ty::Bool | ty::Char | ty::Str | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
+ ty::Param(..) | ty::Never | ty::Foreign(..) => false,
}
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<ty::Predicate<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|p| p.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_predicates(&v)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! This module contains TypeVariants and its major components
+//! This module contains TyKind and its major components
use hir::def_id::DefId;
use rustc_data_structures::indexed_vec::Idx;
use ty::subst::{Substs, Subst, Kind, UnpackedKind};
use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable};
-use ty::{Slice, TyS, ParamEnvAnd, ParamEnv};
+use ty::{List, TyS, ParamEnvAnd, ParamEnv};
use util::captures::Captures;
-use mir::interpret::{Scalar, Pointer, Value};
+use mir::interpret::{Scalar, Pointer};
use std::iter;
use std::cmp::Ordering;
use hir;
use self::InferTy::*;
-use self::TypeVariants::*;
+use self::TyKind::*;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct TypeAndMut<'tcx> {
/// NB: If you change this, you'll probably want to change the corresponding
/// AST structure in libsyntax/ast.rs as well.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub enum TypeVariants<'tcx> {
+pub enum TyKind<'tcx> {
/// The primitive boolean type. Written as `bool`.
- TyBool,
+ Bool,
/// The primitive character type; holds a Unicode scalar value
/// (a non-surrogate code point). Written as `char`.
- TyChar,
+ Char,
/// A primitive signed integer type. For example, `i32`.
- TyInt(ast::IntTy),
+ Int(ast::IntTy),
/// A primitive unsigned integer type. For example, `u32`.
- TyUint(ast::UintTy),
+ Uint(ast::UintTy),
/// A primitive floating-point type. For example, `f64`.
- TyFloat(ast::FloatTy),
+ Float(ast::FloatTy),
/// Structures, enumerations and unions.
///
- /// Substs here, possibly against intuition, *may* contain `TyParam`s.
+ /// Substs here, possibly against intuition, *may* contain `Param`s.
/// That is, even after substitution it is possible that there are type
- /// variables. This happens when the `TyAdt` corresponds to an ADT
+ /// variables. This happens when the `Adt` corresponds to an ADT
/// definition and not a concrete use of it.
- TyAdt(&'tcx AdtDef, &'tcx Substs<'tcx>),
+ Adt(&'tcx AdtDef, &'tcx Substs<'tcx>),
- TyForeign(DefId),
+ Foreign(DefId),
/// The pointee of a string slice. Written as `str`.
- TyStr,
+ Str,
/// An array with the given length. Written as `[T; n]`.
- TyArray(Ty<'tcx>, &'tcx ty::Const<'tcx>),
+ Array(Ty<'tcx>, &'tcx ty::Const<'tcx>),
/// The pointee of an array slice. Written as `[T]`.
- TySlice(Ty<'tcx>),
+ Slice(Ty<'tcx>),
/// A raw pointer. Written as `*mut T` or `*const T`
- TyRawPtr(TypeAndMut<'tcx>),
+ RawPtr(TypeAndMut<'tcx>),
/// A reference; a pointer with an associated lifetime. Written as
/// `&'a mut T` or `&'a T`.
- TyRef(Region<'tcx>, Ty<'tcx>, hir::Mutability),
+ Ref(Region<'tcx>, Ty<'tcx>, hir::Mutability),
/// The anonymous type of a function declaration/definition. Each
/// function has a unique type.
- TyFnDef(DefId, &'tcx Substs<'tcx>),
+ FnDef(DefId, &'tcx Substs<'tcx>),
/// A pointer to a function. Written as `fn() -> i32`.
- TyFnPtr(PolyFnSig<'tcx>),
+ FnPtr(PolyFnSig<'tcx>),
/// A trait, defined with `trait`.
- TyDynamic(Binder<&'tcx Slice<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),
+ Dynamic(Binder<&'tcx List<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),
/// The anonymous type of a closure. Used to represent the type of
/// `|a| a`.
- TyClosure(DefId, ClosureSubsts<'tcx>),
+ Closure(DefId, ClosureSubsts<'tcx>),
/// The anonymous type of a generator. Used to represent the type of
/// `|a| yield a`.
- TyGenerator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability),
+ Generator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability),
    /// A type representing the types stored inside a generator.
/// This should only appear in GeneratorInteriors.
- TyGeneratorWitness(Binder<&'tcx Slice<Ty<'tcx>>>),
+ GeneratorWitness(Binder<&'tcx List<Ty<'tcx>>>),
/// The never type `!`
- TyNever,
+ Never,
/// A tuple type. For example, `(i32, bool)`.
- TyTuple(&'tcx Slice<Ty<'tcx>>),
+ Tuple(&'tcx List<Ty<'tcx>>),
/// The projection of an associated type. For example,
/// `<T as Trait<..>>::N`.
- TyProjection(ProjectionTy<'tcx>),
+ Projection(ProjectionTy<'tcx>),
/// Anonymized (`impl Trait`) type found in a return type.
/// The DefId comes either from
/// * or the `existential type` declaration
/// The substitutions are for the generics of the function in question.
/// After typeck, the concrete type can be found in the `types` map.
- TyAnon(DefId, &'tcx Substs<'tcx>),
+ Anon(DefId, &'tcx Substs<'tcx>),
    /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`
- TyParam(ParamTy),
+ Param(ParamTy),
/// A type variable used during type-checking.
- TyInfer(InferTy),
+ Infer(InferTy),
/// A placeholder for a type which could not be computed; this is
/// propagated to avoid useless error messages.
- TyError,
+ Error,
}
/// A closure can be modeled as a struct that looks like:
/// If you have an inference context, use `infcx.closure_sig()`.
pub fn closure_sig(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> {
match self.closure_sig_ty(def_id, tcx).sty {
- ty::TyFnPtr(sig) => sig,
+ ty::FnPtr(sig) => sig,
ref t => bug!("closure_sig_ty is not a fn-ptr: {:?}", t),
}
}
}
}
-impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice<ExistentialPredicate<'tcx>> {}
+impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<ExistentialPredicate<'tcx>> {}
-impl<'tcx> Slice<ExistentialPredicate<'tcx>> {
+impl<'tcx> List<ExistentialPredicate<'tcx>> {
pub fn principal(&self) -> Option<ExistentialTraitRef<'tcx>> {
match self.get(0) {
Some(&ExistentialPredicate::Trait(tr)) => Some(tr),
}
}
-impl<'tcx> Binder<&'tcx Slice<ExistentialPredicate<'tcx>>> {
+impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
pub fn principal(&self) -> Option<PolyExistentialTraitRef<'tcx>> {
self.skip_binder().principal().map(Binder::bind)
}
/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct FnSig<'tcx> {
- pub inputs_and_output: &'tcx Slice<Ty<'tcx>>,
+ pub inputs_and_output: &'tcx List<Ty<'tcx>>,
pub variadic: bool,
pub unsafety: hir::Unsafety,
pub abi: abi::Abi,
pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
}
- pub fn inputs_and_output(&self) -> ty::Binder<&'tcx Slice<Ty<'tcx>>> {
+ pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List<Ty<'tcx>>> {
self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
}
pub fn output(&self) -> ty::Binder<Ty<'tcx>> {
/// the likes of `liberate_late_bound_regions`. The distinction exists
/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
///
-/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild"
+/// Unlike Param-s, bound regions are not supposed to exist "in the wild"
/// outside their binder, e.g. in types passed to type inference, and
/// should first be substituted (by skolemized regions, free regions,
/// or region variables).
impl<'a, 'gcx, 'tcx> TyS<'tcx> {
pub fn is_nil(&self) -> bool {
match self.sty {
- TyTuple(ref tys) => tys.is_empty(),
+ Tuple(ref tys) => tys.is_empty(),
_ => false,
}
}
pub fn is_never(&self) -> bool {
match self.sty {
- TyNever => true,
+ Never => true,
_ => false,
}
}
pub fn is_primitive(&self) -> bool {
match self.sty {
- TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true,
+ Bool | Char | Int(_) | Uint(_) | Float(_) => true,
_ => false,
}
}
pub fn is_ty_var(&self) -> bool {
match self.sty {
- TyInfer(TyVar(_)) => true,
+ Infer(TyVar(_)) => true,
_ => false,
}
}
pub fn is_ty_infer(&self) -> bool {
match self.sty {
- TyInfer(_) => true,
+ Infer(_) => true,
_ => false,
}
}
pub fn is_phantom_data(&self) -> bool {
- if let TyAdt(def, _) = self.sty {
+ if let Adt(def, _) = self.sty {
def.is_phantom_data()
} else {
false
}
}
- pub fn is_bool(&self) -> bool { self.sty == TyBool }
+ pub fn is_bool(&self) -> bool { self.sty == Bool }
pub fn is_param(&self, index: u32) -> bool {
match self.sty {
- ty::TyParam(ref data) => data.idx == index,
+ ty::Param(ref data) => data.idx == index,
_ => false,
}
}
pub fn is_self(&self) -> bool {
match self.sty {
- TyParam(ref p) => p.is_self(),
+ Param(ref p) => p.is_self(),
_ => false,
}
}
pub fn is_slice(&self) -> bool {
match self.sty {
- TyRawPtr(TypeAndMut { ty, .. }) | TyRef(_, ty, _) => match ty.sty {
- TySlice(_) | TyStr => true,
+ RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.sty {
+ Slice(_) | Str => true,
_ => false,
},
_ => false
#[inline]
pub fn is_simd(&self) -> bool {
match self.sty {
- TyAdt(def, _) => def.repr.simd(),
+ Adt(def, _) => def.repr.simd(),
_ => false,
}
}
pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
- TyArray(ty, _) | TySlice(ty) => ty,
- TyStr => tcx.mk_mach_uint(ast::UintTy::U8),
+ Array(ty, _) | Slice(ty) => ty,
+ Str => tcx.mk_mach_uint(ast::UintTy::U8),
_ => bug!("sequence_element_type called on non-sequence value: {}", self),
}
}
pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
def.non_enum_variant().fields[0].ty(tcx, substs)
}
_ => bug!("simd_type called on invalid type")
pub fn simd_size(&self, _cx: TyCtxt) -> usize {
match self.sty {
- TyAdt(def, _) => def.non_enum_variant().fields.len(),
+ Adt(def, _) => def.non_enum_variant().fields.len(),
_ => bug!("simd_size called on invalid type")
}
}
pub fn is_region_ptr(&self) -> bool {
match self.sty {
- TyRef(..) => true,
+ Ref(..) => true,
_ => false,
}
}
pub fn is_mutable_pointer(&self) -> bool {
match self.sty {
- TyRawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) |
- TyRef(_, _, hir::Mutability::MutMutable) => true,
+ RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) |
+ Ref(_, _, hir::Mutability::MutMutable) => true,
_ => false
}
}
pub fn is_unsafe_ptr(&self) -> bool {
match self.sty {
- TyRawPtr(_) => return true,
+ RawPtr(_) => return true,
_ => return false,
}
}
pub fn is_box(&self) -> bool {
match self.sty {
- TyAdt(def, _) => def.is_box(),
+ Adt(def, _) => def.is_box(),
_ => false,
}
}
/// panics if called on any type other than `Box<T>`
pub fn boxed_ty(&self) -> Ty<'tcx> {
match self.sty {
- TyAdt(def, substs) if def.is_box() => substs.type_at(0),
+ Adt(def, substs) if def.is_box() => substs.type_at(0),
_ => bug!("`boxed_ty` is called on non-box type {:?}", self),
}
}
/// A scalar type is one that denotes an atomic datum, with no sub-components.
- /// (A TyRawPtr is scalar because it represents a non-managed pointer, so its
+ /// (A RawPtr is scalar because it represents a non-managed pointer, so its
/// contents are abstract to rustc.)
pub fn is_scalar(&self) -> bool {
match self.sty {
- TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
- TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
- TyFnDef(..) | TyFnPtr(_) | TyRawPtr(_) => true,
+ Bool | Char | Int(_) | Float(_) | Uint(_) |
+ Infer(IntVar(_)) | Infer(FloatVar(_)) |
+ FnDef(..) | FnPtr(_) | RawPtr(_) => true,
_ => false
}
}
/// Returns true if this type is a floating point type and false otherwise.
pub fn is_floating_point(&self) -> bool {
match self.sty {
- TyFloat(_) |
- TyInfer(FloatVar(_)) => true,
+ Float(_) |
+ Infer(FloatVar(_)) => true,
_ => false,
}
}
pub fn is_trait(&self) -> bool {
match self.sty {
- TyDynamic(..) => true,
+ Dynamic(..) => true,
_ => false,
}
}
pub fn is_enum(&self) -> bool {
match self.sty {
- TyAdt(adt_def, _) => {
+ Adt(adt_def, _) => {
adt_def.is_enum()
}
_ => false,
pub fn is_closure(&self) -> bool {
match self.sty {
- TyClosure(..) => true,
+ Closure(..) => true,
_ => false,
}
}
pub fn is_generator(&self) -> bool {
match self.sty {
- TyGenerator(..) => true,
+ Generator(..) => true,
_ => false,
}
}
pub fn is_integral(&self) -> bool {
match self.sty {
- TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
+ Infer(IntVar(_)) | Int(_) | Uint(_) => true,
_ => false
}
}
pub fn is_fresh_ty(&self) -> bool {
match self.sty {
- TyInfer(FreshTy(_)) => true,
+ Infer(FreshTy(_)) => true,
_ => false,
}
}
pub fn is_fresh(&self) -> bool {
match self.sty {
- TyInfer(FreshTy(_)) => true,
- TyInfer(FreshIntTy(_)) => true,
- TyInfer(FreshFloatTy(_)) => true,
+ Infer(FreshTy(_)) => true,
+ Infer(FreshIntTy(_)) => true,
+ Infer(FreshFloatTy(_)) => true,
_ => false,
}
}
pub fn is_char(&self) -> bool {
match self.sty {
- TyChar => true,
+ Char => true,
_ => false,
}
}
pub fn is_fp(&self) -> bool {
match self.sty {
- TyInfer(FloatVar(_)) | TyFloat(_) => true,
+ Infer(FloatVar(_)) | Float(_) => true,
_ => false
}
}
pub fn is_signed(&self) -> bool {
match self.sty {
- TyInt(_) => true,
+ Int(_) => true,
_ => false,
}
}
pub fn is_machine(&self) -> bool {
match self.sty {
- TyInt(ast::IntTy::Isize) | TyUint(ast::UintTy::Usize) => false,
- TyInt(..) | TyUint(..) | TyFloat(..) => true,
+ Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => false,
+ Int(..) | Uint(..) | Float(..) => true,
_ => false,
}
}
pub fn has_concrete_skeleton(&self) -> bool {
match self.sty {
- TyParam(_) | TyInfer(_) | TyError => false,
+ Param(_) | Infer(_) | Error => false,
_ => true,
}
}
/// Some types---notably unsafe ptrs---can only be dereferenced explicitly.
pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
match self.sty {
- TyAdt(def, _) if def.is_box() => {
+ Adt(def, _) if def.is_box() => {
Some(TypeAndMut {
ty: self.boxed_ty(),
mutbl: hir::MutImmutable,
})
},
- TyRef(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }),
- TyRawPtr(mt) if explicit => Some(mt),
+ Ref(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }),
+ RawPtr(mt) if explicit => Some(mt),
_ => None,
}
}
/// Returns the type of `ty[i]`.
pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
match self.sty {
- TyArray(ty, _) | TySlice(ty) => Some(ty),
+ Array(ty, _) | Slice(ty) => Some(ty),
_ => None,
}
}
pub fn fn_sig(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PolyFnSig<'tcx> {
match self.sty {
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
tcx.fn_sig(def_id).subst(tcx, substs)
}
- TyFnPtr(f) => f,
+ FnPtr(f) => f,
_ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self)
}
}
pub fn is_fn(&self) -> bool {
match self.sty {
- TyFnDef(..) | TyFnPtr(_) => true,
+ FnDef(..) | FnPtr(_) => true,
_ => false,
}
}
pub fn is_impl_trait(&self) -> bool {
match self.sty {
- TyAnon(..) => true,
+ Anon(..) => true,
_ => false,
}
}
pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> {
match self.sty {
- TyAdt(adt, _) => Some(adt),
+ Adt(adt, _) => Some(adt),
_ => None,
}
}
/// ignores late-bound regions binders.
pub fn regions(&self) -> Vec<ty::Region<'tcx>> {
match self.sty {
- TyRef(region, _, _) => {
+ Ref(region, _, _) => {
vec![region]
}
- TyDynamic(ref obj, region) => {
+ Dynamic(ref obj, region) => {
let mut v = vec![region];
if let Some(p) = obj.principal() {
v.extend(p.skip_binder().substs.regions());
}
v
}
- TyAdt(_, substs) | TyAnon(_, substs) => {
+ Adt(_, substs) | Anon(_, substs) => {
substs.regions().collect()
}
- TyClosure(_, ClosureSubsts { ref substs }) |
- TyGenerator(_, GeneratorSubsts { ref substs }, _) => {
+ Closure(_, ClosureSubsts { ref substs }) |
+ Generator(_, GeneratorSubsts { ref substs }, _) => {
substs.regions().collect()
}
- TyProjection(ref data) => {
+ Projection(ref data) => {
data.substs.regions().collect()
}
- TyFnDef(..) |
- TyFnPtr(_) |
- TyGeneratorWitness(..) |
- TyBool |
- TyChar |
- TyInt(_) |
- TyUint(_) |
- TyFloat(_) |
- TyStr |
- TyArray(..) |
- TySlice(_) |
- TyRawPtr(_) |
- TyNever |
- TyTuple(..) |
- TyForeign(..) |
- TyParam(_) |
- TyInfer(_) |
- TyError => {
+ FnDef(..) |
+ FnPtr(_) |
+ GeneratorWitness(..) |
+ Bool |
+ Char |
+ Int(_) |
+ Uint(_) |
+ Float(_) |
+ Str |
+ Array(..) |
+ Slice(_) |
+ RawPtr(_) |
+ Never |
+ Tuple(..) |
+ Foreign(..) |
+ Param(_) |
+ Infer(_) |
+ Error => {
vec![]
}
}
/// is complete, that type variable will be unified.
pub fn to_opt_closure_kind(&self) -> Option<ty::ClosureKind> {
match self.sty {
- TyInt(int_ty) => match int_ty {
+ Int(int_ty) => match int_ty {
ast::IntTy::I8 => Some(ty::ClosureKind::Fn),
ast::IntTy::I16 => Some(ty::ClosureKind::FnMut),
ast::IntTy::I32 => Some(ty::ClosureKind::FnOnce),
_ => bug!("cannot convert type `{:?}` to a closure kind", self),
},
- TyInfer(_) => None,
+ Infer(_) => None,
- TyError => Some(ty::ClosureKind::Fn),
+ Error => Some(ty::ClosureKind::Fn),
_ => bug!("cannot convert type `{:?}` to a closure kind", self),
}
/// `false` means nothing -- could be sized, might not be.
pub fn is_trivially_sized(&self, tcx: TyCtxt<'_, '_, 'tcx>) -> bool {
match self.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
- ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) |
- ty::TyNever | ty::TyError =>
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) |
+ ty::Char | ty::Ref(..) | ty::Generator(..) |
+ ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) |
+ ty::Never | ty::Error =>
true,
- ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) =>
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) =>
false,
- ty::TyTuple(tys) =>
+ ty::Tuple(tys) =>
tys.iter().all(|ty| ty.is_trivially_sized(tcx)),
- ty::TyAdt(def, _substs) =>
+ ty::Adt(def, _substs) =>
def.sized_constraint(tcx).is_empty(),
- ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => false,
+ ty::Projection(_) | ty::Param(_) | ty::Anon(..) => false,
- ty::TyInfer(ty::TyVar(_)) => false,
+ ty::Infer(ty::TyVar(_)) => false,
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) =>
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) =>
bug!("is_trivially_sized applied to unexpected type: {:?}", self),
}
}
}
let ty = tcx.lift_to_global(&ty).unwrap();
let size = tcx.layout_of(ty).ok()?.size;
- self.val.to_bits(size)
+ self.val.try_to_bits(size)
}
#[inline]
pub fn to_ptr(&self) -> Option<Pointer> {
- self.val.to_ptr()
- }
-
- #[inline]
- pub fn to_byval_value(&self) -> Option<Value> {
- self.val.to_byval_value()
+ self.val.try_to_ptr()
}
#[inline]
assert_eq!(self.ty, ty.value);
let ty = tcx.lift_to_global(&ty).unwrap();
let size = tcx.layout_of(ty).ok()?.size;
- self.val.to_bits(size)
+ self.val.try_to_bits(size)
}
#[inline]
// Type substitutions.
use hir::def_id::DefId;
-use ty::{self, Lift, Slice, Ty, TyCtxt};
+use ty::{self, Lift, List, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use serialize::{self, Encodable, Encoder, Decodable, Decoder};
}
/// A substitution mapping generic parameters to new values.
-pub type Substs<'tcx> = Slice<Kind<'tcx>>;
+pub type Substs<'tcx> = List<Kind<'tcx>>;
impl<'a, 'gcx, 'tcx> Substs<'tcx> {
/// Creates a Substs that maps each generic parameter to itself.
mk_kind: &mut F)
where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx>
{
-
if let Some(def_id) = defs.parent {
let parent_defs = tcx.generics_of(def_id);
Substs::fill_item(substs, tcx, parent_defs, mk_kind);
self.ty_stack_depth += 1;
let t1 = match t.sty {
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.ty_for_param(p, t)
}
_ => {
use ty::{self, Ty, TyCtxt, GenericParamDefKind, TypeFoldable};
use ty::subst::{Substs, UnpackedKind};
use ty::query::TyCtxtAt;
-use ty::TypeVariants::*;
+use ty::TyKind::*;
use ty::layout::{Integer, IntegerExt};
use util::common::ErrorReported;
use middle::lang_items;
impl<'tcx> fmt::Display for Discr<'tcx> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.ty.sty {
- ty::TyInt(ity) => {
+ ty::Int(ity) => {
let bits = ty::tls::with(|tcx| {
Integer::from_attr(tcx, SignedInt(ity)).size().bits()
});
}
pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) {
let (int, signed) = match self.ty.sty {
- TyInt(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
- TyUint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
+ Int(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
+ Uint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
_ => bug!("non integer discriminant"),
};
let (adt, substs) = match self_type.sty {
// These types used to have a builtin impl.
// Now libcore provides that impl.
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyChar | ty::TyRawPtr(..) | ty::TyNever |
- ty::TyRef(_, _, hir::MutImmutable) => return Ok(()),
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::Char | ty::RawPtr(..) | ty::Never |
+ ty::Ref(_, _, hir::MutImmutable) => return Ok(()),
- ty::TyAdt(adt, substs) => (adt, substs),
+ ty::Adt(adt, substs) => (adt, substs),
_ => return Err(CopyImplementationError::NotAnAdt),
};
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
for field in def.all_fields() {
let field_ty = field.ty(self, substs);
- if let TyError = field_ty.sty {
+ if let Error = field_ty.sty {
return true;
}
}
pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
loop {
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if !def.is_struct() {
break;
}
}
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
if let Some((&last_ty, _)) = tys.split_last() {
ty = last_ty;
} else {
let (mut a, mut b) = (source, target);
loop {
match (&a.sty, &b.sty) {
- (&TyAdt(a_def, a_substs), &TyAdt(b_def, b_substs))
+ (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
if a_def == b_def && a_def.is_struct() => {
if let Some(f) = a_def.non_enum_variant().fields.last() {
a = f.ty(self, a_substs);
break;
}
},
- (&TyTuple(a_tys), &TyTuple(b_tys))
+ (&Tuple(a_tys), &Tuple(b_tys))
if a_tys.len() == b_tys.len() => {
if let Some(a_last) = a_tys.last() {
a = a_last;
// parameters marked as pure.
let impl_substs = match self.type_of(impl_def_id).sty {
- ty::TyAdt(def_, substs) if def_ == def => substs,
+ ty::Adt(def_, substs) if def_ == def => substs,
_ => bug!()
};
let item_substs = match self.type_of(def.did).sty {
- ty::TyAdt(def_, substs) if def_ == def => substs,
+ ty::Adt(def_, substs) if def_ == def => substs,
_ => bug!()
};
!impl_generics.region_param(ebr, self).pure_wrt_drop
}
UnpackedKind::Type(&ty::TyS {
- sty: ty::TypeVariants::TyParam(ref pt), ..
+ sty: ty::Param(ref pt), ..
}) => {
!impl_generics.type_param(pt, self).pure_wrt_drop
}
-> Representability
{
match ty.sty {
- TyTuple(ref ts) => {
+ Tuple(ref ts) => {
// Find non representable
fold_repr(ts.iter().map(|ty| {
is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
}
// Fixed-length vectors.
// FIXME(#11924) Behavior undecided for zero-length vectors.
- TyArray(ty, _) => {
+ Array(ty, _) => {
is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
}
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
// Find non representable fields with their spans
fold_repr(def.all_fields().map(|field| {
let ty = field.ty(tcx, substs);
}
}))
}
- TyClosure(..) => {
+ Closure(..) => {
// this check is run on type definitions, so we don't expect
// to see closure types
bug!("requires check invoked on inapplicable type: {:?}", ty)
fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
match ty.sty {
- TyAdt(ty_def, _) => {
+ Adt(ty_def, _) => {
ty_def == def
}
_ => false
fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
match (&a.sty, &b.sty) {
- (&TyAdt(did_a, substs_a), &TyAdt(did_b, substs_b)) => {
+ (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
if did_a != did_b {
return false;
}
ty: Ty<'tcx>) -> Representability
{
match ty.sty {
- TyAdt(def, _) => {
+ Adt(def, _) => {
{
// Iterate through stack of previously seen types.
let mut iter = seen.iter();
match ty.sty {
// Fast-path for primitive types
- ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
- ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | ty::TyGeneratorWitness(..) |
- ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyStr => false,
+ ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) |
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Char | ty::GeneratorWitness(..) |
+ ty::RawPtr(_) | ty::Ref(..) | ty::Str => false,
// Foreign types can never have destructors
- ty::TyForeign(..) => false,
+ ty::Foreign(..) => false,
// `ManuallyDrop` doesn't have a destructor regardless of field types.
- ty::TyAdt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false,
+ ty::Adt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false,
// Issue #22536: We first query type_moves_by_default. It sees a
// normalized version of the type, and therefore will definitely
// (see above), it is sound to treat it as having a destructor.
// User destructors are the only way to have concrete drop types.
- ty::TyAdt(def, _) if def.has_dtor(tcx) => true,
+ ty::Adt(def, _) if def.has_dtor(tcx) => true,
// Can refer to a type which may drop.
// FIXME(eddyb) check this against a ParamEnv.
- ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyParam(_) |
- ty::TyAnon(..) | ty::TyInfer(_) | ty::TyError => true,
+ ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) |
+ ty::Anon(..) | ty::Infer(_) | ty::Error => true,
// Structural recursion.
- ty::TyArray(ty, _) | ty::TySlice(ty) => needs_drop(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => needs_drop(ty),
- ty::TyClosure(def_id, ref substs) => substs.upvar_tys(def_id, tcx).any(needs_drop),
+ ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, tcx).any(needs_drop),
// Pessimistically assume that all generators will require destructors
// as we don't know if a destructor is a noop or not until after the MIR
// state transformation pass
- ty::TyGenerator(..) => true,
+ ty::Generator(..) => true,
- ty::TyTuple(ref tys) => tys.iter().cloned().any(needs_drop),
+ ty::Tuple(ref tys) => tys.iter().cloned().any(needs_drop),
// unions don't have destructors because of the child types,
// only if they manually implement `Drop` (handled above).
- ty::TyAdt(def, _) if def.is_union() => false,
+ ty::Adt(def, _) if def.is_union() => false,
- ty::TyAdt(def, substs) =>
+ ty::Adt(def, substs) =>
def.variants.iter().any(
|variant| variant.fields.iter().any(
|field| needs_drop(field.ty(tcx, substs)))),
match self_arg_ty.sty {
_ if is_self_ty(self_arg_ty) => ByValue,
- ty::TyRef(region, ty, mutbl) if is_self_ty(ty) => {
+ ty::Ref(region, ty, mutbl) if is_self_ty(ty) => {
ByReference(region, mutbl)
}
- ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => {
ByRawPointer(mutbl)
}
- ty::TyAdt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => {
+ ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => {
ByBox
}
_ => Other
use mir::interpret::ConstValue;
use ty::{self, Ty};
-use rustc_data_structures::small_vec::SmallVec;
-use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter;
+use smallvec::{self, SmallVec};
// The TypeWalker's stack is hot enough that it's worth going to some effort to
// avoid heap allocations.
impl<'tcx> TypeWalker<'tcx> {
pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> {
- TypeWalker { stack: SmallVec::one(ty), last_subtree: 1, }
+ TypeWalker { stack: smallvec![ty], last_subtree: 1, }
}
/// Skips the subtree of types corresponding to the last type
}
}
-pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> AccIntoIter<TypeWalkerArray<'tcx>> {
+pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> smallvec::IntoIter<TypeWalkerArray<'tcx>> {
let mut stack = SmallVec::new();
push_subtypes(&mut stack, ty);
stack.into_iter()
// types as they are written).
fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) {
match parent_ty.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
- ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyNever | ty::TyError |
- ty::TyForeign(..) => {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) |
+ ty::Str | ty::Infer(_) | ty::Param(_) | ty::Never | ty::Error |
+ ty::Foreign(..) => {
}
- ty::TyArray(ty, len) => {
+ ty::Array(ty, len) => {
push_const(stack, len);
stack.push(ty);
}
- ty::TySlice(ty) => {
+ ty::Slice(ty) => {
stack.push(ty);
}
- ty::TyRawPtr(ref mt) => {
+ ty::RawPtr(ref mt) => {
stack.push(mt.ty);
}
- ty::TyRef(_, ty, _) => {
+ ty::Ref(_, ty, _) => {
stack.push(ty);
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
stack.extend(data.substs.types().rev());
}
- ty::TyDynamic(ref obj, ..) => {
+ ty::Dynamic(ref obj, ..) => {
stack.extend(obj.iter().rev().flat_map(|predicate| {
let (substs, opt_ty) = match *predicate.skip_binder() {
ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
substs.types().rev().chain(opt_ty)
}));
}
- ty::TyAdt(_, substs) | ty::TyAnon(_, substs) => {
+ ty::Adt(_, substs) | ty::Anon(_, substs) => {
stack.extend(substs.types().rev());
}
- ty::TyClosure(_, ref substs) => {
+ ty::Closure(_, ref substs) => {
stack.extend(substs.substs.types().rev());
}
- ty::TyGenerator(_, ref substs, _) => {
+ ty::Generator(_, ref substs, _) => {
stack.extend(substs.substs.types().rev());
}
- ty::TyGeneratorWitness(ts) => {
+ ty::GeneratorWitness(ts) => {
stack.extend(ts.skip_binder().iter().cloned().rev());
}
- ty::TyTuple(ts) => {
+ ty::Tuple(ts) => {
stack.extend(ts.iter().cloned().rev());
}
- ty::TyFnDef(_, substs) => {
+ ty::FnDef(_, substs) => {
stack.extend(substs.types().rev());
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
stack.push(sig.skip_binder().output());
stack.extend(sig.skip_binder().inputs().iter().cloned().rev());
}
let param_env = self.param_env;
while let Some(ty) = subtys.next() {
match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyError |
- ty::TyStr |
- ty::TyGeneratorWitness(..) |
- ty::TyNever |
- ty::TyParam(_) |
- ty::TyForeign(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Error |
+ ty::Str |
+ ty::GeneratorWitness(..) |
+ ty::Never |
+ ty::Param(_) |
+ ty::Foreign(..) => {
// WfScalar, WfParameter, etc
}
- ty::TySlice(subty) => {
+ ty::Slice(subty) => {
self.require_sized(subty, traits::SliceOrArrayElem);
}
- ty::TyArray(subty, len) => {
+ ty::Array(subty, len) => {
self.require_sized(subty, traits::SliceOrArrayElem);
assert_eq!(len.ty, self.infcx.tcx.types.usize);
self.compute_const(len);
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
if let Some((_last, rest)) = tys.split_last() {
for elem in rest {
self.require_sized(elem, traits::TupleElem);
}
}
- ty::TyRawPtr(_) => {
+ ty::RawPtr(_) => {
// simple cases that are WF if their type args are WF
}
- ty::TyProjection(data) => {
+ ty::Projection(data) => {
subtys.skip_current_subtree(); // subtree handled by compute_projection
self.compute_projection(data);
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// WfNominalType
let obligations = self.nominal_obligations(def.did, substs);
self.out.extend(obligations);
}
- ty::TyRef(r, rty, _) => {
+ ty::Ref(r, rty, _) => {
// WfReference
if !r.has_escaping_regions() && !rty.has_escaping_regions() {
let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
}
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
// Walk ALL the types in the generator: this will
// include the upvar types as well as the yield
// type. Note that this is mildly distinct from
// generators don't take arguments.
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
// Only check the upvar types for WF, not the rest
// of the types within. This is needed because we
// capture the signature and it may not be WF
}
}
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
// let the loop iterate into the argument/return
// types appearing in the fn signature
}
- ty::TyAnon(did, substs) => {
+ ty::Anon(did, substs) => {
// all of the requirements on type parameters
// should've been checked by the instantiation
// of whatever returned this exact `impl Trait`.
}
}
- ty::TyDynamic(data, r) => {
+ ty::Dynamic(data, r) => {
// WfObject
//
// Here, we defer WF checking due to higher-ranked
// register a pending obligation and keep
// moving. (Goal is that an "inductive hypothesis"
// is satisfied to ensure termination.)
- ty::TyInfer(_) => {
+ ty::Infer(_) => {
let ty = self.infcx.shallow_resolve(ty);
- if let ty::TyInfer(_) = ty.sty { // not yet resolved...
+ if let ty::Infer(_) = ty.sty { // not yet resolved...
if ty == ty0 { // ...this is the type we started from! no progress.
return false;
}
} else {
// Yes, resolved, proceed with the
// result. Should never return false because
- // `ty` is not a TyInfer.
+ // `ty` is not an Infer.
assert!(self.compute(ty));
}
}
}
fn from_object_ty(&mut self, ty: Ty<'tcx>,
- data: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>,
+ data: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
region: ty::Region<'tcx>) {
// Imagine a type like this:
//
/// `ty::required_region_bounds`, see that for more information.
pub fn object_region_bounds<'a, 'gcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- existential_predicates: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>)
+ existential_predicates: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>)
-> Vec<ty::Region<'tcx>>
{
// Since we don't actually *know* the self type for an object,
let mb = n as f64 / 1_000_000.0;
format!("; rss: {}MB", mb.round() as usize)
}
- None => "".to_owned(),
+ None => String::new(),
};
println!("{}time: {}{}\t{}",
" ".repeat(indentation),
use middle::region::{self, BlockRemainder};
use ty::subst::{self, Subst};
use ty::{BrAnon, BrEnv, BrFresh, BrNamed};
-use ty::{TyBool, TyChar, TyAdt};
-use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr};
-use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple};
-use ty::{TyClosure, TyGenerator, TyGeneratorWitness, TyForeign, TyProjection, TyAnon};
-use ty::{TyDynamic, TyInt, TyUint, TyInfer};
+use ty::{Bool, Char, Adt};
+use ty::{Error, Str, Array, Slice, Float, FnDef, FnPtr};
+use ty::{Param, RawPtr, Ref, Never, Tuple};
+use ty::{Closure, Generator, GeneratorWitness, Foreign, Projection, Anon};
+use ty::{Dynamic, Int, Uint, Infer};
use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind};
use util::nodemap::FxHashSet;
let verbose = self.is_verbose;
let mut num_supplied_defaults = 0;
let mut has_self = false;
- let mut own_counts = GenericParamCount {
- lifetimes: 0,
- types: 0,
- };
+ let mut own_counts: GenericParamCount = Default::default();
let mut is_value_path = false;
let fn_trait_kind = ty::tls::with(|tcx| {
// Unfortunately, some kinds of items (e.g., closures) don't have
if !verbose && fn_trait_kind.is_some() && projections.len() == 1 {
let projection_ty = projections[0].ty;
- if let TyTuple(ref args) = substs.type_at(1).sty {
+ if let Tuple(ref args) = substs.type_at(1).sty {
return self.fn_sig(f, args, false, projection_ty);
}
}
}
define_print! {
- ('tcx) &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, (self, f, cx) {
+ ('tcx) &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, (self, f, cx) {
display {
// Generate the main trait ref, including associated types.
ty::tls::with(|tcx| {
}
define_print! {
- ('tcx) &'tcx ty::Slice<Ty<'tcx>>, (self, f, cx) {
+ ('tcx) &'tcx ty::List<Ty<'tcx>>, (self, f, cx) {
display {
write!(f, "{{")?;
let mut tys = self.iter();
define_print_multi! {
[
- ('tcx) ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>,
+ ('tcx) ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
('tcx) ty::Binder<ty::TraitRef<'tcx>>,
('tcx) ty::Binder<ty::FnSig<'tcx>>,
('tcx) ty::Binder<ty::TraitPredicate<'tcx>>,
}
define_print! {
- ('tcx) ty::TypeVariants<'tcx>, (self, f, cx) {
+ ('tcx) ty::TyKind<'tcx>, (self, f, cx) {
display {
match *self {
- TyBool => write!(f, "bool"),
- TyChar => write!(f, "char"),
- TyInt(t) => write!(f, "{}", t.ty_to_string()),
- TyUint(t) => write!(f, "{}", t.ty_to_string()),
- TyFloat(t) => write!(f, "{}", t.ty_to_string()),
- TyRawPtr(ref tm) => {
+ Bool => write!(f, "bool"),
+ Char => write!(f, "char"),
+ Int(t) => write!(f, "{}", t.ty_to_string()),
+ Uint(t) => write!(f, "{}", t.ty_to_string()),
+ Float(t) => write!(f, "{}", t.ty_to_string()),
+ RawPtr(ref tm) => {
write!(f, "*{} ", match tm.mutbl {
hir::MutMutable => "mut",
hir::MutImmutable => "const",
})?;
tm.ty.print(f, cx)
}
- TyRef(r, ty, mutbl) => {
+ Ref(r, ty, mutbl) => {
write!(f, "&")?;
let s = r.print_to_string(cx);
if s != "'_" {
}
ty::TypeAndMut { ty, mutbl }.print(f, cx)
}
- TyNever => write!(f, "!"),
- TyTuple(ref tys) => {
+ Never => write!(f, "!"),
+ Tuple(ref tys) => {
write!(f, "(")?;
let mut tys = tys.iter();
if let Some(&ty) = tys.next() {
}
write!(f, ")")
}
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
ty::tls::with(|tcx| {
let mut sig = tcx.fn_sig(def_id);
if let Some(substs) = tcx.lift(&substs) {
cx.parameterized(f, substs, def_id, &[])?;
write!(f, "}}")
}
- TyFnPtr(ref bare_fn) => {
+ FnPtr(ref bare_fn) => {
bare_fn.print(f, cx)
}
- TyInfer(infer_ty) => write!(f, "{}", infer_ty),
- TyError => write!(f, "[type error]"),
- TyParam(ref param_ty) => write!(f, "{}", param_ty),
- TyAdt(def, substs) => cx.parameterized(f, substs, def.did, &[]),
- TyDynamic(data, r) => {
+ Infer(infer_ty) => write!(f, "{}", infer_ty),
+ Error => write!(f, "[type error]"),
+ Param(ref param_ty) => write!(f, "{}", param_ty),
+ Adt(def, substs) => cx.parameterized(f, substs, def.did, &[]),
+ Dynamic(data, r) => {
let r = r.print_to_string(cx);
if !r.is_empty() {
write!(f, "(")?;
Ok(())
}
}
- TyForeign(def_id) => parameterized(f, subst::Substs::empty(), def_id, &[]),
- TyProjection(ref data) => data.print(f, cx),
- TyAnon(def_id, substs) => {
+ Foreign(def_id) => parameterized(f, subst::Substs::empty(), def_id, &[]),
+ Projection(ref data) => data.print(f, cx),
+ Anon(def_id, substs) => {
if cx.is_verbose {
- return write!(f, "TyAnon({:?}, {:?})", def_id, substs);
+ return write!(f, "Anon({:?}, {:?})", def_id, substs);
}
ty::tls::with(|tcx| {
Ok(())
})
}
- TyStr => write!(f, "str"),
- TyGenerator(did, substs, movability) => ty::tls::with(|tcx| {
+ Str => write!(f, "str"),
+ Generator(did, substs, movability) => ty::tls::with(|tcx| {
let upvar_tys = substs.upvar_tys(did, tcx);
let witness = substs.witness(did, tcx);
if movability == hir::GeneratorMovability::Movable {
print!(f, cx, write(" "), print(witness), write("]"))
}),
- TyGeneratorWitness(types) => {
+ GeneratorWitness(types) => {
ty::tls::with(|tcx| cx.in_binder(f, tcx, &types, tcx.lift(&types)))
}
- TyClosure(did, substs) => ty::tls::with(|tcx| {
+ Closure(did, substs) => ty::tls::with(|tcx| {
let upvar_tys = substs.upvar_tys(did, tcx);
write!(f, "[closure")?;
write!(f, "]")
}),
- TyArray(ty, sz) => {
+ Array(ty, sz) => {
print!(f, cx, write("["), print(ty), write("; "))?;
match sz.val {
ConstValue::Unevaluated(_def_id, _substs) => {
}
write!(f, "]")
}
- TySlice(ty) => {
+ Slice(ty) => {
print!(f, cx, write("["), print(ty), write("]"))
}
}
(format!("{:.2}",
(((hits as f32) / (total as f32)) * 100.0)), total.to_string())
} else {
- ("".into(), "".into())
+ (String::new(), String::new())
};
writeln!(
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
log = "0.4"
+smallvec = { version = "0.6.5", features = ["union"] }
_ => {
self.handler
.span_err(item.span, "allocators must be statics");
- return OneVector::one(item);
+ return smallvec![item];
}
}
if self.in_submod > 0 {
self.handler
.span_err(item.span, "`global_allocator` cannot be used in submodules");
- return OneVector::one(item);
+ return smallvec![item];
}
if self.found {
self.handler
.span_err(item.span, "cannot define more than one #[global_allocator]");
- return OneVector::one(item);
+ return smallvec![item];
}
self.found = true;
extern crate rustc_target;
extern crate syntax;
extern crate syntax_pos;
+#[macro_use]
+extern crate smallvec;
pub mod expand;
}
LpExtend(ref lp_base, _, LpInterior(_, InteriorField(_))) => {
match lp_base.to_type().sty {
- ty::TyAdt(def, _) if def.has_dtor(self.tcx()) => {
+ ty::Adt(def, _) if def.has_dtor(self.tcx()) => {
// In the case where the owner implements drop, then
// the path must be initialized to prevent a case of
// partial reinitialization
Categorization::Interior(ref b, mc::InteriorField(_)) |
Categorization::Interior(ref b, mc::InteriorElement(Kind::Pattern)) => {
match b.ty.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if def.has_dtor(bccx.tcx) {
Some(cmt.clone())
} else {
check_and_get_illegal_move_origin(bccx, b)
}
}
- ty::TySlice(..) => Some(cmt.clone()),
+ ty::Slice(..) => Some(cmt.clone()),
_ => {
check_and_get_illegal_move_origin(bccx, b)
}
use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin};
use syntax::ast;
use syntax_pos;
-use errors::DiagnosticBuilder;
+use errors::{DiagnosticBuilder, Applicability};
use borrowck::gather_loans::gather_moves::PatternSource;
pub struct MoveErrorCollector<'tcx> {
let initializer =
e.init.as_ref().expect("should have an initializer to get an error");
if let Ok(snippet) = bccx.tcx.sess.source_map().span_to_snippet(initializer.span) {
- err.span_suggestion(initializer.span,
- "consider using a reference instead",
- format!("&{}", snippet));
+ err.span_suggestion_with_applicability(
+ initializer.span,
+ "consider using a reference instead",
+ format!("&{}", snippet),
+ Applicability::MaybeIncorrect // using a reference may not be the right fix
+ );
}
}
_ => {
Categorization::Downcast(ref b, _) |
Categorization::Interior(ref b, mc::InteriorField(_)) => {
match b.ty.sty {
- ty::TyAdt(def, _) if def.has_dtor(bccx.tcx) => {
+ ty::Adt(def, _) if def.has_dtor(bccx.tcx) => {
bccx.cannot_move_out_of_interior_of_drop(
move_from.span, b.ty, Origin::Ast)
}
let result = self.restrict(&cmt_base);
// Borrowing one union field automatically borrows all its fields.
match base_ty.sty {
- ty::TyAdt(adt_def, _) if adt_def.is_union() => match result {
+ ty::Adt(adt_def, _) if adt_def.is_union() => match result {
RestrictionResult::Safe => RestrictionResult::Safe,
RestrictionResult::SafeIf(base_lp, mut base_vec) => {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
Some(nl.to_string()),
Origin::Ast);
let need_note = match lp.ty.sty {
- ty::TypeVariants::TyClosure(id, _) => {
+ ty::Closure(id, _) => {
let node_id = self.tcx.hir.as_local_node_id(id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
if let Some((span, name)) = self.tables.closure_kind_origins().get(hir_id) {
// all parent union fields, moves do not propagate upwards automatically.
let mut lp = orig_lp.clone();
while let LpExtend(ref base_lp, mutbl, lp_elem) = lp.clone().kind {
- if let (&ty::TyAdt(adt_def, _), LpInterior(opt_variant_id, interior))
+ if let (&ty::Adt(adt_def, _), LpInterior(opt_variant_id, interior))
= (&base_lp.ty.sty, lp_elem) {
if adt_def.is_union() {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
span: Span) {
// Assigning to one union field automatically assigns to all its fields.
if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind {
- if let ty::TyAdt(adt_def, _) = base_lp.ty.sty {
+ if let ty::Adt(adt_def, _) = base_lp.ty.sty {
if adt_def.is_union() {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
let field =
use rustc::lint::builtin::UNUSED_MUT;
use rustc::ty;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
+use errors::Applicability;
use std::slice;
use syntax::ptr::P;
hir_id,
span,
"variable does not need to be mutable")
- .span_suggestion_short(mut_span, "remove this `mut`", "".to_owned())
+ .span_suggestion_short_with_applicability(
+ mut_span,
+ "remove this `mut`",
+ String::new(),
+ Applicability::MachineApplicable)
.emit();
}
}
let gens_str = if gens.iter().any(|&u| u != 0) {
format!(" gen: {}", bits_to_string(gens))
} else {
- "".to_string()
+ String::new()
};
let action_kills = &self.action_kills[start .. end];
let action_kills_str = if action_kills.iter().any(|&u| u != 0) {
format!(" action_kill: {}", bits_to_string(action_kills))
} else {
- "".to_string()
+ String::new()
};
let scope_kills = &self.scope_kills[start .. end];
let scope_kills_str = if scope_kills.iter().any(|&u| u != 0) {
format!(" scope_kill: {}", bits_to_string(scope_kills))
} else {
- "".to_string()
+ String::new()
};
ps.synth_comment(
fn dataflow_for(&self, e: EntryOrExit, n: &Node<'a>) -> String {
let id = n.1.data.id();
debug!("dataflow_for({:?}, id={:?}) {:?}", e, id, self.variants);
- let mut sets = "".to_string();
+ let mut sets = String::new();
let mut seen_one = false;
for &variant in &self.variants {
if seen_one { sets.push_str(" "); } else { seen_one = true; }
assert!(!sig.variadic && extra_args.is_empty());
match sig.inputs().last().unwrap().sty {
- ty::TyTuple(ref tupled_arguments) => {
+ ty::Tuple(ref tupled_arguments) => {
inputs = &sig.inputs()[0..sig.inputs().len() - 1];
tupled_arguments
}
use syntax::attr;
pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default_output_for_target,
- invalid_output_for_target, build_link_meta, out_filename,
- check_file_is_writeable};
+ invalid_output_for_target, out_filename, check_file_is_writeable};
// The third parameter is for env vars, used on windows to set up the
// path for MSVC to find its DLLs, and gcc to find its bundled
use consts;
use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir};
use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind};
-use rustc::middle::cstore::{LinkMeta, EncodedMetadata};
+use rustc::middle::cstore::EncodedMetadata;
use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto};
use rustc::session::Session;
use rustc::util::nodemap::FxHashMap;
use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry};
use rustc_fs_util::{path2cstr, link_or_copy};
use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_data_structures::svh::Svh;
use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId};
use errors::emitter::{Emitter};
use syntax::attr;
/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone)]
pub struct CodegenContext {
- // Resouces needed when running LTO
+ // Resources needed when running LTO
pub time_passes: bool,
pub lto: Lto,
pub no_landing_pads: bool,
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
- This will likely cause errors in LLVM and shoud never happen.");
+ This will likely cause errors in LLVM and should never happen.");
}
}
}
pub fn start_async_codegen(tcx: TyCtxt,
time_graph: Option<TimeGraph>,
- link: LinkMeta,
metadata: EncodedMetadata,
coordinator_receive: Receiver<Box<dyn Any + Send>>,
total_cgus: usize)
-> OngoingCodegen {
let sess = tcx.sess;
let crate_name = tcx.crate_name(LOCAL_CRATE);
+ let crate_hash = tcx.crate_hash(LOCAL_CRATE);
let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins");
let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs,
"windows_subsystem");
OngoingCodegen {
crate_name,
- link,
+ crate_hash,
metadata,
windows_subsystem,
linker_info,
pub struct OngoingCodegen {
crate_name: Symbol,
- link: LinkMeta,
+ crate_hash: Svh,
metadata: EncodedMetadata,
windows_subsystem: Option<String>,
linker_info: LinkerInfo,
(CodegenResults {
crate_name: self.crate_name,
- link: self.link,
+ crate_hash: self.crate_hash,
metadata: self.metadata,
windows_subsystem: self.windows_subsystem,
linker_info: self.linker_info,
use super::ModuleKind;
use abi;
-use back::link;
use back::write::{self, OngoingCodegen};
use llvm::{self, TypeKind, get_param};
use metadata;
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::ty::query::Providers;
use rustc::dep_graph::{DepNode, DepConstructor};
-use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
+use rustc::middle::cstore::{self, LinkagePreference};
use rustc::middle::exported_symbols;
use rustc::util::common::{time, print_time_passes_entry};
use rustc::util::profiling::ProfileCategory;
op: hir::BinOpKind
) -> &'ll Value {
let signed = match t.sty {
- ty::TyFloat(_) => {
+ ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
},
- ty::TyUint(_) => false,
- ty::TyInt(_) => true,
+ ty::Uint(_) => false,
+ ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
};
) -> &'ll Value {
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
- (&ty::TyArray(_, len), &ty::TySlice(_)) => {
+ (&ty::Array(_, len), &ty::Slice(_)) => {
C_usize(cx, len.unwrap_usize(cx.tcx))
}
- (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
+ (&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
- (_, &ty::TyDynamic(ref data, ..)) => {
+ (_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target))
.field(cx, abi::FAT_PTR_EXTRA);
consts::ptrcast(meth::get_vtable(cx, source, data.principal()),
) -> (&'ll Value, &'ll Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
- (&ty::TyRef(_, a, _),
- &ty::TyRef(_, b, _)) |
- (&ty::TyRef(_, a, _),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (&ty::Ref(_, a, _),
+ &ty::Ref(_, b, _)) |
+ (&ty::Ref(_, a, _),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
+ (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx.type_is_sized(a));
let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx.type_is_sized(a));
let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = bx.cx.layout_of(src_ty);
OperandValue::Pair(base, info).store(bx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
- (&ty::TyRef(..), &ty::TyRef(..)) |
- (&ty::TyRef(..), &ty::TyRawPtr(..)) |
- (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
+ (&ty::Ref(..), &ty::Ref(..)) |
+ (&ty::Ref(..), &ty::RawPtr(..)) |
+ (&ty::RawPtr(..), &ty::RawPtr(..)) => {
coerce_ptr()
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
coerce_ptr()
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variants[0].fields.len() {
}
fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
- llvm_module: &ModuleLlvm,
- link_meta: &LinkMeta)
+ llvm_module: &ModuleLlvm)
-> EncodedMetadata {
use std::io::Write;
use flate2::Compression;
return EncodedMetadata::new();
}
- let metadata = tcx.encode_metadata(link_meta);
+ let metadata = tcx.encode_metadata();
if kind == MetadataKind::Uncompressed {
return metadata;
}
tcx.sess.fatal("this compiler's LLVM does not support PGO");
}
- let crate_hash = tcx.crate_hash(LOCAL_CRATE);
- let link_meta = link::build_link_meta(crate_hash);
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Codegen the metadata.
.to_string();
let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name);
let metadata = time(tcx.sess, "write metadata", || {
- write_metadata(tcx, &metadata_llvm_module, &link_meta)
+ write_metadata(tcx, &metadata_llvm_module)
});
tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen));
let ongoing_codegen = write::start_async_codegen(
tcx,
time_graph.clone(),
- link_meta,
metadata,
rx,
1);
let ongoing_codegen = write::start_async_codegen(
tcx,
time_graph.clone(),
- link_meta,
metadata,
rx,
codegen_units.len());
-> ty::PolyFnSig<'tcx>
{
match ty.sty {
- ty::TyFnDef(..) |
- // Shims currently have type TyFnPtr. Not sure this should remain.
- ty::TyFnPtr(_) => ty.fn_sig(cx.tcx),
- ty::TyClosure(def_id, substs) => {
+ ty::FnDef(..) |
+ // Shims currently have type FnPtr. Not sure this should remain.
+ ty::FnPtr(_) => ty.fn_sig(cx.tcx),
+ ty::Closure(def_id, substs) => {
let tcx = cx.tcx;
let sig = substs.closure_sig(def_id, tcx);
sig.abi
))
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let tcx = cx.tcx;
let sig = substs.poly_sig(def_id, cx.tcx);
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
let llty2 = match ty.sty {
- ty::TyRawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx),
+ ty::RawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx),
_ => {
if span.is_some() {
cx.sess().span_fatal(span.unwrap(), "must have type `*const T` or `*mut T`")
let tail = self.tcx.struct_tail(ty);
match tail.sty {
- ty::TyForeign(..) => false,
- ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
}
let (size, align) = cx.size_and_align_of(array_or_slice_type);
let upper_bound = match array_or_slice_type.sty {
- ty::TyArray(_, len) => {
+ ty::Array(_, len) => {
len.unwrap_usize(cx.tcx) as c_longlong
}
_ => -1
let signature_metadata: Vec<_> = iter::once(
// return type
match signature.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => None,
+ ty::Tuple(ref tys) if tys.is_empty() => None,
_ => Some(type_metadata(cx, signature.output(), span))
}
).chain(
// But it does not describe the trait's methods.
let containing_scope = match trait_type.sty {
- ty::TyDynamic(ref data, ..) => if let Some(principal) = data.principal() {
+ ty::Dynamic(ref data, ..) => if let Some(principal) = data.principal() {
let def_id = principal.def_id();
Some(get_namespace_for_item(cx, def_id))
} else {
let ptr_metadata = |ty: Ty<'tcx>| {
match ty.sty {
- ty::TySlice(typ) => {
+ ty::Slice(typ) => {
Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span))
}
- ty::TyStr => {
+ ty::Str => {
Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span))
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
Ok(MetadataCreationResult::new(
trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
false))
};
let MetadataCreationResult { metadata, already_stored_in_typemap } = match t.sty {
- ty::TyNever |
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) => {
+ ty::Never |
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
- ty::TyTuple(ref elements) if elements.is_empty() => {
+ ty::Tuple(ref elements) if elements.is_empty() => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
- ty::TyArray(typ, _) |
- ty::TySlice(typ) => {
+ ty::Array(typ, _) |
+ ty::Slice(typ) => {
fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
}
- ty::TyStr => {
+ ty::Str => {
fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span)
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
MetadataCreationResult::new(
trait_pointer_metadata(cx, t, None, unique_type_id),
false)
}
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
MetadataCreationResult::new(
foreign_type_metadata(cx, t, unique_type_id),
false)
}
- ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
- ty::TyRef(_, ty, _) => {
+ ty::RawPtr(ty::TypeAndMut{ty, ..}) |
+ ty::Ref(_, ty, _) => {
match ptr_metadata(ty) {
Ok(res) => res,
Err(metadata) => return metadata,
}
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
match ptr_metadata(t.boxed_ty()) {
Ok(res) => res,
Err(metadata) => return metadata,
}
}
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let fn_metadata = subroutine_type_metadata(cx,
unique_type_id,
t.fn_sig(cx.tcx),
MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx).collect();
prepare_tuple_metadata(cx,
t,
unique_type_id,
usage_site_span).finalize(cx)
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx).map(|t| {
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)
}).collect();
unique_type_id,
usage_site_span).finalize(cx)
}
- ty::TyAdt(def, ..) => match def.adt_kind() {
+ ty::Adt(def, ..) => match def.adt_kind() {
AdtKind::Struct => {
prepare_struct_metadata(cx,
t,
usage_site_span).finalize(cx)
}
},
- ty::TyTuple(ref elements) => {
+ ty::Tuple(ref elements) => {
prepare_tuple_metadata(cx,
t,
&elements[..],
debug!("basic_type_metadata: {:?}", t);
let (name, encoding) = match t.sty {
- ty::TyNever => ("!", DW_ATE_unsigned),
- ty::TyTuple(ref elements) if elements.is_empty() =>
+ ty::Never => ("!", DW_ATE_unsigned),
+ ty::Tuple(ref elements) if elements.is_empty() =>
("()", DW_ATE_unsigned),
- ty::TyBool => ("bool", DW_ATE_boolean),
- ty::TyChar => ("char", DW_ATE_unsigned_char),
- ty::TyInt(int_ty) => {
+ ty::Bool => ("bool", DW_ATE_boolean),
+ ty::Char => ("char", DW_ATE_unsigned_char),
+ ty::Int(int_ty) => {
(int_ty.ty_to_string(), DW_ATE_signed)
},
- ty::TyUint(uint_ty) => {
+ ty::Uint(uint_ty) => {
(uint_ty.ty_to_string(), DW_ATE_unsigned)
},
- ty::TyFloat(float_ty) => {
+ ty::Float(float_ty) => {
(float_ty.ty_to_string(), DW_ATE_float)
},
_ => bug!("debuginfo::basic_type_metadata - t is invalid type")
let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
let (struct_def_id, variant) = match struct_type.sty {
- ty::TyAdt(def, _) => (def.did, def.non_enum_variant()),
+ ty::Adt(def, _) => (def.did, def.non_enum_variant()),
_ => bug!("prepare_struct_metadata on a non-ADT")
};
let union_name = compute_debuginfo_type_name(cx, union_type, false);
let (union_def_id, variant) = match union_type.sty {
- ty::TyAdt(def, _) => (def.did, def.non_enum_variant()),
+ ty::Adt(def, _) => (def.did, def.non_enum_variant()),
_ => bug!("prepare_union_metadata on a non-ADT")
};
member_descriptions);
vec![
MemberDescription {
- name: "".to_string(),
+ name: String::new(),
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
variant_type_metadata,
member_descriptions);
MemberDescription {
- name: "".to_string(),
+ name: String::new(),
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: variant.size,
// If this is not a univariant enum, there is also the discriminant field.
let (discr_offset, discr_arg) = match discriminant_info {
RegularDiscriminant(_) => {
+ // We have the layout of an enum variant, we need the layout of the outer enum
let enum_layout = cx.layout_of(layout.ty);
(Some(enum_layout.fields.offset(0)),
Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty)))
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(match sig.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => None,
+ ty::Tuple(ref tys) if tys.is_empty() => None,
_ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP))
});
// already inaccurate due to ABI adjustments (see #42800).
signature.extend(inputs.iter().map(|&t| {
let t = match t.sty {
- ty::TyArray(ct, _)
+ ty::Array(ct, _)
if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => {
cx.tcx.mk_imm_ptr(ct)
}
}
if sig.abi == Abi::RustCall && !sig.inputs().is_empty() {
- if let ty::TyTuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
+ if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
signature.extend(
args.iter().map(|argument_type| {
Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP))
// Only "class" methods are generally understood by LLVM,
// so avoid methods on other types (e.g. `<*mut T>::null`).
match impl_self_ty.sty {
- ty::TyAdt(def, ..) if !def.is_box() => {
+ ty::Adt(def, ..) if !def.is_box() => {
Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP))
}
_ => None
let cpp_like_names = cx.sess().target.target.options.is_like_msvc;
match t.sty {
- ty::TyBool => output.push_str("bool"),
- ty::TyChar => output.push_str("char"),
- ty::TyStr => output.push_str("str"),
- ty::TyNever => output.push_str("!"),
- ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()),
- ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
- ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()),
- ty::TyForeign(def_id) => push_item_name(cx, def_id, qualified, output),
- ty::TyAdt(def, substs) => {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => output.push_str("!"),
+ ty::Int(int_ty) => output.push_str(int_ty.ty_to_string()),
+ ty::Uint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
+ ty::Float(float_ty) => output.push_str(float_ty.ty_to_string()),
+ ty::Foreign(def_id) => push_item_name(cx, def_id, qualified, output),
+ ty::Adt(def, substs) => {
push_item_name(cx, def.did, qualified, output);
push_type_params(cx, substs, output);
},
- ty::TyTuple(component_types) => {
+ ty::Tuple(component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
}
output.push(')');
},
- ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
if !cpp_like_names {
output.push('*');
}
output.push('*');
}
},
- ty::TyRef(_, inner_type, mutbl) => {
+ ty::Ref(_, inner_type, mutbl) => {
if !cpp_like_names {
output.push('&');
}
output.push('*');
}
},
- ty::TyArray(inner_type, len) => {
+ ty::Array(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len.unwrap_usize(cx.tcx)));
output.push(']');
},
- ty::TySlice(inner_type) => {
+ ty::Slice(inner_type) => {
if cpp_like_names {
output.push_str("slice<");
} else {
output.push(']');
}
},
- ty::TyDynamic(ref trait_data, ..) => {
+ ty::Dynamic(ref trait_data, ..) => {
if let Some(principal) = trait_data.principal() {
let principal = cx.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
push_type_params(cx, principal.substs, output);
}
},
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let sig = t.fn_sig(cx.tcx);
if sig.unsafety() == hir::Unsafety::Unsafe {
output.push_str("unsafe ");
push_debuginfo_type_name(cx, sig.output(), true, output);
}
},
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
output.push_str("closure");
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
output.push_str("generator");
}
- ty::TyError |
- ty::TyInfer(_) |
- ty::TyProjection(..) |
- ty::TyAnon(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyParam(_) => {
+ ty::Error |
+ ty::Infer(_) |
+ ty::Projection(..) |
+ ty::Anon(..) |
+ ty::GeneratorWitness(..) |
+ ty::Param(_) => {
bug!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t);
}
return (size, align);
}
match t.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
// load size/align from vtable
let vtable = info.unwrap();
(meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
let unit = t.sequence_element_type(bx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let size = bx.add(sized_size, unsized_size);
// Packed types ignore the alignment of their fields.
- if let ty::TyAdt(def, _) = t.sty {
+ if let ty::Adt(def, _) = t.sty {
if def.repr.packed() {
unsized_align = sized_align;
}
let tcx = cx.tcx;
let (def_id, substs) = match callee_ty.sty {
- ty::TyFnDef(def_id, substs) => (def_id, substs),
+ ty::FnDef(def_id, substs) => (def_id, substs),
_ => bug!("expected fn item type, found {}", callee_ty)
};
m_len, v_len
);
match m_elem_ty.sty {
- ty::TyInt(_) => {},
+ ty::Int(_) => {},
_ => {
return_error!("mask element type is `{}`, expected `i_`", m_elem_ty);
}
}
}
let ety = match in_elem.sty {
- ty::TyFloat(f) if f.bit_width() == 32 => {
+ ty::Float(f) if f.bit_width() == 32 => {
if in_len < 2 || in_len > 16 {
return_error!(
"unsupported floating-point vector `{}` with length `{}` \
}
"f32"
},
- ty::TyFloat(f) if f.bit_width() == 64 => {
+ ty::Float(f) if f.bit_width() == 64 => {
if in_len < 2 || in_len > 8 {
return_error!("unsupported floating-point vector `{}` with length `{}` \
out-of-range [2, 8]",
}
"f64"
},
- ty::TyFloat(f) => {
+ ty::Float(f) => {
return_error!("unsupported element type `{}` of floating-point vector `{}`",
f, in_ty);
},
fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
let p0s: String = "p0".repeat(no_pointers);
match elem_ty.sty {
- ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+ ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+ ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+ ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
_ => unreachable!(),
}
}
mut no_pointers: usize) -> &'ll Type {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match elem_ty.sty {
- ty::TyInt(v) => Type::int_from_ty(cx, v),
- ty::TyUint(v) => Type::uint_from_ty(cx, v),
- ty::TyFloat(v) => Type::float_from_ty(cx, v),
+ ty::Int(v) => Type::int_from_ty(cx, v),
+ ty::Uint(v) => Type::uint_from_ty(cx, v),
+ ty::Float(v) => Type::float_from_ty(cx, v),
_ => unreachable!(),
};
while no_pointers > 0 {
// This counts how many pointers
fn ptr_count(t: ty::Ty) -> usize {
match t.sty {
- ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: ty::Ty) -> ty::Ty {
match t.sty {
- ty::TyRawPtr(p) => non_ptr(p.ty),
+ ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
- ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
+ ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
non_ptr(arg_tys[1].simd_type(tcx))),
_ => {
require!(false, "expected element type `{}` of second argument `{}` \
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).sty {
- ty::TyInt(_) => (),
+ ty::Int(_) => (),
_ => {
require!(false, "expected element type `{}` of third argument `{}` \
to be a signed integer type",
// This counts how many pointers
fn ptr_count(t: ty::Ty) -> usize {
match t.sty {
- ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: ty::Ty) -> ty::Ty {
match t.sty {
- ty::TyRawPtr(p) => non_ptr(p.ty),
+ ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
- ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
+ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
=> (ptr_count(arg_tys[1].simd_type(tcx)),
non_ptr(arg_tys[1].simd_type(tcx))),
_ => {
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).sty {
- ty::TyInt(_) => (),
+ ty::Int(_) => (),
_ => {
require!(false, "expected element type `{}` of third argument `{}` \
to be a signed integer type",
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {
+ ty::Int(_) | ty::Uint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
Ok(bx.$integer_reduce(args[0].immediate()))
}
},
- ty::TyFloat(f) => {
+ ty::Float(f) => {
// ordered arithmetic reductions take an accumulator
let acc = if $ordered {
let acc = args[1].immediate();
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return match in_elem.sty {
- ty::TyInt(_i) => {
+ ty::Int(_i) => {
Ok(bx.$int_red(args[0].immediate(), true))
},
- ty::TyUint(_u) => {
+ ty::Uint(_u) => {
Ok(bx.$int_red(args[0].immediate(), false))
},
- ty::TyFloat(_f) => {
+ ty::Float(_f) => {
Ok(bx.$float_red(args[0].immediate()))
}
_ => {
args[0].immediate()
} else {
match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {},
+ ty::Int(_) | ty::Uint(_) => {},
_ => {
return_error!("unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty)
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {
+ ty::Int(_) | ty::Uint(_) => {
let r = bx.$red(input);
Ok(
if !$boolean {
let (in_style, in_width) = match in_elem.sty {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
- ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::TyFloat(f) => (Style::Float, f.bit_width()),
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
let (out_style, out_width) = match out_elem.sty {
- ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::TyFloat(f) => (Style::Float, f.bit_width()),
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
}
}
arith! {
- simd_add: TyUint, TyInt => add, TyFloat => fadd;
- simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
- simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
- simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
- simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
- simd_shl: TyUint, TyInt => shl;
- simd_shr: TyUint => lshr, TyInt => ashr;
- simd_and: TyUint, TyInt => and;
- simd_or: TyUint, TyInt => or;
- simd_xor: TyUint, TyInt => xor;
- simd_fmax: TyFloat => maxnum;
- simd_fmin: TyFloat => minnum;
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or;
+ simd_xor: Uint, Int => xor;
+ simd_fmax: Float => maxnum;
+ simd_fmin: Float => minnum;
}
span_bug!(span, "unknown SIMD intrinsic");
}
// stuffs.
fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
match ty.sty {
- ty::TyInt(t) => Some((match t {
+ ty::Int(t) => Some((match t {
ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64,
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I64 => 64,
ast::IntTy::I128 => 128,
}, true)),
- ty::TyUint(t) => Some((match t {
+ ty::Uint(t) => Some((match t {
ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64,
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
// Returns the width of a float TypeVariant
// Returns None if the type is not a float
-fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>) -> Option<u64> {
+fn float_type_width<'tcx>(sty: &ty::TyKind<'tcx>) -> Option<u64> {
match *sty {
- ty::TyFloat(t) => Some(t.bit_width() as u64),
+ ty::Float(t) => Some(t.bit_width() as u64),
_ => None,
}
}
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
+use rustc_data_structures::svh::Svh;
mod diagnostics;
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
- rustc_incremental::finalize_session_directory(sess, ongoing_codegen.link.crate_hash);
+ rustc_incremental::finalize_session_directory(sess, ongoing_codegen.crate_hash);
Ok(())
}
modules: Vec<CompiledModule>,
allocator_module: Option<CompiledModule>,
metadata_module: CompiledModule,
- link: rustc::middle::cstore::LinkMeta,
+ crate_hash: Svh,
metadata: rustc::middle::cstore::EncodedMetadata,
windows_subsystem: Option<String>,
linker_info: back::linker::LinkerInfo,
func: mir::Operand::Constant(ref c),
ref args, ..
} => match c.ty.sty {
- ty::TyFnDef(did, _) => Some((did, args)),
+ ty::FnDef(did, _) => Some((did, args)),
_ => None,
},
_ => None,
&args1[..]
};
let (drop_fn, fn_ty) = match ty.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let fn_ty = drop_fn.ty(bx.cx.tcx);
let sig = common::ty_fn_sig(bx.cx, fn_ty);
let sig = bx.tcx().normalize_erasing_late_bound_regions(
let callee = self.codegen_operand(&bx, func);
let (instance, mut llfn) = match callee.layout.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs) => {
(Some(ty::Instance::resolve(bx.cx.tcx,
ty::ParamEnv::reveal_all(),
def_id,
substs).unwrap()),
None)
}
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
(None, Some(callee.immediate()))
}
_ => bug!("{} is not callable", callee.layout.ty)
llargs.push(b);
return;
}
- _ => bug!("codegen_argument: {:?} invalid for pair arugment", op)
+ _ => bug!("codegen_argument: {:?} invalid for pair argument", op)
}
} else if arg.is_unsized_indirect() {
match op.val {
// except according to those terms.
use llvm;
-use rustc::mir::interpret::ConstEvalErr;
-use rustc_mir::interpret::{read_target_uint, const_val_field};
+use rustc::mir::interpret::{ConstEvalErr, read_target_uint};
+use rustc_mir::interpret::{const_field};
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
.and_then(|c| {
let field_ty = c.ty.builtin_index().unwrap();
let fields = match c.ty.sty {
- ty::TyArray(_, n) => n.unwrap_usize(bx.tcx()),
+ ty::Array(_, n) => n.unwrap_usize(bx.tcx()),
ref other => bug!("invalid simd shuffle type: {}", other),
};
let values: Result<Vec<_>, Lrc<_>> = (0..fields).map(|field| {
- let field = const_val_field(
+ let field = const_field(
bx.tcx(),
ty::ParamEnv::reveal_all(),
self.instance,
let arg_ty = fx.monomorphize(&arg_decl.ty);
let tupled_arg_tys = match arg_ty.sty {
- ty::TyTuple(ref tys) => tys,
+ ty::Tuple(ref tys) => tys,
_ => bug!("spread argument isn't a tuple?!")
};
// Or is it the closure environment?
let (closure_layout, env_ref) = match arg.layout.ty.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => (bx.cx.layout_of(ty), true),
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true),
_ => (arg.layout, false)
};
let (def_id, upvar_substs) = match closure_layout.ty.sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
- ty::TyGenerator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
_ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
};
let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);
// a pointer in an alloca for debuginfo atm.
let mut ops = if env_ref || env_alloca { &ops[..] } else { &ops[1..] };
- let ty = if let (true, &ty::TyRef(_, ty, _)) = (decl.by_ref, &ty.sty) {
+ let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) {
ty
} else {
ops = &ops[..ops.len() - 1];
return simple();
}
_ if !field.is_unsized() => return simple(),
- ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
- ty::TyAdt(def, _) => {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+ ty::Adt(def, _) => {
if def.repr.packed() {
// FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments.
let val = match *kind {
mir::CastKind::ReifyFnPointer => {
match operand.layout.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs) => {
if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
bug!("reifying a fn ptr that requires \
const arguments");
}
mir::CastKind::ClosureFnPointer => {
match operand.layout.ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(callee::get_fn(bx.cx, instance))
// because codegen_place() panics if Local is operand.
if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
- if let ty::TyArray(_, n) = op.layout.ty.sty {
+ if let ty::Array(_, n) = op.layout.ty.sty {
let n = n.unwrap_usize(bx.cx.tcx);
return common::C_usize(bx.cx, n);
}
fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
- use rustc::ty::{TyInt, TyUint};
+ use rustc::ty::{Int, Uint};
let tcx = bx.tcx();
let new_sty = match ty.sty {
- TyInt(Isize) => TyInt(tcx.sess.target.isize_ty),
- TyUint(Usize) => TyUint(tcx.sess.target.usize_ty),
- ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
+ Int(Isize) => Int(tcx.sess.target.isize_ty),
+ Uint(Usize) => Uint(tcx.sess.target.usize_ty),
+ ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for op applied to non-int type")
};
let name = match oop {
OverflowOp::Add => match new_sty {
- TyInt(I8) => "llvm.sadd.with.overflow.i8",
- TyInt(I16) => "llvm.sadd.with.overflow.i16",
- TyInt(I32) => "llvm.sadd.with.overflow.i32",
- TyInt(I64) => "llvm.sadd.with.overflow.i64",
- TyInt(I128) => "llvm.sadd.with.overflow.i128",
-
- TyUint(U8) => "llvm.uadd.with.overflow.i8",
- TyUint(U16) => "llvm.uadd.with.overflow.i16",
- TyUint(U32) => "llvm.uadd.with.overflow.i32",
- TyUint(U64) => "llvm.uadd.with.overflow.i64",
- TyUint(U128) => "llvm.uadd.with.overflow.i128",
+ Int(I8) => "llvm.sadd.with.overflow.i8",
+ Int(I16) => "llvm.sadd.with.overflow.i16",
+ Int(I32) => "llvm.sadd.with.overflow.i32",
+ Int(I64) => "llvm.sadd.with.overflow.i64",
+ Int(I128) => "llvm.sadd.with.overflow.i128",
+
+ Uint(U8) => "llvm.uadd.with.overflow.i8",
+ Uint(U16) => "llvm.uadd.with.overflow.i16",
+ Uint(U32) => "llvm.uadd.with.overflow.i32",
+ Uint(U64) => "llvm.uadd.with.overflow.i64",
+ Uint(U128) => "llvm.uadd.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Sub => match new_sty {
- TyInt(I8) => "llvm.ssub.with.overflow.i8",
- TyInt(I16) => "llvm.ssub.with.overflow.i16",
- TyInt(I32) => "llvm.ssub.with.overflow.i32",
- TyInt(I64) => "llvm.ssub.with.overflow.i64",
- TyInt(I128) => "llvm.ssub.with.overflow.i128",
-
- TyUint(U8) => "llvm.usub.with.overflow.i8",
- TyUint(U16) => "llvm.usub.with.overflow.i16",
- TyUint(U32) => "llvm.usub.with.overflow.i32",
- TyUint(U64) => "llvm.usub.with.overflow.i64",
- TyUint(U128) => "llvm.usub.with.overflow.i128",
+ Int(I8) => "llvm.ssub.with.overflow.i8",
+ Int(I16) => "llvm.ssub.with.overflow.i16",
+ Int(I32) => "llvm.ssub.with.overflow.i32",
+ Int(I64) => "llvm.ssub.with.overflow.i64",
+ Int(I128) => "llvm.ssub.with.overflow.i128",
+
+ Uint(U8) => "llvm.usub.with.overflow.i8",
+ Uint(U16) => "llvm.usub.with.overflow.i16",
+ Uint(U32) => "llvm.usub.with.overflow.i32",
+ Uint(U64) => "llvm.usub.with.overflow.i64",
+ Uint(U128) => "llvm.usub.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Mul => match new_sty {
- TyInt(I8) => "llvm.smul.with.overflow.i8",
- TyInt(I16) => "llvm.smul.with.overflow.i16",
- TyInt(I32) => "llvm.smul.with.overflow.i32",
- TyInt(I64) => "llvm.smul.with.overflow.i64",
- TyInt(I128) => "llvm.smul.with.overflow.i128",
-
- TyUint(U8) => "llvm.umul.with.overflow.i8",
- TyUint(U16) => "llvm.umul.with.overflow.i16",
- TyUint(U32) => "llvm.umul.with.overflow.i32",
- TyUint(U64) => "llvm.umul.with.overflow.i64",
- TyUint(U128) => "llvm.umul.with.overflow.i128",
+ Int(I8) => "llvm.smul.with.overflow.i8",
+ Int(I16) => "llvm.smul.with.overflow.i16",
+ Int(I32) => "llvm.smul.with.overflow.i32",
+ Int(I64) => "llvm.smul.with.overflow.i64",
+ Int(I128) => "llvm.smul.with.overflow.i128",
+
+ Uint(U8) => "llvm.umul.with.overflow.i8",
+ Uint(U16) => "llvm.umul.with.overflow.i16",
+ Uint(U32) => "llvm.umul.with.overflow.i32",
+ Uint(U64) => "llvm.umul.with.overflow.i64",
+ Uint(U128) => "llvm.umul.with.overflow.i128",
_ => unreachable!(),
},
}
let name = match layout.ty.sty {
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyAdt(..) |
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::Adt(..) |
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
- // ty::TyDynamic(..) |
- ty::TyForeign(..) |
- ty::TyStr => {
+ // ty::Dynamic(..) |
+ ty::Foreign(..) |
+ ty::Str => {
let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx, true, true);
printer.push_type_name(layout.ty, &mut name);
match (&layout.ty.sty, &layout.variants) {
- (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => {
+ (&ty::Adt(def, _), &layout::Variants::Single { index }) => {
if def.is_enum() && !def.variants.is_empty() {
write!(&mut name, "::{}", def.variants[index].name).unwrap();
}
return llty;
}
let llty = match self.ty.sty {
- ty::TyRef(_, ty, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Ref(_, ty, _) |
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
cx.layout_of(ty).llvm_type(cx).ptr_to()
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to()
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.sty {
- ty::TyRef(..) |
- ty::TyRawPtr(_) => {
+ ty::Ref(..) |
+ ty::RawPtr(_) => {
return self.field(cx, index).llvm_type(cx);
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
let mut result = None;
match self.ty.sty {
- ty::TyRawPtr(mt) if offset.bytes() == 0 => {
+ ty::RawPtr(mt) if offset.bytes() == 0 => {
let (size, align) = cx.size_and_align_of(mt.ty);
result = Some(PointeeInfo {
size,
});
}
- ty::TyRef(_, ty, mt) if offset.bytes() == 0 => {
+ ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
let (size, align) = cx.size_and_align_of(ty);
let kind = match mt {
// FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
if let Some(ref mut pointee) = result {
- if let ty::TyAdt(def, _) = self.ty.sty {
+ if let ty::Adt(def, _) = self.ty.sty {
if def.is_box() && offset.bytes() == 0 {
pointee.safe = Some(PointerKind::UniqueOwned);
}
use rustc_target::spec::Target;
use rustc_data_structures::fx::FxHashMap;
use rustc_mir::monomorphize::collector;
-use link::{build_link_meta, out_filename};
+use link::out_filename;
pub use rustc_data_structures::sync::MetadataRef;
}
tcx.sess.abort_if_errors();
- let link_meta = build_link_meta(tcx.crate_hash(LOCAL_CRATE));
- let metadata = tcx.encode_metadata(&link_meta);
+ let metadata = tcx.encode_metadata();
box OngoingCodegen {
metadata: metadata,
use rustc::session::config::{self, OutputFilenames, Input, OutputType};
use rustc::session::Session;
-use rustc::middle::cstore::LinkMeta;
-use rustc_data_structures::svh::Svh;
use std::path::{Path, PathBuf};
use syntax::{ast, attr};
use syntax_pos::Span;
}
}
-pub fn build_link_meta(crate_hash: Svh) -> LinkMeta {
- let r = LinkMeta {
- crate_hash,
- };
- info!("{:?}", r);
- return r;
-}
-
pub fn find_crate_name(sess: Option<&Session>,
attrs: &[ast::Attribute],
input: &Input) -> String {
// If this is a function, we hash the signature as well.
// This is not *strictly* needed, but it may help in some
// situations, see the `run-make/a-b-a-linker-guard` test.
- if let ty::TyFnDef(..) = item_type.sty {
+ if let ty::FnDef(..) = item_type.sty {
item_type.fn_sig(tcx).hash_stable(&mut hcx, &mut hasher);
}
rustc-rayon = "0.1.1"
rustc-rayon-core = "0.1.1"
rustc-hash = "1.0.1"
+smallvec = { version = "0.6.5", features = ["union"] }
[dependencies.parking_lot]
version = "0.5"
// whole Drain iterator (like &mut T).
let range_slice = {
let arr = &mut self.values as &mut [ManuallyDrop<<A as Array>::Element>];
- slice::from_raw_parts_mut(arr.as_mut_ptr().offset(start as isize),
+ slice::from_raw_parts_mut(arr.as_mut_ptr().add(start),
end - start)
};
Drain {
{
let arr =
&mut source_array_vec.values as &mut [ManuallyDrop<<A as Array>::Element>];
- let src = arr.as_ptr().offset(tail as isize);
- let dst = arr.as_mut_ptr().offset(start as isize);
+ let src = arr.as_ptr().add(tail);
+ let dst = arr.as_mut_ptr().add(start);
ptr::copy(src, dst, self.tail_len);
};
source_array_vec.set_len(start + self.tail_len);
// except according to those terms.
use array_vec::ArrayVec;
-use std::borrow::{Borrow, BorrowMut, ToOwned};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
-use std::ops::{Deref, DerefMut, Range};
use std::slice;
use bitslice::{BitSlice, Word};
use bitslice::{bitwise, Union, Subtract, Intersect};
use indexed_vec::Idx;
use rustc_serialize;
-/// Represents a set (or packed family of sets), of some element type
-/// E, where each E is identified by some unique index type `T`.
+/// Represents a set of some element type E, where each E is identified by some
+/// unique index type `T`.
///
/// In other words, `T` is the type used to index into the bitvector
/// this type uses to represent the set of object it holds.
///
/// The representation is dense, using one bit per possible element.
#[derive(Eq, PartialEq)]
-pub struct IdxSetBuf<T: Idx> {
+pub struct IdxSet<T: Idx> {
_pd: PhantomData<fn(&T)>,
bits: Vec<Word>,
}
-impl<T: Idx> Clone for IdxSetBuf<T> {
+impl<T: Idx> Clone for IdxSet<T> {
fn clone(&self) -> Self {
- IdxSetBuf { _pd: PhantomData, bits: self.bits.clone() }
+ IdxSet { _pd: PhantomData, bits: self.bits.clone() }
}
}
-impl<T: Idx> rustc_serialize::Encodable for IdxSetBuf<T> {
+impl<T: Idx> rustc_serialize::Encodable for IdxSet<T> {
fn encode<E: rustc_serialize::Encoder>(&self,
encoder: &mut E)
-> Result<(), E::Error> {
}
}
-impl<T: Idx> rustc_serialize::Decodable for IdxSetBuf<T> {
- fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSetBuf<T>, D::Error> {
+impl<T: Idx> rustc_serialize::Decodable for IdxSet<T> {
+ fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSet<T>, D::Error> {
let words: Vec<Word> = rustc_serialize::Decodable::decode(d)?;
- Ok(IdxSetBuf {
+ Ok(IdxSet {
_pd: PhantomData,
bits: words,
})
}
}
-
-// pnkfelix wants to have this be `IdxSet<T>([Word]) and then pass
-// around `&mut IdxSet<T>` or `&IdxSet<T>`.
-
-/// Represents a set (or packed family of sets), of some element type
-/// E, where each E is identified by some unique index type `T`.
-///
-/// In other words, `T` is the type used to index into the bitslice
-/// this type uses to represent the set of object it holds.
-#[repr(transparent)]
-pub struct IdxSet<T: Idx> {
- _pd: PhantomData<fn(&T)>,
- bits: [Word],
-}
-
-impl<T: Idx> Borrow<IdxSet<T>> for IdxSetBuf<T> {
- fn borrow(&self) -> &IdxSet<T> {
- &*self
- }
-}
-
-impl<T: Idx> BorrowMut<IdxSet<T>> for IdxSetBuf<T> {
- fn borrow_mut(&mut self) -> &mut IdxSet<T> {
- &mut *self
- }
-}
-
-impl<T: Idx> ToOwned for IdxSet<T> {
- type Owned = IdxSetBuf<T>;
- fn to_owned(&self) -> Self::Owned {
- IdxSet::to_owned(self)
- }
-}
-
const BITS_PER_WORD: usize = mem::size_of::<Word>() * 8;
-impl<T: Idx> fmt::Debug for IdxSetBuf<T> {
- fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
- w.debug_list()
- .entries(self.iter())
- .finish()
- }
-}
-
impl<T: Idx> fmt::Debug for IdxSet<T> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
w.debug_list()
}
}
-impl<T: Idx> IdxSetBuf<T> {
+impl<T: Idx> IdxSet<T> {
fn new(init: Word, universe_size: usize) -> Self {
let num_words = (universe_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
- IdxSetBuf {
+ IdxSet {
_pd: Default::default(),
bits: vec![init; num_words],
}
pub fn new_empty(universe_size: usize) -> Self {
Self::new(0, universe_size)
}
-}
-
-impl<T: Idx> IdxSet<T> {
- unsafe fn from_slice(s: &[Word]) -> &Self {
- &*(s as *const [Word] as *const Self)
- }
-
- unsafe fn from_slice_mut(s: &mut [Word]) -> &mut Self {
- &mut *(s as *mut [Word] as *mut Self)
- }
-}
-
-impl<T: Idx> Deref for IdxSetBuf<T> {
- type Target = IdxSet<T>;
- fn deref(&self) -> &IdxSet<T> {
- unsafe { IdxSet::from_slice(&self.bits) }
- }
-}
-
-impl<T: Idx> DerefMut for IdxSetBuf<T> {
- fn deref_mut(&mut self) -> &mut IdxSet<T> {
- unsafe { IdxSet::from_slice_mut(&mut self.bits) }
- }
-}
-
-impl<T: Idx> IdxSet<T> {
- pub fn to_owned(&self) -> IdxSetBuf<T> {
- IdxSetBuf {
- _pd: Default::default(),
- bits: self.bits.to_owned(),
- }
- }
/// Duplicates as a hybrid set.
- pub fn to_hybrid(&self) -> HybridIdxSetBuf<T> {
+ pub fn to_hybrid(&self) -> HybridIdxSet<T> {
// This universe_size may be slightly larger than the one specified
// upon creation, due to rounding up to a whole word. That's ok.
let universe_size = self.bits.len() * BITS_PER_WORD;
// Note: we currently don't bother trying to make a Sparse set.
- HybridIdxSetBuf::Dense(self.to_owned(), universe_size)
+ HybridIdxSet::Dense(self.to_owned(), universe_size)
}
/// Removes all elements
self.bits.set_bit(elem.index())
}
- pub fn range(&self, elems: &Range<T>) -> &Self {
- let elems = elems.start.index()..elems.end.index();
- unsafe { Self::from_slice(&self.bits[elems]) }
- }
-
- pub fn range_mut(&mut self, elems: &Range<T>) -> &mut Self {
- let elems = elems.start.index()..elems.end.index();
- unsafe { Self::from_slice_mut(&mut self.bits[elems]) }
- }
-
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
self.bits.get_bit(elem.index())
bitwise(self.words_mut(), other.words(), &Union)
}
- /// Like `union()`, but takes a `SparseIdxSetBuf` argument.
- fn union_sparse(&mut self, other: &SparseIdxSetBuf<T>) -> bool {
+ /// Like `union()`, but takes a `SparseIdxSet` argument.
+ fn union_sparse(&mut self, other: &SparseIdxSet<T>) -> bool {
let mut changed = false;
for elem in other.iter() {
changed |= self.add(&elem);
changed
}
- /// Like `union()`, but takes a `HybridIdxSetBuf` argument.
- pub fn union_hybrid(&mut self, other: &HybridIdxSetBuf<T>) -> bool {
+ /// Like `union()`, but takes a `HybridIdxSet` argument.
+ pub fn union_hybrid(&mut self, other: &HybridIdxSet<T>) -> bool {
match other {
- HybridIdxSetBuf::Sparse(sparse, _) => self.union_sparse(sparse),
- HybridIdxSetBuf::Dense(dense, _) => self.union(dense),
+ HybridIdxSet::Sparse(sparse, _) => self.union_sparse(sparse),
+ HybridIdxSet::Dense(dense, _) => self.union(dense),
}
}
bitwise(self.words_mut(), other.words(), &Subtract)
}
- /// Like `subtract()`, but takes a `SparseIdxSetBuf` argument.
- fn subtract_sparse(&mut self, other: &SparseIdxSetBuf<T>) -> bool {
+ /// Like `subtract()`, but takes a `SparseIdxSet` argument.
+ fn subtract_sparse(&mut self, other: &SparseIdxSet<T>) -> bool {
let mut changed = false;
for elem in other.iter() {
changed |= self.remove(&elem);
changed
}
- /// Like `subtract()`, but takes a `HybridIdxSetBuf` argument.
- pub fn subtract_hybrid(&mut self, other: &HybridIdxSetBuf<T>) -> bool {
+ /// Like `subtract()`, but takes a `HybridIdxSet` argument.
+ pub fn subtract_hybrid(&mut self, other: &HybridIdxSet<T>) -> bool {
match other {
- HybridIdxSetBuf::Sparse(sparse, _) => self.subtract_sparse(sparse),
- HybridIdxSetBuf::Dense(dense, _) => self.subtract(dense),
+ HybridIdxSet::Sparse(sparse, _) => self.subtract_sparse(sparse),
+ HybridIdxSet::Dense(dense, _) => self.subtract(dense),
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
-/// HybridIdxSetBuf; do not use directly.
+/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
-pub struct SparseIdxSetBuf<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
+pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
-impl<T: Idx> SparseIdxSetBuf<T> {
+impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
- SparseIdxSetBuf(ArrayVec::new())
+ SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
}
}
- fn to_dense(&self, universe_size: usize) -> IdxSetBuf<T> {
- let mut dense = IdxSetBuf::new_empty(universe_size);
+ fn to_dense(&self, universe_size: usize) -> IdxSet<T> {
+ let mut dense = IdxSet::new_empty(universe_size);
for elem in self.0.iter() {
dense.add(elem);
}
}
}
-/// Like IdxSetBuf, but with a hybrid representation: sparse when there are few
+/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `universe_size`, and are cleared frequently.
#[derive(Clone, Debug)]
-pub enum HybridIdxSetBuf<T: Idx> {
- Sparse(SparseIdxSetBuf<T>, usize),
- Dense(IdxSetBuf<T>, usize),
+pub enum HybridIdxSet<T: Idx> {
+ Sparse(SparseIdxSet<T>, usize),
+ Dense(IdxSet<T>, usize),
}
-impl<T: Idx> HybridIdxSetBuf<T> {
+impl<T: Idx> HybridIdxSet<T> {
pub fn new_empty(universe_size: usize) -> Self {
- HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), universe_size)
+ HybridIdxSet::Sparse(SparseIdxSet::new(), universe_size)
}
fn universe_size(&mut self) -> usize {
match *self {
- HybridIdxSetBuf::Sparse(_, size) => size,
- HybridIdxSetBuf::Dense(_, size) => size,
+ HybridIdxSet::Sparse(_, size) => size,
+ HybridIdxSet::Dense(_, size) => size,
}
}
pub fn clear(&mut self) {
let universe_size = self.universe_size();
- *self = HybridIdxSetBuf::new_empty(universe_size);
+ *self = HybridIdxSet::new_empty(universe_size);
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => sparse.contains(elem),
- HybridIdxSetBuf::Dense(dense, _) => dense.contains(elem),
+ HybridIdxSet::Sparse(sparse, _) => sparse.contains(elem),
+ HybridIdxSet::Dense(dense, _) => dense.contains(elem),
}
}
/// Adds `elem` to the set `self`.
pub fn add(&mut self, elem: &T) -> bool {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
+ HybridIdxSet::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
// The set is sparse and has space for `elem`.
sparse.add(elem)
}
- HybridIdxSetBuf::Sparse(sparse, _) if sparse.contains(elem) => {
+ HybridIdxSet::Sparse(sparse, _) if sparse.contains(elem) => {
// The set is sparse and does not have space for `elem`, but
// that doesn't matter because `elem` is already present.
false
}
- HybridIdxSetBuf::Sparse(_, _) => {
+ HybridIdxSet::Sparse(_, _) => {
// The set is sparse and full. Convert to a dense set.
//
// FIXME: This code is awful, but I can't work out how else to
// appease the borrow checker.
- let dummy = HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), 0);
+ let dummy = HybridIdxSet::Sparse(SparseIdxSet::new(), 0);
match mem::replace(self, dummy) {
- HybridIdxSetBuf::Sparse(sparse, universe_size) => {
+ HybridIdxSet::Sparse(sparse, universe_size) => {
let mut dense = sparse.to_dense(universe_size);
let changed = dense.add(elem);
assert!(changed);
- mem::replace(self, HybridIdxSetBuf::Dense(dense, universe_size));
+ mem::replace(self, HybridIdxSet::Dense(dense, universe_size));
changed
}
_ => panic!("impossible"),
}
}
- HybridIdxSetBuf::Dense(dense, _) => dense.add(elem),
+ HybridIdxSet::Dense(dense, _) => dense.add(elem),
}
}
pub fn remove(&mut self, elem: &T) -> bool {
// Note: we currently don't bother going from Dense back to Sparse.
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => sparse.remove(elem),
- HybridIdxSetBuf::Dense(dense, _) => dense.remove(elem),
+ HybridIdxSet::Sparse(sparse, _) => sparse.remove(elem),
+ HybridIdxSet::Dense(dense, _) => dense.remove(elem),
}
}
/// Converts to a dense set, consuming itself in the process.
- pub fn to_dense(self) -> IdxSetBuf<T> {
+ pub fn to_dense(self) -> IdxSet<T> {
match self {
- HybridIdxSetBuf::Sparse(sparse, universe_size) => sparse.to_dense(universe_size),
- HybridIdxSetBuf::Dense(dense, _) => dense,
+ HybridIdxSet::Sparse(sparse, universe_size) => sparse.to_dense(universe_size),
+ HybridIdxSet::Dense(dense, _) => dense,
}
}
/// Iteration order is unspecified.
pub fn iter(&self) -> HybridIter<T> {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
- HybridIdxSetBuf::Dense(dense, _) => HybridIter::Dense(dense.iter()),
+ HybridIdxSet::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
+ HybridIdxSet::Dense(dense, _) => HybridIter::Dense(dense.iter()),
}
}
}
use std::cmp;
for i in 0..256 {
- let mut idx_buf: IdxSetBuf<usize> = IdxSetBuf::new_filled(128);
+ let mut idx_buf: IdxSet<usize> = IdxSet::new_filled(128);
idx_buf.trim_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
fn test_set_up_to() {
for i in 0..128 {
for mut idx_buf in
- vec![IdxSetBuf::new_empty(128), IdxSetBuf::new_filled(128)]
+ vec![IdxSet::new_empty(128), IdxSet::new_filled(128)]
.into_iter()
{
idx_buf.set_up_to(i);
#[test]
fn test_new_filled() {
for i in 0..128 {
- let idx_buf = IdxSetBuf::new_filled(i);
+ let idx_buf = IdxSet::new_filled(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect();
assert_eq!(elems, expected);
#![feature(unsize)]
#![feature(specialization)]
#![feature(optin_builtin_traits)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![cfg_attr(not(stage0), feature(nll))]
#![feature(allow_internal_unstable)]
#![feature(vec_resize_with)]
extern crate rustc_rayon_core as rayon_core;
extern crate rustc_hash;
extern crate serialize;
+#[cfg_attr(test, macro_use)]
+extern crate smallvec;
// See librustc_cratesio_shim/Cargo.toml for a comment explaining this.
#[allow(unused_extern_crates)]
//!
//! The N above is determined by Array's implementor, by way of an associated constant.
-use std::ops::{Deref, DerefMut};
-use std::iter::{IntoIterator, FromIterator};
-use std::fmt::{self, Debug};
-use std::mem;
-use std::ptr;
-
-use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
-
-use accumulate_vec::{IntoIter, AccumulateVec};
-use array_vec::Array;
-
-pub struct SmallVec<A: Array>(AccumulateVec<A>);
+use smallvec::{Array, SmallVec};
pub type OneVector<T> = SmallVec<[T; 1]>;
-impl<A> Clone for SmallVec<A>
- where A: Array,
- A::Element: Clone {
- fn clone(&self) -> Self {
- SmallVec(self.0.clone())
- }
-}
-
-impl<A> Debug for SmallVec<A>
- where A: Array + Debug,
- A::Element: Debug {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple("SmallVec").field(&self.0).finish()
- }
+pub trait ExpectOne<A: Array> {
+ fn expect_one(self, err: &'static str) -> A::Item;
}
-impl<A: Array> SmallVec<A> {
- pub fn new() -> Self {
- SmallVec(AccumulateVec::new())
- }
-
- pub fn is_array(&self) -> bool {
- self.0.is_array()
- }
-
- pub fn with_capacity(cap: usize) -> Self {
- let mut vec = SmallVec::new();
- vec.reserve(cap);
- vec
- }
-
- pub fn one(el: A::Element) -> Self {
- SmallVec(AccumulateVec::one(el))
- }
-
- pub fn many<I: IntoIterator<Item=A::Element>>(els: I) -> Self {
- SmallVec(AccumulateVec::many(els))
- }
-
- pub fn expect_one(self, err: &'static str) -> A::Element {
+impl<A: Array> ExpectOne<A> for SmallVec<A> {
+ fn expect_one(self, err: &'static str) -> A::Item {
assert!(self.len() == 1, err);
- match self.0 {
- AccumulateVec::Array(arr) => arr.into_iter().next().unwrap(),
- AccumulateVec::Heap(vec) => vec.into_iter().next().unwrap(),
- }
- }
-
- /// Will reallocate onto the heap if needed.
- pub fn push(&mut self, el: A::Element) {
- self.reserve(1);
- match self.0 {
- AccumulateVec::Array(ref mut array) => array.push(el),
- AccumulateVec::Heap(ref mut vec) => vec.push(el),
- }
- }
-
- pub fn reserve(&mut self, n: usize) {
- match self.0 {
- AccumulateVec::Array(_) => {
- if self.len() + n > A::LEN {
- let len = self.len();
- let array = mem::replace(&mut self.0,
- AccumulateVec::Heap(Vec::with_capacity(len + n)));
- if let AccumulateVec::Array(array) = array {
- match self.0 {
- AccumulateVec::Heap(ref mut vec) => vec.extend(array),
- _ => unreachable!()
- }
- }
- }
- }
- AccumulateVec::Heap(ref mut vec) => vec.reserve(n)
- }
- }
-
- pub unsafe fn set_len(&mut self, len: usize) {
- match self.0 {
- AccumulateVec::Array(ref mut arr) => arr.set_len(len),
- AccumulateVec::Heap(ref mut vec) => vec.set_len(len),
- }
- }
-
- pub fn insert(&mut self, index: usize, element: A::Element) {
- let len = self.len();
-
- // Reserve space for shifting elements to the right
- self.reserve(1);
-
- assert!(index <= len);
-
- unsafe {
- // infallible
- // The spot to put the new value
- {
- let p = self.as_mut_ptr().offset(index as isize);
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
- // Write it in, overwriting the first copy of the `index`th
- // element.
- ptr::write(p, element);
- }
- self.set_len(len + 1);
- }
- }
-
- pub fn truncate(&mut self, len: usize) {
- unsafe {
- while len < self.len() {
- // Decrement len before the drop_in_place(), so a panic on Drop
- // doesn't re-drop the just-failed value.
- let newlen = self.len() - 1;
- self.set_len(newlen);
- ::std::ptr::drop_in_place(self.get_unchecked_mut(newlen));
- }
- }
- }
-}
-
-impl<A: Array> Deref for SmallVec<A> {
- type Target = AccumulateVec<A>;
- fn deref(&self) -> &Self::Target {
- &self.0
- }
-}
-
-impl<A: Array> DerefMut for SmallVec<A> {
- fn deref_mut(&mut self) -> &mut AccumulateVec<A> {
- &mut self.0
- }
-}
-
-impl<A: Array> FromIterator<A::Element> for SmallVec<A> {
- fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item=A::Element> {
- SmallVec(iter.into_iter().collect())
- }
-}
-
-impl<A: Array> Extend<A::Element> for SmallVec<A> {
- fn extend<I: IntoIterator<Item=A::Element>>(&mut self, iter: I) {
- let iter = iter.into_iter();
- self.reserve(iter.size_hint().0);
- match self.0 {
- AccumulateVec::Heap(ref mut vec) => vec.extend(iter),
- _ => iter.for_each(|el| self.push(el))
- }
- }
-}
-
-impl<A: Array> IntoIterator for SmallVec<A> {
- type Item = A::Element;
- type IntoIter = IntoIter<A>;
- fn into_iter(self) -> Self::IntoIter {
- self.0.into_iter()
- }
-}
-
-impl<A: Array> Default for SmallVec<A> {
- fn default() -> SmallVec<A> {
- SmallVec::new()
- }
-}
-
-impl<A> Encodable for SmallVec<A>
- where A: Array,
- A::Element: Encodable {
- fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
- s.emit_seq(self.len(), |s| {
- for (i, e) in self.iter().enumerate() {
- s.emit_seq_elt(i, |s| e.encode(s))?;
- }
- Ok(())
- })
- }
-}
-
-impl<A> Decodable for SmallVec<A>
- where A: Array,
- A::Element: Decodable {
- fn decode<D: Decoder>(d: &mut D) -> Result<SmallVec<A>, D::Error> {
- d.read_seq(|d, len| {
- let mut vec = SmallVec::with_capacity(len);
- // FIXME(#48994) - could just be collected into a Result<SmallVec, D::Error>
- for i in 0..len {
- vec.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
- }
- Ok(vec)
- })
+ self.into_iter().next().unwrap()
}
}
#[cfg(test)]
mod tests {
extern crate test;
- use self::test::Bencher;
-
use super::*;
- #[test]
- fn test_len() {
- let v: OneVector<isize> = OneVector::new();
- assert_eq!(0, v.len());
-
- assert_eq!(1, OneVector::one(1).len());
- assert_eq!(5, OneVector::many(vec![1, 2, 3, 4, 5]).len());
- }
-
- #[test]
- fn test_push_get() {
- let mut v = OneVector::new();
- v.push(1);
- assert_eq!(1, v.len());
- assert_eq!(1, v[0]);
- v.push(2);
- assert_eq!(2, v.len());
- assert_eq!(2, v[1]);
- v.push(3);
- assert_eq!(3, v.len());
- assert_eq!(3, v[2]);
- }
-
- #[test]
- fn test_from_iter() {
- let v: OneVector<isize> = (vec![1, 2, 3]).into_iter().collect();
- assert_eq!(3, v.len());
- assert_eq!(1, v[0]);
- assert_eq!(2, v[1]);
- assert_eq!(3, v[2]);
- }
-
- #[test]
- fn test_move_iter() {
- let v = OneVector::new();
- let v: Vec<isize> = v.into_iter().collect();
- assert_eq!(v, Vec::new());
-
- let v = OneVector::one(1);
- assert_eq!(v.into_iter().collect::<Vec<_>>(), [1]);
-
- let v = OneVector::many(vec![1, 2, 3]);
- assert_eq!(v.into_iter().collect::<Vec<_>>(), [1, 2, 3]);
- }
-
#[test]
#[should_panic]
fn test_expect_one_zero() {
#[test]
#[should_panic]
fn test_expect_one_many() {
- OneVector::many(vec![1, 2]).expect_one("");
+ OneVector::from_vec(vec![1, 2]).expect_one("");
}
#[test]
fn test_expect_one_one() {
- assert_eq!(1, OneVector::one(1).expect_one(""));
- assert_eq!(1, OneVector::many(vec![1]).expect_one(""));
- }
-
- #[bench]
- fn fill_small_vec_1_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::new();
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::new();
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::new();
-
- sv.extend(0..50);
- })
+ assert_eq!(1, (smallvec![1] as OneVector<_>).expect_one(""));
+ assert_eq!(1, OneVector::from_vec(vec![1]).expect_one(""));
}
}
}
-impl<I: ::indexed_vec::Idx, CTX> HashStable<CTX> for ::indexed_set::IdxSetBuf<I>
+impl<I: ::indexed_vec::Idx, CTX> HashStable<CTX> for ::indexed_set::IdxSet<I>
{
fn hash_stable<W: StableHasherResult>(&self,
ctx: &mut CTX,
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
//!
-//! `MTRef` is a immutable refernce if cfg!(parallel_queries), and an mutable reference otherwise.
+//! `MTRef` is an immutable reference if cfg!(parallel_queries), and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases a OwningRef owner into Erased or Erased + Send + Sync
//! depending on the value of cfg!(parallel_queries).
/// closures may concurrently be computing a value which the inner value should take.
/// Only one of these closures are used to actually initialize the value.
/// If some other closure already set the value, we assert that it our closure computed
- /// a value equal to the value aready set and then
+ /// a value equal to the value already set and then
/// we return the value our closure computed wrapped in a `Option`.
/// If our closure set the value, `None` is returned.
/// If the value is already initialized, the closure is not called and `None` is returned.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use indexed_set::IdxSetBuf;
+use indexed_set::IdxSet;
use indexed_vec::Idx;
use std::collections::VecDeque;
/// and also use a bit set to track occupancy.
pub struct WorkQueue<T: Idx> {
deque: VecDeque<T>,
- set: IdxSetBuf<T>,
+ set: IdxSet<T>,
}
impl<T: Idx> WorkQueue<T> {
pub fn with_all(len: usize) -> Self {
WorkQueue {
deque: (0..len).map(T::new).collect(),
- set: IdxSetBuf::new_filled(len),
+ set: IdxSet::new_filled(len),
}
}
pub fn with_none(len: usize) -> Self {
WorkQueue {
deque: VecDeque::with_capacity(len),
- set: IdxSetBuf::new_empty(len),
+ set: IdxSet::new_empty(len),
}
}
fn html_of_duration(_start: &Instant, dur: &Duration) -> (String, String) {
use rustc::util::common::duration_to_secs_str;
(duration_to_secs_str(dur.clone()),
- "".to_string()
+ String::new()
)
}
// at by "in this macro invocation"
format!(" (#{})", i + 1)
} else {
- "".to_string()
+ String::new()
})));
}
// Check to make sure we're not in any <*macros>
// backtrace is multiple levels deep
format!(" (#{})", i + 1)
} else {
- "".to_string()
+ String::new()
})));
if !always_backtrace {
break;
let col = if let Some(first_annotation) = first_line.annotations.first() {
format!(":{}", first_annotation.start_col + 1)
} else {
- "".to_string()
+ String::new()
};
format!("{}:{}{}",
annotated_file.file.name,
declare_lint! {
pub MISSING_DOCS,
Allow,
- "detects missing documentation for public members"
+ "detects missing documentation for public members",
+ report_in_external_macro: true
}
pub struct MissingDoc {
err.span_suggestion_short_with_applicability(
attr.span,
"remove this attribute",
- "".to_owned(),
+ String::new(),
Applicability::MachineApplicable
);
err.emit();
// NB. this has an edge case with non-returning statements,
// like `loop {}` or `panic!()`: control flow never reaches
// the exit node through these, so one can have a function
- // that never actually calls itselfs but is still picked up by
+ // that never actually calls itself but is still picked up by
// this lint:
//
// fn f(cond: bool) {
err.span_suggestion_short_with_applicability(
no_mangle_attr.span,
"remove this attribute",
- "".to_owned(),
+ String::new(),
// Use of `#[no_mangle]` suggests FFI intent; correct
// fix may be to monomorphize source by hand
Applicability::MaybeIncorrect
let msg = "mutating transmuted &mut T from &T may cause undefined behavior, \
consider instead using an UnsafeCell";
match get_transmute_from_to(cx, expr) {
- Some((&ty::TyRef(_, _, from_mt), &ty::TyRef(_, _, to_mt))) => {
+ Some((&ty::Ref(_, _, from_mt), &ty::Ref(_, _, to_mt))) => {
if to_mt == hir::Mutability::MutMutable &&
from_mt == hir::Mutability::MutImmutable {
cx.span_lint(MUTABLE_TRANSMUTES, expr.span, msg);
fn get_transmute_from_to<'a, 'tcx>
(cx: &LateContext<'a, 'tcx>,
expr: &hir::Expr)
- -> Option<(&'tcx ty::TypeVariants<'tcx>, &'tcx ty::TypeVariants<'tcx>)> {
+ -> Option<(&'tcx ty::TyKind<'tcx>, &'tcx ty::TyKind<'tcx>)> {
let def = if let hir::ExprKind::Path(ref qpath) = expr.node {
cx.tables.qpath_def(qpath, expr.hir_id)
} else {
) {
let mut ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap();
let result = (|| {
- let val = ecx.const_to_value(constant.val)?;
use rustc_target::abi::LayoutOf;
+ use rustc_mir::interpret::OpTy;
+
+ let op = ecx.const_value_to_op(constant.val)?;
let layout = ecx.layout_of(constant.ty)?;
- let place = ecx.allocate_place_for_value(val, layout, None)?;
- let ptr = place.to_ptr()?;
- let mut todo = vec![(ptr, layout.ty, String::new())];
+ let place = ecx.allocate_op(OpTy { op, layout })?.into();
+
+ let mut todo = vec![(place, Vec::new())];
let mut seen = FxHashSet();
- seen.insert((ptr, layout.ty));
- while let Some((ptr, ty, path)) = todo.pop() {
- let layout = ecx.layout_of(ty)?;
- ecx.validate_ptr_target(
- ptr,
- layout.align,
- layout,
- path,
+ seen.insert(place);
+ while let Some((place, mut path)) = todo.pop() {
+ ecx.validate_mplace(
+ place,
+ &mut path,
&mut seen,
&mut todo,
)?;
);
// Don't suggest about raw identifiers if the feature isn't active
- if cx.sess.features_untracked().raw_identifiers {
- lint.span_suggestion_with_applicability(
- span,
- "you can use a raw identifier to stay compatible",
- "r#async".to_string(),
- Applicability::MachineApplicable,
- );
- }
+ lint.span_suggestion_with_applicability(
+ span,
+ "you can use a raw identifier to stay compatible",
+ "r#async".to_string(),
+ Applicability::MachineApplicable,
+ );
lint.emit()
}
}
#![cfg_attr(test, feature(test))]
#![feature(box_patterns)]
#![feature(box_syntax)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![cfg_attr(not(stage0), feature(nll))]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
}
hir::ExprKind::Lit(ref lit) => {
match cx.tables.node_id_to_type(e.hir_id).sty {
- ty::TyInt(t) => {
+ ty::Int(t) => {
match lit.node {
ast::LitKind::Int(v, ast::LitIntType::Signed(_)) |
ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => {
report_bin_hex_error(
cx,
e,
- ty::TyInt(t),
+ ty::Int(t),
repr_str,
v,
negative,
_ => bug!(),
};
}
- ty::TyUint(t) => {
+ ty::Uint(t) => {
let uint_type = if let ast::UintTy::Usize = t {
cx.sess().target.usize_ty
} else {
let parent_id = cx.tcx.hir.get_parent_node(e.id);
if let hir_map::NodeExpr(parent_expr) = cx.tcx.hir.get(parent_id) {
if let hir::ExprKind::Cast(..) = parent_expr.node {
- if let ty::TyChar = cx.tables.expr_ty(parent_expr).sty {
+ if let ty::Char = cx.tables.expr_ty(parent_expr).sty {
let mut err = cx.struct_span_lint(
OVERFLOWING_LITERALS,
parent_expr.span,
report_bin_hex_error(
cx,
e,
- ty::TyUint(t),
+ ty::Uint(t),
repr_str,
lit_val,
false,
);
}
}
- ty::TyFloat(t) => {
+ ty::Float(t) => {
let is_infinite = match lit.node {
ast::LitKind::Float(v, _) |
ast::LitKind::FloatUnsuffixed(v) => {
// the comparison
let norm_binop = if swap { rev_binop(binop) } else { binop };
match cx.tables.node_id_to_type(expr.hir_id).sty {
- ty::TyInt(int_ty) => {
+ ty::Int(int_ty) => {
let (min, max) = int_ty_range(int_ty);
let lit_val: i128 = match lit.node {
hir::ExprKind::Lit(ref li) => {
};
is_valid(norm_binop, lit_val, min, max)
}
- ty::TyUint(uint_ty) => {
+ ty::Uint(uint_ty) => {
let (min, max) :(u128, u128) = uint_ty_range(uint_ty);
let lit_val: u128 = match lit.node {
hir::ExprKind::Lit(ref li) => {
//
// No suggestion for: `isize`, `usize`.
fn get_type_suggestion<'a>(
- t: &ty::TypeVariants,
+ t: &ty::TyKind,
val: u128,
negative: bool,
) -> Option<String> {
}
}
match t {
- &ty::TyInt(i) => find_fit!(i, val, negative,
+ &ty::Int(i) => find_fit!(i, val, negative,
I8 => [U8] => [I16, I32, I64, I128],
I16 => [U16] => [I32, I64, I128],
I32 => [U32] => [I64, I128],
I64 => [U64] => [I128],
I128 => [U128] => []),
- &ty::TyUint(u) => find_fit!(u, val, negative,
+ &ty::Uint(u) => find_fit!(u, val, negative,
U8 => [U8, U16, U32, U64, U128] => [],
U16 => [U16, U32, U64, U128] => [],
U32 => [U32, U64, U128] => [],
fn report_bin_hex_error(
cx: &LateContext,
expr: &hir::Expr,
- ty: ty::TypeVariants,
+ ty: ty::TyKind,
repr_str: String,
val: u128,
negative: bool,
) {
let (t, actually) = match ty {
- ty::TyInt(t) => {
+ ty::Int(t) => {
let ity = attr::IntType::SignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) as i128 >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
- ty::TyUint(t) => {
+ ty::Uint(t) => {
let ity = attr::IntType::UnsignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) >> (128 - bits);
if def.variants[data_idx].fields.len() == 1 {
match def.variants[data_idx].fields[0].ty(tcx, substs).sty {
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
return true;
}
- ty::TyRef(..) => {
+ ty::Ref(..) => {
return true;
}
_ => {}
// Protect against infinite recursion, for example
// `struct S(*mut S);`.
// FIXME: A recursion limit is necessary as well, for irregular
- // recusive types.
+ // recursive types.
if !cache.insert(ty) {
return FfiSafe;
}
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if def.is_phantom_data() {
return FfiPhantom(ty);
}
}
}
- ty::TyChar => FfiUnsafe {
+ ty::Char => FfiUnsafe {
ty: ty,
reason: "the `char` type has no C equivalent",
help: Some("consider using `u32` or `libc::wchar_t` instead"),
},
- ty::TyInt(ast::IntTy::I128) | ty::TyUint(ast::UintTy::U128) => FfiUnsafe {
+ ty::Int(ast::IntTy::I128) | ty::Uint(ast::UintTy::U128) => FfiUnsafe {
ty: ty,
reason: "128-bit integers don't currently have a known stable ABI",
help: None,
},
// Primitive types with a stable representation.
- ty::TyBool | ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | ty::TyNever => FfiSafe,
+ ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
- ty::TySlice(_) => FfiUnsafe {
+ ty::Slice(_) => FfiUnsafe {
ty: ty,
reason: "slices have no C equivalent",
help: Some("consider using a raw pointer instead"),
},
- ty::TyDynamic(..) => FfiUnsafe {
+ ty::Dynamic(..) => FfiUnsafe {
ty: ty,
reason: "trait objects have no C equivalent",
help: None,
},
- ty::TyStr => FfiUnsafe {
+ ty::Str => FfiUnsafe {
ty: ty,
reason: "string slices have no C equivalent",
help: Some("consider using `*const u8` and a length instead"),
},
- ty::TyTuple(..) => FfiUnsafe {
+ ty::Tuple(..) => FfiUnsafe {
ty: ty,
reason: "tuples have unspecified layout",
help: Some("consider using a struct instead"),
},
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => self.check_type_for_ffi(cache, ty),
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => self.check_type_for_ffi(cache, ty),
- ty::TyArray(ty, _) => self.check_type_for_ffi(cache, ty),
+ ty::Array(ty, _) => self.check_type_for_ffi(cache, ty),
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
match sig.abi() {
Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic | Abi::RustCall => {
return FfiUnsafe {
FfiSafe
}
- ty::TyForeign(..) => FfiSafe,
-
- ty::TyParam(..) |
- ty::TyInfer(..) |
- ty::TyError |
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyProjection(..) |
- ty::TyAnon(..) |
- ty::TyFnDef(..) => bug!("Unexpected type in foreign function"),
+ ty::Foreign(..) => FfiSafe,
+
+ ty::Param(..) |
+ ty::Infer(..) |
+ ty::Error |
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::GeneratorWitness(..) |
+ ty::Projection(..) |
+ ty::Anon(..) |
+ ty::FnDef(..) => bug!("Unexpected type in foreign function"),
}
}
if let Some(s) = help {
diag.help(s);
}
- if let ty::TyAdt(def, _) = unsafe_ty.sty {
+ if let ty::Adt(def, _) = unsafe_ty.sty {
if let Some(sp) = self.cx.tcx.hir.span_if_local(def.did) {
diag.span_note(sp, "type defined here");
}
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
if let hir::ItemKind::Enum(ref enum_definition, _) = it.node {
let item_def_id = cx.tcx.hir.local_def_id(it.id);
- let generics = cx.tcx.generics_of(item_def_id);
- for param in &generics.params {
- match param.kind {
- ty::GenericParamDefKind::Lifetime { .. } => {},
- ty::GenericParamDefKind::Type { .. } => return,
- }
- }
- // Sizes only make sense for non-generic types.
let t = cx.tcx.type_of(item_def_id);
let ty = cx.tcx.erase_regions(&t);
match cx.layout_of(ty) {
let t = cx.tables.expr_ty(&expr);
let ty_warned = match t.sty {
- ty::TyTuple(ref tys) if tys.is_empty() => return,
- ty::TyNever => return,
- ty::TyAdt(def, _) => {
+ ty::Tuple(ref tys) if tys.is_empty() => return,
+ ty::Never => return,
+ ty::Adt(def, _) => {
if def.variants.is_empty() {
return;
} else {
use rustc::ty::query::QueryConfig;
use rustc::middle::cstore::{CrateStore, DepKind,
- LinkMeta,
EncodedMetadata, NativeLibraryKind};
use rustc::middle::exported_symbols::ExportedSymbol;
use rustc::middle::stability::DeprecationEntry;
use syntax::parse::source_file_to_stream;
use syntax::symbol::Symbol;
use syntax_pos::{Span, NO_EXPANSION, FileName};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc::hir;
macro_rules! provide {
mir
}
mir_const_qualif => {
- (cdata.mir_const_qualif(def_id.index), Lrc::new(IdxSetBuf::new_empty(0)))
+ (cdata.mir_const_qualif(def_id.index), Lrc::new(IdxSet::new_empty(0)))
}
fn_sig => { cdata.fn_sig(def_id.index, tcx) }
inherent_impls => { Lrc::new(cdata.get_inherent_implementations_for_type(def_id.index)) }
}
fn encode_metadata<'a, 'tcx>(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- link_meta: &LinkMeta)
+ tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> EncodedMetadata
{
- encoder::encode_metadata(tcx, link_meta)
+ encoder::encode_metadata(tcx)
}
fn metadata_encoding_version(&self) -> &[u8]
EntryKind::Trait(_) => Def::Trait(did),
EntryKind::Enum(..) => Def::Enum(did),
EntryKind::MacroDef(_) => Def::Macro(did, MacroKind::Bang),
- EntryKind::ForeignType => Def::TyForeign(did),
+ EntryKind::ForeignType => Def::ForeignTy(did),
EntryKind::ForeignMod |
EntryKind::GlobalAsm |
use isolated_encoder::IsolatedEncoder;
use schema::*;
-use rustc::middle::cstore::{LinkMeta, LinkagePreference, NativeLibrary,
+use rustc::middle::cstore::{LinkagePreference, NativeLibrary,
EncodedMetadata, ForeignModule};
use rustc::hir::def::CtorKind;
use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId, LocalDefId, LOCAL_CRATE};
pub struct EncodeContext<'a, 'tcx: 'a> {
opaque: opaque::Encoder,
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
- link_meta: &'a LinkMeta,
lazy_state: LazyState,
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
let index_bytes = self.position() - i;
let attrs = tcx.hir.krate_attrs();
- let link_meta = self.link_meta;
let is_proc_macro = tcx.sess.crate_types.borrow().contains(&CrateType::ProcMacro);
let has_default_lib_allocator = attr::contains_name(&attrs, "default_lib_allocator");
let has_global_allocator = *tcx.sess.has_global_allocator.get();
name: tcx.crate_name(LOCAL_CRATE),
extra_filename: tcx.sess.opts.cg.extra_filename.clone(),
triple: tcx.sess.opts.target_triple.clone(),
- hash: link_meta.crate_hash,
+ hash: tcx.crate_hash(LOCAL_CRATE),
disambiguator: tcx.sess.local_crate_disambiguator(),
panic_strategy: tcx.sess.panic_strategy(),
edition: hygiene::default_edition(),
hir::ItemKind::Const(..) => self.encode_optimized_mir(def_id),
hir::ItemKind::Fn(_, header, ..) => {
let generics = tcx.generics_of(def_id);
- let has_types = generics.params.iter().any(|param| match param.kind {
- ty::GenericParamDefKind::Type { .. } => true,
- _ => false,
- });
let needs_inline =
- (has_types || tcx.codegen_fn_attrs(def_id).requests_inline()) &&
+ (generics.requires_monomorphization(tcx) ||
+ tcx.codegen_fn_attrs(def_id).requests_inline()) &&
!self.metadata_output_only();
let always_encode_mir = self.tcx.sess.opts.debugging_opts.always_encode_mir;
if needs_inline
let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
let kind = match tables.node_id_to_type(hir_id).sty {
- ty::TyGenerator(def_id, ..) => {
+ ty::Generator(def_id, ..) => {
let layout = self.tcx.generator_layout(def_id);
let data = GeneratorData {
layout: layout.clone(),
EntryKind::Generator(self.lazy(&data))
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let sig = substs.closure_sig(def_id, self.tcx);
let data = ClosureData { sig: self.lazy(&sig) };
EntryKind::Closure(self.lazy(&data))
}
fn encode_info_for_generics(&mut self, generics: &hir::Generics) {
- generics.params.iter().for_each(|param| match param.kind {
- hir::GenericParamKind::Lifetime { .. } => {}
- hir::GenericParamKind::Type { ref default, .. } => {
- let def_id = self.tcx.hir.local_def_id(param.id);
- let has_default = Untracked(default.is_some());
- let encode_info = IsolatedEncoder::encode_info_for_ty_param;
- self.record(def_id, encode_info, (def_id, has_default));
+ for param in &generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { ref default, .. } => {
+ let def_id = self.tcx.hir.local_def_id(param.id);
+ let has_default = Untracked(default.is_some());
+ let encode_info = IsolatedEncoder::encode_info_for_ty_param;
+ self.record(def_id, encode_info, (def_id, has_default));
+ }
}
- });
+ }
}
fn encode_info_for_ty(&mut self, ty: &hir::Ty) {
// will allow us to slice the metadata to the precise length that we just
// generated regardless of trailing bytes that end up in it.
-pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- link_meta: &LinkMeta)
+pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> EncodedMetadata
{
let mut encoder = opaque::Encoder::new(vec![]);
let mut ecx = EncodeContext {
opaque: encoder,
tcx,
- link_meta,
lazy_state: LazyState::NoNode,
type_shorthands: Default::default(),
predicate_shorthands: Default::default(),
pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions {
let ty = tcx.type_of(did);
match ty.sty {
- ty::TyAdt(ref def, _) => return def.repr,
+ ty::Adt(ref def, _) => return def.repr,
_ => bug!("{} is not an ADT", ty),
}
}
}
}
- // Update kind and, optionally, the name of all native libaries
+ // Update kind and, optionally, the name of all native libraries
// (there may be more than one) with the specified name.
for &(ref name, ref new_name, kind) in &self.tcx.sess.opts.libs {
let mut found = false;
syntax_pos = { path = "../libsyntax_pos" }
byteorder = { version = "1.1", features = ["i128"] }
rustc_apfloat = { path = "../librustc_apfloat" }
+smallvec = { version = "0.6.5", features = ["union"] }
if let Some(ty) = self.retrieve_type_for_place(place) {
let needs_note = match ty.sty {
- ty::TypeVariants::TyClosure(id, _) => {
+ ty::Closure(id, _) => {
let tables = self.tcx.typeck_tables_of(id);
let node_id = self.tcx.hir.as_local_node_id(id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.describe_field_from_ty(&ty.boxed_ty(), field)
} else {
match ty.sty {
- ty::TyAdt(def, _) => if def.is_enum() {
+ ty::Adt(def, _) => if def.is_enum() {
field.index().to_string()
} else {
def.non_enum_variant().fields[field.index()]
.ident
.to_string()
},
- ty::TyTuple(_) => field.index().to_string(),
- ty::TyRef(_, ty, _) | ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Tuple(_) => field.index().to_string(),
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
self.describe_field_from_ty(&ty, field)
}
- ty::TyArray(ty, _) | ty::TySlice(ty) => self.describe_field_from_ty(&ty, field),
- ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
+ ty::Array(ty, _) | ty::Slice(ty) => self.describe_field_from_ty(&ty, field),
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
// Convert the def-id into a node-id. node-ids are only valid for
// the local code in the current crate, so this returns an `Option` in case
// the closure comes from another crate. But in that case we wouldn't
use rustc::ty::query::Providers;
use rustc::ty::{self, ParamEnv, TyCtxt, Ty};
-use rustc_errors::{Diagnostic, DiagnosticBuilder, Level};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, Level};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::rc::Rc;
_ => Some(tcx.hir.body_owned_by(id)),
};
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
mir,
span,
"variable does not need to be mutable",
);
- err.span_suggestion_short(mut_span, "remove this `mut`", "".to_owned());
+ err.span_suggestion_short_with_applicability(
+ mut_span,
+ "remove this `mut`",
+ String::new(),
+ Applicability::MachineApplicable);
err.buffer(&mut mbcx.errors_buffer);
}
// individual fields instead. This way if `foo` has a
// destructor but `bar` does not, we will only check for
// borrows of `x.foo` and not `x.bar`. See #47703.
- ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.tcx) => {
+ ty::Adt(def, substs) if def.is_struct() && !def.has_dtor(self.tcx) => {
def.all_fields()
.map(|field| field.ty(gcx, substs))
.enumerate()
.for_each(|field| drop_field(self, field));
}
// Same as above, but for tuples.
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
tys.iter()
.cloned()
.enumerate()
}
// Closures also have disjoint fields, but they are only
// directly accessed in the body of the closure.
- ty::TyClosure(def, substs)
+ ty::Closure(def, substs)
if *drop_place == Place::Local(Local::new(1))
&& !self.mir.upvar_decls.is_empty() =>
{
}
// Generators also have disjoint fields, but they are only
// directly accessed in the body of the generator.
- ty::TyGenerator(def, substs, _)
+ ty::Generator(def, substs, _)
if *drop_place == Place::Local(Local::new(1))
&& !self.mir.upvar_decls.is_empty() =>
{
// the base case below, we would have a Deep Write due to
// the box being `needs_drop`, and that Deep Write would
// touch `&mut` data in the box.
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
// When/if we add a `&own T` type, this action would
// be like running the destructor of the `&own T`.
// (And the owner of backing storage referenced by the
}
}
- if self
+ // Check is_empty() first because it's the common case, and that
+ // way we avoid the clone() call.
+ if !self.access_place_error_reported.is_empty() &&
+ self
.access_place_error_reported
.contains(&(place_span.0.clone(), place_span.1))
{
// be already initialized
let tcx = self.tcx;
match base.ty(self.mir, tcx).to_ty(tcx).sty {
- ty::TyAdt(def, _) if def.has_dtor(tcx) => {
+ ty::Adt(def, _) if def.has_dtor(tcx) => {
// FIXME: analogous code in
// check_loans.rs first maps
// Check the kind of deref to decide
match base_ty.sty {
- ty::TyRef(_, _, mutbl) => {
+ ty::Ref(_, _, mutbl) => {
match mutbl {
// Shared borrowed data is never mutable
hir::MutImmutable => Err(place),
}
}
}
- ty::TyRawPtr(tnm) => {
+ ty::RawPtr(tnm) => {
match tnm.mutbl {
// `*const` raw pointers are not mutable
hir::MutImmutable => return Err(place),
.any(|p| p.is_upvar_field_projection(self.mir, &self.tcx)
.is_some());
match ty.sty {
- ty::TyArray(..) | ty::TySlice(..) => self
+ ty::Array(..) | ty::Slice(..) => self
.tcx
.cannot_move_out_of_interior_noncopy(span, ty, None, origin),
- ty::TyClosure(def_id, closure_substs)
+ ty::Closure(def_id, closure_substs)
if !self.mir.upvar_decls.is_empty() && is_upvar_field_projection
=> {
let closure_kind_ty =
}
} else {
item_msg = format!("data in a {}", pointer_type);
- reason = "".to_string();
+ reason = String::new();
}
}
}
Place::Static(box Static { def_id, ty: _ }) => {
if let Place::Static(_) = access_place {
item_msg = format!("immutable static item `{}`", access_place_desc.unwrap());
- reason = "".to_string();
+ reason = String::new();
} else {
item_msg = format!("`{}`", access_place_desc.unwrap());
let static_name = &self.tcx.item_name(*def_id);
// individual fields instead. This way if `foo` has a
// destructor but `bar` does not, we will only check for
// borrows of `x.foo` and not `x.bar`. See #47703.
- ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.infcx.tcx) => {
+ ty::Adt(def, substs) if def.is_struct() && !def.has_dtor(self.infcx.tcx) => {
def.all_fields()
.map(|field| field.ty(gcx, substs))
.enumerate()
.for_each(|field| drop_field(self, field));
}
// Same as above, but for tuples.
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
tys.iter().cloned().enumerate()
.for_each(|field| drop_field(self, field));
}
// Closures and generators also have disjoint fields, but they are only
// directly accessed in the body of the closure/generator.
- ty::TyGenerator(def, substs, ..)
+ ty::Generator(def, substs, ..)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
.for_each(|field| drop_field(self, field));
}
- ty::TyClosure(def, substs)
+ ty::Closure(def, substs)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
// unique or mutable borrows are invalidated by writes.
// Reservations count as writes since we need to check
// that activating the borrow will be OK
- // TOOD(bob_twinkles) is this actually the right thing to do?
+ // FIXME(bob_twinkles) is this actually the right thing to do?
this.generate_invalidates(borrow_index, context.loc);
}
}
);
// Also dump the inference graph constraints as a graphviz file.
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, source)?;
regioncx.dump_graphviz_raw_constraints(&mut file)?;
};
// Also dump the inference graph constraints as a graphviz file.
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, source)?;
regioncx.dump_graphviz_scc_constraints(&mut file)?;
// &
// - let's call the lifetime of this reference `'1`
(
- ty::TyRef(region, referent_ty, _),
+ ty::Ref(region, referent_ty, _),
hir::TyKind::Rptr(_lifetime, referent_hir_ty),
) => {
if region.to_region_vid() == needle_fr {
// Match up something like `Foo<'1>`
(
- ty::TyAdt(_adt_def, substs),
+ ty::Adt(_adt_def, substs),
hir::TyKind::Path(hir::QPath::Resolved(None, path)),
) => {
if let Some(last_segment) = path.segments.last() {
// The following cases don't have lifetimes, so we
// just worry about trying to match up the rustc type
// with the HIR types:
- (ty::TyTuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
+ (ty::Tuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
search_stack.extend(elem_tys.iter().cloned().zip(elem_hir_tys));
}
- (ty::TySlice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
- | (ty::TyArray(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
+ (ty::Slice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
+ | (ty::Array(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
search_stack.push((elem_ty, elem_hir_ty));
}
- (ty::TyRawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
+ (ty::RawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
search_stack.push((mut_ty.ty, &mut_hir_ty.ty));
}
use rustc::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable};
use rustc::util::common;
use rustc_data_structures::graph::scc::Sccs;
-use rustc_data_structures::indexed_set::{IdxSet, IdxSetBuf};
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_errors::Diagnostic;
// SCC. For each SCC, we visit its successors and compute
// their values, then we union all those values to get our
// own.
- let visited = &mut IdxSetBuf::new_empty(self.constraint_sccs.num_sccs());
+ let visited = &mut IdxSet::new_empty(self.constraint_sccs.num_sccs());
for scc_index in self.constraint_sccs.all_sccs() {
self.propagate_constraint_sccs_if_new(scc_index, visited);
}
use rustc::traits::query::type_op;
use rustc::traits::query::{Fallible, NoSolution};
use rustc::ty::fold::TypeFoldable;
-use rustc::ty::{self, CanonicalTy, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TypeVariants};
+use rustc::ty::{self, CanonicalTy, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TyKind};
use rustc_errors::Diagnostic;
use std::fmt;
use std::rc::Rc;
// constraints on `'a` and `'b`. These constraints
// would be lost if we just look at the normalized
// value.
- if let ty::TyFnDef(def_id, substs) = constant.literal.ty.sty {
+ if let ty::FnDef(def_id, substs) = constant.literal.ty.sty {
let tcx = self.tcx();
let type_checker = &mut self.cx;
}
ProjectionElem::Subslice { from, to } => PlaceTy::Ty {
ty: match base_ty.sty {
- ty::TyArray(inner, size) => {
+ ty::Array(inner, size) => {
let size = size.unwrap_usize(tcx);
let min_size = (from as u64) + (to as u64);
if let Some(rest_size) = size.checked_sub(min_size) {
)
}
}
- ty::TySlice(..) => base_ty,
+ ty::Slice(..) => base_ty,
_ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
},
},
ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty {
- ty::TyAdt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => {
+ ty::Adt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => {
if index >= adt_def.variants.len() {
PlaceTy::Ty {
ty: span_mirbug_and_err!(
variant_index,
} => (&adt_def.variants[variant_index], substs),
PlaceTy::Ty { ty } => match ty.sty {
- ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[0], substs),
- ty::TyClosure(def_id, substs) => {
+ ty::Adt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[0], substs),
+ ty::Closure(def_id, substs) => {
return match substs.upvar_tys(def_id, tcx).nth(field.index()) {
Some(ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
}),
}
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
// Try pre-transform fields first (upvars and current state)
if let Some(ty) = substs.pre_transforms_tys(def_id, tcx).nth(field.index()) {
return Ok(ty);
}),
};
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
return match tys.get(field.index()) {
Some(&ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
/// predicates, or otherwise uses the inference context, executes
/// `op` and then executes all the further obligations that `op`
/// returns. This will yield a set of outlives constraints amongst
- /// regions which are extracted and stored as having occured at
+ /// regions which are extracted and stored as having occurred at
/// `locations`.
///
/// **Any `rustc::infer` operations that might generate region
} => {
let place_type = place.ty(mir, tcx).to_ty(tcx);
let adt = match place_type.sty {
- TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt,
+ TyKind::Adt(adt, _) if adt.is_enum() => adt,
_ => {
span_bug!(
stmt.source_info.span,
let func_ty = func.ty(mir, tcx);
debug!("check_terminator: call, func_ty={:?}", func_ty);
let sig = match func_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => func_ty.fn_sig(tcx),
+ ty::FnDef(..) | ty::FnPtr(_) => func_ty.fn_sig(tcx),
_ => {
span_mirbug!(self, term, "call to non-function {:?}", func_ty);
return;
CastKind::ClosureFnPointer => {
let sig = match op.ty(mir, tcx).sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
substs.closure_sig_ty(def_id, tcx).fn_sig(tcx)
}
_ => bug!(),
debug!("add_reborrow_constraint - base_ty = {:?}", base_ty);
match base_ty.sty {
- ty::TyRef(ref_region, _, mutbl) => {
+ ty::Ref(ref_region, _, mutbl) => {
constraints.outlives_constraints.push(OutlivesConstraint {
sup: ref_region.to_region_vid(),
sub: borrow_region.to_region_vid(),
}
}
}
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
// deref of raw pointer, guaranteed to be valid
break;
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
// deref of `Box`, need the base to be valid - propagate
}
_ => bug!("unexpected deref ty {:?} in {:?}", base_ty, borrowed_place),
ty::Variance::Covariant,
locations,
borrowck_context,
- ty::Slice::empty(),
+ ty::List::empty(),
).relate(&a, &b)?;
Ok(())
}
ty::Variance::Invariant,
locations,
borrowck_context,
- ty::Slice::empty(),
+ ty::List::empty(),
).relate(&a, &b)?;
Ok(())
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
// Watch out for the case that we are matching a `?T` against the
// right-hand side.
- if let ty::TyInfer(ty::CanonicalTy(var)) = a.sty {
+ if let ty::Infer(ty::CanonicalTy(var)) = a.sty {
self.equate_var(var, b.into())?;
Ok(a)
} else {
/// The "defining" type for this function, with all universal
/// regions instantiated. For a closure or generator, this is the
- /// closure type, but for a top-level function it's the `TyFnDef`.
+ /// closure type, but for a top-level function it's the `FnDef`.
pub defining_ty: DefiningTy<'tcx>,
/// The return type of this function, with all regions replaced by
.replace_free_regions_with_nll_infer_vars(FR, &defining_ty);
match defining_ty.sty {
- ty::TyClosure(def_id, substs) => DefiningTy::Closure(def_id, substs),
- ty::TyGenerator(def_id, substs, movability) => {
+ ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs),
+ ty::Generator(def_id, substs, movability) => {
DefiningTy::Generator(def_id, substs, movability)
}
- ty::TyFnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
+ ty::FnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
_ => span_bug!(
tcx.def_span(self.mir_def_id),
"expected defining type for `{:?}`: `{:?}`",
&self,
indices: &UniversalRegionIndices<'tcx>,
defining_ty: DefiningTy<'tcx>,
- ) -> ty::Binder<&'tcx ty::Slice<Ty<'tcx>>> {
+ ) -> ty::Binder<&'tcx ty::List<Ty<'tcx>>> {
let tcx = self.infcx.tcx;
match defining_ty {
DefiningTy::Closure(def_id, substs) => {
let (&output, tuplized_inputs) = inputs_and_output.split_last().unwrap();
assert_eq!(tuplized_inputs.len(), 1, "multiple closure inputs");
let inputs = match tuplized_inputs[0].sty {
- ty::TyTuple(inputs) => inputs,
+ ty::Tuple(inputs) => inputs,
_ => bug!("closure inputs not a tuple: {:?}", tuplized_inputs[0]),
};
// original path into a new variable and
// borrowed *that* one, leaving the original
// path unborrowed.
- ty::TyRawPtr(..) | ty::TyRef(_, _, hir::MutImmutable) => true,
+ ty::RawPtr(..) | ty::Ref(_, _, hir::MutImmutable) => true,
_ => proj.base.ignore_borrow(tcx, mir),
}
}
// Our invariant is, that at each step of the iteration:
// - If we didn't run out of access to match, our borrow and access are comparable
// and either equal or disjoint.
- // - If we did run out of accesss, the borrow can access a part of it.
+ // - If we did run out of access, the borrow can access a part of it.
loop {
// loop invariant: borrow_c is always either equal to access_c or disjoint from it.
if let Some(borrow_c) = borrow_components.next() {
debug!("places_conflict: shallow access behind ptr");
return false;
}
- (ProjectionElem::Deref, ty::TyRef(_, _, hir::MutImmutable), _) => {
+ (ProjectionElem::Deref, ty::Ref(_, _, hir::MutImmutable), _) => {
// the borrow goes through a dereference of a shared reference.
//
// I'm not sure why we are tracking these borrows - shared
}
(Place::Promoted(p1), Place::Promoted(p2)) => {
if p1.0 == p2.0 {
- if let ty::TyArray(_, size) = p1.1.sty {
+ if let ty::Array(_, size) = p1.1.sty {
if size.unwrap_usize(tcx) == 0 {
// Ignore conflicts with promoted [T; 0].
debug!("place_element_conflict: IGNORE-LEN-0-PROMOTED");
} else {
let ty = pi1.base.ty(mir, tcx).to_ty(tcx);
match ty.sty {
- ty::TyAdt(def, _) if def.is_union() => {
+ ty::Adt(def, _) if def.is_union() => {
// Different fields of a union, we are basically stuck.
debug!("place_element_conflict: STUCK-UNION");
Overlap::Arbitrary
let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
match ty.sty {
- ty::TyRawPtr(_) |
- ty::TyRef(
+ ty::RawPtr(_) |
+ ty::Ref(
_, /*rgn*/
_, /*ty*/
hir::MutImmutable
return Some(cursor);
}
- ty::TyRef(
+ ty::Ref(
_, /*rgn*/
_, /*ty*/
hir::MutMutable,
return Some(cursor);
}
- ty::TyAdt(..) if ty.is_box() => {
+ ty::Adt(..) if ty.is_box() => {
self.next = Some(&proj.base);
return Some(cursor);
}
// FIXME(canndrew): This is_never should probably be an is_uninhabited
let diverges = expr.ty.is_never();
let intrinsic = match ty.sty {
- ty::TyFnDef(def_id, _) => {
+ ty::FnDef(def_id, _) => {
let f = ty.fn_sig(this.hir.tcx());
if f.abi() == Abi::RustIntrinsic ||
f.abi() == Abi::PlatformIntrinsic {
}
TestKind::SwitchInt { switch_ty, ref options, indices: _ } => {
- let (ret, terminator) = if switch_ty.sty == ty::TyBool {
+ let (ret, terminator) = if switch_ty.sty == ty::Bool {
assert!(options.len() > 0 && options.len() <= 2);
let (true_bb, false_bb) = (self.cfg.start_new_block(),
self.cfg.start_new_block());
// array, so we can call `<[u8]>::eq` rather than having to find an
// `<[u8; N]>::eq`.
let unsize = |ty: Ty<'tcx>| match ty.sty {
- ty::TyRef(region, rty, _) => match rty.sty {
- ty::TyArray(inner_ty, n) => Some((region, inner_ty, n)),
+ ty::Ref(region, rty, _) => match rty.sty {
+ ty::Array(inner_ty, n) => Some((region, inner_ty, n)),
_ => None,
},
_ => None,
let ty = tcx.type_of(tcx.hir.local_def_id(id));
let mut abi = fn_sig.abi;
let implicit_argument = match ty.sty {
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
// HACK(eddyb) Avoid having RustCall on closures,
// as it adds unnecessary (and wrong) auto-tupling.
abi = Abi::Rust;
Some(ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None))
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
let gen_ty = tcx.body_tables(body_id).node_id_to_type(fn_hir_id);
Some(ArgInfo(gen_ty, None, None, None))
}
let (yield_ty, return_ty) = if body.is_generator {
let gen_sig = match ty.sty {
- ty::TyGenerator(gen_def_id, gen_substs, ..) =>
+ ty::Generator(gen_def_id, gen_substs, ..) =>
gen_substs.sig(gen_def_id, tcx),
_ =>
span_bug!(tcx.hir.span(id), "generator w/o generator type: {:?}", ty),
let closure_ty = tcx.body_tables(body_id).node_id_to_type(closure_expr_hir_id);
let (closure_def_id, closure_substs) = match closure_ty.sty {
- ty::TyClosure(closure_def_id, closure_substs) => (closure_def_id, closure_substs),
+ ty::Closure(closure_def_id, closure_substs) => (closure_def_id, closure_substs),
_ => bug!("closure expr does not have closure type: {:?}", closure_ty)
};
//! locations.
use rustc::mir::{BasicBlock, Location};
-use rustc_data_structures::indexed_set::{HybridIdxSetBuf, IdxSetBuf, Iter};
+use rustc_data_structures::indexed_set::{HybridIdxSet, IdxSet, Iter};
use rustc_data_structures::indexed_vec::Idx;
use dataflow::{BitDenotation, BlockSets, DataflowResults};
BD: BitDenotation,
{
base_results: DataflowResults<BD>,
- curr_state: IdxSetBuf<BD::Idx>,
- stmt_gen: HybridIdxSetBuf<BD::Idx>,
- stmt_kill: HybridIdxSetBuf<BD::Idx>,
+ curr_state: IdxSet<BD::Idx>,
+ stmt_gen: HybridIdxSet<BD::Idx>,
+ stmt_kill: HybridIdxSet<BD::Idx>,
}
impl<BD> FlowAtLocation<BD>
pub fn new(results: DataflowResults<BD>) -> Self {
let bits_per_block = results.sets().bits_per_block();
- let curr_state = IdxSetBuf::new_empty(bits_per_block);
- let stmt_gen = HybridIdxSetBuf::new_empty(bits_per_block);
- let stmt_kill = HybridIdxSetBuf::new_empty(bits_per_block);
+ let curr_state = IdxSet::new_empty(bits_per_block);
+ let stmt_gen = HybridIdxSet::new_empty(bits_per_block);
+ let stmt_kill = HybridIdxSet::new_empty(bits_per_block);
FlowAtLocation {
base_results: results,
curr_state: curr_state,
place: &mir::Place<'tcx>) -> bool {
let ty = place.ty(mir, tcx).to_ty(tcx);
match ty.sty {
- ty::TyArray(..) => {
+ ty::Array(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
place, ty);
false
}
- ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
+ ty::Slice(..) | ty::Ref(..) | ty::RawPtr(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
place, ty);
true
}
- ty::TyAdt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
+ ty::Adt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
place, ty);
true
use syntax::ast::{self, MetaItem};
-use rustc_data_structures::bitslice::{bitwise, BitwiseOperator, Word};
-use rustc_data_structures::indexed_set::{HybridIdxSetBuf, IdxSet, IdxSetBuf};
+use rustc_data_structures::bitslice::{bitwise, BitwiseOperator};
+use rustc_data_structures::indexed_set::{HybridIdxSet, IdxSet};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::work_queue::WorkQueue;
use std::borrow::Borrow;
use std::fmt;
use std::io;
-use std::mem;
use std::path::PathBuf;
use std::usize;
impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation
{
fn propagate(&mut self) {
- let mut temp = IdxSetBuf::new_empty(self.flow_state.sets.bits_per_block);
+ let mut temp = IdxSet::new_empty(self.flow_state.sets.bits_per_block);
let mut propcx = PropagationContext {
builder: self,
};
}
}
-/// Maps each block to a set of bits
-#[derive(Clone, Debug)]
-pub(crate) struct Bits<E:Idx> {
- bits: IdxSetBuf<E>,
-}
-
-impl<E:Idx> Bits<E> {
- fn new(bits: IdxSetBuf<E>) -> Self {
- Bits { bits: bits }
- }
-}
-
/// DataflowResultsConsumer abstracts over walking the MIR with some
/// already constructed dataflow results.
///
analysis: &T,
result: &DataflowResults<T>,
mir: &Mir<'tcx>)
- -> IdxSetBuf<T::Idx> {
+ -> IdxSet<T::Idx> {
let mut on_entry = result.sets().on_entry_set_for(loc.block.index()).to_owned();
let mut kill_set = on_entry.to_hybrid();
let mut gen_set = kill_set.clone();
pub(crate) fn interpret_hybrid_set<'c, P>(&self,
o: &'c O,
- set: &HybridIdxSetBuf<O::Idx>,
+ set: &HybridIdxSet<O::Idx>,
render_idx: &P)
-> Vec<DebugFormatted>
where P: Fn(&O, O::Idx) -> DebugFormatted
/// Analysis bitwidth for each block.
bits_per_block: usize,
- /// Number of words associated with each block entry
- /// equal to bits_per_block / (mem::size_of::<Word> * 8), rounded up.
- words_per_block: usize,
-
/// For each block, bits valid on entry to the block.
- on_entry_sets: Bits<E>,
+ on_entry_sets: Vec<IdxSet<E>>,
/// For each block, bits generated by executing the statements in
/// the block. (For comparison, the Terminator for each block is
/// handled in a flow-specific manner during propagation.)
- gen_sets: Vec<HybridIdxSetBuf<E>>,
+ gen_sets: Vec<HybridIdxSet<E>>,
/// For each block, bits killed by executing the statements in the
/// block. (For comparison, the Terminator for each block is
/// handled in a flow-specific manner during propagation.)
- kill_sets: Vec<HybridIdxSetBuf<E>>,
+ kill_sets: Vec<HybridIdxSet<E>>,
}
/// Triple of sets associated with a given block.
/// Bits that are set to 1 by the time we exit the given block. Hybrid
/// because it usually contains only 0 or 1 elements.
- pub(crate) gen_set: &'a mut HybridIdxSetBuf<E>,
+ pub(crate) gen_set: &'a mut HybridIdxSet<E>,
/// Bits that are set to 0 by the time we exit the given block. Hybrid
/// because it usually contains only 0 or 1 elements.
- pub(crate) kill_set: &'a mut HybridIdxSetBuf<E>,
+ pub(crate) kill_set: &'a mut HybridIdxSet<E>,
}
impl<'a, E:Idx> BlockSets<'a, E> {
impl<E:Idx> AllSets<E> {
pub fn bits_per_block(&self) -> usize { self.bits_per_block }
pub fn for_block(&mut self, block_idx: usize) -> BlockSets<E> {
- let offset = self.words_per_block * block_idx;
- let range = E::new(offset)..E::new(offset + self.words_per_block);
BlockSets {
- on_entry: self.on_entry_sets.bits.range_mut(&range),
+ on_entry: &mut self.on_entry_sets[block_idx],
gen_set: &mut self.gen_sets[block_idx],
kill_set: &mut self.kill_sets[block_idx],
}
}
pub fn on_entry_set_for(&self, block_idx: usize) -> &IdxSet<E> {
- let offset = self.words_per_block * block_idx;
- let range = E::new(offset)..E::new(offset + self.words_per_block);
- self.on_entry_sets.bits.range(&range)
+ &self.on_entry_sets[block_idx]
}
- pub fn gen_set_for(&self, block_idx: usize) -> &HybridIdxSetBuf<E> {
+ pub fn gen_set_for(&self, block_idx: usize) -> &HybridIdxSet<E> {
&self.gen_sets[block_idx]
}
- pub fn kill_set_for(&self, block_idx: usize) -> &HybridIdxSetBuf<E> {
+ pub fn kill_set_for(&self, block_idx: usize) -> &HybridIdxSet<E> {
&self.kill_sets[block_idx]
}
}
/// `sets.on_entry` to that local clone into `statement_effect` and
/// `terminator_effect`).
///
- /// When its false, no local clone is constucted; instead a
+ /// When it's false, no local clone is constructed; instead a
/// reference directly into `on_entry` is passed along via
/// `sets.on_entry` instead, which represents the flow state at
/// the block's start, not necessarily the state immediately prior
dead_unwinds: &'a IdxSet<mir::BasicBlock>,
denotation: D) -> Self where D: InitialFlow {
let bits_per_block = denotation.bits_per_block();
- let bits_per_word = mem::size_of::<Word>() * 8;
- let words_per_block = (bits_per_block + bits_per_word - 1) / bits_per_word;
- let bits_per_block_rounded_up = words_per_block * bits_per_word; // a multiple of word size
let num_blocks = mir.basic_blocks().len();
- let num_overall = num_blocks * bits_per_block_rounded_up;
- let on_entry = Bits::new(if D::bottom_value() {
- IdxSetBuf::new_filled(num_overall)
+ let on_entry_sets = if D::bottom_value() {
+ vec![IdxSet::new_filled(bits_per_block); num_blocks]
} else {
- IdxSetBuf::new_empty(num_overall)
- });
- let empties = vec![HybridIdxSetBuf::new_empty(bits_per_block); num_blocks];
+ vec![IdxSet::new_empty(bits_per_block); num_blocks]
+ };
+ let gen_sets = vec![HybridIdxSet::new_empty(bits_per_block); num_blocks];
+ let kill_sets = gen_sets.clone();
DataflowAnalysis {
mir,
flow_state: DataflowState {
sets: AllSets {
bits_per_block,
- words_per_block,
- on_entry_sets: on_entry,
- gen_sets: empties.clone(),
- kill_sets: empties,
+ on_entry_sets,
+ gen_sets,
+ kill_sets,
},
operator: denotation,
}
dirty_queue.insert(bb);
}
}
-
}
let tcx = self.builder.tcx;
let place_ty = proj.base.ty(mir, tcx).to_ty(tcx);
match place_ty.sty {
- ty::TyRef(..) | ty::TyRawPtr(..) =>
+ ty::Ref(..) | ty::RawPtr(..) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
BorrowedContent { target_place: place.clone() })),
- ty::TyAdt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
+ ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
return Err(MoveError::cannot_move_out_of(self.loc,
InteriorOfTypeWithDestructor {
container_ty: place_ty
})),
// move out of union - always move the entire union
- ty::TyAdt(adt, _) if adt.is_union() =>
+ ty::Adt(adt, _) if adt.is_union() =>
return Err(MoveError::UnionMove { path: base }),
- ty::TySlice(_) =>
+ ty::Slice(_) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfSliceOrArray {
_ => false
},
})),
- ty::TyArray(..) => match proj.elem {
+ ty::Array(..) => match proj.elem {
ProjectionElem::Index(..) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
hir::ExprKind::AddrOf(mutbl, ref expr) => {
let region = match expr_ty.sty {
- ty::TyRef(r, _, _) => r,
+ ty::Ref(r, _, _) => r,
_ => span_bug!(expr.span, "type of & not region"),
};
ExprKind::Borrow {
hir::ExprKind::Struct(ref qpath, ref fields, ref base) => {
match expr_ty.sty {
- ty::TyAdt(adt, substs) => {
+ ty::Adt(adt, substs) => {
match adt.adt_kind() {
AdtKind::Struct | AdtKind::Union => {
ExprKind::Adt {
hir::ExprKind::Closure(..) => {
let closure_ty = cx.tables().expr_ty(expr);
let (def_id, substs, movability) = match closure_ty.sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
- ty::TyGenerator(def_id, substs, movability) => {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
+ ty::Generator(def_id, substs, movability) => {
(def_id, UpvarSubsts::Generator(substs), Some(movability))
}
_ => {
match cx.tables().node_id_to_type(expr.hir_id).sty {
// A unit struct/variant which is used as a value.
// We return a completely different ExprKind here to account for this special case.
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
ExprKind::Adt {
adt_def,
variant_index: adt_def.variant_index_with_id(def_id),
});
let region = cx.tcx.mk_region(region);
- let self_expr = if let ty::TyClosure(_, closure_substs) = closure_ty.sty {
+ let self_expr = if let ty::Closure(_, closure_substs) = closure_ty.sty {
match cx.infcx.closure_kind(closure_def_id, closure_substs).unwrap() {
ty::ClosureKind::Fn => {
let ref_closure_ty = cx.tcx.mk_ref(region,
// same region and mutability as the receiver. This holds for
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
let (region, mutbl) = match recv_ty.sty {
- ty::TyRef(region, _, mutbl) => (region, mutbl),
+ ty::Ref(region, _, mutbl) => (region, mutbl),
_ => span_bug!(expr.span, "overloaded_place: receiver is not a reference"),
};
let ref_ty = cx.tcx.mk_ref(region, ty::TypeAndMut {
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
- let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx);
- ConstValue::from_byval_value(value).unwrap()
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx)
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
}
LitKind::FloatUnsuffixed(n) => {
let fty = match ty.sty {
- ty::TyFloat(fty) => fty,
+ ty::Float(fty) => fty,
_ => bug!()
};
parse_float(n, fty)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+/// This file includes the logic for exhaustiveness and usefulness checking for
+/// pattern-matching. Specifically, given a list of patterns for a type, we can
+/// tell whether:
+/// (a) the patterns cover every possible constructor for the type [exhaustiveness]
+/// (b) each pattern is necessary [usefulness]
+///
+/// The algorithm implemented here is a modified version of the one described in:
+/// http://moscova.inria.fr/~maranget/papers/warn/index.html
+/// However, to save future implementors from reading the original paper, I'm going
+/// to summarise the algorithm here to hopefully save time and be a little clearer
+/// (without being so rigorous).
+///
+/// The core of the algorithm revolves about a "usefulness" check. In particular, we
+/// are trying to compute a predicate `U(P, p_{m + 1})` where `P` is a list of patterns
+/// of length `m` for a compound (product) type with `n` components (we refer to this as
+/// a matrix). `U(P, p_{m + 1})` represents whether, given an existing list of patterns
+/// `p_1 ..= p_m`, adding a new pattern will be "useful" (that is, cover previously-
+/// uncovered values of the type).
+///
+/// If we have this predicate, then we can easily compute both exhaustiveness of an
+/// entire set of patterns and the individual usefulness of each one.
+/// (a) the set of patterns is exhaustive iff `U(P, _)` is false (i.e. adding a wildcard
+/// match doesn't increase the number of values we're matching)
+/// (b) a pattern `p_i` is not useful if `U(P[0..=(i-1)], p_i)` is false (i.e. adding a
+/// pattern to those that have come before it doesn't increase the number of values
+/// we're matching).
+///
+/// For example, say we have the following:
+/// ```
+/// // x: (Option<bool>, Result<()>)
+/// match x {
+/// (Some(true), _) => {}
+/// (None, Err(())) => {}
+/// (None, Err(_)) => {}
+/// }
+/// ```
+/// Here, the matrix `P` is 3 x 2 (rows x columns).
+/// [
+/// [Some(true), _],
+/// [None, Err(())],
+/// [None, Err(_)],
+/// ]
+/// We can tell it's not exhaustive, because `U(P, _)` is true (we're not covering
+/// `[Some(false), _]`, for instance). In addition, row 3 is not useful, because
+/// all the values it covers are already covered by row 2.
+///
+/// To compute `U`, we must have two other concepts.
+/// 1. `S(c, P)` is a "specialised matrix", where `c` is a constructor (like `Some` or
+/// `None`). You can think of it as filtering `P` to just the rows whose *first* pattern
+/// can cover `c` (and expanding OR-patterns into distinct patterns), and then expanding
+/// the constructor into all of its components.
+/// The specialisation of a row vector is computed by `specialize`.
+///
+/// It is computed as follows. For each row `p_i` of P, we have four cases:
+/// 1.1. `p_(i,1) = c(r_1, .., r_a)`. Then `S(c, P)` has a corresponding row:
+/// r_1, .., r_a, p_(i,2), .., p_(i,n)
+/// 1.2. `p_(i,1) = c'(r_1, .., r_a')` where `c ≠ c'`. Then `S(c, P)` has no
+/// corresponding row.
+/// 1.3. `p_(i,1) = _`. Then `S(c, P)` has a corresponding row:
+/// _, .., _, p_(i,2), .., p_(i,n)
+/// 1.4. `p_(i,1) = r_1 | r_2`. Then `S(c, P)` has corresponding rows inlined from:
+/// S(c, (r_1, p_(i,2), .., p_(i,n)))
+/// S(c, (r_2, p_(i,2), .., p_(i,n)))
+///
+/// 2. `D(P)` is a "default matrix". This is used when we know there are missing
+/// constructor cases, but there might be existing wildcard patterns, so to check the
+/// usefulness of the matrix, we have to check all its *other* components.
+/// The default matrix is computed inline in `is_useful`.
+///
+/// It is computed as follows. For each row `p_i` of P, we have three cases:
+/// 2.1. `p_(i,1) = c(r_1, .., r_a)`. Then `D(P)` has no corresponding row.
+/// 2.2. `p_(i,1) = _`. Then `D(P)` has a corresponding row:
+/// p_(i,2), .., p_(i,n)
+/// 2.3. `p_(i,1) = r_1 | r_2`. Then `D(P)` has corresponding rows inlined from:
+/// D((r_1, p_(i,2), .., p_(i,n)))
+/// D((r_2, p_(i,2), .., p_(i,n)))
+///
+/// Note that the OR-patterns are not always used directly in Rust, but are used to derive
+/// the exhaustive integer matching rules, so they're written here for posterity.
+///
+/// The algorithm for computing `U`
+/// -------------------------------
+/// The algorithm is inductive (on the number of columns: i.e. components of tuple patterns).
+/// That means we're going to check the components from left-to-right, so the algorithm
+/// operates principally on the first component of the matrix and new pattern `p_{m + 1}`.
+/// This algorithm is realised in the `is_useful` function.
+///
+/// Base case. (`n = 0`, i.e. an empty tuple pattern)
+/// - If `P` already contains an empty pattern (i.e. if the number of patterns `m > 0`),
+/// then `U(P, p_{m + 1})` is false.
+/// - Otherwise, `P` must be empty, so `U(P, p_{m + 1})` is true.
+///
+/// Inductive step. (`n > 0`, i.e. whether there's at least one column
+/// [which may then be expanded into further columns later])
+/// We're going to match on the new pattern, `p_{m + 1}`.
+/// - If `p_{m + 1} == c(r_1, .., r_a)`, then we have a constructor pattern.
+/// Thus, the usefulness of `p_{m + 1}` can be reduced to whether it is useful when
+/// we ignore all the patterns in `P` that involve other constructors. This is where
+/// `S(c, P)` comes in:
+/// `U(P, p_{m + 1}) := U(S(c, P), S(c, p_{m + 1}))`
+/// This special case is handled in `is_useful_specialized`.
+/// - If `p_{m + 1} == _`, then we have two more cases:
+/// + All the constructors of the first component of the type exist within
+/// all the rows (after having expanded OR-patterns). In this case:
+/// `U(P, p_{m + 1}) := ∨(k ∈ constructors) U(S(k, P), S(k, p_{m + 1}))`
+/// I.e. the pattern `p_{m + 1}` is only useful when all the constructors are
+/// present *if* its later components are useful for the respective constructors
+/// covered by `p_{m + 1}` (usually a single constructor, but all in the case of `_`).
+/// + Some constructors are not present in the existing rows (after having expanded
+/// OR-patterns). However, there might be wildcard patterns (`_`) present. Thus, we
+/// are only really concerned with the other patterns leading with wildcards. This is
+/// where `D` comes in:
+/// `U(P, p_{m + 1}) := U(D(P), p_({m + 1},2), .., p_({m + 1},n))`
+/// - If `p_{m + 1} == r_1 | r_2`, then the usefulness depends on each separately:
+/// `U(P, p_{m + 1}) := U(P, (r_1, p_({m + 1},2), .., p_({m + 1},n)))
+/// || U(P, (r_2, p_({m + 1},2), .., p_({m + 1},n)))`
+///
+/// Modifications to the algorithm
+/// ------------------------------
+/// The algorithm in the paper doesn't cover some of the special cases that arise in Rust, for
+/// example uninhabited types and variable-length slice patterns. These are drawn attention to
+/// throughout the code below. I'll make a quick note here about how exhaustive integer matching
+/// is accounted for, though.
+///
+/// Exhaustive integer matching
+/// ---------------------------
+/// An integer type can be thought of as a (huge) sum type: 1 | 2 | 3 | ...
+/// So to support exhaustive integer matching, we can make use of the logic in the paper for
+/// OR-patterns. However, we obviously can't just treat ranges x..=y as individual sums, because
+/// they are likely gigantic. So we instead treat ranges as constructors of the integers. This means
+/// that we have a constructor *of* constructors (the integers themselves). We then need to work
+/// through all the inductive step rules above, deriving how the ranges would be treated as
+/// OR-patterns, and making sure that they're treated in the same way even when they're ranges.
+/// There are really only four special cases here:
+/// - When we match on a constructor that's actually a range, we have to treat it as if we would
+/// an OR-pattern.
+/// + It turns out that we can simply extend the case for single-value patterns in
+/// `specialize` to either be *equal* to a value constructor, or *contained within* a range
+/// constructor.
+/// + When the pattern itself is a range, you just want to tell whether any of the values in
+/// the pattern range coincide with values in the constructor range, which is precisely
+/// intersection.
+/// Since when encountering a range pattern for a value constructor, we also use inclusion, it
+/// means that whenever the constructor is a value/range and the pattern is also a value/range,
+/// we can simply use intersection to test usefulness.
+/// - When we're testing for usefulness of a pattern and the pattern's first component is a
+/// wildcard.
+/// + If all the constructors appear in the matrix, we have a slight complication. By default,
+/// the behaviour (i.e. a disjunction over specialised matrices for each constructor) is
+/// invalid, because we want a disjunction over every *integer* in each range, not just a
+/// disjunction over every range. This is a bit more tricky to deal with: essentially we need
+/// to form equivalence classes of subranges of the constructor range for which the behaviour
+/// of the matrix `P` and new pattern `p_{m + 1}` are the same. This is described in more
+/// detail in `split_grouped_constructors`.
+/// + If some constructors are missing from the matrix, it turns out we don't need to do
+/// anything special (because we know none of the integers are actually wildcards: i.e. we
+/// can't span wildcards using ranges).
+
use self::Constructor::*;
use self::Usefulness::*;
use self::WitnessPreference::*;
use rustc::hir::def_id::DefId;
use rustc::hir::RangeEnd;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::layout::{Integer, IntegerExt};
use rustc::mir::Field;
use rustc::mir::interpret::ConstValue;
use rustc::util::common::ErrorReported;
+use syntax::attr::{SignedInt, UnsignedInt};
use syntax_pos::{Span, DUMMY_SP};
use arena::TypedArena;
-use std::cmp::{self, Ordering};
+use std::cmp::{self, Ordering, min, max};
use std::fmt;
use std::iter::{FromIterator, IntoIterator};
+use std::ops::RangeInclusive;
+use std::u128;
pub fn expand_pattern<'a, 'tcx>(cx: &MatchCheckCtxt<'a, 'tcx>, pat: Pattern<'tcx>)
-> &'a Pattern<'tcx>
impl<'tcx> PatternFolder<'tcx> for LiteralExpander {
fn fold_pattern(&mut self, pat: &Pattern<'tcx>) -> Pattern<'tcx> {
match (&pat.ty.sty, &*pat.kind) {
- (&ty::TyRef(_, rty, _), &PatternKind::Constant { ref value }) => {
+ (&ty::Ref(_, rty, _), &PatternKind::Constant { ref value }) => {
Pattern {
ty: pat.ty,
span: pat.span,
}
}
-//NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv
pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// The module in which the match occurs. This is necessary for
tcx,
module,
pattern_arena: &pattern_arena,
- byte_array_map: FxHashMap(),
+ byte_array_map: FxHashMap::default(),
})
}
fn is_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(adt_def, ..) => adt_def.is_enum() && adt_def.is_non_exhaustive(),
+ ty::Adt(adt_def, ..) => adt_def.is_enum() && adt_def.is_non_exhaustive(),
_ => false,
}
}
fn is_local(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(adt_def, ..) => adt_def.did.is_local(),
+ ty::Adt(adt_def, ..) => adt_def.did.is_local(),
_ => false,
}
}
}
}
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub enum Usefulness<'tcx> {
Useful,
UsefulWithWitness(Vec<Witness<'tcx>>),
}
}
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
pub enum WitnessPreference {
ConstructWitness,
LeaveOutWitness
max_slice_length: u64,
}
-/// A stack of patterns in reverse order of construction
-#[derive(Clone)]
+/// A witness of non-exhaustiveness for error reporting, represented
+/// as a list of patterns (in reverse order of construction) with
+/// wildcards inside to represent elements that can take any inhabitant
+/// of the type as a value.
+///
+/// A witness against a list of patterns should have the same types
+/// and length as the pattern matched against. Because Rust `match`
+/// is always against a single pattern, at the end the witness will
+/// have length 1, but in the middle of the algorithm, it can contain
+/// multiple patterns.
+///
+/// For example, if we are constructing a witness for the match against
+/// ```
+/// struct Pair(Option<(u32, u32)>, bool);
+///
+/// match (p: Pair) {
+/// Pair(None, _) => {}
+/// Pair(_, false) => {}
+/// }
+/// ```
+///
+/// We'll perform the following steps:
+/// 1. Start with an empty witness
+/// `Witness(vec![])`
+/// 2. Push a witness `Some(_)` against the `None`
+/// `Witness(vec![Some(_)])`
+/// 3. Push a witness `true` against the `false`
+/// `Witness(vec![Some(_), true])`
+/// 4. Apply the `Pair` constructor to the witnesses
+/// `Witness(vec![Pair(Some(_), true)])`
+///
+/// The final `Pair(Some(_), true)` is then the resulting witness.
+#[derive(Clone, Debug)]
pub struct Witness<'tcx>(Vec<Pattern<'tcx>>);
impl<'tcx> Witness<'tcx> {
let arity = constructor_arity(cx, ctor, ty);
let pat = {
let len = self.0.len() as u64;
- let mut pats = self.0.drain((len-arity) as usize..).rev();
+ let mut pats = self.0.drain((len - arity) as usize..).rev();
match ty.sty {
- ty::TyAdt(..) |
- ty::TyTuple(..) => {
+ ty::Adt(..) |
+ ty::Tuple(..) => {
let pats = pats.enumerate().map(|(i, p)| {
FieldPattern {
field: Field::new(i),
}
}).collect();
- if let ty::TyAdt(adt, substs) = ty.sty {
+ if let ty::Adt(adt, substs) = ty.sty {
if adt.is_enum() {
PatternKind::Variant {
adt_def: adt,
}
}
- ty::TyRef(..) => {
+ ty::Ref(..) => {
PatternKind::Deref { subpattern: pats.nth(0).unwrap() }
}
- ty::TySlice(_) | ty::TyArray(..) => {
+ ty::Slice(_) | ty::Array(..) => {
PatternKind::Slice {
prefix: pats.collect(),
slice: None,
_ => {
match *ctor {
ConstantValue(value) => PatternKind::Constant { value },
+ ConstantRange(lo, hi, end) => PatternKind::Range { lo, hi, end },
_ => PatternKind::Wild,
}
}
/// but is instead bounded by the maximum fixed length of slice patterns in
/// the column of patterns being analyzed.
///
-/// This intentionally does not list ConstantValue specializations for
-/// non-booleans, because we currently assume that there is always a
-/// "non-standard constant" that matches. See issue #12483.
-///
/// We make sure to omit constructors that are statically impossible. eg for
/// Option<!> we do not include Some(_) in the returned list of constructors.
fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>,
-> Vec<Constructor<'tcx>>
{
debug!("all_constructors({:?})", pcx.ty);
- match pcx.ty.sty {
- ty::TyBool => {
+ let exhaustive_integer_patterns = cx.tcx.features().exhaustive_integer_patterns;
+ let ctors = match pcx.ty.sty {
+ ty::Bool => {
[true, false].iter().map(|&b| {
ConstantValue(ty::Const::from_bool(cx.tcx, b))
}).collect()
}
- ty::TyArray(ref sub_ty, len) if len.assert_usize(cx.tcx).is_some() => {
+ ty::Array(ref sub_ty, len) if len.assert_usize(cx.tcx).is_some() => {
let len = len.unwrap_usize(cx.tcx);
if len != 0 && cx.is_uninhabited(sub_ty) {
vec![]
}
}
// Treat arrays of a constant but unknown length like slices.
- ty::TyArray(ref sub_ty, _) |
- ty::TySlice(ref sub_ty) => {
+ ty::Array(ref sub_ty, _) |
+ ty::Slice(ref sub_ty) => {
if cx.is_uninhabited(sub_ty) {
vec![Slice(0)]
} else {
(0..pcx.max_slice_length+1).map(|length| Slice(length)).collect()
}
}
- ty::TyAdt(def, substs) if def.is_enum() => {
+ ty::Adt(def, substs) if def.is_enum() => {
def.variants.iter()
.filter(|v| !cx.is_variant_uninhabited(v, substs))
.map(|v| Variant(v.did))
.collect()
}
+ ty::Char if exhaustive_integer_patterns => {
+ let endpoint = |c: char| {
+ let ty = ty::ParamEnv::empty().and(cx.tcx.types.char);
+ ty::Const::from_bits(cx.tcx, c as u128, ty)
+ };
+ vec![
+ // The valid Unicode Scalar Value ranges.
+ ConstantRange(endpoint('\u{0000}'), endpoint('\u{D7FF}'), RangeEnd::Included),
+ ConstantRange(endpoint('\u{E000}'), endpoint('\u{10FFFF}'), RangeEnd::Included),
+ ]
+ }
+ ty::Int(ity) if exhaustive_integer_patterns => {
+ // FIXME(49937): refactor these bit manipulations into interpret.
+ let bits = Integer::from_attr(cx.tcx, SignedInt(ity)).size().bits() as u128;
+ let min = 1u128 << (bits - 1);
+ let max = (1u128 << (bits - 1)) - 1;
+ let ty = ty::ParamEnv::empty().and(pcx.ty);
+ vec![ConstantRange(ty::Const::from_bits(cx.tcx, min as u128, ty),
+ ty::Const::from_bits(cx.tcx, max as u128, ty),
+ RangeEnd::Included)]
+ }
+ ty::Uint(uty) if exhaustive_integer_patterns => {
+ // FIXME(49937): refactor these bit manipulations into interpret.
+ let bits = Integer::from_attr(cx.tcx, UnsignedInt(uty)).size().bits() as u128;
+ let max = !0u128 >> (128 - bits);
+ let ty = ty::ParamEnv::empty().and(pcx.ty);
+ vec![ConstantRange(ty::Const::from_bits(cx.tcx, 0, ty),
+ ty::Const::from_bits(cx.tcx, max, ty),
+ RangeEnd::Included)]
+ }
_ => {
if cx.is_uninhabited(pcx.ty) {
vec![]
vec![Single]
}
}
- }
+ };
+ ctors
}
fn max_slice_length<'p, 'a: 'p, 'tcx: 'a, I>(
// `[true, ..]`
// `[.., false]`
// Then any slice of length ≥1 that matches one of these two
- // patterns can be be trivially turned to a slice of any
+ // patterns can be trivially turned to a slice of any
// other length ≥1 that matches them and vice-versa - for
// but the slice from length 2 `[false, true]` that matches neither
// of these patterns can't be turned to a slice from length 1 that
cmp::max(max_fixed_len + 1, max_prefix_len + max_suffix_len)
}
+/// An inclusive interval, used for precise integer exhaustiveness checking.
+/// `IntRange`s always store a contiguous range. This means that values are
+/// encoded such that `0` encodes the minimum value for the integer,
+/// regardless of the signedness.
+/// For example, the pattern `-128...127i8` is encoded as `0..=255`.
+/// This makes comparisons and arithmetic on interval endpoints much more
+/// straightforward. See `signed_bias` for details.
+///
+/// `IntRange` is never used to encode an empty range or a "range" that wraps
+/// around the (offset) space: i.e. `range.lo <= range.hi`.
+#[derive(Clone)]
+struct IntRange<'tcx> {
+ pub range: RangeInclusive<u128>,
+ pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> IntRange<'tcx> {
+ fn from_ctor(tcx: TyCtxt<'_, 'tcx, 'tcx>,
+ ctor: &Constructor<'tcx>)
+ -> Option<IntRange<'tcx>> {
+ match ctor {
+ ConstantRange(lo, hi, end) => {
+ assert_eq!(lo.ty, hi.ty);
+ let ty = lo.ty;
+ let env_ty = ty::ParamEnv::empty().and(ty);
+ if let Some(lo) = lo.assert_bits(tcx, env_ty) {
+ if let Some(hi) = hi.assert_bits(tcx, env_ty) {
+ // Perform a shift if the underlying types are signed,
+ // which makes the interval arithmetic simpler.
+ let bias = IntRange::signed_bias(tcx, ty);
+ let (lo, hi) = (lo ^ bias, hi ^ bias);
+ // Make sure the interval is well-formed.
+ return if lo > hi || lo == hi && *end == RangeEnd::Excluded {
+ None
+ } else {
+ let offset = (*end == RangeEnd::Excluded) as u128;
+ Some(IntRange { range: lo..=(hi - offset), ty })
+ };
+ }
+ }
+ None
+ }
+ ConstantValue(val) => {
+ let ty = val.ty;
+ if let Some(val) = val.assert_bits(tcx, ty::ParamEnv::empty().and(ty)) {
+ let bias = IntRange::signed_bias(tcx, ty);
+ let val = val ^ bias;
+ Some(IntRange { range: val..=val, ty })
+ } else {
+ None
+ }
+ }
+ Single | Variant(_) | Slice(_) => {
+ None
+ }
+ }
+ }
+
+ fn from_pat(tcx: TyCtxt<'_, 'tcx, 'tcx>,
+ pat: &Pattern<'tcx>)
+ -> Option<IntRange<'tcx>> {
+ Self::from_ctor(tcx, &match pat.kind {
+ box PatternKind::Constant { value } => ConstantValue(value),
+ box PatternKind::Range { lo, hi, end } => ConstantRange(lo, hi, end),
+ _ => return None,
+ })
+ }
+
+ // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
+ fn signed_bias(tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>) -> u128 {
+ match ty.sty {
+ ty::Int(ity) => {
+ let bits = Integer::from_attr(tcx, SignedInt(ity)).size().bits() as u128;
+ 1u128 << (bits - 1)
+ }
+ _ => 0
+ }
+ }
+
+ /// Convert a `RangeInclusive` to a `ConstantValue` or inclusive `ConstantRange`.
+ fn range_to_ctor(
+ tcx: TyCtxt<'_, 'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ r: RangeInclusive<u128>,
+ ) -> Constructor<'tcx> {
+ let bias = IntRange::signed_bias(tcx, ty);
+ let ty = ty::ParamEnv::empty().and(ty);
+ let (lo, hi) = r.into_inner();
+ if lo == hi {
+ ConstantValue(ty::Const::from_bits(tcx, lo ^ bias, ty))
+ } else {
+ ConstantRange(ty::Const::from_bits(tcx, lo ^ bias, ty),
+ ty::Const::from_bits(tcx, hi ^ bias, ty),
+ RangeEnd::Included)
+ }
+ }
+
+ /// Return a collection of ranges that spans the values covered by `ranges`, subtracted
+ /// by the values covered by `self`: i.e. `ranges \ self` (in set notation).
+ fn subtract_from(self,
+ tcx: TyCtxt<'_, 'tcx, 'tcx>,
+ ranges: Vec<Constructor<'tcx>>)
+ -> Vec<Constructor<'tcx>> {
+ let ranges = ranges.into_iter().filter_map(|r| {
+ IntRange::from_ctor(tcx, &r).map(|i| i.range)
+ });
+ let mut remaining_ranges = vec![];
+ let ty = self.ty;
+ let (lo, hi) = self.range.into_inner();
+ for subrange in ranges {
+ let (subrange_lo, subrange_hi) = subrange.into_inner();
+ if lo > subrange_hi || subrange_lo > hi {
+ // The pattern doesn't intersect with the subrange at all,
+ // so the subrange remains untouched.
+ remaining_ranges.push(Self::range_to_ctor(tcx, ty, subrange_lo..=subrange_hi));
+ } else {
+ if lo > subrange_lo {
+ // The pattern intersects an upper section of the
+ // subrange, so a lower section will remain.
+ remaining_ranges.push(Self::range_to_ctor(tcx, ty, subrange_lo..=(lo - 1)));
+ }
+ if hi < subrange_hi {
+ // The pattern intersects a lower section of the
+ // subrange, so an upper section will remain.
+ remaining_ranges.push(Self::range_to_ctor(tcx, ty, (hi + 1)..=subrange_hi));
+ }
+ }
+ }
+ remaining_ranges
+ }
+
+ fn intersection(&self, other: &Self) -> Option<Self> {
+ let ty = self.ty;
+ let (lo, hi) = (*self.range.start(), *self.range.end());
+ let (other_lo, other_hi) = (*other.range.start(), *other.range.end());
+ if lo <= other_hi && other_lo <= hi {
+ Some(IntRange { range: max(lo, other_lo)..=min(hi, other_hi), ty })
+ } else {
+ None
+ }
+ }
+}
+
+// Return a set of constructors equivalent to `all_ctors \ used_ctors`.
+fn compute_missing_ctors<'a, 'tcx: 'a>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ all_ctors: &Vec<Constructor<'tcx>>,
+ used_ctors: &Vec<Constructor<'tcx>>,
+) -> Vec<Constructor<'tcx>> {
+ let mut missing_ctors = vec![];
+
+ for req_ctor in all_ctors {
+ let mut refined_ctors = vec![req_ctor.clone()];
+ for used_ctor in used_ctors {
+ if used_ctor == req_ctor {
+ // If a constructor appears in a `match` arm, we can
+ // eliminate it straight away.
+ refined_ctors = vec![]
+ } else if tcx.features().exhaustive_integer_patterns {
+ if let Some(interval) = IntRange::from_ctor(tcx, used_ctor) {
+ // Refine the required constructors for the type by subtracting
+ // the range defined by the current constructor pattern.
+ refined_ctors = interval.subtract_from(tcx, refined_ctors);
+ }
+ }
+
+ // If the constructor patterns that have been considered so far
+            // already cover the entire range of values, then we know the
+ // constructor is not missing, and we can move on to the next one.
+ if refined_ctors.is_empty() {
+ break;
+ }
+ }
+ // If a constructor has not been matched, then it is missing.
+ // We add `refined_ctors` instead of `req_ctor`, because then we can
+ // provide more detailed error information about precisely which
+ // ranges have been omitted.
+ missing_ctors.extend(refined_ctors);
+ }
+
+ missing_ctors
+}
+
/// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html
/// The algorithm from the paper has been modified to correctly handle empty
/// types. The changes are:
// FIXME: this might lead to "unstable" behavior with macro hygiene
// introducing uninhabited patterns for inaccessible fields. We
// need to figure out how to model that.
- ty: rows.iter().map(|r| r[0].ty).find(|ty| !ty.references_error())
- .unwrap_or(v[0].ty),
+ ty: rows.iter().map(|r| r[0].ty).find(|ty| !ty.references_error()).unwrap_or(v[0].ty),
max_slice_length: max_slice_length(cx, rows.iter().map(|r| r[0]).chain(Some(v[0])))
};
if let Some(constructors) = pat_constructors(cx, v[0], pcx) {
debug!("is_useful - expanding constructors: {:#?}", constructors);
- constructors.into_iter().map(|c|
+ split_grouped_constructors(cx.tcx, constructors, matrix, pcx.ty).into_iter().map(|c|
is_useful_specialized(cx, matrix, v, c.clone(), pcx.ty, witness)
).find(|result| result.is_useful()).unwrap_or(NotUseful)
} else {
pat_constructors(cx, row[0], pcx).unwrap_or(vec![])
}).collect();
debug!("used_ctors = {:#?}", used_ctors);
+ // `all_ctors` are all the constructors for the given type, which
+ // should all be represented (or caught with the wild pattern `_`).
let all_ctors = all_constructors(cx, pcx);
debug!("all_ctors = {:#?}", all_ctors);
- let missing_ctors: Vec<Constructor> = all_ctors.iter().filter(|c| {
- !used_ctors.contains(*c)
- }).cloned().collect();
// `missing_ctors` is the set of constructors from the same type as the
// first column of `matrix` that are matched only by wildcard patterns
// feature flag is not present, so this is only
// needed for that case.
- let is_privately_empty =
- all_ctors.is_empty() && !cx.is_uninhabited(pcx.ty);
- let is_declared_nonexhaustive =
- cx.is_non_exhaustive_enum(pcx.ty) && !cx.is_local(pcx.ty);
+ // Find those constructors that are not matched by any non-wildcard patterns in the
+ // current column.
+ let missing_ctors = compute_missing_ctors(cx.tcx, &all_ctors, &used_ctors);
+
+ let is_privately_empty = all_ctors.is_empty() && !cx.is_uninhabited(pcx.ty);
+ let is_declared_nonexhaustive = cx.is_non_exhaustive_enum(pcx.ty) && !cx.is_local(pcx.ty);
debug!("missing_ctors={:#?} is_privately_empty={:#?} is_declared_nonexhaustive={:#?}",
missing_ctors, is_privately_empty, is_declared_nonexhaustive);
let is_non_exhaustive = is_privately_empty || is_declared_nonexhaustive;
if missing_ctors.is_empty() && !is_non_exhaustive {
- all_ctors.into_iter().map(|c| {
+ split_grouped_constructors(cx.tcx, all_ctors, matrix, pcx.ty).into_iter().map(|c| {
is_useful_specialized(cx, matrix, v, c.clone(), pcx.ty, witness)
}).find(|result| result.is_useful()).unwrap_or(NotUseful)
} else {
// `used_ctors` is empty.
let new_witnesses = if is_non_exhaustive || used_ctors.is_empty() {
// All constructors are unused. Add wild patterns
- // rather than each individual constructor
+ // rather than each individual constructor.
pats.into_iter().map(|mut witness| {
witness.0.push(Pattern {
ty: pcx.ty,
} else {
pats.into_iter().flat_map(|witness| {
missing_ctors.iter().map(move |ctor| {
+ // Extends the witness with a "wild" version of this
+ // constructor, that matches everything that can be built with
+ // it. For example, if `ctor` is a `Constructor::Variant` for
+ // `Option::Some`, this pushes the witness for `Some(_)`.
witness.clone().push_wild_constructor(cx, ctor, pcx.ty)
})
}).collect()
}
}
+/// A shorthand for the `U(S(c, P), S(c, q))` operation from the paper. I.e. `is_useful` applied
+/// to the specialised version of both the pattern matrix `P` and the new pattern `q`.
fn is_useful_specialized<'p, 'a:'p, 'tcx: 'a>(
cx: &mut MatchCheckCtxt<'a, 'tcx>,
&Matrix(ref m): &Matrix<'p, 'tcx>,
v: &[&'p Pattern<'tcx>],
ctor: Constructor<'tcx>,
lty: Ty<'tcx>,
- witness: WitnessPreference) -> Usefulness<'tcx>
-{
+ witness: WitnessPreference,
+) -> Usefulness<'tcx> {
debug!("is_useful_specialized({:#?}, {:#?}, {:?})", v, ctor, lty);
let sub_pat_tys = constructor_sub_pattern_tys(cx, &ctor, lty);
let wild_patterns_owned: Vec<_> = sub_pat_tys.iter().map(|ty| {
.collect()
),
result => result
- },
+ }
None => NotUseful
}
}
/// Slice patterns, however, can match slices of different lengths. For instance,
/// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on.
///
-/// Returns None in case of a catch-all, which can't be specialized.
+/// Returns `None` in case of a catch-all, which can't be specialized.
fn pat_constructors<'tcx>(cx: &mut MatchCheckCtxt,
pat: &Pattern<'tcx>,
pcx: PatternContext)
-> Option<Vec<Constructor<'tcx>>>
{
match *pat.kind {
- PatternKind::Binding { .. } | PatternKind::Wild =>
- None,
- PatternKind::Leaf { .. } | PatternKind::Deref { .. } =>
- Some(vec![Single]),
- PatternKind::Variant { adt_def, variant_index, .. } =>
- Some(vec![Variant(adt_def.variants[variant_index].did)]),
- PatternKind::Constant { value } =>
- Some(vec![ConstantValue(value)]),
- PatternKind::Range { lo, hi, end } =>
- Some(vec![ConstantRange(lo, hi, end)]),
+ PatternKind::Binding { .. } | PatternKind::Wild => None,
+ PatternKind::Leaf { .. } | PatternKind::Deref { .. } => Some(vec![Single]),
+ PatternKind::Variant { adt_def, variant_index, .. } => {
+ Some(vec![Variant(adt_def.variants[variant_index].did)])
+ }
+ PatternKind::Constant { value } => Some(vec![ConstantValue(value)]),
+ PatternKind::Range { lo, hi, end } => Some(vec![ConstantRange(lo, hi, end)]),
PatternKind::Array { .. } => match pcx.ty.sty {
- ty::TyArray(_, length) => Some(vec![
+ ty::Array(_, length) => Some(vec![
Slice(length.unwrap_usize(cx.tcx))
]),
_ => span_bug!(pat.span, "bad ty {:?} for array pattern", pcx.ty)
fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 {
debug!("constructor_arity({:#?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.len() as u64,
- ty::TySlice(..) | ty::TyArray(..) => match *ctor {
+ ty::Tuple(ref fs) => fs.len() as u64,
+ ty::Slice(..) | ty::Array(..) => match *ctor {
Slice(length) => length,
ConstantValue(_) => 0,
_ => bug!("bad slice pattern {:?} {:?}", ctor, ty)
},
- ty::TyRef(..) => 1,
- ty::TyAdt(adt, _) => {
+ ty::Ref(..) => 1,
+ ty::Adt(adt, _) => {
adt.variants[ctor.variant_index_for_adt(adt)].fields.len() as u64
}
_ => 0
{
debug!("constructor_sub_pattern_tys({:#?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.into_iter().map(|t| *t).collect(),
- ty::TySlice(ty) | ty::TyArray(ty, _) => match *ctor {
+ ty::Tuple(ref fs) => fs.into_iter().map(|t| *t).collect(),
+ ty::Slice(ty) | ty::Array(ty, _) => match *ctor {
Slice(length) => (0..length).map(|_| ty).collect(),
ConstantValue(_) => vec![],
_ => bug!("bad slice pattern {:?} {:?}", ctor, ty)
},
- ty::TyRef(_, rty, _) => vec![rty],
- ty::TyAdt(adt, substs) => {
+ ty::Ref(_, rty, _) => vec![rty],
+ ty::Adt(adt, substs) => {
if adt.is_box() {
// Use T as the sub pattern type of Box<T>.
vec![substs.type_at(0)]
Ok(true)
}
+// Whether to evaluate a constructor using exhaustive integer matching. This is true if the
+// constructor is a range or constant with an integer type.
+fn should_treat_range_exhaustively(tcx: TyCtxt<'_, 'tcx, 'tcx>, ctor: &Constructor<'tcx>) -> bool {
+ if tcx.features().exhaustive_integer_patterns {
+ if let ConstantValue(value) | ConstantRange(value, _, _) = ctor {
+ if let ty::Char | ty::Int(_) | ty::Uint(_) = value.ty.sty {
+ return true;
+ }
+ }
+ }
+ false
+}
+
+/// For exhaustive integer matching, some constructors are grouped within other constructors
+/// (namely integer typed values are grouped within ranges). However, when specialising these
+/// constructors, we want to be specialising for the underlying constructors (the integers), not
+/// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
+/// mean creating a separate constructor for every single value in the range, which is clearly
+/// impractical. However, observe that for some ranges of integers, the specialisation will be
+/// identical across all values in that range (i.e. there are equivalence classes of ranges of
+/// constructors based on their `is_useful_specialized` outcome). These classes are grouped by
+/// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
+/// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
+/// change.
+/// Our solution, therefore, is to split the range constructor into subranges at every single point
+/// the group of intersecting patterns changes (using the method described below).
+/// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
+/// on actual integers. The nice thing about this is that the number of subranges is linear in the
+/// number of rows in the matrix (i.e. the number of cases in the `match` statement), so we don't
+/// need to be worried about matching over gargantuan ranges.
+///
+/// Essentially, given the first column of a matrix representing ranges, looking like the following:
+///
+/// |------| |----------| |-------| ||
+/// |-------| |-------| |----| ||
+/// |---------|
+///
+/// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
+///
+/// |--|--|||-||||--||---|||-------| |-|||| ||
+///
+/// The logic for determining how to split the ranges is fairly straightforward: we calculate
+/// boundaries for each interval range, sort them, then create constructors for each new interval
+/// between every pair of boundary points. (This essentially sums up to performing the intuitive
+/// merging operation depicted above.)
+fn split_grouped_constructors<'p, 'a: 'p, 'tcx: 'a>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ctors: Vec<Constructor<'tcx>>,
+ &Matrix(ref m): &Matrix<'p, 'tcx>,
+ ty: Ty<'tcx>,
+) -> Vec<Constructor<'tcx>> {
+ let mut split_ctors = Vec::with_capacity(ctors.len());
+
+ for ctor in ctors.into_iter() {
+ match ctor {
+ // For now, only ranges may denote groups of "subconstructors", so we only need to
+ // special-case constant ranges.
+ ConstantRange(..) if should_treat_range_exhaustively(tcx, &ctor) => {
+ // We only care about finding all the subranges within the range of the constructor
+ // range. Anything else is irrelevant, because it is guaranteed to result in
+ // `NotUseful`, which is the default case anyway, and can be ignored.
+ let ctor_range = IntRange::from_ctor(tcx, &ctor).unwrap();
+
+ /// Represents a border between 2 integers. Because the intervals spanning borders
+ /// must be able to cover every integer, we need to be able to represent
+ /// 2^128 + 1 such borders.
+ #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+ enum Border {
+ JustBefore(u128),
+ AfterMax,
+ }
+
+ // A function for extracting the borders of an integer interval.
+ fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
+ let (lo, hi) = r.range.into_inner();
+ let from = Border::JustBefore(lo);
+ let to = match hi.checked_add(1) {
+ Some(m) => Border::JustBefore(m),
+ None => Border::AfterMax,
+ };
+ vec![from, to].into_iter()
+ }
+
+ // `borders` is the set of borders between equivalence classes: each equivalence
+ // class lies between 2 borders.
+ let row_borders = m.iter()
+ .flat_map(|row| IntRange::from_pat(tcx, row[0]))
+ .flat_map(|range| ctor_range.intersection(&range))
+ .flat_map(|range| range_borders(range));
+ let ctor_borders = range_borders(ctor_range.clone());
+ let mut borders: Vec<_> = row_borders.chain(ctor_borders).collect();
+ borders.sort_unstable();
+
+ // We're going to iterate through every pair of borders, making sure that each
+ // represents an interval of nonnegative length, and convert each such interval
+ // into a constructor.
+ for IntRange { range, .. } in borders.windows(2).filter_map(|window| {
+ match (window[0], window[1]) {
+ (Border::JustBefore(n), Border::JustBefore(m)) => {
+ if n < m {
+ Some(IntRange { range: n..=(m - 1), ty })
+ } else {
+ None
+ }
+ }
+ (Border::JustBefore(n), Border::AfterMax) => {
+ Some(IntRange { range: n..=u128::MAX, ty })
+ }
+ (Border::AfterMax, _) => None,
+ }
+ }) {
+ split_ctors.push(IntRange::range_to_ctor(tcx, ty, range));
+ }
+ }
+ // Any other constructor can be used unchanged.
+ _ => split_ctors.push(ctor),
+ }
+ }
+
+ split_ctors
+}
+
+/// Check whether there exists any shared value in either `ctor` or `pat` by intersecting them.
+fn constructor_intersects_pattern<'p, 'a: 'p, 'tcx: 'a>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ ctor: &Constructor<'tcx>,
+ pat: &'p Pattern<'tcx>,
+) -> Option<Vec<&'p Pattern<'tcx>>> {
+ if should_treat_range_exhaustively(tcx, ctor) {
+ match (IntRange::from_ctor(tcx, ctor), IntRange::from_pat(tcx, pat)) {
+ (Some(ctor), Some(pat)) => {
+ ctor.intersection(&pat).map(|_| {
+ let (pat_lo, pat_hi) = pat.range.into_inner();
+ let (ctor_lo, ctor_hi) = ctor.range.into_inner();
+ assert!(pat_lo <= ctor_lo && ctor_hi <= pat_hi);
+ vec![]
+ })
+ }
+ _ => None,
+ }
+ } else {
+ // Fallback for non-ranges and ranges that involve floating-point numbers, which are not
+ // conveniently handled by `IntRange`. For these cases, the constructor may not be a range
+ // so intersection actually devolves into being covered by the pattern.
+ match constructor_covered_by_range(tcx, ctor, pat) {
+ Ok(true) => Some(vec![]),
+ Ok(false) | Err(ErrorReported) => None,
+ }
+ }
+}
+
fn constructor_covered_by_range<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
ctor: &Constructor<'tcx>,
- from: &'tcx ty::Const<'tcx>, to: &'tcx ty::Const<'tcx>,
- end: RangeEnd,
- ty: Ty<'tcx>,
+ pat: &Pattern<'tcx>,
) -> Result<bool, ErrorReported> {
+ let (from, to, end, ty) = match pat.kind {
+ box PatternKind::Constant { value } => (value, value, RangeEnd::Included, value.ty),
+ box PatternKind::Range { lo, hi, end } => (lo, hi, end, lo.ty),
+ _ => bug!("`constructor_covered_by_range` called with {:?}", pat),
+ };
trace!("constructor_covered_by_range {:#?}, {:#?}, {:#?}, {}", ctor, from, to, ty);
let cmp_from = |c_from| compare_const_vals(tcx, c_from, from, ty::ParamEnv::empty().and(ty))
.map(|res| res != Ordering::Less);
cx: &mut MatchCheckCtxt<'a, 'tcx>,
r: &[&'p Pattern<'tcx>],
constructor: &Constructor<'tcx>,
- wild_patterns: &[&'p Pattern<'tcx>])
- -> Option<Vec<&'p Pattern<'tcx>>>
-{
+ wild_patterns: &[&'p Pattern<'tcx>],
+) -> Option<Vec<&'p Pattern<'tcx>>> {
let pat = &r[0];
let head: Option<Vec<&Pattern>> = match *pat.kind {
PatternKind::Binding { .. } | PatternKind::Wild => {
Some(wild_patterns.to_owned())
- },
+ }
PatternKind::Variant { adt_def, variant_index, ref subpatterns, .. } => {
let ref variant = adt_def.variants[variant_index];
PatternKind::Leaf { ref subpatterns } => {
Some(patterns_for_variant(subpatterns, wild_patterns))
}
+
PatternKind::Deref { ref subpattern } => {
Some(vec![subpattern])
}
span_bug!(pat.span,
"unexpected const-val {:?} with ctor {:?}", value, constructor)
}
- },
+ }
_ => {
- match constructor_covered_by_range(
- cx.tcx,
- constructor, value, value, RangeEnd::Included,
- value.ty,
- ) {
- Ok(true) => Some(vec![]),
- Ok(false) => None,
- Err(ErrorReported) => None,
- }
+ // If the constructor is a:
+ // Single value: add a row if the constructor equals the pattern.
+ // Range: add a row if the constructor contains the pattern.
+ constructor_intersects_pattern(cx.tcx, constructor, pat)
}
}
}
- PatternKind::Range { lo, hi, ref end } => {
- match constructor_covered_by_range(
- cx.tcx,
- constructor, lo, hi, end.clone(), lo.ty,
- ) {
- Ok(true) => Some(vec![]),
- Ok(false) => None,
- Err(ErrorReported) => None,
- }
+ PatternKind::Range { .. } => {
+ // If the constructor is a:
+ // Single value: add a row if the pattern contains the constructor.
+ // Range: add a row if the constructor intersects the pattern.
+ constructor_intersects_pattern(cx.tcx, constructor, pat)
}
PatternKind::Array { ref prefix, ref slice, ref suffix } |
let pat_len = prefix.len() + suffix.len();
if let Some(slice_count) = wild_patterns.len().checked_sub(pat_len) {
if slice_count == 0 || slice.is_some() {
- Some(
- prefix.iter().chain(
- wild_patterns.iter().map(|p| *p)
- .skip(prefix.len())
- .take(slice_count)
- .chain(
- suffix.iter()
- )).collect())
+ Some(prefix.iter().chain(
+ wild_patterns.iter().map(|p| *p)
+ .skip(prefix.len())
+ .take(slice_count)
+ .chain(suffix.iter())
+ ).collect())
} else {
None
}
fn conservative_is_uninhabited(&self, scrutinee_ty: Ty<'tcx>) -> bool {
// "rustc-1.0-style" uncontentious uninhabitableness check
match scrutinee_ty.sty {
- ty::TyNever => true,
- ty::TyAdt(def, _) => def.variants.is_empty(),
+ ty::Never => true,
+ ty::Adt(def, _) => def.variants.is_empty(),
_ => false
}
}
self.tables);
let pattern = patcx.lower_pattern(pat);
let pattern_ty = pattern.ty;
- let pats : Matrix = vec![vec![
+ let pats: Matrix = vec![vec![
expand_pattern(cx, pattern)
]].into_iter().collect();
return true;
}
let pat_ty = cx.tables.pat_ty(p);
- if let ty::TyAdt(edef, _) = pat_ty.sty {
+ if let ty::Adt(edef, _) = pat_ty.sty {
if edef.is_enum() && edef.variants.iter().any(|variant| {
variant.name == ident.name && variant.ctor_kind == CtorKind::Const
}) {
printed_if_let_err = true;
}
}
- },
+ }
hir::MatchSource::WhileLetDesugar => {
// check which arm we're on.
pub use self::check_match::check_crate;
pub(crate) use self::check_match::check_match;
-use interpret::{const_val_field, const_variant_index, self};
+use interpret::{const_field, const_variant_index};
use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability};
-use rustc::mir::interpret::{Scalar, GlobalId, ConstValue};
+use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, sign_extend};
use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region};
use rustc::ty::subst::{Substs, Kind};
use rustc::hir::{self, PatKind, RangeEnd};
PatternKind::Variant { adt_def, variant_index, .. } => {
Some(&adt_def.variants[variant_index])
}
- _ => if let ty::TyAdt(adt, _) = self.ty.sty {
+ _ => if let ty::Adt(adt, _) = self.ty.sty {
if !adt.is_enum() {
Some(&adt.variants[0])
} else {
if let Some(variant) = variant {
write!(f, "{}", variant.name)?;
- // Only for TyAdt we can have `S {...}`,
+ // Only for Adt we can have `S {...}`,
// which we handle separately here.
if variant.ctor_kind == CtorKind::Fictive {
write!(f, " {{ ")?;
}
PatternKind::Deref { ref subpattern } => {
match self.ty.sty {
- ty::TyAdt(def, _) if def.is_box() => write!(f, "box ")?,
- ty::TyRef(_, _, mutbl) => {
+ ty::Adt(def, _) if def.is_box() => write!(f, "box ")?,
+ ty::Ref(_, _, mutbl) => {
write!(f, "&")?;
if mutbl == hir::MutMutable {
write!(f, "mut ")?;
PatternKind::Range { lo, hi, end } => {
fmt_const_val(f, lo)?;
match end {
- RangeEnd::Included => write!(f, "...")?,
+ RangeEnd::Included => write!(f, "..=")?,
RangeEnd::Excluded => write!(f, "..")?,
}
fmt_const_val(f, hi)
"lower range bound must be less than upper",
);
PatternKind::Wild
- },
- (RangeEnd::Included, None) |
- (RangeEnd::Included, Some(Ordering::Greater)) => {
+ }
+ (RangeEnd::Included, Some(Ordering::Equal)) => {
+ PatternKind::Constant { value: lo }
+ }
+ (RangeEnd::Included, Some(Ordering::Less)) => {
+ PatternKind::Range { lo, hi, end }
+ }
+ (RangeEnd::Included, _) => {
let mut err = struct_span_err!(
self.tcx.sess,
lo_expr.span,
}
err.emit();
PatternKind::Wild
- },
- (RangeEnd::Included, Some(_)) => PatternKind::Range { lo, hi, end },
+ }
}
}
_ => PatternKind::Wild
PatKind::Slice(ref prefix, ref slice, ref suffix) => {
match ty.sty {
- ty::TyRef(_, ty, _) =>
+ ty::Ref(_, ty, _) =>
PatternKind::Deref {
subpattern: Pattern {
ty,
pat.span, ty, prefix, slice, suffix))
},
},
- ty::TySlice(..) |
- ty::TyArray(..) =>
+ ty::Slice(..) |
+ ty::Array(..) =>
self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix),
- ty::TyError => { // Avoid ICE
+ ty::Error => { // Avoid ICE
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
ref sty =>
PatKind::Tuple(ref subpatterns, ddpos) => {
match ty.sty {
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
let subpatterns =
subpatterns.iter()
.enumerate_and_adjust(tys.len(), ddpos)
PatternKind::Leaf { subpatterns: subpatterns }
}
- ty::TyError => { // Avoid ICE (#50577)
+ ty::Error => { // Avoid ICE (#50577)
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty),
PatKind::Binding(_, id, ident, ref sub) => {
let var_ty = self.tables.node_id_to_type(pat.hir_id);
let region = match var_ty.sty {
- ty::TyRef(r, _, _) => Some(r),
- ty::TyError => { // Avoid ICE
+ ty::Ref(r, _, _) => Some(r),
+ ty::Error => { // Avoid ICE
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
_ => None,
// A ref x pattern is the same node used for x, and as such it has
// x's type, which is &T, where we want T (the type being matched).
if let ty::BindByReference(_) = bm {
- if let ty::TyRef(_, rty, _) = ty.sty {
+ if let ty::Ref(_, rty, _) = ty.sty {
ty = rty;
} else {
bug!("`ref {}` has wrong type {}", ident, ty);
PatKind::TupleStruct(ref qpath, ref subpatterns, ddpos) => {
let def = self.tables.qpath_def(qpath, pat.hir_id);
let adt_def = match ty.sty {
- ty::TyAdt(adt_def, _) => adt_def,
- ty::TyError => { // Avoid ICE (#50585)
+ ty::Adt(adt_def, _) => adt_def,
+ ty::Error => { // Avoid ICE (#50585)
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
_ => span_bug!(pat.span,
self.flatten_nested_slice_patterns(prefix, slice, suffix);
match ty.sty {
- ty::TySlice(..) => {
+ ty::Slice(..) => {
// matching a slice or fixed-length array
PatternKind::Slice { prefix: prefix, slice: slice, suffix: suffix }
}
- ty::TyArray(_, len) => {
+ ty::Array(_, len) => {
// fixed-length array
let len = len.unwrap_usize(self.tcx);
assert!(len >= prefix.len() as u64 + suffix.len() as u64);
let adt_def = self.tcx.adt_def(enum_id);
if adt_def.is_enum() {
let substs = match ty.sty {
- ty::TyAdt(_, substs) |
- ty::TyFnDef(_, substs) => substs,
- ty::TyError => { // Avoid ICE (#50585)
+ ty::Adt(_, substs) |
+ ty::FnDef(_, substs) => substs,
+ ty::Error => { // Avoid ICE (#50585)
return PatternKind::Wild;
}
_ => bug!("inappropriate type for def: {:?}", ty.sty),
debug!("const_to_pat: cv={:#?}", cv);
let adt_subpattern = |i, variant_opt| {
let field = Field::new(i);
- let val = const_val_field(
+ let val = const_field(
self.tcx, self.param_env, instance,
variant_opt, field, cv,
).expect("field access failed");
}).collect::<Vec<_>>()
};
let kind = match cv.ty.sty {
- ty::TyFloat(_) => {
+ ty::Float(_) => {
let id = self.tcx.hir.hir_to_node_id(id);
self.tcx.lint_node(
::rustc::lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
value: cv,
}
},
- ty::TyAdt(adt_def, _) if adt_def.is_union() => {
+ ty::Adt(adt_def, _) if adt_def.is_union() => {
// Matching on union fields is unsafe, we can't hide it in constants
self.tcx.sess.span_err(span, "cannot use unions in constant patterns");
PatternKind::Wild
}
- ty::TyAdt(adt_def, _) if !self.tcx.has_attr(adt_def.did, "structural_match") => {
+ ty::Adt(adt_def, _) if !self.tcx.has_attr(adt_def.did, "structural_match") => {
let msg = format!("to use a constant of type `{}` in a pattern, \
`{}` must be annotated with `#[derive(PartialEq, Eq)]`",
self.tcx.item_path_str(adt_def.did),
self.tcx.sess.span_err(span, &msg);
PatternKind::Wild
},
- ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
+ ty::Adt(adt_def, substs) if adt_def.is_enum() => {
let variant_index = const_variant_index(
self.tcx, self.param_env, instance, cv
).expect("const_variant_index failed");
subpatterns,
}
},
- ty::TyAdt(adt_def, _) => {
+ ty::Adt(adt_def, _) => {
let struct_var = adt_def.non_enum_variant();
PatternKind::Leaf {
subpatterns: adt_subpatterns(struct_var.fields.len(), None),
}
}
- ty::TyTuple(fields) => {
+ ty::Tuple(fields) => {
PatternKind::Leaf {
subpatterns: adt_subpatterns(fields.len(), None),
}
}
- ty::TyArray(_, n) => {
+ ty::Array(_, n) => {
PatternKind::Array {
prefix: (0..n.unwrap_usize(self.tcx))
.map(|i| adt_subpattern(i as usize, None))
if let (Some(a), Some(b)) = (a.to_bits(tcx, ty), b.to_bits(tcx, ty)) {
use ::rustc_apfloat::Float;
return match ty.value.sty {
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
let l = ::rustc_apfloat::ieee::Single::from_bits(a);
let r = ::rustc_apfloat::ieee::Single::from_bits(b);
l.partial_cmp(&r)
},
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
let l = ::rustc_apfloat::ieee::Double::from_bits(a);
let r = ::rustc_apfloat::ieee::Double::from_bits(b);
l.partial_cmp(&r)
},
- ty::TyInt(_) => {
+ ty::Int(_) => {
let layout = tcx.layout_of(ty).ok()?;
- let a = interpret::sign_extend(a, layout);
- let b = interpret::sign_extend(b, layout);
+ assert!(layout.abi.is_signed());
+ let a = sign_extend(a, layout.size);
+ let b = sign_extend(b, layout.size);
Some((a as i128).cmp(&(b as i128)))
},
_ => Some(a.cmp(&b)),
}
}
- if let ty::TyRef(_, rty, _) = ty.value.sty {
- if let ty::TyStr = rty.sty {
+ if let ty::Ref(_, rty, _) = ty.value.sty {
+ if let ty::Str = rty.sty {
match (a.val, b.val) {
(
ConstValue::ScalarPair(
len_b,
),
) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => {
- let len_a = len_a.unwrap_or_err().ok();
- let len_b = len_b.unwrap_or_err().ok();
+ let len_a = len_a.not_undef().ok();
+ let len_b = len_b.not_undef().ok();
if len_a.is_none() || len_b.is_none() {
tcx.sess.struct_err("str slice len is undef").delay_as_bug();
}
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
- let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx);
- ConstValue::from_byval_value(value).unwrap()
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx)
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
Unsigned(UintTy),
}
let ity = match ty.sty {
- ty::TyInt(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty),
- ty::TyInt(other) => Int::Signed(other),
- ty::TyUint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty),
- ty::TyUint(other) => Int::Unsigned(other),
- ty::TyError => { // Avoid ICE (#51963)
+ ty::Int(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty),
+ ty::Int(other) => Int::Signed(other),
+ ty::Uint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty),
+ ty::Uint(other) => Int::Unsigned(other),
+ ty::Error => { // Avoid ICE (#51963)
return Err(LitToConstError::Propagated);
}
_ => bug!("literal integer type with bad type ({:?})", ty.sty),
}
LitKind::FloatUnsuffixed(n) => {
let fty = match ty.sty {
- ty::TyFloat(fty) => fty,
+ ty::Float(fty) => fty,
_ => bug!()
};
parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)?
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf, TyLayout};
+use rustc::ty::{self, Ty, TypeAndMut};
+use rustc::ty::layout::{self, TyLayout, Size};
use syntax::ast::{FloatTy, IntTy, UintTy};
use rustc_apfloat::ieee::{Single, Double};
-use super::{EvalContext, Machine};
-use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic, Value, EvalErrorKind};
+use rustc::mir::interpret::{
+ Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind,
+ truncate, sign_extend
+};
use rustc::mir::CastKind;
use rustc_apfloat::Float;
-use interpret::eval_context::ValTy;
-use interpret::Place;
+
+use super::{EvalContext, Machine, PlaceTy, OpTy, Value};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
+ match ty.sty {
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => !self.type_is_sized(ty),
+ ty::Adt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
+ _ => false,
+ }
+ }
+
crate fn cast(
&mut self,
- src: ValTy<'tcx>,
+ src: OpTy<'tcx>,
kind: CastKind,
- dest_ty: Ty<'tcx>,
- dest: Place,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
- let src_layout = self.layout_of(src.ty)?;
- let dst_layout = self.layout_of(dest_ty)?;
+ let src_layout = src.layout;
+ let dst_layout = dest.layout;
use rustc::mir::CastKind::*;
match kind {
Unsize => {
- self.unsize_into(src.value, src_layout, dest, dst_layout)?;
+ self.unsize_into(src, dest)?;
}
Misc => {
- if self.type_is_fat_ptr(src.ty) {
- match (src.value, self.type_is_fat_ptr(dest_ty)) {
- (Value::ByRef { .. }, _) |
+ let src = self.read_value(src)?;
+ if self.type_is_fat_ptr(src_layout.ty) {
+ match (src.value, self.type_is_fat_ptr(dest.layout.ty)) {
// pointers to extern types
(Value::Scalar(_),_) |
// slices and trait objects to other slices/trait objects
(Value::ScalarPair(..), true) => {
- let valty = ValTy {
- value: src.value,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ // No change to value
+ self.write_value(src.value, dest)?;
}
// slices and trait objects to thin pointers (dropping the metadata)
(Value::ScalarPair(data, _), false) => {
- let valty = ValTy {
- value: Value::Scalar(data),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ self.write_scalar(data, dest)?;
}
}
} else {
- let src_layout = self.layout_of(src.ty)?;
match src_layout.variants {
layout::Variants::Single { index } => {
- if let Some(def) = src.ty.ty_adt_def() {
+ if let Some(def) = src_layout.ty.ty_adt_def() {
let discr_val = def
.discriminant_for_variant(*self.tcx, index)
.val;
return self.write_scalar(
- dest,
Scalar::Bits {
bits: discr_val,
size: dst_layout.size.bytes() as u8,
},
- dest_ty);
+ dest);
}
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {},
}
- let src_val = self.value_to_scalar(src)?;
- let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?;
- let valty = ValTy {
- value: Value::Scalar(dest_val.into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ let src = src.to_scalar()?;
+ let dest_val = self.cast_scalar(src, src_layout, dest.layout)?;
+ self.write_scalar(dest_val, dest)?;
}
}
ReifyFnPointer => {
- match src.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ // The src operand does not matter, just its type
+ match src_layout.ty.sty {
+ ty::FnDef(def_id, substs) => {
if self.tcx.has_attr(def_id, "rustc_args_required_const") {
bug!("reifying a fn ptr that requires \
const arguments");
substs,
).ok_or_else(|| EvalErrorKind::TooGeneric.into());
let fn_ptr = self.memory.create_fn_alloc(instance?);
- let valty = ValTy {
- value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?;
}
ref other => bug!("reify fn pointer on {:?}", other),
}
}
UnsafeFnPointer => {
- match dest_ty.sty {
- ty::TyFnPtr(_) => {
- let mut src = src;
- src.ty = dest_ty;
- self.write_value(src, dest)?;
+ let src = self.read_value(src)?;
+ match dest.layout.ty.sty {
+ ty::FnPtr(_) => {
+ // No change to value
+ self.write_value(*src, dest)?;
}
ref other => bug!("fn to unsafe fn cast on {:?}", other),
}
}
ClosureFnPointer => {
- match src.ty.sty {
- ty::TyClosure(def_id, substs) => {
+ // The src operand does not matter, just its type
+ match src_layout.ty.sty {
+ ty::Closure(def_id, substs) => {
let substs = self.tcx.subst_and_normalize_erasing_regions(
self.substs(),
ty::ParamEnv::reveal_all(),
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(instance);
- let valty = ValTy {
- value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into());
+ self.write_value(val, dest)?;
}
ref other => bug!("closure fn pointer on {:?}", other),
}
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
match val {
Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
Scalar::Bits { bits, size } => {
- assert_eq!(size as u64, src_layout.size.bytes());
- match src_layout.ty.sty {
- TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty),
- _ => self.cast_from_int(bits, src_layout, dest_layout),
+ debug_assert_eq!(size as u64, src_layout.size.bytes());
+ debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} before casting", size);
+
+ let res = match src_layout.ty.sty {
+ Float(fty) => self.cast_from_float(bits, fty, dest_layout.ty)?,
+ _ => self.cast_from_int(bits, src_layout, dest_layout)?,
+ };
+
+ // Sanity check
+ match res {
+ Scalar::Ptr(_) => bug!("Fabricated a ptr value from an int...?"),
+ Scalar::Bits { bits, size } => {
+ debug_assert_eq!(size as u64, dest_layout.size.bytes());
+ debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} after casting", size);
+ }
}
+ // Done
+ Ok(res)
}
}
}
v
};
trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty);
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
match dest_layout.ty.sty {
- TyInt(_) | TyUint(_) => {
+ Int(_) | Uint(_) => {
let v = self.truncate(v, dest_layout);
Ok(Scalar::Bits {
bits: v,
})
}
- TyFloat(FloatTy::F32) if signed => Ok(Scalar::Bits {
+ Float(FloatTy::F32) if signed => Ok(Scalar::Bits {
bits: Single::from_i128(v as i128).value.to_bits(),
size: 4,
}),
- TyFloat(FloatTy::F64) if signed => Ok(Scalar::Bits {
+ Float(FloatTy::F64) if signed => Ok(Scalar::Bits {
bits: Double::from_i128(v as i128).value.to_bits(),
size: 8,
}),
- TyFloat(FloatTy::F32) => Ok(Scalar::Bits {
+ Float(FloatTy::F32) => Ok(Scalar::Bits {
bits: Single::from_u128(v).value.to_bits(),
size: 4,
}),
- TyFloat(FloatTy::F64) => Ok(Scalar::Bits {
+ Float(FloatTy::F64) => Ok(Scalar::Bits {
bits: Double::from_u128(v).value.to_bits(),
size: 8,
}),
- TyChar => {
+ Char => {
assert_eq!(v as u8 as u128, v);
Ok(Scalar::Bits { bits: v, size: 4 })
},
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
- TyRawPtr(_) => {
+ RawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory.truncate_to_ptr(v).0 as u128,
size: self.memory.pointer_size().bytes() as u8,
}
fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
// float -> uint
- TyUint(t) => {
+ Uint(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
- match fty {
- FloatTy::F32 => Ok(Scalar::Bits {
- bits: Single::from_bits(bits).to_u128(width).value,
- size: (width / 8) as u8,
- }),
- FloatTy::F64 => Ok(Scalar::Bits {
- bits: Double::from_bits(bits).to_u128(width).value,
- size: (width / 8) as u8,
- }),
- }
+ let v = match fty {
+ FloatTy::F32 => Single::from_bits(bits).to_u128(width).value,
+ FloatTy::F64 => Double::from_bits(bits).to_u128(width).value,
+ };
+ // This should already fit the bit width
+ Ok(Scalar::Bits {
+ bits: v,
+ size: (width / 8) as u8,
+ })
},
// float -> int
- TyInt(t) => {
+ Int(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
- match fty {
- FloatTy::F32 => Ok(Scalar::Bits {
- bits: Single::from_bits(bits).to_i128(width).value as u128,
- size: (width / 8) as u8,
- }),
- FloatTy::F64 => Ok(Scalar::Bits {
- bits: Double::from_bits(bits).to_i128(width).value as u128,
- size: (width / 8) as u8,
- }),
- }
+ let v = match fty {
+ FloatTy::F32 => Single::from_bits(bits).to_i128(width).value,
+ FloatTy::F64 => Double::from_bits(bits).to_i128(width).value,
+ };
+ // We got an i128, but we may need something smaller. We have to truncate ourselves.
+ let truncated = truncate(v as u128, Size::from_bits(width as u64));
+ assert_eq!(sign_extend(truncated, Size::from_bits(width as u64)) as i128, v,
+ "truncating and extending changed the value?!?");
+ Ok(Scalar::Bits {
+ bits: truncated,
+ size: (width / 8) as u8,
+ })
},
// f64 -> f32
- TyFloat(FloatTy::F32) if fty == FloatTy::F64 => {
+ Float(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::Bits {
bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
size: 4,
})
},
// f32 -> f64
- TyFloat(FloatTy::F64) if fty == FloatTy::F32 => {
+ Float(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::Bits {
bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
size: 8,
})
},
// identity cast
- TyFloat(FloatTy:: F64) => Ok(Scalar::Bits {
+ Float(FloatTy:: F64) => Ok(Scalar::Bits {
bits,
size: 8,
}),
- TyFloat(FloatTy:: F32) => Ok(Scalar::Bits {
+ Float(FloatTy:: F32) => Ok(Scalar::Bits {
bits,
size: 4,
}),
}
fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
- TyRawPtr(_) |
- TyInt(IntTy::Isize) |
- TyUint(UintTy::Usize) => Ok(ptr.into()),
- TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
+ RawPtr(_) |
+ Int(IntTy::Isize) |
+ Uint(UintTy::Usize) => Ok(ptr.into()),
+ Int(_) | Uint(_) => err!(ReadPointerAsBytes),
_ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
}
}
+
+ fn unsize_into_ptr(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ // The pointee types
+ sty: Ty<'tcx>,
+ dty: Ty<'tcx>,
+ ) -> EvalResult<'tcx> {
+ // A<Struct> -> A<Trait> conversion
+ let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
+
+ match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
+ (&ty::Array(_, length), &ty::Slice(_)) => {
+ let ptr = self.read_value(src)?.to_scalar_ptr()?;
+ // u64 cast is from usize to u64, which is always good
+ let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx);
+ self.write_value(val, dest)
+ }
+ (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+ // For now, upcasts are limited to changes in marker
+ // traits, and hence never require an actual change
+ // to the vtable.
+ self.copy_op(src, dest)
+ }
+ (_, &ty::Dynamic(ref data, _)) => {
+ // Initial cast from sized to dyn trait
+ let trait_ref = data.principal().unwrap().with_self_ty(
+ *self.tcx,
+ src_pointee_ty,
+ );
+ let trait_ref = self.tcx.erase_regions(&trait_ref);
+ let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
+ let ptr = self.read_value(src)?.to_scalar_ptr()?;
+ let val = Value::new_dyn_trait(ptr, vtable);
+ self.write_value(val, dest)
+ }
+
+ _ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty),
+ }
+ }
+
+ fn unsize_into(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ match (&src.layout.ty.sty, &dest.layout.ty.sty) {
+ (&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |
+ (&ty::Ref(_, s, _), &ty::RawPtr(TypeAndMut { ty: d, .. })) |
+ (&ty::RawPtr(TypeAndMut { ty: s, .. }),
+ &ty::RawPtr(TypeAndMut { ty: d, .. })) => {
+ self.unsize_into_ptr(src, dest, s, d)
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+ if def_a.is_box() || def_b.is_box() {
+ if !def_a.is_box() || !def_b.is_box() {
+ bug!("invalid unsizing between {:?} -> {:?}", src.layout, dest.layout);
+ }
+ return self.unsize_into_ptr(
+ src,
+ dest,
+ src.layout.ty.boxed_ty(),
+ dest.layout.ty.boxed_ty(),
+ );
+ }
+
+ // unsizing of generic struct with pointer fields
+ // Example: `Arc<T>` -> `Arc<Trait>`
+ // here we need to increase the size of every &T thin ptr field to a fat ptr
+ for i in 0..src.layout.fields.count() {
+ let dst_field = self.place_field(dest, i as u64)?;
+ if dst_field.layout.is_zst() {
+ continue;
+ }
+ let src_field = match src.try_as_mplace() {
+ Ok(mplace) => {
+ let src_field = self.mplace_field(mplace, i as u64)?;
+ src_field.into()
+ }
+ Err(..) => {
+ let src_field_layout = src.layout.field(&self, i)?;
+ // this must be a field covering the entire thing
+ assert_eq!(src.layout.fields.offset(i).bytes(), 0);
+ assert_eq!(src_field_layout.size, src.layout.size);
+ // just swap out the layout
+ OpTy { op: src.op, layout: src_field_layout }
+ }
+ };
+ if src_field.layout.ty == dst_field.layout.ty {
+ self.copy_op(src_field, dst_field)?;
+ } else {
+ self.unsize_into(src_field, dst_field)?;
+ }
+ }
+ Ok(())
+ }
+ _ => {
+ bug!(
+ "unsize_into: invalid conversion: {:?} -> {:?}",
+ src.layout,
+ dest.layout
+ )
+ }
+ }
+ }
}
use std::error::Error;
use rustc::hir;
-use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef};
+use rustc::mir::interpret::ConstEvalErr;
use rustc::mir;
-use rustc::ty::{self, TyCtxt, Ty, Instance};
-use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout};
+use rustc::ty::{self, TyCtxt, Instance};
+use rustc::ty::layout::{LayoutOf, Primitive, TyLayout, Size};
use rustc::ty::subst::Subst;
-use rustc_data_structures::indexed_vec::IndexVec;
+use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use syntax::ast::Mutability;
use syntax::source_map::Span;
use syntax::source_map::DUMMY_SP;
+use syntax::symbol::Symbol;
use rustc::mir::interpret::{
EvalResult, EvalError, EvalErrorKind, GlobalId,
- Value, Scalar, AllocId, Allocation, ConstValue,
+ Scalar, AllocId, Allocation, ConstValue,
+};
+use super::{
+ Place, PlaceExtra, PlaceTy, MemPlace, OpTy, Operand, Value,
+ EvalContext, StackPopCleanup, Memory, MemoryKind, MPlaceTy,
};
-use super::{Place, EvalContext, StackPopCleanup, ValTy, Memory, MemoryKind};
pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance,
span,
mir,
- return_place: Place::undef(),
+ return_place: Place::null(tcx),
return_to_block: StackPopCleanup::None,
stmt: 0,
});
instance,
mir.span,
mir,
- Place::undef(),
+ Place::null(tcx),
StackPopCleanup::None,
)?;
Ok(ecx)
cid: GlobalId<'tcx>,
mir: &'mir mir::Mir<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> {
+) -> EvalResult<'tcx, OpTy<'tcx>> {
ecx.with_fresh_body(|ecx| {
eval_body_using_ecx(ecx, cid, Some(mir), param_env)
})
}
-pub fn value_to_const_value<'tcx>(
+pub fn op_to_const<'tcx>(
ecx: &EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>,
- val: Value,
- layout: TyLayout<'tcx>,
+ op: OpTy<'tcx>,
+ normalize: bool,
) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> {
- match (val, &layout.abi) {
- (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {},
- (Value::ByRef(..), _) |
- (Value::Scalar(_), &layout::Abi::Scalar(_)) |
- (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {},
- _ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout),
- }
- let val = match val {
- Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?),
- Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b),
- Value::ByRef(ptr, align) => {
- let ptr = ptr.to_ptr().unwrap();
+ let normalized_op = if normalize {
+ ecx.try_read_value(op)?
+ } else {
+ match op.op {
+ Operand::Indirect(mplace) => Err(mplace),
+ Operand::Immediate(val) => Ok(val)
+ }
+ };
+ let val = match normalized_op {
+ Err(MemPlace { ptr, align, extra }) => {
+ // extract alloc-offset pair
+ assert_eq!(extra, PlaceExtra::None);
+ let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
- assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes());
+ assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
let alloc = ecx.tcx.intern_const_alloc(alloc);
ConstValue::ByRef(alloc, ptr.offset)
- }
+ },
+ Ok(Value::Scalar(x)) =>
+ ConstValue::Scalar(x.not_undef()?),
+ Ok(Value::ScalarPair(a, b)) =>
+ ConstValue::ScalarPair(a.not_undef()?, b),
};
- Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, layout.ty))
+ Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty))
+}
+pub fn const_to_op<'tcx>(
+ ecx: &mut EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>,
+ cnst: &'tcx ty::Const<'tcx>,
+) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let op = ecx.const_value_to_op(cnst.val)?;
+ Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? })
}
fn eval_body_and_ecx<'a, 'mir, 'tcx>(
cid: GlobalId<'tcx>,
mir: Option<&'mir mir::Mir<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
-) -> (EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) {
+) -> (EvalResult<'tcx, OpTy<'tcx>>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) {
debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env);
// we start out with the best span we have
// and try improving it down the road when more information is available
(r, ecx)
}
+// Returns a pointer to where the result lives
fn eval_body_using_ecx<'a, 'mir, 'tcx>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
cid: GlobalId<'tcx>,
mir: Option<&'mir mir::Mir<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> {
+) -> EvalResult<'tcx, OpTy<'tcx>> {
debug!("eval_body: {:?}, {:?}", cid, param_env);
let tcx = ecx.tcx.tcx;
let mut mir = match mir {
}
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
- let ptr = ecx.memory.allocate(
- layout.size,
- layout.align,
- MemoryKind::Stack,
- )?;
+ let ret = ecx.allocate(layout, MemoryKind::Stack)?;
let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span);
let is_static = tcx.is_static(cid.instance.def_id());
let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable {
cid.instance,
mir.span,
mir,
- Place::from_ptr(ptr, layout.align),
+ Place::Ptr(*ret),
cleanup,
)?;
+ // The main interpreter loop.
while ecx.step()? {}
- let ptr = ptr.into();
- // always try to read the value and report errors
- let value = match ecx.try_read_value(ptr, layout.align, layout.ty)? {
- Some(val) if is_static.is_none() && cid.promoted.is_none() => val,
- // point at the allocation
- _ => Value::ByRef(ptr, layout.align),
- };
- Ok((value, ptr, layout))
+
+ Ok(ret.into())
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
- sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
debug!("eval_fn_call: {:?}", instance);
if !ecx.tcx.is_const_fn(instance.def_id()) {
let def_id = instance.def_id();
- let (op, oflo) = if let Some(op) = ecx.tcx.is_binop_lang_item(def_id) {
- op
+ // Some fn calls are actually BinOp intrinsics
+ let _: ! = if let Some((op, oflo)) = ecx.tcx.is_binop_lang_item(def_id) {
+ let (dest, bb) = destination.expect("128 lowerings can't diverge");
+ let l = ecx.read_value(args[0])?;
+ let r = ecx.read_value(args[1])?;
+ if oflo {
+ ecx.binop_with_overflow(op, l, r, dest)?;
+ } else {
+ ecx.binop_ignore_overflow(op, l, r, dest)?;
+ }
+ ecx.goto_block(bb);
+ return Ok(true);
+ } else if Some(def_id) == ecx.tcx.lang_items().panic_fn() {
+ assert!(args.len() == 1);
+ // &(&'static str, &'static str, u32, u32)
+ let ptr = ecx.read_value(args[0])?;
+ let place = ecx.ref_to_mplace(ptr)?;
+ let (msg, file, line, col) = (
+ place_field(ecx, 0, place)?,
+ place_field(ecx, 1, place)?,
+ place_field(ecx, 2, place)?,
+ place_field(ecx, 3, place)?,
+ );
+
+ let msg = to_str(ecx, msg)?;
+ let file = to_str(ecx, file)?;
+ let line = to_u32(line)?;
+ let col = to_u32(col)?;
+ return Err(EvalErrorKind::Panic { msg, file, line, col }.into());
+ } else if Some(def_id) == ecx.tcx.lang_items().begin_panic_fn() {
+ assert!(args.len() == 2);
+ // &'static str, &(&'static str, u32, u32)
+ let msg = ecx.read_value(args[0])?;
+ let ptr = ecx.read_value(args[1])?;
+ let place = ecx.ref_to_mplace(ptr)?;
+ let (file, line, col) = (
+ place_field(ecx, 0, place)?,
+ place_field(ecx, 1, place)?,
+ place_field(ecx, 2, place)?,
+ );
+
+ let msg = to_str(ecx, msg.value)?;
+ let file = to_str(ecx, file)?;
+ let line = to_u32(line)?;
+ let col = to_u32(col)?;
+ return Err(EvalErrorKind::Panic { msg, file, line, col }.into());
} else {
return Err(
ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
);
};
- let (dest, bb) = destination.expect("128 lowerings can't diverge");
- let dest_ty = sig.output();
- if oflo {
- ecx.intrinsic_with_overflow(op, args[0], args[1], dest, dest_ty)?;
- } else {
- ecx.intrinsic_overflowing(op, args[0], args[1], dest, dest_ty)?;
- }
- ecx.goto_block(bb);
- return Ok(true);
}
let mir = match ecx.load_mir(instance.def) {
Ok(mir) => mir,
}
};
let (return_place, return_to_block) = match destination {
- Some((place, block)) => (place, StackPopCleanup::Goto(block)),
- None => (Place::undef(), StackPopCleanup::None),
+ Some((place, block)) => (*place, StackPopCleanup::Goto(block)),
+ None => (Place::null(&ecx), StackPopCleanup::None),
};
ecx.push_stack_frame(
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[ValTy<'tcx>],
- dest: Place,
- dest_layout: layout::TyLayout<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: PlaceTy<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx> {
let substs = instance.substs;
let elem_align = ecx.layout_of(elem_ty)?.align.abi();
let align_val = Scalar::Bits {
bits: elem_align as u128,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, align_val, dest_layout.ty)?;
+ ecx.write_scalar(align_val, dest)?;
}
"size_of" => {
let size = ecx.layout_of(ty)?.size.bytes() as u128;
let size_val = Scalar::Bits {
bits: size,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, size_val, dest_layout.ty)?;
+ ecx.write_scalar(size_val, dest)?;
}
"type_id" => {
let type_id = ecx.tcx.type_id_hash(ty) as u128;
let id_val = Scalar::Bits {
bits: type_id,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, id_val, dest_layout.ty)?;
+ ecx.write_scalar(id_val, dest)?;
}
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
let ty = substs.type_at(0);
let layout_of = ecx.layout_of(ty)?;
- let bits = ecx.value_to_scalar(args[0])?.to_bits(layout_of.size)?;
+ let bits = ecx.read_scalar(args[0])?.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
ty::layout::Abi::Scalar(ref scalar) => scalar.value,
_ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
} else {
numeric_intrinsic(intrinsic_name, bits, kind)?
};
- ecx.write_scalar(dest, out_val, ty)?;
+ ecx.write_scalar(out_val, dest)?;
}
name => return Err(
_ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
_bin_op: mir::BinOp,
left: Scalar,
- _left_ty: Ty<'tcx>,
+ _left_layout: TyLayout<'tcx>,
right: Scalar,
- _right_ty: Ty<'tcx>,
+ _right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
if left.is_bits() && right.is_bits() {
Ok(None)
fn box_alloc<'a>(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
- _ty: Ty<'tcx>,
- _dest: Place,
+ _dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(),
}
}
-pub fn const_val_field<'a, 'tcx>(
+fn place_field<'a, 'tcx, 'mir>(
+ ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
+ i: u64,
+ place: MPlaceTy<'tcx>,
+) -> EvalResult<'tcx, Value> {
+ let place = ecx.mplace_field(place, i)?;
+ Ok(ecx.try_read_value_from_mplace(place)?.expect("bad panic arg layout"))
+}
+
+fn to_str<'a, 'tcx, 'mir>(
+ ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
+ val: Value,
+) -> EvalResult<'tcx, Symbol> {
+ if let Value::ScalarPair(ptr, len) = val {
+ let len = len.not_undef()?.to_bits(ecx.memory.pointer_size())?;
+ let bytes = ecx.memory.read_bytes(ptr.not_undef()?, Size::from_bytes(len as u64))?;
+ let str = ::std::str::from_utf8(bytes).map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?;
+ Ok(Symbol::intern(str))
+ } else {
+ bug!("panic arg is not a str")
+ }
+}
+
+fn to_u32<'a, 'tcx, 'mir>(
+ val: Value,
+) -> EvalResult<'tcx, u32> {
+ if let Value::Scalar(n) = val {
+ Ok(n.not_undef()?.to_bits(Size::from_bits(32))? as u32)
+ } else {
+ bug!("panic arg is not a str")
+ }
+}
+
+/// Project to a field of a (variant of a) const
+pub fn const_field<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
instance: ty::Instance<'tcx>,
field: mir::Field,
value: &'tcx ty::Const<'tcx>,
) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> {
- trace!("const_val_field: {:?}, {:?}, {:?}", instance, field, value);
+ trace!("const_field: {:?}, {:?}, {:?}", instance, field, value);
let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap();
let result = (|| {
- let ty = value.ty;
- let value = ecx.const_to_value(value.val)?;
- let layout = ecx.layout_of(ty)?;
- let place = ecx.allocate_place_for_value(value, layout, variant)?;
- let (place, layout) = ecx.place_field(place, field, layout)?;
- let (ptr, align) = place.to_ptr_align();
- let mut new_value = Value::ByRef(ptr.unwrap_or_err()?, align);
- new_value = ecx.try_read_by_ref(new_value, layout.ty)?;
- use rustc_data_structures::indexed_vec::Idx;
- match (value, new_value) {
- (Value::Scalar(_), Value::ByRef(..)) |
- (Value::ScalarPair(..), Value::ByRef(..)) |
- (Value::Scalar(_), Value::ScalarPair(..)) => bug!(
- "field {} of {:?} yielded {:?}",
- field.index(),
- value,
- new_value,
- ),
- _ => {},
- }
- value_to_const_value(&ecx, new_value, layout)
+ // get the operand again
+ let op = const_to_op(&mut ecx, value)?;
+ // downcast
+ let down = match variant {
+ None => op,
+ Some(variant) => ecx.operand_downcast(op, variant)?
+ };
+ // then project
+ let field = ecx.operand_field(down, field.index() as u64)?;
+ // and finally move back to the const world, always normalizing because
+ // this is not called for statics.
+ op_to_const(&ecx, field, true)
})();
result.map_err(|err| {
let (trace, span) = ecx.generate_stacktrace(None);
) -> EvalResult<'tcx, usize> {
trace!("const_variant_index: {:?}, {:?}", instance, val);
let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap();
- let value = ecx.const_to_value(val.val)?;
- let layout = ecx.layout_of(val.ty)?;
- let (ptr, align) = match value {
- Value::ScalarPair(..) | Value::Scalar(_) => {
- let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
- ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?;
- (ptr, layout.align)
- },
- Value::ByRef(ptr, align) => (ptr, align),
- };
- let place = Place::from_scalar_ptr(ptr.into(), align);
- ecx.read_discriminant_as_variant_index(place, layout)
+ let op = const_to_op(&mut ecx, val)?;
+ ecx.read_discriminant_as_variant_index(op)
}
-pub fn const_value_to_allocation_provider<'a, 'tcx>(
+pub fn const_to_allocation_provider<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
val: &'tcx ty::Const<'tcx>,
) -> &'tcx Allocation {
ty::ParamEnv::reveal_all(),
CompileTimeEvaluator,
());
- let value = ecx.const_to_value(val.val)?;
- let layout = ecx.layout_of(val.ty)?;
- let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?;
- ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?;
- let alloc = ecx.memory.get(ptr.alloc_id)?;
+ let op = const_to_op(&mut ecx, val)?;
+ // Make a new allocation, copy things there
+ let ptr = ecx.allocate(op.layout, MemoryKind::Stack)?;
+ ecx.copy_op(op, ptr.into())?;
+ let alloc = ecx.memory.get(ptr.to_ptr()?.alloc_id)?;
Ok(tcx.intern_const_alloc(alloc.clone()))
};
result().expect("unable to convert ConstValue to Allocation")
};
let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env);
- res.and_then(|(mut val, _, layout)| {
- if tcx.is_static(def_id).is_none() && cid.promoted.is_none() {
- val = ecx.try_read_by_ref(val, layout.ty)?;
+ res.and_then(|op| {
+ let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none();
+ if !normalize {
+ // Sanity check: These must always be a MemPlace
+ match op.op {
+ Operand::Indirect(_) => { /* all is good */ },
+ Operand::Immediate(_) => bug!("const eval gave us an Immediate"),
+ }
}
- value_to_const_value(&ecx, val, layout)
+ op_to_const(&ecx, op, normalize)
}).map_err(|err| {
let (trace, span) = ecx.generate_stacktrace(None);
let err = ConstEvalErr {
use rustc::hir::def::Def;
use rustc::hir::map::definitions::DefPathData;
use rustc::mir;
-use rustc::ty::layout::{self, Size, Align, HasDataLayout, IntegerExt, LayoutOf, TyLayout, Primitive};
+use rustc::ty::layout::{
+ self, Size, Align, HasDataLayout, LayoutOf, TyLayout
+};
use rustc::ty::subst::{Subst, Substs};
-use rustc::ty::{self, Ty, TyCtxt, TypeAndMut};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::query::TyCtxtAt;
use rustc_data_structures::fx::{FxHashSet, FxHasher};
-use rustc_data_structures::indexed_vec::{IndexVec, Idx};
+use rustc_data_structures::indexed_vec::IndexVec;
use rustc::mir::interpret::{
- GlobalId, Value, Scalar, FrameInfo, AllocType,
- EvalResult, EvalErrorKind, Pointer, ConstValue,
+ GlobalId, Scalar, FrameInfo,
+ EvalResult, EvalErrorKind,
ScalarMaybeUndef,
+ truncate, sign_extend,
};
use syntax::source_map::{self, Span};
use syntax::ast::Mutability;
-use super::{Place, PlaceExtra, Memory,
- HasMemory, MemoryKind,
- Machine};
-
-macro_rules! validation_failure{
- ($what:expr, $where:expr, $details:expr) => {{
- let where_ = if $where.is_empty() {
- String::new()
- } else {
- format!(" at {}", $where)
- };
- err!(ValidationFailure(format!(
- "encountered {}{}, but expected {}",
- $what, where_, $details,
- )))
- }};
- ($what:expr, $where:expr) => {{
- let where_ = if $where.is_empty() {
- String::new()
- } else {
- format!(" at {}", $where)
- };
- err!(ValidationFailure(format!(
- "encountered {}{}",
- $what, where_,
- )))
- }};
-}
+use super::{
+ Value, Operand, MemPlace, MPlaceTy, Place, PlaceExtra,
+ Memory, Machine
+};
pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
pub stmt: usize,
}
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub enum LocalValue {
- Dead,
- Live(Value),
-}
-
-impl LocalValue {
- pub fn access(self) -> EvalResult<'static, Value> {
- match self {
- LocalValue::Dead => err!(DeadLocal),
- LocalValue::Live(val) => Ok(val),
- }
- }
-}
-
impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {}
impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> {
}
}
+// State of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub enum LocalValue {
+ Dead,
+ // Mostly for convenience, we re-use the `Operand` type here.
+ // This is an optimization over just always having a pointer here;
+ // we can thus avoid doing an allocation when the local just stores
+ // immediate values *and* never has its address taken.
+ Live(Operand),
+}
+
+impl<'tcx> LocalValue {
+ pub fn access(&self) -> EvalResult<'tcx, &Operand> {
+ match self {
+ LocalValue::Dead => err!(DeadLocal),
+ LocalValue::Live(ref val) => Ok(val),
+ }
+ }
+
+ pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> {
+ match self {
+ LocalValue::Dead => err!(DeadLocal),
+ LocalValue::Live(ref mut val) => Ok(val),
+ }
+ }
+}
+
/// The virtual machine state during const-evaluation at a given point in time.
type EvalSnapshot<'a, 'mir, 'tcx, M>
= (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>);
None,
}
-#[derive(Copy, Clone, Debug)]
-pub struct TyAndPacked<'tcx> {
- pub ty: Ty<'tcx>,
- pub packed: bool,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct ValTy<'tcx> {
- pub value: Value,
- pub ty: Ty<'tcx>,
-}
-
-impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
- type Target = Value;
- fn deref(&self) -> &Value {
- &self.value
- }
-}
-
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
type Ty = Ty<'tcx>;
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
+ #[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
.map_err(|layout| EvalErrorKind::Layout(layout).into())
r
}
- pub fn alloc_ptr(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Pointer> {
- assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
-
- self.memory.allocate(layout.size, layout.align, MemoryKind::Stack)
- }
-
pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
&self.memory
}
self.stack.len() - 1
}
- pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
- let ptr = self.memory.allocate_bytes(s.as_bytes());
- Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx))
+ /// Mark a storage location as live, killing the previous content and returning it.
+ /// Remember to deallocate that!
+ pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
+ trace!("{:?} is now live", local);
+
+ let layout = self.layout_of_local(self.cur_frame(), local)?;
+ let init = LocalValue::Live(self.uninit_operand(layout)?);
+ // StorageLive *always* kills the value that's currently stored
+ Ok(mem::replace(&mut self.frame_mut().locals[local], init))
}
- pub fn const_to_value(
- &mut self,
- val: ConstValue<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- match val {
- ConstValue::Unevaluated(def_id, substs) => {
- let instance = self.resolve(def_id, substs)?;
- self.read_global_as_value(GlobalId {
- instance,
- promoted: None,
- })
- }
- ConstValue::ByRef(alloc, offset) => {
- // FIXME: Allocate new AllocId for all constants inside
- let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
- Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
- },
- ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())),
- ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())),
- }
+ /// Returns the old value of the local.
+ /// Remember to deallocate that!
+ pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
+ trace!("{:?} is now dead", local);
+
+ mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead)
+ }
+
+ pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
+ let ptr = self.memory.allocate_bytes(s.as_bytes());
+ Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
}
pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> {
}
}
- pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ pub fn monomorphize<T: TypeFoldable<'tcx> + Subst<'tcx>>(
+ &self,
+ t: T,
+ substs: &'tcx Substs<'tcx>
+ ) -> T {
// miri doesn't care about lifetimes, and will choke on some crazy ones
// let's simply get rid of them
- let substituted = ty.subst(*self.tcx, substs);
+ let substituted = t.subst(*self.tcx, substs);
self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted)
}
- /// Return the size and aligment of the value at the given type.
+ pub fn layout_of_local(
+ &self,
+ frame: usize,
+ local: mir::Local
+ ) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ let local_ty = self.stack[frame].mir.local_decls[local].ty;
+ let local_ty = self.monomorphize(
+ local_ty,
+ self.stack[frame].instance.substs
+ );
+ self.layout_of(local_ty)
+ }
+
+ /// Return the actual dynamic size and alignment of the place at the given type.
/// Note that the value does not matter if the type is sized. For unsized types,
/// the value has to be a fat pointer, and we only care about the "extra" data in it.
- pub fn size_and_align_of_dst(
+ pub fn size_and_align_of_mplace(
&self,
- ty: Ty<'tcx>,
- value: Value,
+ mplace: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, (Size, Align)> {
- let layout = self.layout_of(ty)?;
- if !layout.is_unsized() {
- Ok(layout.size_and_align())
+ if let PlaceExtra::None = mplace.extra {
+ assert!(!mplace.layout.is_unsized());
+ Ok(mplace.layout.size_and_align())
} else {
- match ty.sty {
- ty::TyAdt(..) | ty::TyTuple(..) => {
+ let layout = mplace.layout;
+ assert!(layout.is_unsized());
+ match layout.ty.sty {
+ ty::Adt(..) | ty::Tuple(..) => {
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized,
// and it also rounds up to alignment, which we want to avoid,
// as the unsized field's alignment could be smaller.
- assert!(!ty.is_simd());
- debug!("DST {} layout: {:?}", ty, layout);
+ assert!(!layout.ty.is_simd());
+ debug!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align;
debug!(
"DST {} statically sized prefix size: {:?} align: {:?}",
- ty,
+ layout.ty,
sized_size,
sized_align
);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
- let field_ty = layout.field(self, layout.fields.count() - 1)?.ty;
- let (unsized_size, unsized_align) =
- self.size_and_align_of_dst(field_ty, value)?;
+ let field = self.mplace_field(mplace, layout.fields.count() as u64 - 1)?;
+ let (unsized_size, unsized_align) = self.size_and_align_of_mplace(field)?;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
Ok((size.abi_align(align), align))
}
- ty::TyDynamic(..) => {
- let (_, vtable) = self.into_ptr_vtable_pair(value)?;
+ ty::Dynamic(..) => {
+ let vtable = match mplace.extra {
+ PlaceExtra::Vtable(vtable) => vtable,
+ _ => bug!("Expected vtable"),
+ };
// the second entry in the vtable is the dynamic size of the object.
self.read_size_and_align_from_vtable(vtable)
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
+ let len = match mplace.extra {
+ PlaceExtra::Length(len) => len,
+ _ => bug!("Expected length"),
+ };
let (elem_size, align) = layout.field(self, 0)?.size_and_align();
- let (_, len) = self.into_slice(value)?;
Ok((elem_size * len, align))
}
- _ => bug!("size_of_val::<{:?}>", ty),
+ _ => bug!("size_of_val::<{:?}> not supported", layout.ty),
}
}
}
// don't allocate at all for trivial constants
if mir.local_decls.len() > 1 {
- let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls);
- for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) {
- *local = LocalValue::Live(self.init_value(decl.ty)?);
- }
+ // We put some marker value into the locals that we later want to initialize.
+ // This can be anything except for LocalValue::Dead -- because *that* is the
+ // value we use for things that we know are initially dead.
+ let dummy =
+ LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)));
+ let mut locals = IndexVec::from_elem(dummy, &mir.local_decls);
+ // Now mark those locals as dead that we do not want to initialize
match self.tcx.describe_def(instance.def_id()) {
// statics and constants don't have `Storage*` statements, no need to look for them
Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {},
use rustc::mir::StatementKind::{StorageDead, StorageLive};
match stmt.kind {
StorageLive(local) |
- StorageDead(local) => locals[local] = LocalValue::Dead,
+ StorageDead(local) => {
+ locals[local] = LocalValue::Dead;
+ }
_ => {}
}
}
}
},
}
+ // Finally, properly initialize all those that still have the dummy value
+ for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) {
+ match *local {
+ LocalValue::Live(_) => {
+ // This needs to be properly initialized.
+ let layout = self.layout_of(self.monomorphize(decl.ty, instance.substs))?;
+ *local = LocalValue::Live(self.uninit_operand(layout)?);
+ }
+ LocalValue::Dead => {
+ // Nothing to do
+ }
+ }
+ }
+ // done
self.frame_mut().locals = locals;
}
- self.memory.cur_frame = self.cur_frame();
-
if self.stack.len() > self.stack_limit {
err!(StackFrameLimitReached)
} else {
let frame = self.stack.pop().expect(
"tried to pop a stack frame, but there were none",
);
- if !self.stack.is_empty() {
- // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
- self.memory.cur_frame = self.cur_frame();
- }
match frame.return_to_block {
StackPopCleanup::MarkStatic(mutable) => {
- if let Place::Ptr { ptr, .. } = frame.return_place {
+ if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place {
// FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
self.memory.mark_static_initialized(
- ptr.unwrap_or_err()?.to_ptr()?.alloc_id,
+ ptr.to_ptr()?.alloc_id,
mutable,
)?
} else {
Ok(())
}
- pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
+ crate fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
- if let LocalValue::Live(Value::ByRef(ptr, _align)) = local {
+ if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
Ok(())
}
- /// Evaluate an assignment statement.
- ///
- /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
- /// type writes its results directly into the memory specified by the place.
- pub(super) fn eval_rvalue_into_place(
- &mut self,
- rvalue: &mir::Rvalue<'tcx>,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx> {
- let dest = self.eval_place(place)?;
- let dest_ty = self.place_ty(place);
- let dest_layout = self.layout_of(dest_ty)?;
-
- use rustc::mir::Rvalue::*;
- match *rvalue {
- Use(ref operand) => {
- let value = self.eval_operand(operand)?.value;
- let valty = ValTy {
- value,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
- }
-
- BinaryOp(bin_op, ref left, ref right) => {
- let left = self.eval_operand(left)?;
- let right = self.eval_operand(right)?;
- self.intrinsic_overflowing(
- bin_op,
- left,
- right,
- dest,
- dest_ty,
- )?;
- }
-
- CheckedBinaryOp(bin_op, ref left, ref right) => {
- let left = self.eval_operand(left)?;
- let right = self.eval_operand(right)?;
- self.intrinsic_with_overflow(
- bin_op,
- left,
- right,
- dest,
- dest_ty,
- )?;
- }
-
- UnaryOp(un_op, ref operand) => {
- let val = self.eval_operand_to_scalar(operand)?;
- let val = self.unary_op(un_op, val, dest_layout)?;
- self.write_scalar(
- dest,
- val,
- dest_ty,
- )?;
- }
-
- Aggregate(ref kind, ref operands) => {
- let (dest, active_field_index) = match **kind {
- mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
- self.write_discriminant_value(dest_ty, dest, variant_index)?;
- if adt_def.is_enum() {
- (self.place_downcast(dest, variant_index)?, active_field_index)
- } else {
- (dest, active_field_index)
- }
- }
- _ => (dest, None)
- };
-
- let layout = self.layout_of(dest_ty)?;
- for (i, operand) in operands.iter().enumerate() {
- let value = self.eval_operand(operand)?;
- // Ignore zero-sized fields.
- if !self.layout_of(value.ty)?.is_zst() {
- let field_index = active_field_index.unwrap_or(i);
- let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
- self.write_value(value, field_dest)?;
- }
- }
- }
-
- Repeat(ref operand, _) => {
- let (elem_ty, length) = match dest_ty.sty {
- ty::TyArray(elem_ty, n) => (elem_ty, n.unwrap_usize(self.tcx.tcx)),
- _ => {
- bug!(
- "tried to assign array-repeat to non-array type {:?}",
- dest_ty
- )
- }
- };
- let elem_size = self.layout_of(elem_ty)?.size;
- let value = self.eval_operand(operand)?.value;
-
- let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
-
- if length > 0 {
- let dest = dest.unwrap_or_err()?;
- //write the first value
- self.write_value_to_ptr(value, dest, dest_align, elem_ty)?;
-
- if length > 1 {
- let rest = dest.ptr_offset(elem_size * 1 as u64, &self)?;
- self.memory.copy_repeatedly(dest, dest_align, rest, dest_align, elem_size, length - 1, false)?;
- }
- }
- }
-
- Len(ref place) => {
- // FIXME(CTFE): don't allow computing the length of arrays in const eval
- let src = self.eval_place(place)?;
- let ty = self.place_ty(place);
- let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
- let size = self.memory.pointer_size().bytes() as u8;
- self.write_scalar(
- dest,
- Scalar::Bits {
- bits: len as u128,
- size,
- },
- dest_ty,
- )?;
- }
-
- Ref(_, _, ref place) => {
- let src = self.eval_place(place)?;
- // We ignore the alignment of the place here -- special handling for packed structs ends
- // at the `&` operator.
- let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();
-
- let val = match extra {
- PlaceExtra::None => Value::Scalar(ptr),
- PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
- PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
- PlaceExtra::DowncastVariant(..) => {
- bug!("attempted to take a reference to an enum downcast place")
- }
- };
- let valty = ValTy {
- value: val,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
- }
-
- NullaryOp(mir::NullOp::Box, ty) => {
- let ty = self.monomorphize(ty, self.substs());
- M::box_alloc(self, ty, dest)?;
- }
-
- NullaryOp(mir::NullOp::SizeOf, ty) => {
- let ty = self.monomorphize(ty, self.substs());
- let layout = self.layout_of(ty)?;
- assert!(!layout.is_unsized(),
- "SizeOf nullary MIR operator called for unsized type");
- let size = self.memory.pointer_size().bytes() as u8;
- self.write_scalar(
- dest,
- Scalar::Bits {
- bits: layout.size.bytes() as u128,
- size,
- },
- dest_ty,
- )?;
- }
-
- Cast(kind, ref operand, cast_ty) => {
- debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
- let src = self.eval_operand(operand)?;
- self.cast(src, kind, dest_ty, dest)?;
- }
-
- Discriminant(ref place) => {
- let ty = self.place_ty(place);
- let layout = self.layout_of(ty)?;
- let place = self.eval_place(place)?;
- let discr_val = self.read_discriminant_value(place, layout)?;
- let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8;
- self.write_scalar(dest, Scalar::Bits {
- bits: discr_val,
- size,
- }, dest_ty)?;
- }
- }
-
- self.dump_local(dest);
-
- Ok(())
- }
-
- pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
- match ty.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => !self.type_is_sized(ty),
- ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
- _ => false,
- }
- }
-
- pub(super) fn eval_operand_to_scalar(
- &mut self,
- op: &mir::Operand<'tcx>,
- ) -> EvalResult<'tcx, Scalar> {
- let valty = self.eval_operand(op)?;
- self.value_to_scalar(valty)
- }
-
- pub(crate) fn operands_to_args(
- &mut self,
- ops: &[mir::Operand<'tcx>],
- ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
- ops.into_iter()
- .map(|op| self.eval_operand(op))
- .collect()
- }
-
- pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
- use rustc::mir::Operand::*;
- let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs());
- match *op {
- // FIXME: do some more logic on `move` to invalidate the old location
- Copy(ref place) |
- Move(ref place) => {
- Ok(ValTy {
- value: self.eval_and_read_place(place)?,
- ty
- })
- },
-
- Constant(ref constant) => {
- let value = self.const_to_value(constant.literal.val)?;
-
- Ok(ValTy {
- value,
- ty,
- })
- }
- }
- }
-
- /// reads a tag and produces the corresponding variant index
- pub fn read_discriminant_as_variant_index(
- &self,
- place: Place,
- layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, usize> {
- match layout.variants {
- ty::layout::Variants::Single { index } => Ok(index),
- ty::layout::Variants::Tagged { .. } => {
- let discr_val = self.read_discriminant_value(place, layout)?;
- layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .discriminants(self.tcx.tcx)
- .position(|var| var.val == discr_val)
- .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
- }
- ty::layout::Variants::NicheFilling { .. } => {
- let discr_val = self.read_discriminant_value(place, layout)?;
- assert_eq!(discr_val as usize as u128, discr_val);
- Ok(discr_val as usize)
- },
- }
- }
-
- pub fn read_discriminant_value(
- &self,
- place: Place,
- layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, u128> {
- trace!("read_discriminant_value {:#?}", layout);
- if layout.abi == layout::Abi::Uninhabited {
- return Ok(0);
- }
-
- match layout.variants {
- layout::Variants::Single { index } => {
- let discr_val = layout.ty.ty_adt_def().map_or(
- index as u128,
- |def| def.discriminant_for_variant(*self.tcx, index).val);
- return Ok(discr_val);
- }
- layout::Variants::Tagged { .. } |
- layout::Variants::NicheFilling { .. } => {},
- }
- let discr_place_val = self.read_place(place)?;
- let (discr_val, discr) = self.read_field(discr_place_val, None, mir::Field::new(0), layout)?;
- trace!("discr value: {:?}, {:?}", discr_val, discr);
- let raw_discr = self.value_to_scalar(ValTy {
- value: discr_val,
- ty: discr.ty
- })?;
- let discr_val = match layout.variants {
- layout::Variants::Single { .. } => bug!(),
- // FIXME: should we catch invalid discriminants here?
- layout::Variants::Tagged { .. } => {
- if discr.ty.is_signed() {
- let i = raw_discr.to_bits(discr.size)? as i128;
- // going from layout tag type to typeck discriminant type
- // requires first sign extending with the layout discriminant
- let shift = 128 - discr.size.bits();
- let sexted = (i << shift) >> shift;
- // and then zeroing with the typeck discriminant type
- let discr_ty = layout
- .ty
- .ty_adt_def().expect("tagged layout corresponds to adt")
- .repr
- .discr_type();
- let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
- let shift = 128 - discr_ty.size().bits();
- let truncatee = sexted as u128;
- (truncatee << shift) >> shift
- } else {
- raw_discr.to_bits(discr.size)?
- }
- },
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- let variants_start = *niche_variants.start() as u128;
- let variants_end = *niche_variants.end() as u128;
- match raw_discr {
- Scalar::Ptr(_) => {
- assert!(niche_start == 0);
- assert!(variants_start == variants_end);
- dataful_variant as u128
- },
- Scalar::Bits { bits: raw_discr, size } => {
- assert_eq!(size as u64, discr.size.bytes());
- let discr = raw_discr.wrapping_sub(niche_start)
- .wrapping_add(variants_start);
- if variants_start <= discr && discr <= variants_end {
- discr
- } else {
- dataful_variant as u128
- }
- },
- }
- }
- };
-
- Ok(discr_val)
- }
-
-
- pub fn write_discriminant_value(
- &mut self,
- dest_ty: Ty<'tcx>,
- dest: Place,
- variant_index: usize,
- ) -> EvalResult<'tcx> {
- let layout = self.layout_of(dest_ty)?;
-
- match layout.variants {
- layout::Variants::Single { index } => {
- if index != variant_index {
- // If the layout of an enum is `Single`, all
- // other variants are necessarily uninhabited.
- assert_eq!(layout.for_variant(&self, variant_index).abi,
- layout::Abi::Uninhabited);
- }
- }
- layout::Variants::Tagged { ref tag, .. } => {
- let discr_val = dest_ty.ty_adt_def().unwrap()
- .discriminant_for_variant(*self.tcx, variant_index)
- .val;
-
- // raw discriminants for enums are isize or bigger during
- // their computation, but the in-memory tag is the smallest possible
- // representation
- let size = tag.value.size(self.tcx.tcx);
- let shift = 128 - size.bits();
- let discr_val = (discr_val << shift) >> shift;
-
- let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?;
- self.write_scalar(discr_dest, Scalar::Bits {
- bits: discr_val,
- size: size.bytes() as u8,
- }, tag.ty)?;
- }
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- if variant_index != dataful_variant {
- let (niche_dest, niche) =
- self.place_field(dest, mir::Field::new(0), layout)?;
- let niche_value = ((variant_index - niche_variants.start()) as u128)
- .wrapping_add(niche_start);
- self.write_scalar(niche_dest, Scalar::Bits {
- bits: niche_value,
- size: niche.size.bytes() as u8,
- }, niche.ty)?;
- }
- }
- }
-
- Ok(())
- }
-
- pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Value> {
- let cv = self.const_eval(gid)?;
- self.const_to_value(cv.val)
- }
-
pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> {
let param_env = if self.tcx.is_static(gid.instance.def_id()).is_some() {
ty::ParamEnv::reveal_all()
self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
}
- pub fn allocate_place_for_value(
- &mut self,
- value: Value,
- layout: TyLayout<'tcx>,
- variant: Option<usize>,
- ) -> EvalResult<'tcx, Place> {
- let (ptr, align) = match value {
- Value::ByRef(ptr, align) => (ptr, align),
- Value::ScalarPair(..) | Value::Scalar(_) => {
- let ptr = self.alloc_ptr(layout)?.into();
- self.write_value_to_ptr(value, ptr, layout.align, layout.ty)?;
- (ptr, layout.align)
- },
- };
- Ok(Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant),
- })
- }
-
- pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
- let new_place = match place {
- Place::Local { frame, local } => {
- match self.stack[frame].locals[local].access()? {
- Value::ByRef(ptr, align) => {
- Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: PlaceExtra::None,
- }
- }
- val => {
- let ty = self.stack[frame].mir.local_decls[local].ty;
- let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
- let layout = self.layout_of(ty)?;
- let ptr = self.alloc_ptr(layout)?;
- self.stack[frame].locals[local] =
- LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live
-
- let place = Place::from_ptr(ptr, layout.align);
- self.write_value(ValTy { value: val, ty }, place)?;
- place
- }
- }
- }
- Place::Ptr { .. } => place,
- };
- Ok(new_place)
- }
-
- /// ensures this Value is not a ByRef
- pub fn follow_by_ref_value(
- &self,
- value: Value,
- ty: Ty<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- match value {
- Value::ByRef(ptr, align) => {
- self.read_value(ptr, align, ty)
- }
- other => Ok(other),
- }
- }
-
- pub fn value_to_scalar(
- &self,
- ValTy { value, ty } : ValTy<'tcx>,
- ) -> EvalResult<'tcx, Scalar> {
- match self.follow_by_ref_value(value, ty)? {
- Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
-
- Value::Scalar(scalar) => scalar.unwrap_or_err(),
-
- Value::ScalarPair(..) => bug!("value_to_scalar can't work with fat pointers"),
- }
- }
-
- pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
- let valty = ValTy {
- value: val.to_value(),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- pub fn write_scalar(
- &mut self,
- dest: Place,
- val: impl Into<ScalarMaybeUndef>,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- let valty = ValTy {
- value: Value::Scalar(val.into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- pub fn write_value(
- &mut self,
- ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
- dest: Place,
- ) -> EvalResult<'tcx> {
- //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
- // Note that it is really important that the type here is the right one, and matches the type things are read at.
- // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
- // correct if we never look at this data with the wrong type.
-
- match dest {
- Place::Ptr { ptr, align, extra } => {
- assert_eq!(extra, PlaceExtra::None);
- self.write_value_to_ptr(src_val, ptr.unwrap_or_err()?, align, dest_ty)
- }
-
- Place::Local { frame, local } => {
- let old_val = self.stack[frame].locals[local].access()?;
- self.write_value_possibly_by_val(
- src_val,
- |this, val| this.stack[frame].set_local(local, val),
- old_val,
- dest_ty,
- )
- }
- }
- }
-
- // The cases here can be a bit subtle. Read carefully!
- fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
- &mut self,
- src_val: Value,
- write_dest: F,
- old_dest_val: Value,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- // FIXME: this should be a layout check, not underlying value
- if let Value::ByRef(dest_ptr, align) = old_dest_val {
- // If the value is already `ByRef` (that is, backed by an `Allocation`),
- // then we must write the new value into this allocation, because there may be
- // other pointers into the allocation. These other pointers are logically
- // pointers into the local variable, and must be able to observe the change.
- //
- // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
- // knew for certain that there were no outstanding pointers to this allocation.
- self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
- } else if let Value::ByRef(src_ptr, align) = src_val {
- // If the value is not `ByRef`, then we know there are no pointers to it
- // and we can simply overwrite the `Value` in the locals array directly.
- //
- // In this specific case, where the source value is `ByRef`, we must duplicate
- // the allocation, because this is a by-value operation. It would be incorrect
- // if they referred to the same allocation, since then a change to one would
- // implicitly change the other.
- //
- // It is a valid optimization to attempt reading a primitive value out of the
- // source and write that into the destination without making an allocation, so
- // we do so here.
- if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
- write_dest(self, src_val)?;
- } else {
- let layout = self.layout_of(dest_ty)?;
- let dest_ptr = self.alloc_ptr(layout)?.into();
- self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
- write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
- }
- } else {
- // Finally, we have the simple case where neither source nor destination are
- // `ByRef`. We may simply copy the source value over the the destintion.
- write_dest(self, src_val)?;
- }
- Ok(())
- }
-
- pub fn write_value_to_ptr(
- &mut self,
- value: Value,
- dest: Scalar,
- dest_align: Align,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- let layout = self.layout_of(dest_ty)?;
- trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
- match value {
- Value::ByRef(ptr, align) => {
- self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
- }
- Value::Scalar(scalar) => {
- let signed = match layout.abi {
- layout::Abi::Scalar(ref scal) => match scal.value {
- layout::Primitive::Int(_, signed) => signed,
- _ => false,
- },
- _ => false,
- };
- self.memory.write_scalar(dest, dest_align, scalar, layout.size, layout.align, signed)
- }
- Value::ScalarPair(a_val, b_val) => {
- trace!("write_value_to_ptr valpair: {:#?}", layout);
- let (a, b) = match layout.abi {
- layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
- _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout)
- };
- let (a_size, b_size) = (a.size(&self), b.size(&self));
- let (a_align, b_align) = (a.align(&self), b.align(&self));
- let a_ptr = dest;
- let b_offset = a_size.abi_align(b_align);
- let b_ptr = dest.ptr_offset(b_offset, &self)?.into();
- // TODO: What about signedess?
- self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, a_align, false)?;
- self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, b_align, false)
- }
- }
- }
-
- pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- if let Some(val) = self.try_read_value(ptr, align, ty)? {
- Ok(val)
- } else {
- bug!("primitive read failed for type: {:?}", ty);
- }
- }
-
- fn validate_scalar(
- &self,
- value: ScalarMaybeUndef,
- size: Size,
- scalar: &layout::Scalar,
- path: &str,
- ty: Ty,
- ) -> EvalResult<'tcx> {
- trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty);
- let (lo, hi) = scalar.valid_range.clone().into_inner();
-
- let value = match value {
- ScalarMaybeUndef::Scalar(scalar) => scalar,
- ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path),
- };
-
- let bits = match value {
- Scalar::Bits { bits, size: value_size } => {
- assert_eq!(value_size as u64, size.bytes());
- bits
- },
- Scalar::Ptr(_) => {
- let ptr_size = self.memory.pointer_size();
- let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
- return if lo > hi {
- if lo - hi == 1 {
- // no gap, all values are ok
- Ok(())
- } else if hi < ptr_max || lo > 1 {
- let max = u128::max_value() >> (128 - size.bits());
- validation_failure!(
- "pointer",
- path,
- format!("something in the range {:?} or {:?}", 0..=lo, hi..=max)
- )
- } else {
- Ok(())
- }
- } else if hi < ptr_max || lo > 1 {
- validation_failure!(
- "pointer",
- path,
- format!("something in the range {:?}", scalar.valid_range)
- )
- } else {
- Ok(())
- };
- },
- };
-
- // char gets a special treatment, because its number space is not contiguous so `TyLayout`
- // has no special checks for chars
- match ty.sty {
- ty::TyChar => {
- debug_assert_eq!(size.bytes(), 4);
- if ::std::char::from_u32(bits as u32).is_none() {
- return err!(InvalidChar(bits));
- }
- }
- _ => {},
- }
-
- use std::ops::RangeInclusive;
- let in_range = |bound: RangeInclusive<u128>| bound.contains(&bits);
- if lo > hi {
- if in_range(0..=hi) || in_range(lo..=u128::max_value()) {
- Ok(())
- } else {
- validation_failure!(
- bits,
- path,
- format!("something in the range {:?} or {:?}", ..=hi, lo..)
- )
- }
- } else {
- if in_range(scalar.valid_range.clone()) {
- Ok(())
- } else {
- validation_failure!(
- bits,
- path,
- format!("something in the range {:?}", scalar.valid_range)
- )
- }
- }
- }
-
- /// This function checks the memory where `ptr` points to.
- /// It will error if the bits at the destination do not match the ones described by the layout.
- pub fn validate_ptr_target(
- &self,
- ptr: Pointer,
- ptr_align: Align,
- mut layout: TyLayout<'tcx>,
- path: String,
- seen: &mut FxHashSet<(Pointer, Ty<'tcx>)>,
- todo: &mut Vec<(Pointer, Ty<'tcx>, String)>,
- ) -> EvalResult<'tcx> {
- self.memory.dump_alloc(ptr.alloc_id);
- trace!("validate_ptr_target: {:?}, {:#?}", ptr, layout);
-
- let variant;
- match layout.variants {
- layout::Variants::NicheFilling { niche: ref tag, .. } |
- layout::Variants::Tagged { ref tag, .. } => {
- let size = tag.value.size(self);
- let (tag_value, tag_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(0),
- layout,
- )?;
- let tag_value = match self.follow_by_ref_value(tag_value, tag_layout.ty)? {
- Value::Scalar(val) => val,
- _ => bug!("tag must be scalar"),
- };
- let path = format!("{}.TAG", path);
- self.validate_scalar(tag_value, size, tag, &path, tag_layout.ty)?;
- let variant_index = self.read_discriminant_as_variant_index(
- Place::from_ptr(ptr, ptr_align),
- layout,
- )?;
- variant = variant_index;
- layout = layout.for_variant(self, variant_index);
- trace!("variant layout: {:#?}", layout);
- },
- layout::Variants::Single { index } => variant = index,
- }
- match layout.fields {
- // primitives are unions with zero fields
- layout::FieldPlacement::Union(0) => {
- match layout.abi {
- // nothing to do, whatever the pointer points to, it is never going to be read
- layout::Abi::Uninhabited => validation_failure!("a value of an uninhabited type", path),
- // check that the scalar is a valid pointer or that its bit range matches the
- // expectation.
- layout::Abi::Scalar(ref scalar) => {
- let size = scalar.value.size(self);
- let value = self.memory.read_scalar(ptr, ptr_align, size)?;
- self.validate_scalar(value, size, scalar, &path, layout.ty)?;
- if scalar.value == Primitive::Pointer {
- // ignore integer pointers, we can't reason about the final hardware
- if let Scalar::Ptr(ptr) = value.unwrap_or_err()? {
- let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
- if let Some(AllocType::Static(did)) = alloc_kind {
- // statics from other crates are already checked
- // extern statics should not be validated as they have no body
- if !did.is_local() || self.tcx.is_foreign_item(did) {
- return Ok(());
- }
- }
- if let Some(tam) = layout.ty.builtin_deref(false) {
- // we have not encountered this pointer+layout combination before
- if seen.insert((ptr, tam.ty)) {
- todo.push((ptr, tam.ty, format!("(*{})", path)))
- }
- }
- }
- }
- Ok(())
- },
- _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", layout.abi),
- }
- }
- layout::FieldPlacement::Union(_) => {
- // We can't check unions, their bits are allowed to be anything.
- // The fields don't need to correspond to any bit pattern of the union's fields.
- // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389
- Ok(())
- },
- layout::FieldPlacement::Array { stride, count } => {
- let elem_layout = layout.field(self, 0)?;
- for i in 0..count {
- let mut path = path.clone();
- self.write_field_name(&mut path, layout.ty, i as usize, variant).unwrap();
- self.validate_ptr_target(ptr.offset(stride * i, self)?, ptr_align, elem_layout, path, seen, todo)?;
- }
- Ok(())
- },
- layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
-
- // check length field and vtable field
- match layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) {
- | Some(ty::TyStr)
- | Some(ty::TySlice(_)) => {
- let (len, len_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(1),
- layout,
- )?;
- let len = self.value_to_scalar(ValTy { value: len, ty: len_layout.ty })?;
- if len.to_bits(len_layout.size).is_err() {
- return validation_failure!("length is not a valid integer", path);
- }
- },
- Some(ty::TyDynamic(..)) => {
- let (vtable, vtable_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(1),
- layout,
- )?;
- let vtable = self.value_to_scalar(ValTy { value: vtable, ty: vtable_layout.ty })?;
- if vtable.to_ptr().is_err() {
- return validation_failure!("vtable address is not a pointer", path);
- }
- }
- _ => {},
- }
- for (i, &offset) in offsets.iter().enumerate() {
- let field_layout = layout.field(self, i)?;
- let mut path = path.clone();
- self.write_field_name(&mut path, layout.ty, i, variant).unwrap();
- self.validate_ptr_target(ptr.offset(offset, self)?, ptr_align, field_layout, path, seen, todo)?;
- }
- Ok(())
- }
- }
- }
-
- pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- // Convert to ByVal or ScalarPair if possible
- if let Value::ByRef(ptr, align) = val {
- if let Some(read_val) = self.try_read_value(ptr, align, ty)? {
- val = read_val;
- }
- }
- Ok(val)
- }
-
- pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
- let layout = self.layout_of(ty)?;
- self.memory.check_align(ptr, ptr_align)?;
-
- if layout.size.bytes() == 0 {
- return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 }))));
- }
-
- let ptr = ptr.to_ptr()?;
-
- match layout.abi {
- layout::Abi::Scalar(..) => {
- let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?;
- Ok(Some(Value::Scalar(scalar)))
- }
- layout::Abi::ScalarPair(ref a, ref b) => {
- let (a, b) = (&a.value, &b.value);
- let (a_size, b_size) = (a.size(self), b.size(self));
- let a_ptr = ptr;
- let b_offset = a_size.abi_align(b.align(self));
- let b_ptr = ptr.offset(b_offset, self)?.into();
- let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
- let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
- Ok(Some(Value::ScalarPair(a_val, b_val)))
- }
- _ => Ok(None),
- }
- }
-
+ #[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx> {
self.stack.last().expect("no call frames exist")
}
+ #[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
self.stack.last_mut().expect("no call frames exist")
}
}
}
- fn unsize_into_ptr(
- &mut self,
- src: Value,
- src_ty: Ty<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
- sty: Ty<'tcx>,
- dty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- // A<Struct> -> A<Trait> conversion
- let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
-
- match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
- (&ty::TyArray(_, length), &ty::TySlice(_)) => {
- let ptr = self.into_ptr(src)?;
- // u64 cast is from usize to u64, which is always good
- let valty = ValTy {
- value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
- (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
- // For now, upcasts are limited to changes in marker
- // traits, and hence never actually require an actual
- // change to the vtable.
- let valty = ValTy {
- value: src,
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
- (_, &ty::TyDynamic(ref data, _)) => {
- let trait_ref = data.principal().unwrap().with_self_ty(
- *self.tcx,
- src_pointee_ty,
- );
- let trait_ref = self.tcx.erase_regions(&trait_ref);
- let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
- let ptr = self.into_ptr(src)?;
- let valty = ValTy {
- value: ptr.to_value_with_vtable(vtable),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
- }
- }
-
- crate fn unsize_into(
- &mut self,
- src: Value,
- src_layout: TyLayout<'tcx>,
- dst: Place,
- dst_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx> {
- match (&src_layout.ty.sty, &dst_layout.ty.sty) {
- (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) |
- (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) |
- (&ty::TyRawPtr(TypeAndMut { ty: s, .. }),
- &ty::TyRawPtr(TypeAndMut { ty: d, .. })) => {
- self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s, d)
- }
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
- assert_eq!(def_a, def_b);
- if def_a.is_box() || def_b.is_box() {
- if !def_a.is_box() || !def_b.is_box() {
- bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout);
- }
- return self.unsize_into_ptr(
- src,
- src_layout.ty,
- dst,
- dst_layout.ty,
- src_layout.ty.boxed_ty(),
- dst_layout.ty.boxed_ty(),
- );
- }
-
- // unsizing of generic struct with pointer fields
- // Example: `Arc<T>` -> `Arc<Trait>`
- // here we need to increase the size of every &T thin ptr field to a fat ptr
- for i in 0..src_layout.fields.count() {
- let (dst_f_place, dst_field) =
- self.place_field(dst, mir::Field::new(i), dst_layout)?;
- if dst_field.is_zst() {
- continue;
- }
- let (src_f_value, src_field) = match src {
- Value::ByRef(ptr, align) => {
- let src_place = Place::from_scalar_ptr(ptr.into(), align);
- let (src_f_place, src_field) =
- self.place_field(src_place, mir::Field::new(i), src_layout)?;
- (self.read_place(src_f_place)?, src_field)
- }
- Value::Scalar(_) | Value::ScalarPair(..) => {
- let src_field = src_layout.field(&self, i)?;
- assert_eq!(src_layout.fields.offset(i).bytes(), 0);
- assert_eq!(src_field.size, src_layout.size);
- (src, src_field)
- }
- };
- if src_field.ty == dst_field.ty {
- self.write_value(ValTy {
- value: src_f_value,
- ty: src_field.ty,
- }, dst_f_place)?;
- } else {
- self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?;
- }
- }
- Ok(())
- }
- _ => {
- bug!(
- "unsize_into: invalid conversion: {:?} -> {:?}",
- src_layout,
- dst_layout
- )
- }
- }
- }
-
- pub fn dump_local(&self, place: Place) {
+ pub fn dump_place(&self, place: Place) {
// Debug output
if !log_enabled!(::log::Level::Trace) {
return;
panic!("Failed to access local: {:?}", err);
}
}
- Ok(Value::ByRef(ptr, align)) => {
+ Ok(Operand::Indirect(mplace)) => {
+ let (ptr, align) = mplace.to_scalar_ptr_align();
match ptr {
Scalar::Ptr(ptr) => {
write!(msg, " by align({}) ref:", align.abi()).unwrap();
allocs.push(ptr.alloc_id);
}
- ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
+ ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
}
}
- Ok(Value::Scalar(val)) => {
+ Ok(Operand::Immediate(Value::Scalar(val))) => {
write!(msg, " {:?}", val).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
}
}
- Ok(Value::ScalarPair(val1, val2)) => {
+ Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
- Place::Ptr { ptr, align, .. } => {
- match ptr {
- ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => {
- trace!("by align({}) ref:", align.abi());
+ Place::Ptr(mplace) => {
+ match mplace.ptr {
+ Scalar::Ptr(ptr) => {
+ trace!("by align({}) ref:", mplace.align.abi());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),
(frames, self.tcx.span)
}
+ #[inline(always)]
pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
- super::sign_extend(value, ty)
+ assert!(ty.abi.is_signed());
+ sign_extend(value, ty.size)
}
+ #[inline(always)]
pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
- super::truncate(value, ty)
- }
-
- fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result {
- match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyFnPtr(_) |
- ty::TyNever |
- ty::TyFnDef(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyForeign(..) |
- ty::TyDynamic(..) => {
- bug!("field_name({:?}): not applicable", ty)
- }
-
- // Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
- assert!(i < 2);
-
- // Reuse the fat *T type as its own thin pointer data field.
- // This provides information about e.g. DST struct pointees
- // (which may have no non-DST form), and will work as long
- // as the `Abi` or `FieldPlacement` is checked by users.
- if i == 0 {
- return write!(s, ".data_ptr");
- }
-
- match self.tcx.struct_tail(pointee).sty {
- ty::TySlice(_) |
- ty::TyStr => write!(s, ".len"),
- ty::TyDynamic(..) => write!(s, ".vtable_ptr"),
- _ => bug!("field_name({:?}): not applicable", ty)
- }
- }
-
- // Arrays and slices.
- ty::TyArray(_, _) |
- ty::TySlice(_) |
- ty::TyStr => write!(s, "[{}]", i),
-
- // generators and closures.
- ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
- let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
- let freevar = self.tcx.with_freevars(node_id, |fv| fv[i]);
- write!(s, ".upvar({})", self.tcx.hir.name(freevar.var_id()))
- }
-
- ty::TyTuple(_) => write!(s, ".{}", i),
-
- // enums
- ty::TyAdt(def, ..) if def.is_enum() => {
- let variant = &def.variants[variant];
- write!(s, ".{}::{}", variant.name, variant.fields[i].ident)
- }
-
- // other ADTs.
- ty::TyAdt(def, _) => write!(s, ".{}", def.non_enum_variant().fields[i].ident),
-
- ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
- ty::TyInfer(_) | ty::TyError => {
- bug!("write_field_name: unexpected type `{}`", ty)
- }
- }
- }
-
- pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
- trace!("{:?} is now live", local);
-
- let ty = self.frame().mir.local_decls[local].ty;
- let init = self.init_value(ty)?;
- // StorageLive *always* kills the value that's currently stored
- Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init)))
- }
-
- fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- let ty = self.monomorphize(ty, self.substs());
- let layout = self.layout_of(ty)?;
- Ok(match layout.abi {
- layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef),
- layout::Abi::ScalarPair(..) => Value::ScalarPair(
- ScalarMaybeUndef::Undef,
- ScalarMaybeUndef::Undef,
- ),
- _ => Value::ByRef(self.alloc_ptr(layout)?.into(), layout.align),
- })
+ truncate(value, ty.size)
}
}
-impl<'mir, 'tcx> Frame<'mir, 'tcx> {
- fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
- match self.locals[local] {
- LocalValue::Dead => err!(DeadLocal),
- LocalValue::Live(ref mut local) => {
- *local = value;
- Ok(())
- }
- }
- }
-
- /// Returns the old value of the local
- pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
- trace!("{:?} is now dead", local);
-
- mem::replace(&mut self.locals[local], LocalValue::Dead)
- }
-}
use std::hash::Hash;
use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId};
-use super::{EvalContext, Place, ValTy, Memory};
+use super::{EvalContext, PlaceTy, OpTy, Memory};
use rustc::mir;
-use rustc::ty::{self, Ty};
+use rustc::ty::{self, layout::TyLayout};
use rustc::ty::layout::Size;
use syntax::source_map::Span;
use syntax::ast::Mutability;
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
- sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
/// directly process an intrinsic without pushing a stack frame.
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[ValTy<'tcx>],
- dest: Place,
- dest_layout: ty::layout::TyLayout<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: PlaceTy<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx>;
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
- left_ty: Ty<'tcx>,
+ left_layout: TyLayout<'tcx>,
right: Scalar,
- right_ty: Ty<'tcx>,
+ right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>>;
/// Called when trying to mark machine defined `MemoryKinds` as static
/// Returns a pointer to the allocated memory
fn box_alloc<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
- ty: Ty<'tcx>,
- dest: Place,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx>;
/// Called when trying to access a global declared with a `linkage` attribute
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
+
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};
use std::ptr;
use rustc::ty::ParamEnv;
use rustc::ty::query::TyCtxtAt;
use rustc::ty::layout::{self, Align, TargetDataLayout, Size};
-use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef,
- EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType};
-pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint};
+use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, ScalarMaybeUndef,
+ EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType, truncate};
+pub use rustc::mir::interpret::{write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
use syntax::ast::Mutability;
use super::{EvalContext, Machine};
+
////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
alloc_map: FxHashMap<AllocId, Allocation>,
- /// The current stack frame. Used to check accesses against locks.
- pub cur_frame: usize,
-
pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
data,
alloc_kind,
alloc_map,
- cur_frame,
tcx: _,
} = self;
*data == other.data
&& *alloc_kind == other.alloc_kind
&& *alloc_map == other.alloc_map
- && *cur_frame == other.cur_frame
}
}
data,
alloc_kind: _,
alloc_map: _,
- cur_frame,
tcx: _,
} = self;
data.hash(state);
- cur_frame.hash(state);
// We ignore some fields which don't change between evaluation steps.
alloc_kind: FxHashMap::default(),
alloc_map: FxHashMap::default(),
tcx,
- cur_frame: usize::max_value(),
}
}
self.tcx.data_layout.endian
}
- /// Check that the pointer is aligned AND non-NULL.
+ /// Check that the pointer is aligned AND non-NULL. This supports scalars
+ /// for the benefit of other parts of miri that need to check alignment even for ZST.
pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr {
}
}
+ /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
+ /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
+ /// in-bounds! This follows C's/LLVM's rules.
pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
let allocation_size = alloc.bytes.len() as u64;
assert!(self.tcx.is_static(def_id).is_some());
EvalErrorKind::ReferencedConstant(err).into()
}).map(|val| {
- self.tcx.const_value_to_allocation(val)
+ self.tcx.const_to_allocation(val)
})
}
/// Byte accessors
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
+ /// This checks alignment!
fn get_bytes_unchecked(
&self,
ptr: Pointer,
Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
}
+ /// This checks alignment!
fn get_bytes_unchecked_mut(
&mut self,
ptr: Pointer,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0);
self.clear_relocations(ptr, size)?;
- self.mark_definedness(ptr.into(), size, true)?;
+ self.mark_definedness(ptr, size, true)?;
self.get_bytes_unchecked_mut(ptr, size, align)
}
}
Some(MemoryKind::Stack) => {},
}
if let Some(mut alloc) = alloc {
- // ensure llvm knows not to put this into immutable memroy
+ // ensure llvm knows not to put this into immutable memory
alloc.runtime_mutability = mutability;
let alloc = self.tcx.intern_const_alloc(alloc);
self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc);
length: u64,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
- // Empty accesses don't need to be valid pointers, but they should still be aligned
- self.check_align(src, src_align)?;
- self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
+ // Nothing to do for ZST, other than checking alignment and non-NULLness.
+ self.check_align(src, src_align)?;
+ self.check_align(dest, dest_align)?;
return Ok(());
}
let src = src.to_ptr()?;
new_relocations
};
+ // This also checks alignment.
let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();
pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if size.bytes() == 0 {
+ self.check_align(ptr, align)?;
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, align)
pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if src.is_empty() {
+ self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if count.bytes() == 0 {
+ self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
Ok(())
}
+ /// Read a *non-ZST* scalar
pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianness = self.endianness();
+ // get_bytes_unchecked tests alignment
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct.
// We must not return Ok() for unaligned pointers!
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
+ /// Write a *non-ZST* scalar
pub fn write_scalar(
&mut self,
- ptr: Scalar,
+ ptr: Pointer,
ptr_align: Align,
val: ScalarMaybeUndef,
type_size: Size,
- type_align: Align,
- signed: bool,
) -> EvalResult<'tcx> {
let endianness = self.endianness();
- self.check_align(ptr, ptr_align)?;
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
val.offset.bytes() as u128
}
- Scalar::Bits { size: 0, .. } => {
- // nothing to do for ZSTs
- assert_eq!(type_size.bytes(), 0);
- return Ok(());
- }
-
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes());
+ assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} when writing to memory", size);
bits
},
};
- let ptr = ptr.to_ptr()?;
-
{
- let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(type_align))?;
- if signed {
- write_target_int(endianness, dst, bytes as i128).unwrap();
- } else {
- write_target_uint(endianness, dst, bytes).unwrap();
- }
+ // get_bytes_mut checks alignment
+ let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
+ write_target_uint(endianness, dst, bytes).unwrap();
}
// See if we have to also write a relocation
Ok(())
}
- pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
+ pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
- self.write_scalar(ptr.into(), ptr_align, val, ptr_size, ptr_align, false)
+ self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
fn int_align(&self, size: Size) -> Align {
pub fn mark_definedness(
&mut self,
- ptr: Scalar,
+ ptr: Pointer,
size: Size,
new_state: bool,
) -> EvalResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
- let ptr = ptr.to_ptr()?;
let alloc = self.get_mut(ptr.alloc_id)?;
alloc.undef_mask.set_range(
ptr.offset,
pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>;
fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;
-
- /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
- /// this may have to perform a load.
- fn into_ptr(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, ScalarMaybeUndef> {
- Ok(match value {
- Value::ByRef(ptr, align) => {
- self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
- }
- Value::Scalar(ptr) |
- Value::ScalarPair(ptr, _) => ptr,
- }.into())
- }
-
- fn into_ptr_vtable_pair(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> {
- match value {
- Value::ByRef(ref_ptr, align) => {
- let mem = self.memory();
- let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
- let vtable = mem.read_ptr_sized(
- ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
- align
- )?.unwrap_or_err()?.to_ptr()?;
- Ok((ptr, vtable))
- }
-
- Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.unwrap_or_err()?.to_ptr()?)),
- _ => bug!("expected ptr and vtable, got {:?}", value),
- }
- }
-
- fn into_slice(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> {
- match value {
- Value::ByRef(ref_ptr, align) => {
- let mem = self.memory();
- let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
- let len = mem.read_ptr_sized(
- ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
- align
- )?.unwrap_or_err()?.to_bits(mem.pointer_size())? as u64;
- Ok((ptr, len))
- }
- Value::ScalarPair(ptr, val) => {
- let len = val.unwrap_or_err()?.to_bits(self.memory().pointer_size())?;
- Ok((ptr, len as u64))
- }
- Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value),
- }
- }
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
//! An interpreter for MIR used in CTFE and by miri
mod cast;
-mod const_eval;
mod eval_context;
mod place;
+mod operand;
mod machine;
mod memory;
mod operator;
mod step;
mod terminator;
mod traits;
+mod const_eval;
+mod validity;
pub use self::eval_context::{
- EvalContext, Frame, StackPopCleanup,
- TyAndPacked, ValTy,
+ EvalContext, Frame, StackPopCleanup, LocalValue,
};
-pub use self::place::{Place, PlaceExtra};
+pub use self::place::{Place, PlaceExtra, PlaceTy, MemPlace, MPlaceTy};
pub use self::memory::{Memory, MemoryKind, HasMemory};
mk_borrowck_eval_cx,
mk_eval_cx,
CompileTimeEvaluator,
- const_value_to_allocation_provider,
+ const_to_allocation_provider,
const_eval_provider,
- const_val_field,
+ const_field,
const_variant_index,
- value_to_const_value,
+ op_to_const,
};
pub use self::machine::Machine;
-pub use self::memory::{write_target_uint, write_target_int, read_target_uint};
-
-use rustc::ty::layout::TyLayout;
-
-pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 {
- let size = layout.size.bits();
- assert!(layout.abi.is_signed());
- // sign extend
- let shift = 128 - size;
- // shift the unsigned value to the left
- // and back to the right as signed (essentially fills with FF on the left)
- (((value << shift) as i128) >> shift) as u128
-}
-
-pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 {
- let size = layout.size.bits();
- let shift = 128 - size;
- // truncate (shift left to drop out leftover values, shift right to fill with zeroes)
- (value << shift) >> shift
-}
+pub use self::operand::{Value, ValTy, Operand, OpTy};
--- /dev/null
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::convert::TryInto;
+
+use rustc::mir;
+use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt};
+use rustc_data_structures::indexed_vec::Idx;
+
+use rustc::mir::interpret::{
+ GlobalId, ConstValue, Scalar, EvalResult, Pointer, ScalarMaybeUndef, EvalErrorKind
+};
+use super::{EvalContext, Machine, MemPlace, MPlaceTy, PlaceExtra, MemoryKind};
+
+/// A `Value` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and fat pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Value`, and do not have to work with a `Place`.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Value {
+ Scalar(ScalarMaybeUndef),
+ ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef),
+}
+
+impl<'tcx> Value {
+ pub fn new_slice(
+ val: Scalar,
+ len: u64,
+ cx: impl HasDataLayout
+ ) -> Self {
+ Value::ScalarPair(val.into(), Scalar::Bits {
+ bits: len as u128,
+ size: cx.data_layout().pointer_size.bytes() as u8,
+ }.into())
+ }
+
+ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
+ Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
+ }
+
+ #[inline]
+ pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef {
+ match self {
+ Value::Scalar(val) => val,
+ Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
+ }
+ }
+
+ #[inline]
+ pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> {
+ self.to_scalar_or_undef().not_undef()
+ }
+
+ /// Convert the value into a pointer (or a pointer-sized integer).
+ /// Throws away the second half of a ScalarPair!
+ #[inline]
+ pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> {
+ match self {
+ Value::Scalar(ptr) |
+ Value::ScalarPair(ptr, _) => ptr.not_undef(),
+ }
+ }
+
+ pub fn to_scalar_dyn_trait(self) -> EvalResult<'tcx, (Scalar, Pointer)> {
+ match self {
+ Value::ScalarPair(ptr, vtable) =>
+ Ok((ptr.not_undef()?, vtable.to_ptr()?)),
+ _ => bug!("expected ptr and vtable, got {:?}", self),
+ }
+ }
+
+ pub fn to_scalar_slice(self, cx: impl HasDataLayout) -> EvalResult<'tcx, (Scalar, u64)> {
+ match self {
+ Value::ScalarPair(ptr, val) => {
+ let len = val.to_bits(cx.data_layout().pointer_size)?;
+ Ok((ptr.not_undef()?, len as u64))
+ }
+ _ => bug!("expected ptr and length, got {:?}", self),
+ }
+ }
+}
+
+// ScalarPair needs a type to interpret, so we often have a value and a type together
+// as input for binary and cast operations.
+#[derive(Copy, Clone, Debug)]
+pub struct ValTy<'tcx> {
+ pub value: Value,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
+ type Target = Value;
+ #[inline(always)]
+ fn deref(&self) -> &Value {
+ &self.value
+ }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Operand {
+ Immediate(Value),
+ Indirect(MemPlace),
+}
+
+impl Operand {
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
+ Operand::Indirect(MemPlace::from_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn from_scalar_value(val: Scalar) -> Self {
+ Operand::Immediate(Value::Scalar(val.into()))
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MemPlace {
+ match self {
+ Operand::Indirect(mplace) => mplace,
+ _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
+
+ }
+ }
+
+ #[inline]
+ pub fn to_immediate(self) -> Value {
+ match self {
+ Operand::Immediate(val) => val,
+ _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
+
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct OpTy<'tcx> {
+ pub op: Operand,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for OpTy<'tcx> {
+ type Target = Operand;
+ #[inline(always)]
+ fn deref(&self) -> &Operand {
+ &self.op
+ }
+}
+
+impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx>) -> Self {
+ OpTy {
+ op: Operand::Indirect(*mplace),
+ layout: mplace.layout
+ }
+ }
+}
+
+impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> {
+ #[inline(always)]
+ fn from(val: ValTy<'tcx>) -> Self {
+ OpTy {
+ op: Operand::Immediate(val.value),
+ layout: val.layout
+ }
+ }
+}
+
+impl<'tcx> OpTy<'tcx> {
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::from_ptr(ptr, align), layout }
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::from_ptr(ptr, layout.align), layout }
+ }
+
+ #[inline]
+ pub fn from_scalar_value(val: Scalar, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::Immediate(Value::Scalar(val.into())), layout }
+ }
+}
+
+// Use the existing layout if given (but sanity check in debug mode),
+// or compute the layout.
+#[inline(always)]
+fn from_known_layout<'tcx>(
+ layout: Option<TyLayout<'tcx>>,
+ compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>>
+) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ match layout {
+ None => compute(),
+ Some(layout) => {
+ if cfg!(debug_assertions) {
+ let layout2 = compute()?;
+ assert_eq!(layout.details, layout2.details,
+ "Mismatch in layout of supposedly equal-layout types {:?} and {:?}",
+ layout.ty, layout2.ty);
+ }
+ Ok(layout)
+ }
+ }
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ /// Try reading a value in memory; this is interesting particularly for ScalarPair.
+ /// Return None if the layout does not permit loading this as a value.
+ pub(super) fn try_read_value_from_mplace(
+ &self,
+ mplace: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, Option<Value>> {
+ if mplace.extra != PlaceExtra::None {
+ return Ok(None);
+ }
+ let (ptr, ptr_align) = mplace.to_scalar_ptr_align();
+
+ if mplace.layout.size.bytes() == 0 {
+ // Not all ZSTs have a layout we would handle below, so just short-circuit them
+ // all here.
+ self.memory.check_align(ptr, ptr_align)?;
+ return Ok(Some(Value::Scalar(Scalar::zst().into())));
+ }
+
+ let ptr = ptr.to_ptr()?;
+ match mplace.layout.abi {
+ layout::Abi::Scalar(..) => {
+ let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?;
+ Ok(Some(Value::Scalar(scalar)))
+ }
+ layout::Abi::ScalarPair(ref a, ref b) => {
+ let (a, b) = (&a.value, &b.value);
+ let (a_size, b_size) = (a.size(self), b.size(self));
+ let a_ptr = ptr;
+ let b_offset = a_size.abi_align(b.align(self));
+ assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
+ let b_ptr = ptr.offset(b_offset, self)?.into();
+ let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
+ let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
+ Ok(Some(Value::ScalarPair(a_val, b_val)))
+ }
+ _ => Ok(None),
+ }
+ }
+
+ /// Try returning an immediate value for the operand.
+ /// If the layout does not permit loading this as a value, return where in memory
+ /// we can find the data.
+ /// Note that for a given layout, this operation will either always fail or always
+ /// succeed! Whether it succeeds depends on whether the layout can be represented
+ /// in a `Value`, not on which data is stored there currently.
+ pub(super) fn try_read_value(
+ &self,
+ src: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, Result<Value, MemPlace>> {
+ Ok(match src.try_as_mplace() {
+ Ok(mplace) => {
+ if let Some(val) = self.try_read_value_from_mplace(mplace)? {
+ Ok(val)
+ } else {
+ Err(*mplace)
+ }
+ },
+ Err(val) => Ok(val),
+ })
+ }
+
+ /// Read a value from a place, asserting that that is possible with the given layout.
+ #[inline(always)]
+ pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
+ if let Ok(value) = self.try_read_value(op)? {
+ Ok(ValTy { value, layout: op.layout })
+ } else {
+ bug!("primitive read failed for type: {:?}", op.layout.ty);
+ }
+ }
+
+ /// Read a scalar from a place
+ pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> {
+ match *self.read_value(op)? {
+ Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty),
+ Value::Scalar(val) => Ok(val),
+ }
+ }
+
+ pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> {
+ // This decides which types we will use the Immediate optimization for, and hence should
+ // match what `try_read_value` and `eval_place_to_op` support.
+ if layout.is_zst() {
+ return Ok(Operand::Immediate(Value::Scalar(Scalar::zst().into())));
+ }
+
+ Ok(match layout.abi {
+ layout::Abi::Scalar(..) =>
+ Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)),
+ layout::Abi::ScalarPair(..) =>
+ Operand::Immediate(Value::ScalarPair(
+ ScalarMaybeUndef::Undef,
+ ScalarMaybeUndef::Undef,
+ )),
+ _ => {
+ trace!("Forcing allocation for local of type {:?}", layout.ty);
+ Operand::Indirect(
+ *self.allocate(layout, MemoryKind::Stack)?
+ )
+ }
+ })
+ }
+
+ /// Projection functions
+ pub fn operand_field(
+ &self,
+ op: OpTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let base = match op.try_as_mplace() {
+ Ok(mplace) => {
+ // The easy case
+ let field = self.mplace_field(mplace, field)?;
+ return Ok(field.into());
+ },
+ Err(value) => value
+ };
+
+ let field = field.try_into().unwrap();
+ let field_layout = op.layout.field(self, field)?;
+ if field_layout.size.bytes() == 0 {
+ let val = Value::Scalar(Scalar::zst().into());
+ return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout });
+ }
+ let offset = op.layout.fields.offset(field);
+ let value = match base {
+ // the field covers the entire type
+ _ if offset.bytes() == 0 && field_layout.size == op.layout.size => base,
+ // extract fields from types with `ScalarPair` ABI
+ Value::ScalarPair(a, b) => {
+ let val = if offset.bytes() == 0 { a } else { b };
+ Value::Scalar(val)
+ },
+ Value::Scalar(val) =>
+ bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout),
+ };
+ Ok(OpTy { op: Operand::Immediate(value), layout: field_layout })
+ }
+
+ pub(super) fn operand_downcast(
+ &self,
+ op: OpTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ // Downcasts only change the layout
+ Ok(match op.try_as_mplace() {
+ Ok(mplace) => {
+ self.mplace_downcast(mplace, variant)?.into()
+ },
+ Err(..) => {
+ let layout = op.layout.for_variant(self, variant);
+ OpTy { layout, ..op }
+ }
+ })
+ }
+
+ // Take an operand, representing a pointer, and dereference it -- that
+ // will always be a MemPlace.
+ pub(super) fn deref_operand(
+ &self,
+ src: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let val = self.read_value(src)?;
+ trace!("deref to {} on {:?}", val.layout.ty, val);
+ Ok(self.ref_to_mplace(val)?)
+ }
+
+ pub fn operand_projection(
+ &self,
+ base: OpTy<'tcx>,
+ proj_elem: &mir::PlaceElem<'tcx>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.operand_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.operand_downcast(base, variant)?,
+ Deref => self.deref_operand(base)?.into(),
+ // The rest should only occur as mplace, we do not use Immediates for types
+ // allowing such operations. This matches place_projection forcing an allocation.
+ Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+ let mplace = base.to_mem_place();
+ self.mplace_projection(mplace, proj_elem)?.into()
+ }
+ })
+ }
+
+ // Evaluate a place with the goal of reading from it. This lets us sometimes
+ // avoid allocations. If you already know the layout, you can pass it in
+ // to avoid looking it up again.
+ fn eval_place_to_op(
+ &mut self,
+ mir_place: &mir::Place<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::Place::*;
+ Ok(match *mir_place {
+ Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer),
+ Local(local) => {
+ let op = *self.frame().locals[local].access()?;
+ let layout = from_known_layout(layout,
+ || self.layout_of_local(self.cur_frame(), local))?;
+ OpTy { op, layout }
+ },
+
+ Projection(ref proj) => {
+ let op = self.eval_place_to_op(&proj.base, None)?;
+ self.operand_projection(op, &proj.elem)?
+ }
+
+ // Everything else is an mplace, so we just call `eval_place`.
+ // Note that getting an mplace for a static always requires `&mut`,
+ // so this does not "cost" us anything in terms of mutability.
+ Promoted(_) | Static(_) => {
+ let place = self.eval_place(mir_place)?;
+ place.to_mem_place().into()
+ }
+ })
+ }
+
+ /// Evaluate the operand, returning a place where you can then find the data.
+ /// If you already know the layout, you can save some table lookups
+ /// by passing it in here.
+ pub fn eval_operand(
+ &mut self,
+ mir_op: &mir::Operand<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::Operand::*;
+ let op = match *mir_op {
+ // FIXME: do some more logic on `move` to invalidate the old location
+ Copy(ref place) |
+ Move(ref place) =>
+ self.eval_place_to_op(place, layout)?,
+
+ Constant(ref constant) => {
+ let layout = from_known_layout(layout, || {
+ let ty = self.monomorphize(mir_op.ty(self.mir(), *self.tcx), self.substs());
+ self.layout_of(ty)
+ })?;
+ let op = self.const_value_to_op(constant.literal.val)?;
+ OpTy { op, layout }
+ }
+ };
+ trace!("{:?}: {:?}", mir_op, *op);
+ Ok(op)
+ }
+
+ /// Evaluate a bunch of operands at once
+ pub(crate) fn eval_operands(
+ &mut self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> {
+ ops.into_iter()
+ .map(|op| self.eval_operand(op, None))
+ .collect()
+ }
+
+ // Also used e.g. when miri runs into a constant.
+ // Unfortunately, this needs an `&mut` to be able to allocate a copy of a `ByRef`
+ // constant. This bleeds up to `eval_operand` needing `&mut`.
+ pub fn const_value_to_op(
+ &mut self,
+ val: ConstValue<'tcx>,
+ ) -> EvalResult<'tcx, Operand> {
+ match val {
+ ConstValue::Unevaluated(def_id, substs) => {
+ let instance = self.resolve(def_id, substs)?;
+ self.global_to_op(GlobalId {
+ instance,
+ promoted: None,
+ })
+ }
+ ConstValue::ByRef(alloc, offset) => {
+ // FIXME: Allocate new AllocId for all constants inside
+ let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
+ Ok(Operand::from_ptr(Pointer::new(id, offset), alloc.align))
+ },
+ ConstValue::ScalarPair(a, b) =>
+ Ok(Operand::Immediate(Value::ScalarPair(a.into(), b))),
+ ConstValue::Scalar(x) =>
+ Ok(Operand::Immediate(Value::Scalar(x.into()))),
+ }
+ }
+
+ pub(super) fn global_to_op(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> {
+ let cv = self.const_eval(gid)?;
+ self.const_value_to_op(cv.val)
+ }
+
+ /// We cannot do self.read_value(self.eval_operand) due to eval_operand taking &mut self,
+ /// so this helps avoid unnecessary let.
+ #[inline]
+ pub fn eval_operand_and_read_value(
+ &mut self,
+ op: &mir::Operand<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, ValTy<'tcx>> {
+ let op = self.eval_operand(op, layout)?;
+ self.read_value(op)
+ }
+
+ /// reads a tag and produces the corresponding variant index
+ pub fn read_discriminant_as_variant_index(
+ &self,
+ rval: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, usize> {
+ match rval.layout.variants {
+ layout::Variants::Single { index } => Ok(index),
+ layout::Variants::Tagged { .. } => {
+ let discr_val = self.read_discriminant_value(rval)?;
+ rval.layout.ty
+ .ty_adt_def()
+ .expect("tagged layout for non adt")
+ .discriminants(self.tcx.tcx)
+ .position(|var| var.val == discr_val)
+ .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
+ }
+ layout::Variants::NicheFilling { .. } => {
+ let discr_val = self.read_discriminant_value(rval)?;
+ assert_eq!(discr_val as usize as u128, discr_val);
+ Ok(discr_val as usize)
+ },
+ }
+ }
+
+ pub fn read_discriminant_value(
+ &self,
+ rval: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, u128> {
+ trace!("read_discriminant_value {:#?}", rval.layout);
+ if rval.layout.abi == layout::Abi::Uninhabited {
+ return err!(Unreachable);
+ }
+
+ match rval.layout.variants {
+ layout::Variants::Single { index } => {
+ let discr_val = rval.layout.ty.ty_adt_def().map_or(
+ index as u128,
+ |def| def.discriminant_for_variant(*self.tcx, index).val);
+ return Ok(discr_val);
+ }
+ layout::Variants::Tagged { .. } |
+ layout::Variants::NicheFilling { .. } => {},
+ }
+ let discr_op = self.operand_field(rval, 0)?;
+ let discr_val = self.read_value(discr_op)?;
+ trace!("discr value: {:?}", discr_val);
+ let raw_discr = discr_val.to_scalar()?;
+ Ok(match rval.layout.variants {
+ layout::Variants::Single { .. } => bug!(),
+ // FIXME: We should catch invalid discriminants here!
+ layout::Variants::Tagged { .. } => {
+ if discr_val.layout.ty.is_signed() {
+ let i = raw_discr.to_bits(discr_val.layout.size)? as i128;
+ // going from layout tag type to typeck discriminant type
+ // requires first sign extending with the layout discriminant
+ let shift = 128 - discr_val.layout.size.bits();
+ let sexted = (i << shift) >> shift;
+ // and then zeroing with the typeck discriminant type
+ let discr_ty = rval.layout.ty
+ .ty_adt_def().expect("tagged layout corresponds to adt")
+ .repr
+ .discr_type();
+ let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
+ let shift = 128 - discr_ty.size().bits();
+ let truncatee = sexted as u128;
+ (truncatee << shift) >> shift
+ } else {
+ raw_discr.to_bits(discr_val.layout.size)?
+ }
+ },
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
+ } => {
+ let variants_start = *niche_variants.start() as u128;
+ let variants_end = *niche_variants.end() as u128;
+ match raw_discr {
+ Scalar::Ptr(_) => {
+ assert!(niche_start == 0);
+ assert!(variants_start == variants_end);
+ dataful_variant as u128
+ },
+ Scalar::Bits { bits: raw_discr, size } => {
+ assert_eq!(size as u64, discr_val.layout.size.bytes());
+ let discr = raw_discr.wrapping_sub(niche_start)
+ .wrapping_add(variants_start);
+ if variants_start <= discr && discr <= variants_end {
+ discr
+ } else {
+ dataful_variant as u128
+ }
+ },
+ }
+ }
+ })
+ }
+
+}
use rustc::mir;
-use rustc::ty::{self, Ty, layout};
+use rustc::ty::{self, layout::{self, TyLayout}};
use syntax::ast::FloatTy;
-use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
+use rustc::mir::interpret::{EvalResult, Scalar};
-use super::{EvalContext, Place, Machine, ValTy};
+use super::{EvalContext, PlaceTy, Value, Machine, ValTy};
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- fn binop_with_overflow(
- &self,
- op: mir::BinOp,
- left: ValTy<'tcx>,
- right: ValTy<'tcx>,
- ) -> EvalResult<'tcx, (Scalar, bool)> {
- let left_val = self.value_to_scalar(left)?;
- let right_val = self.value_to_scalar(right)?;
- self.binary_op(op, left_val, left.ty, right_val, right.ty)
- }
-
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
/// and a boolean signifying the potential overflow to the destination.
- pub fn intrinsic_with_overflow(
+ pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
- let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
+ let (val, overflowed) = self.binary_op(op, left, right)?;
let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
- let valty = ValTy {
- value: val,
- ty: dest_ty,
- };
- self.write_value(valty, dest)
+ self.write_value(val, dest)
}
/// Applies the binary operation `op` to the arguments and writes the result to the
- /// destination. Returns `true` if the operation overflowed.
- pub fn intrinsic_overflowing(
+ /// destination.
+ pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx, bool> {
- let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
- self.write_scalar(dest, val, dest_ty)?;
- Ok(overflowed)
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let (val, _overflowed) = self.binary_op(op, left, right)?;
+ self.write_scalar(val, dest)
}
}
pub fn binary_op(
&self,
bin_op: mir::BinOp,
- left: Scalar,
- left_ty: Ty<'tcx>,
- right: Scalar,
- right_ty: Ty<'tcx>,
+ ValTy { value: left, layout: left_layout }: ValTy<'tcx>,
+ ValTy { value: right, layout: right_layout }: ValTy<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
use rustc::mir::BinOp::*;
- let left_layout = self.layout_of(left_ty)?;
- let right_layout = self.layout_of(right_ty)?;
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
let left_kind = match left_layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.value,
- _ => return err!(TypeNotPrimitive(left_ty)),
+ _ => return err!(TypeNotPrimitive(left_layout.ty)),
};
let right_kind = match right_layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.value,
- _ => return err!(TypeNotPrimitive(right_ty)),
+ _ => return err!(TypeNotPrimitive(right_layout.ty)),
};
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
// I: Handle operations that support pointers
if !left_kind.is_float() && !right_kind.is_float() {
- if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
+ if let Some(handled) =
+ M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)?
+ {
return Ok(handled);
}
}
}
}
- if let ty::TyFloat(fty) = left_ty.sty {
+ if let ty::Float(fty) = left_layout.ty.sty {
macro_rules! float_math {
($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l);
}
}
- let size = self.layout_of(left_ty).unwrap().size.bytes() as u8;
+ let size = left_layout.size.bytes() as u8;
// only ints left
let val = match bin_op {
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
left,
- left_ty,
+ left_layout.ty,
right,
- right_ty,
+ right_layout.ty,
);
return err!(Unimplemented(msg));
}
let result_bytes = match (un_op, &layout.ty.sty) {
- (Not, ty::TyBool) => !val.to_bool()? as u128,
+ (Not, ty::Bool) => !val.to_bool()? as u128,
(Not, _) => !bytes,
- (Neg, ty::TyFloat(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)),
- (Neg, ty::TyFloat(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)),
+ (Neg, ty::Float(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)),
+ (Neg, ty::Float(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)),
(Neg, _) if bytes == (1 << (size.bits() - 1)) => return err!(OverflowNeg),
(Neg, _) => (-(bytes as i128)) as u128,
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::hash::{Hash, Hasher};
+use std::convert::TryFrom;
+
use rustc::mir;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout};
use rustc_data_structures::indexed_vec::Idx;
-use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef};
-use super::{EvalContext, Machine, ValTy};
-use interpret::memory::HasMemory;
+use rustc::mir::interpret::{
+ GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef
+};
+use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind};
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct MemPlace {
+ /// A place may have an integral pointer for ZSTs, since it might
+ /// be turned back into a reference before ever being dereferenced.
+ /// However, it may never be undef.
+ pub ptr: Scalar,
+ pub align: Align,
+ pub extra: PlaceExtra,
+}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Place {
/// A place referring to a value allocated in the `Memory` system.
- Ptr {
- /// A place may have an invalid (integral or undef) pointer,
- /// since it might be turned back into a reference
- /// before ever being dereferenced.
- ptr: ScalarMaybeUndef,
- align: Align,
- extra: PlaceExtra,
- },
+ Ptr(MemPlace),
- /// A place referring to a value on the stack. Represented by a stack frame index paired with
- /// a Mir local index.
- Local { frame: usize, local: mir::Local },
+ /// To support alloc-free locals, we are able to write directly to a local.
+ /// (Without that optimization, we'd just always be a `MemPlace`.)
+ Local {
+ frame: usize,
+ local: mir::Local,
+ },
}
+// Extra information for fat pointers / places
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum PlaceExtra {
None,
Length(u64),
Vtable(Pointer),
- DowncastVariant(usize),
}
-impl<'tcx> Place {
- /// Produces a Place that will error if attempted to be read from
- pub fn undef() -> Self {
- Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap())
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceTy<'tcx> {
+ place: Place,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> {
+ type Target = Place;
+ #[inline(always)]
+ fn deref(&self) -> &Place {
+ &self.place
+ }
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Debug)]
+pub struct MPlaceTy<'tcx> {
+ mplace: MemPlace,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> {
+ type Target = MemPlace;
+ #[inline(always)]
+ fn deref(&self) -> &MemPlace {
+ &self.mplace
}
+}
+
+impl<'tcx> From<MPlaceTy<'tcx>> for PlaceTy<'tcx> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx>) -> Self {
+ PlaceTy {
+ place: Place::Ptr(mplace.mplace),
+ layout: mplace.layout
+ }
+ }
+}
- pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self {
- Place::Ptr {
+impl MemPlace {
+ #[inline(always)]
+ pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+ MemPlace {
ptr,
align,
extra: PlaceExtra::None,
}
}
+ #[inline(always)]
pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
- Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align)
+ Self::from_scalar_ptr(ptr.into(), align)
+ }
+
+ #[inline(always)]
+ pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
+ assert_eq!(self.extra, PlaceExtra::None);
+ (self.ptr, self.align)
}
- pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) {
+ /// Extract the ptr part of the mplace
+ #[inline(always)]
+ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
+ // At this point, we forget about the alignment information -- the place has been turned into a reference,
+ // and no matter where it came from, it now must be aligned.
+ self.to_scalar_ptr_align().0.to_ptr()
+ }
+
+ /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space.
+ /// This is the inverse of `ref_to_mplace`.
+ pub fn to_ref(self, cx: impl HasDataLayout) -> Value {
+ // We ignore the alignment of the place here -- special handling for packed structs ends
+ // at the `&` operator.
+ match self.extra {
+ PlaceExtra::None => Value::Scalar(self.ptr.into()),
+ PlaceExtra::Length(len) => Value::new_slice(self.ptr.into(), len, cx),
+ PlaceExtra::Vtable(vtable) => Value::new_dyn_trait(self.ptr.into(), vtable),
+ }
+ }
+}
+
+impl<'tcx> MPlaceTy<'tcx> {
+ #[inline]
+ fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self {
+ MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout }
+ }
+
+ #[inline]
+ pub(super) fn len(self) -> u64 {
+ // Sanity check
+ let ty_len = match self.layout.fields {
+ layout::FieldPlacement::Array { count, .. } => count,
+ _ => bug!("Length for non-array layout {:?} requested", self.layout),
+ };
+ if let PlaceExtra::Length(len) = self.extra {
+ len
+ } else {
+ ty_len
+ }
+ }
+}
+
+// Validation needs to hash MPlaceTy, but we cannot hash Layout -- so we just hash the type
+impl<'tcx> Hash for MPlaceTy<'tcx> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.mplace.hash(state);
+ self.layout.ty.hash(state);
+ }
+}
+impl<'tcx> PartialEq for MPlaceTy<'tcx> {
+ fn eq(&self, other: &Self) -> bool {
+ self.mplace == other.mplace && self.layout.ty == other.layout.ty
+ }
+}
+impl<'tcx> Eq for MPlaceTy<'tcx> {}
+
+impl<'tcx> OpTy<'tcx> {
+ #[inline(always)]
+ pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx>, Value> {
+ match *self {
+ Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+ Operand::Immediate(value) => Err(value),
+ }
+ }
+
+ #[inline(always)]
+ pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+ self.try_as_mplace().unwrap()
+ }
+}
+
+impl<'tcx> Place {
+ /// Produces a Place that will error if attempted to be read from or written to
+ #[inline]
+ pub fn null(cx: impl HasDataLayout) -> Self {
+ Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
+ }
+
+ #[inline]
+ pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+ Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
+ Place::Ptr(MemPlace::from_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MemPlace {
match self {
- Place::Ptr { ptr, align, extra } => (ptr, align, extra),
- _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
+ Place::Ptr(mplace) => mplace,
+ _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
}
}
- pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) {
- let (ptr, align, _extra) = self.to_ptr_align_extra();
- (ptr, align)
+ #[inline]
+ pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
+ self.to_mem_place().to_scalar_ptr_align()
}
+ #[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
- // At this point, we forget about the alignment information -- the place has been turned into a reference,
- // and no matter where it came from, it now must be aligned.
- self.to_ptr_align().0.unwrap_or_err()?.to_ptr()
- }
-
- pub(super) fn elem_ty_and_len(
- self,
- ty: Ty<'tcx>,
- tcx: TyCtxt<'_, 'tcx, '_>
- ) -> (Ty<'tcx>, u64) {
- match ty.sty {
- ty::TyArray(elem, n) => (elem, n.unwrap_usize(tcx)),
-
- ty::TySlice(elem) => {
- match self {
- Place::Ptr { extra: PlaceExtra::Length(len), .. } => (elem, len),
- _ => {
- bug!(
- "elem_ty_and_len of a TySlice given non-slice place: {:?}",
- self
- )
- }
- }
- }
+ self.to_mem_place().to_ptr()
+ }
+}
- _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
- }
+impl<'tcx> PlaceTy<'tcx> {
+ /// Produces a Place that will error if attempted to be read from or written to
+ #[inline]
+ pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self {
+ PlaceTy { place: Place::from_scalar_ptr(Scalar::ptr_null(cx), layout.align), layout }
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+ MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
}
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- /// Reads a value from the place without going through the intermediate step of obtaining
- /// a `miri::Place`
- pub fn try_read_place(
+ /// Take a value, which represents a (thin or fat) reference, and make it a place.
+ /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`.
+ pub fn ref_to_mplace(
+ &self, val: ValTy<'tcx>
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
+ let layout = self.layout_of(pointee_type)?;
+ let mplace = match self.tcx.struct_tail(pointee_type).sty {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = val.to_scalar_dyn_trait()?;
+ MemPlace {
+ ptr,
+ align: layout.align,
+ extra: PlaceExtra::Vtable(vtable),
+ }
+ }
+ ty::Str | ty::Slice(_) => {
+ let (ptr, len) = val.to_scalar_slice(self)?;
+ MemPlace {
+ ptr,
+ align: layout.align,
+ extra: PlaceExtra::Length(len),
+ }
+ }
+ _ => MemPlace {
+ ptr: val.to_scalar()?,
+ align: layout.align,
+ extra: PlaceExtra::None,
+ },
+ };
+ Ok(MPlaceTy { mplace, layout })
+ }
+
+ /// Offset a pointer to project to a field. Unlike place_field, this is always
+ /// possible without allocating, so it can take &self. Also return the field's layout.
+ /// This supports both struct and array fields.
+ #[inline(always)]
+ pub fn mplace_field(
&self,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, Option<Value>> {
- use rustc::mir::Place::*;
- match *place {
- // Might allow this in the future, right now there's no way to do this from Rust code anyway
- Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer),
- // Directly reading a local will always succeed
- Local(local) => self.frame().locals[local].access().map(Some),
- // No fast path for statics. Reading from statics is rare and would require another
- // Machine function to handle differently in miri.
- Promoted(_) |
- Static(_) => Ok(None),
- Projection(ref proj) => self.try_read_place_projection(proj),
- }
+ base: MPlaceTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ // Not using the layout method because we want to compute on u64
+ let offset = match base.layout.fields {
+ layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
+ offsets[usize::try_from(field).unwrap()],
+ layout::FieldPlacement::Array { stride, .. } => {
+ let len = base.len();
+ assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len);
+ stride * field
+ }
+ layout::FieldPlacement::Union(count) => {
+ assert!(field < count as u64, "Tried to access field {} of union with {} fields", field, count);
+ // Offset is always 0
+ Size::from_bytes(0)
+ }
+ };
+ // the only way conversion can fail is if this is an array (otherwise we already panicked
+ // above). In that case, all fields are equal.
+ let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
+
+ // Adjust offset
+ let offset = match base.extra {
+ PlaceExtra::Vtable(vtable) => {
+ let (_, align) = self.read_size_and_align_from_vtable(vtable)?;
+ // FIXME: Is this right? Should we always do this, or only when actually
+ // accessing the field to which the vtable applies?
+ offset.abi_align(align)
+ }
+ _ => {
+ // No adjustment needed
+ offset
+ }
+ };
+
+ let ptr = base.ptr.ptr_offset(offset, self)?;
+ let align = base.align.min(field_layout.align);
+ let extra = if !field_layout.is_unsized() {
+ PlaceExtra::None
+ } else {
+ assert!(base.extra != PlaceExtra::None, "Expected fat ptr");
+ base.extra
+ };
+
+ Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout })
}
- pub fn read_field(
+ // Iterates over all fields of an array. Much more efficient than doing the
+ // same by repeatedly calling `mplace_array`.
+ pub fn mplace_array_fields(
&self,
- base: Value,
- variant: Option<usize>,
- field: mir::Field,
- mut base_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Value, TyLayout<'tcx>)> {
- if let Some(variant_index) = variant {
- base_layout = base_layout.for_variant(self, variant_index);
- }
- let field_index = field.index();
- let field = base_layout.field(self, field_index)?;
- if field.size.bytes() == 0 {
- return Ok((
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })),
- field,
- ));
- }
- let offset = base_layout.fields.offset(field_index);
- let value = match base {
- // the field covers the entire type
- Value::ScalarPair(..) |
- Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => base,
- // extract fields from types with `ScalarPair` ABI
- Value::ScalarPair(a, b) => {
- let val = if offset.bytes() == 0 { a } else { b };
- Value::Scalar(val)
- },
- Value::ByRef(base_ptr, align) => {
- let offset = base_layout.fields.offset(field_index);
- let ptr = base_ptr.ptr_offset(offset, self)?;
- let align = align.min(base_layout.align).min(field.align);
- assert!(!field.is_unsized());
- Value::ByRef(ptr, align)
- },
- Value::Scalar(val) => bug!("field access on non aggregate {:#?}, {:#?}", val, base_layout),
+ base: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx>>> + 'a> {
+ let len = base.len();
+ let stride = match base.layout.fields {
+ layout::FieldPlacement::Array { stride, .. } => stride,
+ _ => bug!("mplace_array_fields: expected an array layout"),
};
- Ok((value, field))
+ let layout = base.layout.field(self, 0)?;
+ let dl = &self.tcx.data_layout;
+ Ok((0..len).map(move |i| {
+ let ptr = base.ptr.ptr_offset(i * stride, dl)?;
+ Ok(MPlaceTy {
+ mplace: MemPlace { ptr, align: base.align, extra: PlaceExtra::None },
+ layout
+ })
+ }))
}
- fn try_read_place_projection(
+ pub fn mplace_subslice(
&self,
- proj: &mir::PlaceProjection<'tcx>,
- ) -> EvalResult<'tcx, Option<Value>> {
- use rustc::mir::ProjectionElem::*;
- let base = match self.try_read_place(&proj.base)? {
- Some(base) => base,
- None => return Ok(None),
+ base: MPlaceTy<'tcx>,
+ from: u64,
+ to: u64,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let len = base.len();
+ assert!(from <= len - to);
+
+ // Not using layout method because that works with usize, and does not work with slices
+ // (that have count 0 in their layout).
+ let from_offset = match base.layout.fields {
+ layout::FieldPlacement::Array { stride, .. } =>
+ stride * from,
+ _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};
- let base_ty = self.place_ty(&proj.base);
- let base_layout = self.layout_of(base_ty)?;
- match proj.elem {
- Field(field, _) => Ok(Some(self.read_field(base, None, field, base_layout)?.0)),
- // The NullablePointer cases should work fine, need to take care for normal enums
- Downcast(..) |
- Subslice { .. } |
- // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized
- ConstantIndex { .. } | Index(_) |
- // No way to optimize this projection any better than the normal place path
- Deref => Ok(None),
- }
+ let ptr = base.ptr.ptr_offset(from_offset, self)?;
+
+ // Compute extra and new layout
+ let inner_len = len - to - from;
+ let (extra, ty) = match base.layout.ty.sty {
+ ty::Array(inner, _) =>
+ (PlaceExtra::None, self.tcx.mk_array(inner, inner_len)),
+ ty::Slice(..) =>
+ (PlaceExtra::Length(inner_len), base.layout.ty),
+ _ =>
+ bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
+ };
+ let layout = self.layout_of(ty)?;
+
+ Ok(MPlaceTy {
+ mplace: MemPlace { ptr, align: base.align, extra },
+ layout
+ })
+ }
+
+ pub fn mplace_downcast(
+ &self,
+ base: MPlaceTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ // Downcasts only change the layout
+ assert_eq!(base.extra, PlaceExtra::None);
+ Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
+ }
+
+ /// Project into an mplace
+ pub fn mplace_projection(
+ &self,
+ base: MPlaceTy<'tcx>,
+ proj_elem: &mir::PlaceElem<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.mplace_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.mplace_downcast(base, variant)?,
+ Deref => self.deref_operand(base.into())?,
+
+ Index(local) => {
+ let n = *self.frame().locals[local].access()?;
+ let n_layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.read_scalar(OpTy { op: n, layout: n_layout })?;
+ let n = n.to_bits(self.tcx.data_layout.pointer_size)?;
+ self.mplace_field(base, u64::try_from(n).unwrap())?
+ }
+
+ ConstantIndex {
+ offset,
+ min_length,
+ from_end,
+ } => {
+ let n = base.len();
+ assert!(n >= min_length as u64);
+
+ let index = if from_end {
+ n - u64::from(offset)
+ } else {
+ u64::from(offset)
+ };
+
+ self.mplace_field(base, index)?
+ }
+
+ Subslice { from, to } =>
+ self.mplace_subslice(base, u64::from(from), u64::from(to))?,
+ })
}
- /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
- pub(super) fn eval_and_read_place(
+ /// Get the place of a field inside the place, and also the field's type.
+ /// Just a convenience function, but used quite a bit.
+ pub fn place_field(
&mut self,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- // Shortcut for things like accessing a fat pointer's field,
- // which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory
- // and returning an `Place::Ptr` to it
- if let Some(val) = self.try_read_place(place)? {
- return Ok(val);
- }
- let place = self.eval_place(place)?;
- self.read_place(place)
+ base: PlaceTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ // FIXME: We could try to be smarter and avoid allocation for fields that span the
+ // entire place.
+ let mplace = self.force_allocation(base)?;
+ Ok(self.mplace_field(mplace, field)?.into())
}
- pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
- match place {
- Place::Ptr { ptr, align, extra } => {
- assert_eq!(extra, PlaceExtra::None);
- Ok(Value::ByRef(ptr.unwrap_or_err()?, align))
+ pub fn place_downcast(
+ &mut self,
+ base: PlaceTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ // Downcast just changes the layout
+ Ok(match base.place {
+ Place::Ptr(mplace) =>
+ self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
+ Place::Local { .. } => {
+ let layout = base.layout.for_variant(&self, variant);
+ PlaceTy { layout, ..base }
}
- Place::Local { frame, local } => self.stack[frame].locals[local].access(),
- }
+ })
}
- pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> {
+ /// Project into a place
+ pub fn place_projection(
+ &mut self,
+ base: PlaceTy<'tcx>,
+ proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.place_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.place_downcast(base, variant)?,
+ Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+ // For the other variants, we have to force an allocation.
+ // This matches `operand_projection`.
+ Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+ let mplace = self.force_allocation(base)?;
+ self.mplace_projection(mplace, proj_elem)?.into()
+ }
+ })
+ }
+
+ /// Compute a place. You should only use this if you intend to write into this
+ /// place; for reading, a more efficient alternative is `eval_place_for_read`.
+ pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> {
use rustc::mir::Place::*;
let place = match *mir_place {
- Local(mir::RETURN_PLACE) => self.frame().return_place,
- Local(local) => Place::Local {
- frame: self.cur_frame(),
- local,
+ Local(mir::RETURN_PLACE) => PlaceTy {
+ place: self.frame().return_place,
+ layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?,
+ },
+ Local(local) => PlaceTy {
+ place: Place::Local {
+ frame: self.cur_frame(),
+ local,
+ },
+ layout: self.layout_of_local(self.cur_frame(), local)?,
},
Promoted(ref promoted) => {
let instance = self.frame().instance;
- let val = self.read_global_as_value(GlobalId {
+ let op = self.global_to_op(GlobalId {
instance,
promoted: Some(promoted.0),
})?;
- if let Value::ByRef(ptr, align) = val {
- Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: PlaceExtra::None,
- }
- } else {
- bug!("evaluated promoted and got {:#?}", val);
+ let mplace = op.to_mem_place();
+ let ty = self.monomorphize(promoted.1, self.substs());
+ PlaceTy {
+ place: Place::Ptr(mplace),
+ layout: self.layout_of(ty)?,
}
}
Static(ref static_) => {
- let layout = self.layout_of(self.place_ty(mir_place))?;
+ let ty = self.monomorphize(static_.ty, self.substs());
+ let layout = self.layout_of(ty)?;
let instance = ty::Instance::mono(*self.tcx, static_.def_id);
let cid = GlobalId {
instance,
promoted: None
};
let alloc = Machine::init_static(self, cid)?;
- Place::Ptr {
- ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())),
- align: layout.align,
- extra: PlaceExtra::None,
- }
+ MPlaceTy::from_aligned_ptr(alloc.into(), layout).into()
}
Projection(ref proj) => {
- let ty = self.place_ty(&proj.base);
let place = self.eval_place(&proj.base)?;
- return self.eval_place_projection(place, ty, &proj.elem);
+ self.place_projection(place, &proj.elem)?
}
};
- self.dump_local(place);
+ self.dump_place(place.place);
Ok(place)
}
- pub fn place_field(
+ /// Write a scalar to a place
+ pub fn write_scalar(
&mut self,
- base: Place,
- field: mir::Field,
- mut base_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> {
- match base {
- Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => {
- base_layout = base_layout.for_variant(&self, variant_index);
- }
- _ => {}
- }
- let field_index = field.index();
- let field = base_layout.field(&self, field_index)?;
- let offset = base_layout.fields.offset(field_index);
+ val: impl Into<ScalarMaybeUndef>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ self.write_value(Value::Scalar(val.into()), dest)
+ }
- // Do not allocate in trivial cases
- let (base_ptr, base_align, base_extra) = match base {
- Place::Ptr { ptr, align, extra } => (ptr, align, extra),
+ /// Write a value to a place
+ pub fn write_value(
+ &mut self,
+ src_val: Value,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ trace!("write_value: {:?} <- {:?}", *dest, src_val);
+ // See if we can avoid an allocation. This is the counterpart to `try_read_value`,
+ // but not factored as a separate function.
+ let mplace = match dest.place {
Place::Local { frame, local } => {
- match (self.stack[frame].locals[local].access()?, &base_layout.abi) {
- // in case the field covers the entire type, just return the value
- (Value::Scalar(_), &layout::Abi::Scalar(_)) |
- (Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
- if offset.bytes() == 0 && field.size == base_layout.size => {
- return Ok((base, field))
+ match *self.stack[frame].locals[local].access_mut()? {
+ Operand::Immediate(ref mut dest_val) => {
+ // Yay, we can just change the local directly.
+ *dest_val = src_val;
+ return Ok(());
},
- _ => self.force_allocation(base)?.to_ptr_align_extra(),
+ Operand::Indirect(mplace) => mplace, // already in memory
}
- }
+ },
+ Place::Ptr(mplace) => mplace, // already in memory
};
- let offset = match base_extra {
- PlaceExtra::Vtable(tab) => {
- let (_, align) = self.size_and_align_of_dst(
- base_layout.ty,
- base_ptr.to_value_with_vtable(tab),
- )?;
- offset.abi_align(align)
+ // This is already in memory, write there.
+ let dest = MPlaceTy { mplace, layout: dest.layout };
+ self.write_value_to_mplace(src_val, dest)
+ }
+
+ /// Write a value to memory
+ fn write_value_to_mplace(
+ &mut self,
+ value: Value,
+ dest: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let (ptr, ptr_align) = dest.to_scalar_ptr_align();
+ // Note that it is really important that the type here is the right one, and matches the type things are read at.
+ // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
+ // correct if we never look at this data with the wrong type.
+
+ // Nothing to do for ZSTs, other than checking alignment
+ if dest.layout.size.bytes() == 0 {
+ self.memory.check_align(ptr, ptr_align)?;
+ return Ok(());
+ }
+
+ let ptr = ptr.to_ptr()?;
+ match value {
+ Value::Scalar(scalar) => {
+ self.memory.write_scalar(
+ ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size
+ )
}
- _ => offset,
- };
+ Value::ScalarPair(a_val, b_val) => {
+ let (a, b) = match dest.layout.abi {
+ layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+ _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout)
+ };
+ let (a_size, b_size) = (a.size(&self), b.size(&self));
+ let (a_align, b_align) = (a.align(&self), b.align(&self));
+ let b_offset = a_size.abi_align(b_align);
+ let b_ptr = ptr.offset(b_offset, &self)?.into();
- let ptr = base_ptr.ptr_offset(offset, &self)?;
- let align = base_align.min(base_layout.align).min(field.align);
- let extra = if !field.is_unsized() {
- PlaceExtra::None
- } else {
- match base_extra {
- PlaceExtra::None => bug!("expected fat pointer"),
- PlaceExtra::DowncastVariant(..) => {
- bug!("Rust doesn't support unsized fields in enum variants")
- }
- PlaceExtra::Vtable(_) |
- PlaceExtra::Length(_) => {}
+ self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?;
+ self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size)
}
- base_extra
- };
+ }
+ }
- Ok((Place::Ptr { ptr, align, extra }, field))
+ /// Copy the data from an operand to a place
+ pub fn copy_op(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ assert_eq!(src.layout.size, dest.layout.size,
+ "Size mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+
+ // Let us see if the layout is simple so we can take a shortcut and avoid force_allocation.
+ let (src_ptr, src_align) = match self.try_read_value(src)? {
+ Ok(src_val) =>
+ // Yay, we got a value that we can write directly. We write with the
+ // *source layout*, because that was used to load, and if they do not match
+ // this is a transmute we want to support.
+ return self.write_value(src_val, PlaceTy { place: *dest, layout: src.layout }),
+ Err(mplace) => mplace.to_scalar_ptr_align(),
+ };
+ // Slow path, this does not fit into an immediate. Just memcpy.
+ trace!("copy_op: {:?} <- {:?}", *dest, *src);
+ let (dest_ptr, dest_align) = self.force_allocation(dest)?.to_scalar_ptr_align();
+ self.memory.copy(
+ src_ptr, src_align,
+ dest_ptr, dest_align,
+ src.layout.size, false
+ )
}
- pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
- let layout = self.layout_of(ty)?;
- Ok(match self.tcx.struct_tail(ty).sty {
- ty::TyDynamic(..) => {
- let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
- Place::Ptr {
- ptr,
- align: layout.align,
- extra: PlaceExtra::Vtable(vtable),
- }
- }
- ty::TyStr | ty::TySlice(_) => {
- let (ptr, len) = self.into_slice(val)?;
- Place::Ptr {
- ptr,
- align: layout.align,
- extra: PlaceExtra::Length(len),
- }
+ /// Make sure that a place is in memory, and return where it is.
+ /// This is essentially `force_to_memplace`.
+ pub fn force_allocation(
+ &mut self,
+ place: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let mplace = match place.place {
+ Place::Local { frame, local } => {
+ // FIXME: Consider not doing anything for a ZST, and just returning
+ // a fake pointer?
+
+ // We need the layout of the local. We can NOT use the layout we got,
+ // that might e.g. be a downcast variant!
+ let local_layout = self.layout_of_local(frame, local)?;
+ // Make sure it has a place
+ let rval = *self.stack[frame].locals[local].access()?;
+ let mplace = self.allocate_op(OpTy { op: rval, layout: local_layout })?.mplace;
+ // This might have allocated the flag
+ *self.stack[frame].locals[local].access_mut()? =
+ Operand::Indirect(mplace);
+ // done
+ mplace
}
- _ => Place::from_scalar_ptr(self.into_ptr(val)?, layout.align),
- })
+ Place::Ptr(mplace) => mplace
+ };
+ // Return with the original layout, so that the caller can go on
+ Ok(MPlaceTy { mplace, layout: place.layout })
}
- pub fn place_index(
+ pub fn allocate(
&mut self,
- base: Place,
- outer_ty: Ty<'tcx>,
- n: u64,
- ) -> EvalResult<'tcx, Place> {
- // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, len) = base.elem_ty_and_len(outer_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(
- n < len,
- "Tried to access element {} of array/slice with length {}",
- n,
- len
- );
- let ptr = base_ptr.ptr_offset(elem_size * n, &*self)?;
- Ok(Place::Ptr {
- ptr,
- align,
- extra: PlaceExtra::None,
- })
+ layout: TyLayout<'tcx>,
+ kind: MemoryKind<M::MemoryKinds>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
+ let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
+ Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
}
- pub(super) fn place_downcast(
+ /// Make a place for an operand, allocating if needed
+ pub fn allocate_op(
&mut self,
- base: Place,
- variant: usize,
- ) -> EvalResult<'tcx, Place> {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (ptr, align) = base.to_ptr_align();
- let extra = PlaceExtra::DowncastVariant(variant);
- Ok(Place::Ptr { ptr, align, extra })
+ OpTy { op, layout }: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ Ok(match op {
+ Operand::Indirect(mplace) => MPlaceTy { mplace, layout },
+ Operand::Immediate(value) => {
+ // FIXME: Is stack always right here?
+ let ptr = self.allocate(layout, MemoryKind::Stack)?;
+ self.write_value_to_mplace(value, ptr)?;
+ ptr
+ },
+ })
}
- pub fn eval_place_projection(
+ pub fn write_discriminant_value(
&mut self,
- base: Place,
- base_ty: Ty<'tcx>,
- proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
- ) -> EvalResult<'tcx, Place> {
- use rustc::mir::ProjectionElem::*;
- match *proj_elem {
- Field(field, _) => {
- let layout = self.layout_of(base_ty)?;
- Ok(self.place_field(base, field, layout)?.0)
- }
-
- Downcast(_, variant) => {
- self.place_downcast(base, variant)
+ variant_index: usize,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ match dest.layout.variants {
+ layout::Variants::Single { index } => {
+ if index != variant_index {
+ // If the layout of an enum is `Single`, all
+ // other variants are necessarily uninhabited.
+ assert_eq!(dest.layout.for_variant(&self, variant_index).abi,
+ layout::Abi::Uninhabited);
+ }
}
-
- Deref => {
- let val = self.read_place(base)?;
-
- let pointee_type = match base_ty.sty {
- ty::TyRawPtr(ref tam) => tam.ty,
- ty::TyRef(_, ty, _) => ty,
- ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
- _ => bug!("can only deref pointer types"),
- };
-
- trace!("deref to {} on {:?}", pointee_type, val);
-
- self.val_to_place(val, pointee_type)
+ layout::Variants::Tagged { ref tag, .. } => {
+ let discr_val = dest.layout.ty.ty_adt_def().unwrap()
+ .discriminant_for_variant(*self.tcx, variant_index)
+ .val;
+
+ // raw discriminants for enums are isize or bigger during
+ // their computation, but the in-memory tag is the smallest possible
+ // representation
+ let size = tag.value.size(self.tcx.tcx);
+ let shift = 128 - size.bits();
+ let discr_val = (discr_val << shift) >> shift;
+
+ let discr_dest = self.place_field(dest, 0)?;
+ self.write_scalar(Scalar::Bits {
+ bits: discr_val,
+ size: size.bytes() as u8,
+ }, discr_dest)?;
}
-
- Index(local) => {
- let value = self.frame().locals[local].access()?;
- let ty = self.tcx.types.usize;
- let n = self
- .value_to_scalar(ValTy { value, ty })?
- .to_bits(self.tcx.data_layout.pointer_size)?;
- self.place_index(base, base_ty, n as u64)
- }
-
- ConstantIndex {
- offset,
- min_length,
- from_end,
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
} => {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(n >= min_length as u64);
-
- let index = if from_end {
- n - u64::from(offset)
- } else {
- u64::from(offset)
- };
-
- let ptr = base_ptr.ptr_offset(elem_size * index, &self)?;
- Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None })
+ if variant_index != dataful_variant {
+ let niche_dest =
+ self.place_field(dest, 0)?;
+ let niche_value = ((variant_index - niche_variants.start()) as u128)
+ .wrapping_add(niche_start);
+ self.write_scalar(Scalar::Bits {
+ bits: niche_value,
+ size: niche_dest.layout.size.bytes() as u8,
+ }, niche_dest)?;
+ }
}
+ }
- Subslice { from, to } => {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(u64::from(from) <= n - u64::from(to));
- let ptr = base_ptr.ptr_offset(elem_size * u64::from(from), &self)?;
- // sublicing arrays produces arrays
- let extra = if self.type_is_sized(base_ty) {
- PlaceExtra::None
- } else {
- PlaceExtra::Length(n - u64::from(to) - u64::from(from))
- };
- Ok(Place::Ptr { ptr, align, extra })
+ Ok(())
+ }
+
+ /// Every place can be read from, so we can turn it into an operand
+ #[inline(always)]
+ pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let op = match place.place {
+ Place::Ptr(mplace) => {
+ Operand::Indirect(mplace)
}
- }
+ Place::Local { frame, local } =>
+ *self.stack[frame].locals[local].access()?
+ };
+ Ok(OpTy { op, layout: place.layout })
}
- pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
- self.monomorphize(
- place.ty(self.mir(), *self.tcx).to_ty(*self.tcx),
- self.substs(),
- )
+ /// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout)
+ /// or a slice into the specific fixed-size place and layout that is given by the vtable/len.
+ /// This "unpacks" the existential quantifier, so to speak.
+ pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty);
+ let layout = match mplace.extra {
+ PlaceExtra::Vtable(vtable) => {
+ // the drop function signature
+ let drop_instance = self.read_drop_type_from_vtable(vtable)?;
+ trace!("Found drop fn: {:?}", drop_instance);
+ let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
+ let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
+ // the drop function takes *mut T where T is the type being dropped, so get that
+ let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
+ let layout = self.layout_of(ty)?;
+ // Sanity checks
+ let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
+ assert_eq!(size, layout.size);
+ assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
+ // FIXME: More checks for the vtable? We could make sure it is exactly
+ // the one one would expect for this type.
+ // Done!
+ layout
+ },
+ PlaceExtra::Length(len) => {
+ let ty = self.tcx.mk_array(mplace.layout.field(self, 0)?.ty, len);
+ self.layout_of(ty)?
+ }
+ PlaceExtra::None => bug!("Expected a fat pointer"),
+ };
+ trace!("Unpacked type: {:?}", layout.ty);
+ Ok(MPlaceTy {
+ mplace: MemPlace { extra: PlaceExtra::None, ..*mplace },
+ layout
+ })
}
}
//! The main entry point is the `step` method.
use rustc::mir;
+use rustc::ty::layout::LayoutOf;
+use rustc::mir::interpret::{EvalResult, Scalar};
-use rustc::mir::interpret::EvalResult;
use super::{EvalContext, Machine};
+/// Classify whether an operator is "left-homogeneous", i.e. the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+ use rustc::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr |
+ Offset | Shl | Shr =>
+ true,
+ Eq | Ne | Lt | Le | Gt | Ge =>
+ false,
+ }
+}
+/// Classify whether an operator is "right-homogeneous", i.e. the RHS has the
+/// same type as the LHS.
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+ use rustc::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr |
+ Eq | Ne | Lt | Le | Gt | Ge =>
+ true,
+ Offset | Shl | Shr =>
+ false,
+ }
+}
+
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> {
/// The number of steps between loop detector snapshots.
}
fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
- trace!("{:?}", stmt);
+ debug!("{:?}", stmt);
use rustc::mir::StatementKind::*;
variant_index,
} => {
let dest = self.eval_place(place)?;
- let dest_ty = self.place_ty(place);
- self.write_discriminant_value(dest_ty, dest, variant_index)?;
+ self.write_discriminant_value(variant_index, dest)?;
}
// Mark locals as alive
// Mark locals as dead
StorageDead(local) => {
- let old_val = self.frame_mut().storage_dead(local);
+ let old_val = self.storage_dead(local);
self.deallocate_local(old_val)?;
}
Ok(())
}
+ /// Evaluate an assignment statement.
+ ///
+ /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+ /// type writes its results directly into the memory specified by the place.
+ fn eval_rvalue_into_place(
+ &mut self,
+ rvalue: &mir::Rvalue<'tcx>,
+ place: &mir::Place<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let dest = self.eval_place(place)?;
+
+ use rustc::mir::Rvalue::*;
+ match *rvalue {
+ Use(ref operand) => {
+ // Avoid recomputing the layout
+ let op = self.eval_operand(operand, Some(dest.layout))?;
+ self.copy_op(op, dest)?;
+ }
+
+ BinaryOp(bin_op, ref left, ref right) => {
+ let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None };
+ let left = self.eval_operand_and_read_value(left, layout)?;
+ let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
+ let right = self.eval_operand_and_read_value(right, layout)?;
+ self.binop_ignore_overflow(
+ bin_op,
+ left,
+ right,
+ dest,
+ )?;
+ }
+
+ CheckedBinaryOp(bin_op, ref left, ref right) => {
+ // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+ let left = self.eval_operand_and_read_value(left, None)?;
+ let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
+ let right = self.eval_operand_and_read_value(right, layout)?;
+ self.binop_with_overflow(
+ bin_op,
+ left,
+ right,
+ dest,
+ )?;
+ }
+
+ UnaryOp(un_op, ref operand) => {
+ // The operand always has the same type as the result.
+ let val = self.eval_operand_and_read_value(operand, Some(dest.layout))?;
+ let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?;
+ self.write_scalar(val, dest)?;
+ }
+
+ Aggregate(ref kind, ref operands) => {
+ let (dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
+ self.write_discriminant_value(variant_index, dest)?;
+ if adt_def.is_enum() {
+ (self.place_downcast(dest, variant_index)?, active_field_index)
+ } else {
+ (dest, active_field_index)
+ }
+ }
+ _ => (dest, None)
+ };
+
+ for (i, operand) in operands.iter().enumerate() {
+ let op = self.eval_operand(operand, None)?;
+ // Ignore zero-sized fields.
+ if !op.layout.is_zst() {
+ let field_index = active_field_index.unwrap_or(i);
+ let field_dest = self.place_field(dest, field_index as u64)?;
+ self.copy_op(op, field_dest)?;
+ }
+ }
+ }
+
+ Repeat(ref operand, _) => {
+ let op = self.eval_operand(operand, None)?;
+ let dest = self.force_allocation(dest)?;
+ let length = dest.len();
+
+ if length > 0 {
+ // write the first
+ let first = self.mplace_field(dest, 0)?;
+ self.copy_op(op, first.into())?;
+
+ if length > 1 {
+ // copy the rest
+ let (dest, dest_align) = first.to_scalar_ptr_align();
+ let rest = dest.ptr_offset(first.layout.size, &self)?;
+ self.memory.copy_repeatedly(
+ dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
+ )?;
+ }
+ }
+ }
+
+ Len(ref place) => {
+ // FIXME(CTFE): don't allow computing the length of arrays in const eval
+ let src = self.eval_place(place)?;
+ let mplace = self.force_allocation(src)?;
+ let len = mplace.len();
+ let size = self.memory.pointer_size().bytes() as u8;
+ self.write_scalar(
+ Scalar::Bits {
+ bits: len as u128,
+ size,
+ },
+ dest,
+ )?;
+ }
+
+ Ref(_, _, ref place) => {
+ let src = self.eval_place(place)?;
+ let val = self.force_allocation(src)?.to_ref(&self);
+ self.write_value(val, dest)?;
+ }
+
+ NullaryOp(mir::NullOp::Box, _) => {
+ M::box_alloc(self, dest)?;
+ }
+
+ NullaryOp(mir::NullOp::SizeOf, ty) => {
+ let ty = self.monomorphize(ty, self.substs());
+ let layout = self.layout_of(ty)?;
+ assert!(!layout.is_unsized(),
+ "SizeOf nullary MIR operator called for unsized type");
+ let size = self.memory.pointer_size().bytes() as u8;
+ self.write_scalar(
+ Scalar::Bits {
+ bits: layout.size.bytes() as u128,
+ size,
+ },
+ dest,
+ )?;
+ }
+
+ Cast(kind, ref operand, cast_ty) => {
+ debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty);
+ let src = self.eval_operand(operand, None)?;
+ self.cast(src, kind, dest)?;
+ }
+
+ Discriminant(ref place) => {
+ let place = self.eval_place(place)?;
+ let discr_val = self.read_discriminant_value(self.place_to_op(place)?)?;
+ let size = dest.layout.size.bytes() as u8;
+ self.write_scalar(Scalar::Bits {
+ bits: discr_val,
+ size,
+ }, dest)?;
+ }
+ }
+
+ self.dump_place(*dest);
+
+ Ok(())
+ }
+
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
- trace!("{:?}", terminator.kind);
+ debug!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
self.eval_terminator(terminator)?;
if !self.stack.is_empty() {
- trace!("// {:?}", self.frame().block);
+ debug!("// {:?}", self.frame().block);
}
Ok(())
}
use rustc::mir::BasicBlock;
-use rustc::ty::{self, Ty};
+use rustc::ty::{self, layout::LayoutOf};
use syntax::source_map::Span;
-use rustc::mir::interpret::{EvalResult, Value};
-use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra};
+use rustc::mir::interpret::EvalResult;
+use interpret::{Machine, EvalContext, PlaceTy, PlaceExtra, OpTy, Operand};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- pub(crate) fn drop_place(
+ pub(crate) fn drop_in_place(
&mut self,
- place: Place,
+ place: PlaceTy<'tcx>,
instance: ty::Instance<'tcx>,
- ty: Ty<'tcx>,
span: Span,
target: BasicBlock,
) -> EvalResult<'tcx> {
- trace!("drop_place: {:#?}", place);
+ trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
// We take the address of the object. This may well be unaligned, which is fine for us here.
// However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
// by rustc.
- let val = match self.force_allocation(place)? {
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::Vtable(vtable),
- } => ptr.to_value_with_vtable(vtable),
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::Length(len),
- } => ptr.to_value_with_len(len, self.tcx.tcx),
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::None,
- } => Value::Scalar(ptr),
- _ => bug!("force_allocation broken"),
- };
- self.drop(val, instance, ty, span, target)
- }
+ let place = self.force_allocation(place)?;
- fn drop(
- &mut self,
- arg: Value,
- instance: ty::Instance<'tcx>,
- ty: Ty<'tcx>,
- span: Span,
- target: BasicBlock,
- ) -> EvalResult<'tcx> {
- trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
-
- let instance = match ty.sty {
- ty::TyDynamic(..) => {
- if let Value::ScalarPair(_, vtable) = arg {
- self.read_drop_type_from_vtable(vtable.unwrap_or_err()?.to_ptr()?)?
- } else {
- bug!("expected fat ptr, got {:?}", arg);
- }
+ let (instance, place) = match place.layout.ty.sty {
+ ty::Dynamic(..) => {
+ // Dropping a trait object.
+ let vtable = match place.extra {
+ PlaceExtra::Vtable(vtable) => vtable,
+ _ => bug!("Expected vtable when dropping {:#?}", place),
+ };
+ let place = self.unpack_unsized_mplace(place)?;
+ let instance = self.read_drop_type_from_vtable(vtable)?;
+ (instance, place)
}
- _ => instance,
+ _ => (instance, place),
};
- // the drop function expects a reference to the value
- let valty = ValTy {
- value: arg,
- ty: self.tcx.mk_mut_ptr(ty),
+ let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx);
+ let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
+
+ let arg = OpTy {
+ op: Operand::Immediate(place.to_ref(&self)),
+ layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
};
- let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
+ // This should always be (), but getting it from the sig seems
+ // easier than creating a layout of ().
+ let dest = PlaceTy::null(&self, self.layout_of(fn_sig.output())?);
self.eval_fn_call(
instance,
- Some((Place::undef(), target)),
- &[valty],
+ Some((dest, target)),
+ &[arg],
span,
fn_sig,
)
use rustc::mir;
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{LayoutOf, Size};
+use rustc::ty::layout::LayoutOf;
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
-use super::{EvalContext, Place, Machine, ValTy};
+use rustc::mir::interpret::{EvalResult, Scalar};
+use super::{EvalContext, Machine, Value, OpTy, PlaceTy, ValTy, Operand};
use rustc_data_structures::indexed_vec::Idx;
-use interpret::memory::HasMemory;
mod drop;
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
- self.dump_local(self.frame().return_place);
+ self.dump_place(self.frame().return_place);
self.pop_stack_frame()?
}
ref targets,
..
} => {
- let discr_val = self.eval_operand(discr)?;
- let discr_prim = self.value_to_scalar(discr_val)?;
- let discr_layout = self.layout_of(discr_val.ty).unwrap();
- trace!("SwitchInt({:?}, {:#?})", discr_prim, discr_layout);
+ let discr_val = self.eval_operand(discr, None)?;
+ let discr = self.read_value(discr_val)?;
+ trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
- let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 };
- let res = self.binary_op(mir::BinOp::Eq,
- discr_prim, discr_val.ty,
- const_int, discr_val.ty
+ let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 };
+ let (res, _) = self.binary_op(mir::BinOp::Eq,
+ discr,
+ ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
)?;
- if res.0.to_bits(Size::from_bytes(1))? != 0 {
+ if res.to_bool()? {
target_block = targets[index];
break;
}
None => None,
};
- let func = self.eval_operand(func)?;
- let (fn_def, sig) = match func.ty.sty {
- ty::TyFnPtr(sig) => {
- let fn_ptr = self.value_to_scalar(func)?.to_ptr()?;
+ let func = self.eval_operand(func, None)?;
+ let (fn_def, sig) = match func.layout.ty.sty {
+ ty::FnPtr(sig) => {
+ let fn_ptr = self.read_scalar(func)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.ty(*self.tcx);
match instance_ty.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
let real_sig = instance_ty.fn_sig(*self.tcx);
let sig = self.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
}
(instance, sig)
}
- ty::TyFnDef(def_id, substs) => (
+ ty::FnDef(def_id, substs) => (
self.resolve(def_id, substs)?,
- func.ty.fn_sig(*self.tcx),
+ func.layout.ty.fn_sig(*self.tcx),
),
_ => {
- let msg = format!("can't handle callee of type {:?}", func.ty);
+ let msg = format!("can't handle callee of type {:?}", func.layout.ty);
return err!(Unimplemented(msg));
}
};
- let args = self.operands_to_args(args)?;
+ let args = self.eval_operands(args)?;
let sig = self.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
self.eval_fn_call(
fn_def,
destination,
- &args,
+ &args[..],
terminator.source_info.span,
sig,
)?;
} => {
// FIXME(CTFE): forbid drop in const eval
let place = self.eval_place(location)?;
- let ty = self.place_ty(location);
- let ty = self.tcx.subst_and_normalize_erasing_regions(
- self.substs(),
- ty::ParamEnv::reveal_all(),
- &ty,
- );
+ let ty = place.layout.ty;
trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
- self.drop_place(
+ self.drop_in_place(
place,
instance,
- ty,
terminator.source_info.span,
target,
)?;
target,
..
} => {
- let cond_val = self.eval_operand_to_scalar(cond)?.to_bool()?;
+ let cond_val = self.eval_operand_and_read_value(cond, None)?.to_scalar()?.to_bool()?;
if expected == cond_val {
self.goto_block(target);
} else {
use rustc::mir::interpret::EvalErrorKind::*;
return match *msg {
BoundsCheck { ref len, ref index } => {
- let len = self.eval_operand_to_scalar(len)
- .expect("can't eval len")
+ let len = self.eval_operand_and_read_value(len, None)
+ .expect("can't eval len").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
- let index = self.eval_operand_to_scalar(index)
- .expect("can't eval index")
+ let index = self.eval_operand_and_read_value(index, None)
+ .expect("can't eval index").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
err!(BoundsCheck { len, index })
}
// Permit changing the pointer type of raw pointers and references as well as
// mutability of raw pointers.
// TODO: Should not be allowed when fat pointers are involved.
- (&ty::TyRawPtr(_), &ty::TyRawPtr(_)) => true,
- (&ty::TyRef(_, _, _), &ty::TyRef(_, _, _)) => {
+ (&ty::RawPtr(_), &ty::RawPtr(_)) => true,
+ (&ty::Ref(_, _, _), &ty::Ref(_, _, _)) => {
ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
}
// rule out everything else
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
- ty::TyTuple(tys) if sig.inputs().len() == tys.len() =>
+ ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
return Ok(true)
},
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
+ if let Some((place, _)) = destination {
+ assert_eq!(place.layout.ty, sig.output());
+ }
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
let (ret, target) = match destination {
Some(dest) => dest,
_ => return err!(Unreachable),
};
- let ty = sig.output();
- let layout = self.layout_of(ty)?;
- M::call_intrinsic(self, instance, args, ret, layout, target)?;
- self.dump_local(ret);
+ M::call_intrinsic(self, instance, args, ret, target)?;
+ self.dump_place(*ret);
Ok(())
}
// FIXME: figure out why we can't just go through the shim
ty::InstanceDef::ClosureOnceShim { .. } => {
- if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
match sig.abi {
// closure as closure once
Abi::RustCall => {
- for (arg_local, &valty) in arg_locals.zip(args) {
+ for (arg_local, &op) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
// non capture closure as fn ptr
// and need to pack arguments
Abi::Rust => {
trace!(
- "arg_locals: {:#?}",
- self.frame().mir.args_iter().collect::<Vec<_>>()
+ "args: {:#?}",
+ self.frame().mir.args_iter().zip(args.iter())
+ .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
);
- trace!("args: {:#?}", args);
let local = arg_locals.nth(1).unwrap();
- for (i, &valty) in args.into_iter().enumerate() {
+ for (i, &op) in args.into_iter().enumerate() {
let dest = self.eval_place(&mir::Place::Local(local).field(
mir::Field::new(i),
- valty.ty,
+ op.layout.ty,
))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
_ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
ty::InstanceDef::CloneShim(..) |
ty::InstanceDef::Item(_) => {
// Push the stack frame, and potentially be entirely done if the call got hooked
- if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span)? {
+ // TODO: Can we make it return the frame to push, instead
+ // of the hook doing half of the work and us doing the argument
+ // initialization?
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
trace!("ABI: {:?}", sig.abi);
trace!(
- "arg_locals: {:#?}",
- self.frame().mir.args_iter().collect::<Vec<_>>()
+ "args: {:#?}",
+ self.frame().mir.args_iter().zip(args.iter())
+ .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
);
- trace!("args: {:#?}", args);
match sig.abi {
Abi::RustCall => {
assert_eq!(args.len(), 2);
// write first argument
let first_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(first_local))?;
- self.write_value(args[0], dest)?;
+ self.copy_op(args[0], dest)?;
}
// unpack and write all other args
- let layout = self.layout_of(args[1].ty)?;
- if let ty::TyTuple(_) = args[1].ty.sty {
+ let layout = args[1].layout;
+ if let ty::Tuple(_) = layout.ty.sty {
if layout.is_zst() {
// Nothing to do, no need to unpack zsts
return Ok(());
}
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
for (i, arg_local) in arg_locals.enumerate() {
- let field = mir::Field::new(i);
- let (value, layout) = self.read_field(args[1].value, None, field, layout)?;
+ let arg = self.operand_field(args[1], i as u64)?;
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- let valty = ValTy {
- value,
- ty: layout.ty,
- };
- self.write_value(valty, dest)?;
+ self.copy_op(arg, dest)?;
}
} else {
trace!("manual impl of rust-call ABI");
let dest = self.eval_place(
&mir::Place::Local(arg_locals.next().unwrap()),
)?;
- self.write_value(args[1], dest)?;
+ self.copy_op(args[1], dest)?;
}
} else {
bug!(
- "rust-call ABI tuple argument was {:#?}, {:#?}",
- args[1].ty,
+ "rust-call ABI tuple argument was {:#?}",
layout
);
}
}
_ => {
- for (arg_local, &valty) in arg_locals.zip(args) {
+ for (arg_local, &op) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
}
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
- let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
+ let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?;
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
- )?.unwrap_or_err()?.to_ptr()?;
+ )?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
+
+ // We have to patch the self argument, in particular get the layout
+ // expected by the actual function. Cannot just use "field 0" due to
+ // Box<self>.
let mut args = args.to_vec();
- let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
- args[0].ty = ty;
- args[0].value = Value::Scalar(ptr);
+ let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
+ let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
+ args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?;
+ args[0].op = Operand::Immediate(Value::Scalar(ptr.into())); // strip vtable
+ trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(instance, destination, &args, span, sig)
}
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
- self.memory.write_ptr_sized_unsigned(vtable, ptr_align, Scalar::Ptr(drop).into())?;
+ self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
- self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
+ self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
size: ptr_size.bytes() as u8,
}.into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
- self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
+ self.memory.write_ptr_sized(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
size: ptr_size.bytes() as u8,
}.into())?;
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
- self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
+ self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
}
}
) -> EvalResult<'tcx, ty::Instance<'tcx>> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
- let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.unwrap_or_err()?.to_ptr()?;
+ let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
self.memory.get_fn(drop_fn)
}
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
- let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+ let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
- )?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+ )?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
}
}
--- /dev/null
+use std::fmt::Write;
+
+use syntax_pos::symbol::Symbol;
+use rustc::ty::layout::{self, Size, Primitive};
+use rustc::ty::{self, Ty};
+use rustc_data_structures::fx::FxHashSet;
+use rustc::mir::interpret::{
+ Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind
+};
+
+use super::{
+ MPlaceTy, Machine, EvalContext
+};
+
+// Build a `ValidationFailure` error. `$what` describes the offending value,
+// `$where` is the projection path (formatted only on the error path, keeping
+// the happy path allocation-free), and the optional `$details` states what
+// was expected instead. NOTE(review): the two arms duplicate the `where_`
+// formatting; kept as-is here since this is a diff record.
+macro_rules! validation_failure{
+ ($what:expr, $where:expr, $details:expr) => {{
+ let where_ = path_format($where);
+ let where_ = if where_.is_empty() {
+ String::new()
+ } else {
+ format!(" at {}", where_)
+ };
+ err!(ValidationFailure(format!(
+ "encountered {}{}, but expected {}",
+ $what, where_, $details,
+ )))
+ }};
+ // Two-argument arm: no "but expected ..." suffix.
+ ($what:expr, $where:expr) => {{
+ let where_ = path_format($where);
+ let where_ = if where_.is_empty() {
+ String::new()
+ } else {
+ format!(" at {}", where_)
+ };
+ err!(ValidationFailure(format!(
+ "encountered {}{}",
+ $what, where_,
+ )))
+ }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+ // Named field of a struct or enum variant.
+ Field(Symbol),
+ // Upvar captured by a closure or generator.
+ ClosureVar(Symbol),
+ // Index into an array.
+ ArrayElem(usize),
+ // Position within a tuple.
+ TupleElem(usize),
+ // Projection through a pointer/reference.
+ Deref,
+ // The discriminant (tag) of an enum.
+ Tag,
+}
+
+// Adding a Deref and making a copy of the path to be put into the queue
+// always go together. This one does it with only one new allocation.
+fn path_clone_and_deref(path: &Vec<PathElem>) -> Vec<PathElem> {
+ // Reserve room for the extra element up front, so that `clone_from`
+ // followed by `push` never reallocates.
+ let mut new_path = Vec::with_capacity(path.len()+1);
+ new_path.clone_from(path);
+ new_path.push(PathElem::Deref);
+ new_path
+}
+
+/// Format a path into a human-readable projection string,
+/// e.g. `.field.<deref>[3].<enum-tag>`. Only called on the error path.
+fn path_format(path: &Vec<PathElem>) -> String {
+ use self::PathElem::*;
+
+ let mut out = String::new();
+ for elem in path.iter() {
+ // Writing to a `String` cannot fail, hence the `unwrap()`s.
+ match elem {
+ Field(name) => write!(out, ".{}", name).unwrap(),
+ ClosureVar(name) => write!(out, ".<closure-var({})>", name).unwrap(),
+ TupleElem(idx) => write!(out, ".{}", idx).unwrap(),
+ ArrayElem(idx) => write!(out, "[{}]", idx).unwrap(),
+ Deref =>
+ // This does not match Rust syntax, but it is more readable for long paths -- and
+ // some of the other items here also are not Rust syntax. Actually we can't
+ // even use the usual syntax because we are just showing the projections,
+ // not the root.
+ write!(out, ".<deref>").unwrap(),
+ Tag => write!(out, ".<enum-tag>").unwrap(),
+ }
+ }
+ out
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ /// Check that `value` is a valid instance of the scalar layout `scalar`:
+ /// it must be initialized (not undef) and its bits must lie in
+ /// `scalar.valid_range`. `size` is the scalar's size in memory, `path` is
+ /// used only for error reporting, and `ty` is consulted only to give
+ /// `char` its special non-contiguous-range check.
+ fn validate_scalar(
+ &self,
+ value: ScalarMaybeUndef,
+ size: Size,
+ scalar: &layout::Scalar,
+ path: &Vec<PathElem>,
+ ty: Ty,
+ ) -> EvalResult<'tcx> {
+ trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty);
+ // `lo > hi` means the valid range wraps around the end of the
+ // integer's value space (e.g. niche ranges).
+ let (lo, hi) = scalar.valid_range.clone().into_inner();
+
+ // Undefined bytes are never a valid scalar.
+ let value = match value {
+ ScalarMaybeUndef::Scalar(scalar) => scalar,
+ ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path),
+ };
+
+ let bits = match value {
+ Scalar::Bits { bits, size: value_size } => {
+ assert_eq!(value_size as u64, size.bytes());
+ bits
+ },
+ Scalar::Ptr(_) => {
+ // For a pointer we cannot know its concrete numeric value, so we
+ // can only accept it if the valid range covers everything a
+ // pointer could be (only 0/null excluded, at most).
+ let ptr_size = self.memory.pointer_size();
+ let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
+ return if lo > hi {
+ // Wrapping range.
+ if lo - hi == 1 {
+ // no gap, all values are ok
+ Ok(())
+ } else if hi < ptr_max || lo > 1 {
+ let max = u128::max_value() >> (128 - size.bits());
+ validation_failure!(
+ "pointer",
+ path,
+ format!("something in the range {:?} or {:?}", 0..=lo, hi..=max)
+ )
+ } else {
+ Ok(())
+ }
+ } else if hi < ptr_max || lo > 1 {
+ validation_failure!(
+ "pointer",
+ path,
+ format!("something in the range {:?}", scalar.valid_range)
+ )
+ } else {
+ Ok(())
+ };
+ },
+ };
+
+ // char gets a special treatment, because its number space is not contiguous so `TyLayout`
+ // has no special checks for chars
+ match ty.sty {
+ ty::Char => {
+ debug_assert_eq!(size.bytes(), 4);
+ if ::std::char::from_u32(bits as u32).is_none() {
+ return validation_failure!(
+ "character",
+ path,
+ "a valid unicode codepoint"
+ );
+ }
+ }
+ _ => {},
+ }
+
+ // Check the concrete bits against the (possibly wrapping) valid range.
+ use std::ops::RangeInclusive;
+ let in_range = |bound: RangeInclusive<u128>| bound.contains(&bits);
+ if lo > hi {
+ // Wrapping range: valid values are `..=hi` union `lo..`.
+ if in_range(0..=hi) || in_range(lo..=u128::max_value()) {
+ Ok(())
+ } else {
+ validation_failure!(
+ bits,
+ path,
+ format!("something in the range {:?} or {:?}", ..=hi, lo..)
+ )
+ }
+ } else {
+ if in_range(scalar.valid_range.clone()) {
+ Ok(())
+ } else {
+ validation_failure!(
+ bits,
+ path,
+ format!("something in the range {:?}", scalar.valid_range)
+ )
+ }
+ }
+ }
+
+ /// This function checks the memory where `dest` points to. The place must be sized
+ /// (i.e., dest.extra == PlaceExtra::None).
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ /// The `path` may be pushed to, but the part that is present when the function
+ /// starts must not be changed!
+ ///
+ /// `seen` and `todo` form the caller-driven worklist for following
+ /// pointers: places behind a pointer are pushed onto `todo` (with an
+ /// extended path) rather than recursed into, and `seen` ensures each
+ /// pointer+layout combination is enqueued at most once, so cyclic data
+ /// does not loop forever.
+ pub fn validate_mplace(
+ &self,
+ dest: MPlaceTy<'tcx>,
+ path: &mut Vec<PathElem>,
+ seen: &mut FxHashSet<(MPlaceTy<'tcx>)>,
+ todo: &mut Vec<(MPlaceTy<'tcx>, Vec<PathElem>)>,
+ ) -> EvalResult<'tcx> {
+ // Debugging aid: dump the allocation we are about to validate.
+ self.memory.dump_alloc(dest.to_ptr()?.alloc_id);
+ trace!("validate_mplace: {:?}, {:#?}", *dest, dest.layout);
+
+ // Find the right variant. We have to handle this as a prelude, not via
+ // proper recursion with the new inner layout, to be able to later nicely
+ // print the field names of the enum field that is being accessed.
+ let (variant, dest) = match dest.layout.variants {
+ layout::Variants::NicheFilling { niche: ref tag, .. } |
+ layout::Variants::Tagged { ref tag, .. } => {
+ let size = tag.value.size(self);
+ // we first read the tag value as scalar, to be able to validate it
+ let tag_mplace = self.mplace_field(dest, 0)?;
+ let tag_value = self.read_scalar(tag_mplace.into())?;
+ path.push(PathElem::Tag);
+ self.validate_scalar(
+ tag_value, size, tag, &path, tag_mplace.layout.ty
+ )?;
+ path.pop(); // remove the element again
+ // then we read it again to get the index, to continue
+ let variant = self.read_discriminant_as_variant_index(dest.into())?;
+ let inner_dest = self.mplace_downcast(dest, variant)?;
+ // Put the variant projection onto the path, as a field
+ path.push(PathElem::Field(dest.layout.ty.ty_adt_def().unwrap().variants[variant].name));
+ trace!("variant layout: {:#?}", dest.layout);
+ (variant, inner_dest)
+ },
+ layout::Variants::Single { index } => {
+ (index, dest)
+ }
+ };
+
+ // Remember the length, in case we need to truncate
+ let path_len = path.len();
+
+ // Validate all fields
+ match dest.layout.fields {
+ // primitives are unions with zero fields
+ // We still check `layout.fields`, not `layout.abi`, because `layout.abi`
+ // is `Scalar` for newtypes around scalars, but we want to descend through the
+ // fields to get a proper `path`.
+ layout::FieldPlacement::Union(0) => {
+ match dest.layout.abi {
+ // nothing to do, whatever the pointer points to, it is never going to be read
+ layout::Abi::Uninhabited =>
+ return validation_failure!("a value of an uninhabited type", path),
+ // check that the scalar is a valid pointer or that its bit range matches the
+ // expectation.
+ layout::Abi::Scalar(ref scalar_layout) => {
+ let size = scalar_layout.value.size(self);
+ let value = self.read_value(dest.into())?;
+ let scalar = value.to_scalar_or_undef();
+ self.validate_scalar(scalar, size, scalar_layout, &path, dest.layout.ty)?;
+ if scalar_layout.value == Primitive::Pointer {
+ // ignore integer pointers, we can't reason about the final hardware
+ if let Scalar::Ptr(ptr) = scalar.not_undef()? {
+ let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
+ if let Some(AllocType::Static(did)) = alloc_kind {
+ // statics from other crates are already checked.
+ // extern statics should not be validated as they have no body.
+ if !did.is_local() || self.tcx.is_foreign_item(did) {
+ return Ok(());
+ }
+ }
+ // Only follow actual (thin) references, not raw pointers.
+ if value.layout.ty.builtin_deref(false).is_some() {
+ trace!("Recursing below ptr {:#?}", value);
+ let ptr_place = self.ref_to_mplace(value)?;
+ // we have not encountered this pointer+layout combination before
+ if seen.insert(ptr_place) {
+ todo.push((ptr_place, path_clone_and_deref(path)));
+ }
+ }
+ }
+ }
+ },
+ _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", dest.layout.abi),
+ }
+ }
+ layout::FieldPlacement::Union(_) => {
+ // We can't check unions, their bits are allowed to be anything.
+ // The fields don't need to correspond to any bit pattern of the union's fields.
+ // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389
+ },
+ layout::FieldPlacement::Array { .. } => {
+ for (i, field) in self.mplace_array_fields(dest)?.enumerate() {
+ let field = field?;
+ path.push(PathElem::ArrayElem(i));
+ self.validate_mplace(field, path, seen, todo)?;
+ // Drop the per-element projection again before the next element.
+ path.truncate(path_len);
+ }
+ },
+ layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
+ // Fat pointers need special treatment.
+ if dest.layout.ty.builtin_deref(true).is_some() {
+ // This is a fat pointer.
+ let ptr = match self.ref_to_mplace(self.read_value(dest.into())?) {
+ Ok(ptr) => ptr,
+ // Translate low-level read errors into user-facing
+ // validation failures for the two malformed-metadata cases.
+ Err(err) => match err.kind {
+ EvalErrorKind::ReadPointerAsBytes =>
+ return validation_failure!(
+ "fat pointer length is not a valid integer", path
+ ),
+ EvalErrorKind::ReadBytesAsPointer =>
+ return validation_failure!(
+ "fat pointer vtable is not a valid pointer", path
+ ),
+ _ => return Err(err),
+ }
+ };
+ let unpacked_ptr = self.unpack_unsized_mplace(ptr)?;
+ // for safe ptrs, recursively check it
+ if !dest.layout.ty.is_unsafe_ptr() {
+ trace!("Recursing below fat ptr {:?} (unpacked: {:?})", ptr, unpacked_ptr);
+ if seen.insert(unpacked_ptr) {
+ todo.push((unpacked_ptr, path_clone_and_deref(path)));
+ }
+ }
+ } else {
+ // Not a pointer, perform regular aggregate handling below
+ for i in 0..offsets.len() {
+ let field = self.mplace_field(dest, i as u64)?;
+ path.push(self.aggregate_field_path_elem(dest.layout.ty, variant, i));
+ self.validate_mplace(field, path, seen, todo)?;
+ path.truncate(path_len);
+ }
+ // FIXME: For a TyStr, check that this is valid UTF-8.
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Produce the `PathElem` naming field number `field` of the aggregate
+ /// type `ty` (within `variant` for enums), for diagnostic paths.
+ /// Panics (`bug!`) if `ty` has no aggregate layout.
+ fn aggregate_field_path_elem(&self, ty: Ty<'tcx>, variant: usize, field: usize) -> PathElem {
+ match ty.sty {
+ // generators and closures.
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ // Closures/generators are always local, so the HIR lookup is
+ // expected to succeed; their "fields" are the captured upvars.
+ let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
+ let freevar = self.tcx.with_freevars(node_id, |fv| fv[field]);
+ PathElem::ClosureVar(self.tcx.hir.name(freevar.var_id()))
+ }
+
+ // tuples
+ ty::Tuple(_) => PathElem::TupleElem(field),
+
+ // enums
+ ty::Adt(def, ..) if def.is_enum() => {
+ let variant = &def.variants[variant];
+ PathElem::Field(variant.fields[field].ident.name)
+ }
+
+ // other ADTs
+ ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name),
+
+ // nothing else has an aggregate layout
+ _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", ty),
+ }
+ }
+}
#![feature(slice_sort_by_cached_key)]
#![feature(box_patterns)]
#![feature(box_syntax)]
-#![feature(catch_expr)]
#![feature(crate_visibility_modifier)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(exhaustive_patterns)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(unicode_internals)]
#![feature(step_trait)]
#![feature(slice_concat_ext)]
+#![feature(if_while_or_patterns)]
+#![feature(try_from)]
#![recursion_limit="256"]
extern crate rustc_apfloat;
extern crate byteorder;
extern crate core;
+extern crate smallvec;
+
+// Once we can use edition 2018 in the compiler,
+// replace this with real try blocks.
+//
+// The body is wrapped in an immediately-invoked closure so that `?`
+// inside it returns from the *block* (the closure) rather than from the
+// enclosing function; `Try::from_ok` wraps the final value back up.
+macro_rules! try_block {
+ ($($inside:tt)*) => (
+ (||{ ::std::ops::Try::from_ok({ $($inside)* }) })()
+ )
+}
mod diagnostics;
shim::provide(providers);
transform::provide(providers);
providers.const_eval = interpret::const_eval_provider;
- providers.const_value_to_allocation = interpret::const_value_to_allocation_provider;
+ providers.const_to_allocation = interpret::const_to_allocation_provider;
providers.check_match = hair::pattern::check_match;
}
&source_ty,
);
match source_ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
if should_monomorphize_locally(self.tcx, &instance) {
is_direct_call: bool,
output: &mut Vec<MonoItem<'tcx>>)
{
- if let ty::TyFnDef(def_id, substs) = ty.sty {
+ if let ty::FnDef(def_id, substs) = ty.sty {
let instance = ty::Instance::resolve(tcx,
ty::ParamEnv::reveal_all(),
def_id,
}
let tail = tcx.struct_tail(ty);
match tail.sty {
- ty::TyForeign(..) => false,
- ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
};
};
match (&source_ty.sty, &target_ty.sty) {
- (&ty::TyRef(_, a, _),
- &ty::TyRef(_, b, _)) |
- (&ty::TyRef(_, a, _),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (&ty::Ref(_, a, _),
+ &ty::Ref(_, b, _)) |
+ (&ty::Ref(_, a, _),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
+ (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
ptr_vtable(a, b)
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
}
- (&ty::TyAdt(source_adt_def, source_substs),
- &ty::TyAdt(target_adt_def, target_substs)) => {
+ (&ty::Adt(source_adt_def, source_substs),
+ &ty::Adt(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
let kind =
assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_regions() &&
!impl_ty.needs_subst() && !impl_ty.has_escaping_regions());
- if let ty::TyDynamic(ref trait_ty, ..) = trait_ty.sty {
+ if let ty::Dynamic(ref trait_ty, ..) = trait_ty.sty {
if let Some(principal) = trait_ty.principal() {
let poly_trait_ref = principal.with_self_ty(tcx, impl_ty);
assert!(!poly_trait_ref.has_escaping_regions());
pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String) {
match t.sty {
- ty::TyBool => output.push_str("bool"),
- ty::TyChar => output.push_str("char"),
- ty::TyStr => output.push_str("str"),
- ty::TyNever => output.push_str("!"),
- ty::TyInt(ast::IntTy::Isize) => output.push_str("isize"),
- ty::TyInt(ast::IntTy::I8) => output.push_str("i8"),
- ty::TyInt(ast::IntTy::I16) => output.push_str("i16"),
- ty::TyInt(ast::IntTy::I32) => output.push_str("i32"),
- ty::TyInt(ast::IntTy::I64) => output.push_str("i64"),
- ty::TyInt(ast::IntTy::I128) => output.push_str("i128"),
- ty::TyUint(ast::UintTy::Usize) => output.push_str("usize"),
- ty::TyUint(ast::UintTy::U8) => output.push_str("u8"),
- ty::TyUint(ast::UintTy::U16) => output.push_str("u16"),
- ty::TyUint(ast::UintTy::U32) => output.push_str("u32"),
- ty::TyUint(ast::UintTy::U64) => output.push_str("u64"),
- ty::TyUint(ast::UintTy::U128) => output.push_str("u128"),
- ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"),
- ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"),
- ty::TyAdt(adt_def, substs) => {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => output.push_str("!"),
+ ty::Int(ast::IntTy::Isize) => output.push_str("isize"),
+ ty::Int(ast::IntTy::I8) => output.push_str("i8"),
+ ty::Int(ast::IntTy::I16) => output.push_str("i16"),
+ ty::Int(ast::IntTy::I32) => output.push_str("i32"),
+ ty::Int(ast::IntTy::I64) => output.push_str("i64"),
+ ty::Int(ast::IntTy::I128) => output.push_str("i128"),
+ ty::Uint(ast::UintTy::Usize) => output.push_str("usize"),
+ ty::Uint(ast::UintTy::U8) => output.push_str("u8"),
+ ty::Uint(ast::UintTy::U16) => output.push_str("u16"),
+ ty::Uint(ast::UintTy::U32) => output.push_str("u32"),
+ ty::Uint(ast::UintTy::U64) => output.push_str("u64"),
+ ty::Uint(ast::UintTy::U128) => output.push_str("u128"),
+ ty::Float(ast::FloatTy::F32) => output.push_str("f32"),
+ ty::Float(ast::FloatTy::F64) => output.push_str("f64"),
+ ty::Adt(adt_def, substs) => {
self.push_def_path(adt_def.did, output);
self.push_type_params(substs, iter::empty(), output);
},
- ty::TyTuple(component_types) => {
+ ty::Tuple(component_types) => {
output.push('(');
for &component_type in component_types {
self.push_type_name(component_type, output);
}
output.push(')');
},
- ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
hir::MutImmutable => output.push_str("const "),
self.push_type_name(inner_type, output);
},
- ty::TyRef(_, inner_type, mutbl) => {
+ ty::Ref(_, inner_type, mutbl) => {
output.push('&');
if mutbl == hir::MutMutable {
output.push_str("mut ");
self.push_type_name(inner_type, output);
},
- ty::TyArray(inner_type, len) => {
+ ty::Array(inner_type, len) => {
output.push('[');
self.push_type_name(inner_type, output);
write!(output, "; {}", len.unwrap_usize(self.tcx)).unwrap();
output.push(']');
},
- ty::TySlice(inner_type) => {
+ ty::Slice(inner_type) => {
output.push('[');
self.push_type_name(inner_type, output);
output.push(']');
},
- ty::TyDynamic(ref trait_data, ..) => {
+ ty::Dynamic(ref trait_data, ..) => {
if let Some(principal) = trait_data.principal() {
self.push_def_path(principal.def_id(), output);
self.push_type_params(principal.skip_binder().substs,
output);
}
},
- ty::TyForeign(did) => self.push_def_path(did, output),
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => {
+ ty::Foreign(did) => self.push_def_path(did, output),
+ ty::FnDef(..) |
+ ty::FnPtr(_) => {
let sig = t.fn_sig(self.tcx);
if sig.unsafety() == hir::Unsafety::Unsafe {
output.push_str("unsafe ");
self.push_type_name(sig.output(), output);
}
},
- ty::TyGenerator(def_id, GeneratorSubsts { ref substs }, _) |
- ty::TyClosure(def_id, ClosureSubsts { ref substs }) => {
+ ty::Generator(def_id, GeneratorSubsts { ref substs }, _) |
+ ty::Closure(def_id, ClosureSubsts { ref substs }) => {
self.push_def_path(def_id, output);
let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id));
let substs = substs.truncate_to(self.tcx, generics);
self.push_type_params(substs, iter::empty(), output);
}
- ty::TyError |
- ty::TyInfer(_) |
- ty::TyProjection(..) |
- ty::TyParam(_) |
- ty::TyGeneratorWitness(_) |
- ty::TyAnon(..) => {
+ ty::Error |
+ ty::Infer(_) |
+ ty::Projection(..) |
+ ty::Param(_) |
+ ty::GeneratorWitness(_) |
+ ty::Anon(..) => {
bug!("DefPathBasedNames: Trying to create type name for \
unexpected type: {:?}", t);
}
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
- if let Some(&ty::TyS { sty: ty::TyGenerator(gen_def_id, substs, _), .. }) = ty {
+ if let Some(&ty::TyS { sty: ty::Generator(gen_def_id, substs, _), .. }) = ty {
let mir = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
return mir.subst(tcx, substs.substs);
}
match self_ty.sty {
_ if is_copy => builder.copy_shim(),
- ty::TyArray(ty, len) => {
+ ty::Array(ty, len) => {
let len = len.unwrap_usize(tcx);
builder.array_shim(dest, src, ty, len)
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
builder.tuple_like_shim(
dest, src,
substs.upvar_tys(def_id, tcx)
)
}
- ty::TyTuple(tys) => builder.tuple_like_shim(dest, src, tys.iter().cloned()),
+ ty::Tuple(tys) => builder.tuple_like_shim(dest, src, tys.iter().cloned()),
_ => {
bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty)
}
let sig = gcx.normalize_erasing_regions(param_env, sig);
let (adt_def, substs) = match sig.output().sty {
- ty::TyAdt(adt_def, substs) => (adt_def, substs),
+ ty::Adt(adt_def, substs) => (adt_def, substs),
_ => bug!("unexpected type for ADT ctor {:?}", sig.output())
};
// A Deref projection may restrict the context, this depends on the type
// being deref'd.
let context = match ty.sty {
- ty::TyRef(re, _, mutbl) => {
+ ty::Ref(re, _, mutbl) => {
let re = match re {
&RegionKind::ReScope(ce) => Some(ce),
&RegionKind::ReErased =>
};
(re, mutbl)
}
- ty::TyRawPtr(_) =>
+ ty::RawPtr(_) =>
// There is no guarantee behind even a mutable raw pointer,
// no write locks are acquired there, so we also don't want to
// release any.
(None, hir::MutImmutable),
- ty::TyAdt(adt, _) if adt.is_box() => (None, hir::MutMutable),
+ ty::Adt(adt, _) if adt.is_box() => (None, hir::MutMutable),
_ => bug!("Deref on a non-pointer type {:?}", ty),
};
// "Intersect" this restriction with proj.base.
}
let base_ty = base.ty(self.mir, self.tcx).to_ty(self.tcx);
match base_ty.sty {
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
self.require_unsafe("dereference of raw pointer",
"raw pointers may be NULL, dangling or unaligned; they can violate \
aliasing rules and cause data races: all of these are undefined \
behavior")
}
- ty::TyAdt(adt, _) => {
+ ty::Adt(adt, _) => {
if adt.is_union() {
if context == PlaceContext::Store ||
context == PlaceContext::AsmOutput ||
use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind};
use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem};
use rustc::mir::visit::{Visitor, PlaceContext};
-use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef};
+use rustc::mir::interpret::{
+ ConstEvalErr, EvalErrorKind, ScalarMaybeUndef, Scalar, GlobalId, EvalResult
+};
use rustc::ty::{TyCtxt, self, Instance};
-use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult};
-use interpret::EvalContext;
-use interpret::CompileTimeEvaluator;
-use interpret::{eval_promoted, mk_borrowck_eval_cx, ValTy};
+use interpret::{EvalContext, CompileTimeEvaluator, eval_promoted, mk_borrowck_eval_cx};
+use interpret::{Value, OpTy, MemoryKind};
use transform::{MirPass, MirSource};
use syntax::source_map::{Span, DUMMY_SP};
use rustc::ty::subst::Substs;
-use rustc_data_structures::indexed_vec::IndexVec;
+use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc::ty::ParamEnv;
use rustc::ty::layout::{
LayoutOf, TyLayout, LayoutError,
}
}
-type Const<'tcx> = (Value, TyLayout<'tcx>, Span);
+type Const<'tcx> = (OpTy<'tcx>, Span);
/// Finds optimization opportunities on the MIR.
struct ConstPropagator<'b, 'a, 'tcx:'a+'b> {
// FIXME: implement
=> {},
- | Panic
+ | Panic { .. }
| BoundsCheck{..}
| Overflow(_)
| OverflowNeg
source_info: SourceInfo,
) -> Option<Const<'tcx>> {
self.ecx.tcx.span = source_info.span;
- match self.ecx.const_to_value(c.literal.val) {
- Ok(val) => {
+ match self.ecx.const_value_to_op(c.literal.val) {
+ Ok(op) => {
let layout = self.tcx.layout_of(self.param_env.and(c.literal.ty)).ok()?;
- Some((val, layout, c.span))
+ Some((OpTy { op, layout }, c.span))
},
Err(error) => {
let (stacktrace, span) = self.ecx.generate_stacktrace(None);
Place::Projection(ref proj) => match proj.elem {
ProjectionElem::Field(field, _) => {
trace!("field proj on {:?}", proj.base);
- let (base, layout, span) = self.eval_place(&proj.base, source_info)?;
- let valty = self.use_ecx(source_info, |this| {
- this.ecx.read_field(base, None, field, layout)
+ let (base, span) = self.eval_place(&proj.base, source_info)?;
+ let res = self.use_ecx(source_info, |this| {
+ this.ecx.operand_field(base, field.index() as u64)
})?;
- Some((valty.0, valty.1, span))
+ Some((res, span))
},
+ // We could get more projections by using e.g. `operand_projection`,
+ // but we do not even have the stack frame set up properly so
+ // an `Index` projection would throw us off-track.
_ => None,
},
Place::Promoted(ref promoted) => {
};
// cannot use `const_eval` here, because that would require having the MIR
// for the current function available, but we're producing said MIR right now
- let (value, _, ty) = self.use_ecx(source_info, |this| {
+ let res = self.use_ecx(source_info, |this| {
eval_promoted(&mut this.ecx, cid, this.mir, this.param_env)
})?;
- let val = (value, ty, source_info.span);
- trace!("evaluated promoted {:?} to {:?}", promoted, val);
- Some(val)
+ trace!("evaluated promoted {:?} to {:?}", promoted, res);
+ Some((res, source_info.span))
},
_ => None,
}
Rvalue::Discriminant(..) => None,
Rvalue::Cast(kind, ref operand, _) => {
- let (value, layout, span) = self.eval_operand(operand, source_info)?;
+ let (op, span) = self.eval_operand(operand, source_info)?;
self.use_ecx(source_info, |this| {
- let dest_ptr = this.ecx.alloc_ptr(place_layout)?;
- let place_align = place_layout.align;
- let dest = ::interpret::Place::from_ptr(dest_ptr, place_align);
- this.ecx.cast(ValTy { value, ty: layout.ty }, kind, place_layout.ty, dest)?;
- Ok((
- Value::ByRef(dest_ptr.into(), place_align),
- place_layout,
- span,
- ))
+ let dest = this.ecx.allocate(place_layout, MemoryKind::Stack)?;
+ this.ecx.cast(op, kind, dest.into())?;
+ Ok((dest.into(), span))
})
}
Rvalue::Len(_) => None,
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some((
- Value::Scalar(Scalar::Bits {
- bits: n as u128,
- size: self.tcx.data_layout.pointer_size.bytes() as u8,
- }.into()),
- self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
+ OpTy::from_scalar_value(
+ Scalar::Bits {
+ bits: n as u128,
+ size: self.tcx.data_layout.pointer_size.bytes() as u8,
+ },
+ self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
+ ),
span,
)))
}
return None;
}
- let val = self.eval_operand(arg, source_info)?;
- let prim = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty })
+ let (arg, _) = self.eval_operand(arg, source_info)?;
+ let val = self.use_ecx(source_info, |this| {
+ let prim = this.ecx.read_scalar(arg)?.not_undef()?;
+ this.ecx.unary_op(op, prim, arg.layout)
})?;
- let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?;
- Some((Value::Scalar(val.into()), place_layout, span))
+ Some((OpTy::from_scalar_value(val, place_layout), span))
}
Rvalue::CheckedBinaryOp(op, ref left, ref right) |
Rvalue::BinaryOp(op, ref left, ref right) => {
}
let r = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: right.0, ty: right.1.ty })
+ this.ecx.read_value(right.0)
})?;
if op == BinOp::Shr || op == BinOp::Shl {
let left_ty = left.ty(self.mir, self.tcx);
.unwrap()
.size
.bits();
- let right_size = right.1.size;
- if r.to_bits(right_size).ok().map_or(false, |b| b >= left_bits as u128) {
+ let right_size = right.0.layout.size;
+ let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size));
+ if r_bits.ok().map_or(false, |b| b >= left_bits as u128) {
let source_scope_local_data = match self.mir.source_scope_local_data {
ClearCrossCrate::Set(ref data) => data,
ClearCrossCrate::Clear => return None,
}
let left = self.eval_operand(left, source_info)?;
let l = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: left.0, ty: left.1.ty })
+ this.ecx.read_value(left.0)
})?;
trace!("const evaluating {:?} for {:?} and {:?}", op, left, right);
let (val, overflow) = self.use_ecx(source_info, |this| {
- this.ecx.binary_op(op, l, left.1.ty, r, right.1.ty)
+ this.ecx.binary_op(op, l, r)
})?;
let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
Value::ScalarPair(
}
Value::Scalar(val.into())
};
- Some((val, place_layout, span))
+ let res = OpTy {
+ op: ::interpret::Operand::Immediate(val),
+ layout: place_layout,
+ };
+ Some((res, span))
},
}
}
if let TerminatorKind::Assert { expected, msg, cond, .. } = kind {
if let Some(value) = self.eval_operand(cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
- if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 {
+ let expected = Value::Scalar(Scalar::from_bool(*expected).into());
+ if expected != value.0.to_immediate() {
// poison all places this operand references so that further code
// doesn't use the invalid value
match cond {
let len = self
.eval_operand(len, source_info)
.expect("len must be const");
- let len = match len.0 {
+ let len = match len.0.to_immediate() {
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
let index = self
.eval_operand(index, source_info)
.expect("index must be const");
- let index = match index.0 {
+ let index = match index.0.to_immediate() {
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
use rustc::util::nodemap::FxHashMap;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
use util::patch::MirPatch;
mir: &Mir<'tcx>,
id: ast::NodeId,
env: &MoveDataParamEnv<'tcx, 'tcx>)
- -> IdxSetBuf<BasicBlock>
+ -> IdxSet<BasicBlock>
{
debug!("find_dead_unwinds({:?})", mir.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
- let mut dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let mut dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &[], &dead_unwinds,
MaybeInitializedPlaces::new(tcx, mir, &env),
let mut init_data = InitializationData {
live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(),
- dead: IdxSetBuf::new_empty(env.move_data.move_paths.len()),
+ dead: IdxSet::new_empty(env.move_data.move_paths.len()),
};
debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}",
bb, bb_data, init_data.live);
}
struct InitializationData {
- live: IdxSetBuf<MovePathIndex>,
- dead: IdxSetBuf<MovePathIndex>
+ live: IdxSet<MovePathIndex>,
+ dead: IdxSet<MovePathIndex>
}
impl InitializationData {
use util::dump_mir;
use util::liveness::{self, IdentityMap, LivenessMode};
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use std::collections::HashMap;
use std::borrow::Cow;
use std::iter::once;
movable: bool) ->
(liveness::LiveVarSet<Local>,
HashMap<BasicBlock, liveness::LiveVarSet<Local>>) {
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let node_id = tcx.hir.as_local_node_id(source.def_id).unwrap();
// Calculate when MIR locals have live storage. This gives us an upper bound of their
// Find the MIR locals which do not use StorageLive/StorageDead statements.
// The storage of these locals are always live.
- let mut ignored = StorageIgnored(IdxSetBuf::new_filled(mir.local_decls.len()));
+ let mut ignored = StorageIgnored(IdxSet::new_filled(mir.local_decls.len()));
ignored.visit_mir(mir);
// Calculate the MIR locals which have been previously
// MIR types
let allowed_upvars = tcx.erase_regions(&upvars);
let allowed = match interior.sty {
- ty::TyGeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
+ ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
_ => bug!(),
};
// Get the interior types and substs which typeck computed
let (upvars, interior, movable) = match gen_ty.sty {
- ty::TyGenerator(_, substs, movability) => {
+ ty::Generator(_, substs, movability) => {
(substs.upvar_tys(def_id, tcx).collect(),
substs.witness(def_id, tcx),
movability == hir::GeneratorMovability::Movable)
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
- if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
+ if let ty::FnDef(callee_def_id, substs) = f.ty.sty {
if let Some(instance) = Instance::resolve(self.tcx,
param_env,
callee_def_id,
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
- if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
+ if let ty::FnDef(callee_def_id, substs) = f.ty.sty {
// Don't inline the same function multiple times.
if callsite.callee != callee_def_id {
callsites.push_back(CallSite {
}
TerminatorKind::Call {func: Operand::Constant(ref f), .. } => {
- if let ty::TyFnDef(def_id, _) = f.ty.sty {
+ if let ty::FnDef(def_id, _) = f.ty.sty {
// Don't give intrinsics the extra penalty for calls
let f = tcx.fn_sig(def_id);
if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
assert!(args.next().is_none());
let tuple = Place::Local(tuple);
- let tuple_tys = if let ty::TyTuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
+ let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
s
} else {
bug!("Closure arguments are not passed as a tuple");
*unwind = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this drop is in a cleanup block, add an unwind edge to
- // the orignal call's cleanup block
+ // the original call's cleanup block
*unwind = self.cleanup_block;
}
}
*cleanup = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this call is in a cleanup block, add an unwind edge to
- // the orignal call's cleanup block
+ // the original call's cleanup block
*cleanup = self.cleanup_block;
}
}
*cleanup = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this assert is in a cleanup block, add an unwind edge to
- // the orignal call's cleanup block
+ // the original call's cleanup block
*cleanup = self.cleanup_block;
}
}
use rustc::mir::{Constant, Location, Place, Mir, Operand, ProjectionElem, Rvalue, Local};
use rustc::mir::visit::{MutVisitor, Visitor};
-use rustc::ty::{TyCtxt, TypeVariants};
+use rustc::ty::{TyCtxt, TyKind};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::Idx;
use std::mem;
if let Rvalue::Len(ref place) = *rvalue {
let place_ty = place.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
- if let TypeVariants::TyArray(_, len) = place_ty.sty {
+ if let TyKind::Array(_, len) = place_ty.sty {
let span = self.mir.source_info(location).span;
let ty = self.tcx.types.usize;
let constant = Constant { span, ty, literal: len };
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
use rustc::mir::*;
-use rustc::ty::{Slice, Ty, TyCtxt, TypeVariants};
+use rustc::ty::{List, Ty, TyCtxt, TyKind};
use rustc_data_structures::indexed_vec::{Idx};
use transform::{MirPass, MirSource};
use syntax;
source_info,
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, call_did,
- Slice::empty(), source_info.span),
+ List::empty(), source_info.span),
args: vec![lhs, rhs],
destination: Some((place, bb)),
cleanup: None,
fn sign_of_128bit(ty: Ty) -> Option<bool> {
match ty.sty {
- TypeVariants::TyInt(syntax::ast::IntTy::I128) => Some(true),
- TypeVariants::TyUint(syntax::ast::UintTy::U128) => Some(false),
+ TyKind::Int(syntax::ast::IntTy::I128) => Some(true),
+ TyKind::Uint(syntax::ast::UintTy::U128) => Some(false),
_ => None,
}
}
let ref mut statement = blocks[loc.block].statements[loc.statement_index];
match statement.kind {
StatementKind::Assign(_, Rvalue::Ref(_, _, ref mut place)) => {
- // Find the underlying local for this (necessarilly interior) borrow.
+ // Find the underlying local for this (necessarily interior) borrow.
// HACK(eddyb) using a recursive function because of mutable borrows.
fn interior_base<'a, 'tcx>(place: &'a mut Place<'tcx>)
-> &'a mut Place<'tcx> {
//! diagnostics as to why a constant rvalue wasn't promoted.
use rustc_data_structures::bitvec::BitArray;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::fx::FxHashSet;
use rustc::hir;
}
/// Qualify a whole const, static initializer or const fn.
- fn qualify_const(&mut self) -> (Qualif, Lrc<IdxSetBuf<Local>>) {
+ fn qualify_const(&mut self) -> (Qualif, Lrc<IdxSet<Local>>) {
debug!("qualifying {} {:?}", self.mode, self.def_id);
let mir = self.mir;
// Collect all the temps we need to promote.
- let mut promoted_temps = IdxSetBuf::new_empty(self.temp_promotion_state.len());
+ let mut promoted_temps = IdxSet::new_empty(self.temp_promotion_state.len());
for candidate in &self.promotion_candidates {
match *candidate {
(self.qualif, Lrc::new(promoted_temps))
}
+
+ fn is_const_panic_fn(&self, def_id: DefId) -> bool {
+ Some(def_id) == self.tcx.lang_items().panic_fn() ||
+ Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+ }
}
/// Accumulates an Rvalue or Call's effects in self.qualif.
this.add(Qualif::NOT_CONST);
} else {
let base_ty = proj.base.ty(this.mir, this.tcx).to_ty(this.tcx);
- if let ty::TyRawPtr(_) = base_ty.sty {
+ if let ty::RawPtr(_) = base_ty.sty {
if !this.tcx.sess.features_untracked().const_raw_ptr_deref {
emit_feature_err(
&this.tcx.sess.parse_sess, "const_raw_ptr_deref",
if let Place::Projection(ref proj) = *place {
if let ProjectionElem::Deref = proj.elem {
let base_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
- if let ty::TyRef(..) = base_ty.sty {
+ if let ty::Ref(..) = base_ty.sty {
is_reborrow = true;
}
}
if self.mode == Mode::StaticMut {
// Inside a `static mut`, &mut [...] is also allowed.
match ty.sty {
- ty::TyArray(..) | ty::TySlice(_) => forbidden_mut = false,
+ ty::Array(..) | ty::Slice(_) => forbidden_mut = false,
_ => {}
}
- } else if let ty::TyArray(_, len) = ty.sty {
+ } else if let ty::Array(_, len) = ty.sty {
// FIXME(eddyb) the `self.mode == Mode::Fn` condition
// seems unnecessary, given that this is merely a ZST.
if len.unwrap_usize(self.tcx) == 0 && self.mode == Mode::Fn {
}
Rvalue::BinaryOp(op, ref lhs, _) => {
- if let ty::TyRawPtr(_) = lhs.ty(self.mir, self.tcx).sty {
+ if let ty::RawPtr(_) = lhs.ty(self.mir, self.tcx).sty {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
let fn_ty = func.ty(self.mir, self.tcx);
let mut callee_def_id = None;
let (mut is_shuffle, mut is_const_fn) = (false, None);
- if let ty::TyFnDef(def_id, _) = fn_ty.sty {
+ if let ty::FnDef(def_id, _) = fn_ty.sty {
callee_def_id = Some(def_id);
match self.tcx.fn_sig(def_id).abi() {
Abi::RustIntrinsic |
}
}
_ => {
- if self.tcx.is_const_fn(def_id) {
+ if self.tcx.is_const_fn(def_id) || self.is_const_panic_fn(def_id) {
is_const_fn = Some(def_id);
}
}
// Const fn calls.
if let Some(def_id) = is_const_fn {
+ // check the const_panic feature gate or
// find corresponding rustc_const_unstable feature
- if let Some(&attr::Stability {
+ // FIXME: cannot allow this inside `allow_internal_unstable` because that would make
+ // `panic!` insta stable in constants, since the macro is marked with the attr
+ if self.is_const_panic_fn(def_id) {
+ if self.mode == Mode::Fn {
+ // never promote panics
+ self.qualif = Qualif::NOT_CONST;
+ } else if !self.tcx.sess.features_untracked().const_panic {
+ // don't allow panics in constants without the feature gate
+ emit_feature_err(
+ &self.tcx.sess.parse_sess,
+ "const_panic",
+ self.span,
+ GateIssue::Language,
+ &format!("panicking in {}s is unstable", self.mode),
+ );
+ }
+ } else if let Some(&attr::Stability {
rustc_const_unstable: Some(attr::RustcConstUnstable {
feature: ref feature_name
}),
fn mir_const_qualif<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
- -> (u8, Lrc<IdxSetBuf<Local>>) {
+ -> (u8, Lrc<IdxSet<Local>>) {
// NB: This `borrow()` is guaranteed to be valid (i.e., the value
// cannot yet be stolen), because `mir_validated()`, which steals
// from `mir_const(), forces this query to execute before
if mir.return_ty().references_error() {
tcx.sess.delay_span_bug(mir.span, "mir_const_qualif: Mir had errors");
- return (Qualif::NOT_CONST.bits(), Lrc::new(IdxSetBuf::new_empty(0)));
+ return (Qualif::NOT_CONST.bits(), Lrc::new(IdxSet::new_empty(0)));
}
let mut qualifier = Qualifier::new(tcx, def_id, mir, Mode::Const);
use rustc::ty::{self, TyCtxt};
use rustc::mir::{self, Mir, Location};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
let param_env = tcx.param_env(def_id);
let move_data = MoveData::gather_moves(mir, tcx).unwrap();
let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env };
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &attributes, &dead_unwinds,
MaybeInitializedPlaces::new(tcx, mir, &mdpe),
if let Some(mir::Terminator { ref kind, source_info, .. }) = *terminator {
if let mir::TerminatorKind::Call { func: ref oper, ref args, .. } = *kind {
if let mir::Operand::Constant(ref func) = *oper {
- if let ty::TyFnDef(def_id, _) = func.ty.sty {
+ if let ty::FnDef(def_id, _) = func.ty.sty {
let abi = tcx.fn_sig(def_id).abi();
let name = tcx.item_name(def_id);
if abi == Abi::RustIntrinsic && name == "rustc_peek" {
// no need to transformation
} else {
let place_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
- if let ty::TyArray(item_ty, const_size) = place_ty.sty {
+ if let ty::Array(item_ty, const_size) = place_ty.sty {
if let Some(size) = const_size.assert_usize(self.tcx) {
assert!(size <= u32::max_value() as u64,
"uniform array move out doesn't supported
let local_use = &visitor.locals_use[*local];
let opt_index_and_place = Self::try_get_item_source(local_use, mir);
// each local should be used twice:
- // in assign and in aggregate statments
+ // in assign and in aggregate statements
if local_use.use_count == 2 && opt_index_and_place.is_some() {
let (index, src_place) = opt_index_and_place.unwrap();
return Some((local_use, index, src_place));
let opt_src_place = items.first().and_then(|x| *x).map(|x| x.2);
let opt_size = opt_src_place.and_then(|src_place| {
let src_ty = src_place.ty(mir, tcx).to_ty(tcx);
- if let ty::TyArray(_, ref size_o) = src_ty.sty {
+ if let ty::Array(_, ref size_o) = src_ty.sty {
size_o.assert_usize(tcx)
} else {
None
if opt_size.is_some() && items.iter().all(
|l| l.is_some() && l.unwrap().2 == opt_src_place.unwrap()) {
- let indicies: Vec<_> = items.iter().map(|x| x.unwrap().1).collect();
- for i in 1..indicies.len() {
- if indicies[i - 1] + 1 != indicies[i] {
+ let indices: Vec<_> = items.iter().map(|x| x.unwrap().1).collect();
+ for i in 1..indices.len() {
+ if indices[i - 1] + 1 != indices[i] {
return;
}
}
- let min = *indicies.first().unwrap();
- let max = *indicies.last().unwrap();
+ let min = *indices.first().unwrap();
+ let max = *indices.last().unwrap();
for item in items {
let locals_use = item.unwrap().0;
ProjectionElem::Field(..) => {
let ty = base.ty(local_decls, tcx).to_ty(tcx);
match ty.sty {
- ty::TyAdt(def, _) if def.repr.packed() => {
+ ty::Adt(def, _) if def.repr.packed() => {
return true
}
_ => {}
o: Origin,
) -> DiagnosticBuilder<'cx> {
let type_name = match (&ty.sty, is_index) {
- (&ty::TyArray(_, _), Some(true)) | (&ty::TyArray(_, _), None) => "array",
- (&ty::TySlice(_), _) => "slice",
+ (&ty::Array(_, _), Some(true)) | (&ty::Array(_, _), None) => "array",
+ (&ty::Slice(_), _) => "slice",
_ => span_bug!(move_from_span, "this path should not cause illegal move"),
};
let mut err = struct_span_err!(
) -> DiagnosticBuilder<'cx> {
let moved_path = moved_path
.map(|mp| format!(": `{}`", mp))
- .unwrap_or("".to_owned());
+ .unwrap_or(String::new());
let err = struct_span_err!(
self,
/// if can_go then succ else drop-block
/// drop-block:
/// if ptr_based {
- /// ptr = cur
+ /// ptr = &mut *cur
/// cur = cur.offset(1)
/// } else {
/// ptr = &mut P[cur]
let one = self.constant_usize(1);
let (ptr_next, cur_next) = if ptr_based {
- (Rvalue::Use(copy(&Place::Local(cur))),
+ (Rvalue::Ref(
+ tcx.types.re_erased,
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ Place::Projection(Box::new(Projection {
+ base: Place::Local(cur),
+ elem: ProjectionElem::Deref,
+ }))
+ ),
Rvalue::BinaryOp(BinOp::Offset, copy(&Place::Local(cur)), one))
} else {
(Rvalue::Ref(
if ptr_based {
let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
let tmp = Place::Local(self.new_temp(tmp_ty));
- // tmp = &P;
+ // tmp = &mut P;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.place_ty(self.place);
match ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
// This should only happen for the self argument on the resume function.
// It effetively only contains upvars until the generator transformation runs.
// See librustc_mir/transform/generator.rs for more details.
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
self.open_drop_for_tuple(tys)
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if def.is_box() {
self.open_drop_for_box(def, substs)
} else {
self.open_drop_for_adt(def, substs)
}
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let unwind = self.unwind; // FIXME(#43234)
let succ = self.succ;
self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
}
- ty::TyArray(ety, size) => {
+ ty::Array(ety, size) => {
let size = size.assert_usize(self.tcx());
self.open_drop_for_array(ety, size)
},
- ty::TySlice(ety) => self.open_drop_for_array(ety, None),
+ ty::Slice(ety) => self.open_drop_for_array(ety, None),
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
use rustc::mir::Local;
use rustc::mir::*;
use rustc::ty::{item_path, TyCtxt};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc_data_structures::work_queue::WorkQueue;
use std::fs;
use transform::MirSource;
use util::pretty::{dump_enabled, write_basic_block, write_mir_intro};
-pub type LiveVarSet<V> = IdxSetBuf<V>;
+pub type LiveVarSet<V> = IdxSet<V>;
/// This gives the result of the liveness analysis at the boundary of
/// basic blocks. You can use `simulate_block` to obtain the
) where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?;
writeln!(file, "// MIR for `{}`", node_path)?;
writeln!(file, "// source = {:?}", source)?;
};
if tcx.sess.opts.debugging_opts.dump_mir_graphviz {
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
write_mir_fn_graphviz(tcx, source.def_id, mir, &mut file)?;
let indent = depth * INDENT.len();
let children = match scope_tree.get(&parent) {
- Some(childs) => childs,
+ Some(children) => children,
None => return Ok(()),
};
use syntax::visit::{self, Visitor};
use syntax_pos::Span;
use errors;
+use errors::Applicability;
struct AstValidator<'a> {
session: &'a Session,
);
match val.node {
ExprKind::Lit(ref v) if v.node.is_numeric() => {
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
place.span.between(val.span),
"if you meant to write a comparison against a negative value, add a \
space in between `<` and `-`",
"< -".to_string(),
+ Applicability::MaybeIncorrect
);
}
_ => {}
fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) {
match *generic_args {
GenericArgs::AngleBracketed(ref data) => {
- data.args.iter().for_each(|arg| match arg {
- GenericArg::Type(ty) => self.visit_ty(ty),
- _ => {}
- });
+ for arg in &data.args {
+ self.visit_generic_arg(arg)
+ }
for type_binding in &data.bindings {
// Type bindings such as `Item=impl Debug` in `Iterator<Item=Debug>`
// are allowed to contain nested `impl Trait`.
e: &'tcx hir::Expr, node_ty: Ty<'tcx>) -> Promotability {
let ty_result = match node_ty.sty {
- ty::TyAdt(def, _) if def.has_dtor(v.tcx) => {
+ ty::Adt(def, _) if def.has_dtor(v.tcx) => {
NotPromotable
}
_ => Promotable
return NotPromotable;
}
match v.tables.node_id_to_type(lhs.hir_id).sty {
- ty::TyRawPtr(_) => {
+ ty::RawPtr(_) => {
assert!(op.node == hir::BinOpKind::Eq || op.node == hir::BinOpKind::Ne ||
op.node == hir::BinOpKind::Le || op.node == hir::BinOpKind::Lt ||
op.node == hir::BinOpKind::Ge || op.node == hir::BinOpKind::Gt);
Some(ref expr) => { struct_result = struct_result & v.check_expr(&expr); },
None => {},
}
- if let ty::TyAdt(adt, ..) = v.tables.expr_ty(e).sty {
+ if let ty::Adt(adt, ..) = v.tables.expr_ty(e).sty {
// unsafe_cell_type doesn't necessarily exist with no_core
if Some(adt.did) == v.tcx.lang_items().unsafe_cell_type() {
return NotPromotable;
extern crate syntax_pos;
extern crate rustc_data_structures;
-use rustc::hir::{self, GenericParamKind, PatKind};
+use rustc::hir::{self, PatKind};
use rustc::hir::def::Def;
use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefId};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
fn item_ty_level(&self, item_def_id: DefId) -> Option<AccessLevel> {
let ty_def_id = match self.tcx.type_of(item_def_id).sty {
- ty::TyAdt(adt, _) => adt.did,
- ty::TyForeign(did) => did,
- ty::TyDynamic(ref obj, ..) if obj.principal().is_some() =>
+ ty::Adt(adt, _) => adt.did,
+ ty::Foreign(did) => did,
+ ty::Dynamic(ref obj, ..) if obj.principal().is_some() =>
obj.principal().unwrap().def_id(),
- ty::TyProjection(ref proj) => proj.trait_ref(self.tcx).def_id,
+ ty::Projection(ref proj) => proj.trait_ref(self.tcx).def_id,
_ => return Some(AccessLevel::Public)
};
if let Some(node_id) = self.tcx.hir.as_local_node_id(ty_def_id) {
fn ty(&mut self) -> &mut Self {
let ty = self.ev.tcx.type_of(self.item_def_id);
ty.visit_with(self);
- if let ty::TyFnDef(def_id, _) = ty.sty {
+ if let ty::FnDef(def_id, _) = ty.sty {
if def_id == self.item_def_id {
self.ev.tcx.fn_sig(def_id).visit_with(self);
}
impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let ty_def_id = match ty.sty {
- ty::TyAdt(adt, _) => Some(adt.did),
- ty::TyForeign(did) => Some(did),
- ty::TyDynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
- ty::TyProjection(ref proj) => Some(proj.item_def_id),
- ty::TyFnDef(def_id, ..) |
- ty::TyClosure(def_id, ..) |
- ty::TyGenerator(def_id, ..) |
- ty::TyAnon(def_id, _) => Some(def_id),
+ ty::Adt(adt, _) => Some(adt.did),
+ ty::Foreign(did) => Some(did),
+ ty::Dynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
+ ty::Projection(ref proj) => Some(proj.item_def_id),
+ ty::FnDef(def_id, ..) |
+ ty::Closure(def_id, ..) |
+ ty::Generator(def_id, ..) |
+ ty::Anon(def_id, _) => Some(def_id),
_ => None
};
impl<'a, 'tcx> TypeVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(&ty::AdtDef { did: def_id, .. }, ..) |
- ty::TyFnDef(def_id, ..) |
- ty::TyForeign(def_id) => {
+ ty::Adt(&ty::AdtDef { did: def_id, .. }, ..) |
+ ty::FnDef(def_id, ..) |
+ ty::Foreign(def_id) => {
if !self.item_is_accessible(def_id) {
let msg = format!("type `{}` is private", ty);
self.tcx.sess.span_err(self.span, &msg);
return true;
}
- if let ty::TyFnDef(..) = ty.sty {
+ if let ty::FnDef(..) = ty.sty {
if self.tcx.fn_sig(def_id).visit_with(self) {
return true;
}
}
}
}
- ty::TyDynamic(ref predicates, ..) => {
+ ty::Dynamic(ref predicates, ..) => {
let is_private = predicates.skip_binder().iter().any(|predicate| {
let def_id = match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => trait_ref.def_id,
return true;
}
}
- ty::TyProjection(ref proj) => {
+ ty::Projection(ref proj) => {
let tcx = self.tcx;
if self.check_trait_ref(proj.trait_ref(tcx)) {
return true;
}
}
- ty::TyAnon(def_id, ..) => {
+ ty::Anon(def_id, ..) => {
for predicate in &self.tcx.predicates_of(def_id).predicates {
let trait_ref = match *predicate {
ty::Predicate::Trait(ref poly_trait_predicate) => {
return true;
}
for subst in trait_ref.substs.iter() {
- // Skip repeated `TyAnon`s to avoid infinite recursion.
+ // Skip repeated `Anon`s to avoid infinite recursion.
if let UnpackedKind::Type(ty) = subst.unpack() {
- if let ty::TyAnon(def_id, ..) = ty.sty {
+ if let ty::Anon(def_id, ..) = ty.sty {
if !self.visited_anon_tys.insert(def_id) {
continue;
}
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
- generics.params.iter().for_each(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {}
- GenericParamKind::Type { .. } => {
-                for bound in &param.bounds {
- self.check_generic_bound(bound);
- }
+ for param in &generics.params {
+            for bound in &param.bounds {
+ self.check_generic_bound(bound);
}
- });
+ }
for predicate in &generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(ref bound_pred) => {
fn ty(&mut self) -> &mut Self {
let ty = self.tcx.type_of(self.item_def_id);
ty.visit_with(self);
- if let ty::TyFnDef(def_id, _) = ty.sty {
+ if let ty::FnDef(def_id, _) = ty.sty {
if def_id == self.item_def_id {
self.tcx.fn_sig(def_id).visit_with(self);
}
impl<'a, 'tcx: 'a> TypeVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let ty_def_id = match ty.sty {
- ty::TyAdt(adt, _) => Some(adt.did),
- ty::TyForeign(did) => Some(did),
- ty::TyDynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
- ty::TyProjection(ref proj) => {
+ ty::Adt(adt, _) => Some(adt.did),
+ ty::Foreign(did) => Some(did),
+ ty::Dynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
+ ty::Projection(ref proj) => {
if self.required_visibility == ty::Visibility::Invisible {
// Conservatively approximate the whole type alias as public without
// recursing into its components when determining impl publicity.
(Def::Static(self.definitions.local_def_id(item.id), m), ValueNS)
}
ForeignItemKind::Ty => {
- (Def::TyForeign(self.definitions.local_def_id(item.id)), TypeNS)
+ (Def::ForeignTy(self.definitions.local_def_id(item.id)), TypeNS)
}
ForeignItemKind::Macro(_) => unreachable!(),
};
span);
self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion));
}
- Def::Variant(..) | Def::TyAlias(..) | Def::TyForeign(..) => {
+ Def::Variant(..) | Def::TyAlias(..) | Def::ForeignTy(..) => {
self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion));
}
Def::Fn(..) | Def::Static(..) | Def::Const(..) | Def::VariantCtor(..) => {
use self::RibKind::*;
use rustc::hir::map::{Definitions, DefCollector};
-use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr};
+use rustc::hir::{self, PrimTy, Bool, Char, Float, Int, Uint, Str};
use rustc::middle::cstore::CrateStore;
use rustc::session::Session;
use rustc::lint;
use rustc_data_structures::sync::Lrc;
use resolve_imports::{ImportDirective, ImportDirectiveSubclass, NameResolution, ImportResolver};
-use macros::{InvocationData, LegacyBinding, MacroBinding};
+use macros::{InvocationData, LegacyBinding};
// NB: This module needs to be declared first so diagnostics are
// registered before they are used.
if let Some(impl_span) = maybe_impl_defid.map_or(None,
|def_id| resolver.definitions.opt_span(def_id)) {
err.span_label(reduce_impl_span_to_impl_keyword(cm, impl_span),
- "`Self` type implicitely declared here, on the `impl`");
+ "`Self` type implicitly declared here, on the `impl`");
}
},
Def::TyParam(typaram_defid) => {
Def::Trait(..) | Def::TyAlias(..) | Def::AssociatedTy(..) |
Def::PrimTy(..) | Def::TyParam(..) | Def::SelfTy(..) |
Def::Existential(..) |
- Def::TyForeign(..) => true,
+ Def::ForeignTy(..) => true,
_ => false,
},
PathSource::Trait(AliasPossibility::No) => match def {
.filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => None,
GenericParamKind::Type { ref default, .. } => {
- if found_default || default.is_some() {
- found_default = true;
- return Some((Ident::with_empty_ctxt(param.ident.name), Def::Err));
+ found_default |= default.is_some();
+ if found_default {
+ Some((Ident::with_empty_ctxt(param.ident.name), Def::Err))
+ } else {
+ None
}
- None
}
}));
fn new() -> PrimitiveTypeTable {
let mut table = PrimitiveTypeTable { primitive_types: FxHashMap() };
- table.intern("bool", TyBool);
- table.intern("char", TyChar);
- table.intern("f32", TyFloat(FloatTy::F32));
- table.intern("f64", TyFloat(FloatTy::F64));
- table.intern("isize", TyInt(IntTy::Isize));
- table.intern("i8", TyInt(IntTy::I8));
- table.intern("i16", TyInt(IntTy::I16));
- table.intern("i32", TyInt(IntTy::I32));
- table.intern("i64", TyInt(IntTy::I64));
- table.intern("i128", TyInt(IntTy::I128));
- table.intern("str", TyStr);
- table.intern("usize", TyUint(UintTy::Usize));
- table.intern("u8", TyUint(UintTy::U8));
- table.intern("u16", TyUint(UintTy::U16));
- table.intern("u32", TyUint(UintTy::U32));
- table.intern("u64", TyUint(UintTy::U64));
- table.intern("u128", TyUint(UintTy::U128));
+ table.intern("bool", Bool);
+ table.intern("char", Char);
+ table.intern("f32", Float(FloatTy::F32));
+ table.intern("f64", Float(FloatTy::F64));
+ table.intern("isize", Int(IntTy::Isize));
+ table.intern("i8", Int(IntTy::I8));
+ table.intern("i16", Int(IntTy::I16));
+ table.intern("i32", Int(IntTy::I32));
+ table.intern("i64", Int(IntTy::I64));
+ table.intern("i128", Int(IntTy::I128));
+ table.intern("str", Str);
+ table.intern("usize", Uint(UintTy::Usize));
+ table.intern("u8", Uint(UintTy::U8));
+ table.intern("u16", Uint(UintTy::U16));
+ table.intern("u32", Uint(UintTy::U32));
+ table.intern("u64", Uint(UintTy::U64));
+ table.intern("u128", Uint(UintTy::U128));
table
}
proc_mac_errors: Vec<macros::ProcMacError>,
/// crate-local macro expanded `macro_export` referred to by a module-relative path
macro_expanded_macro_export_errors: BTreeSet<(Span, Span)>,
-
+ /// macro-expanded `macro_rules` shadowing existing macros
disallowed_shadowing: Vec<&'a LegacyBinding<'a>>,
arenas: &'a ResolverArenas<'a>,
}
ident.span = ident.span.modern();
+ let mut poisoned = None;
loop {
- let (opt_module, poisoned) = if let Some(node_id) = record_used_id {
+ let opt_module = if let Some(node_id) = record_used_id {
self.hygienic_lexical_parent_with_compatibility_fallback(module, &mut ident.span,
- node_id)
+ node_id, &mut poisoned)
} else {
- (self.hygienic_lexical_parent(module, &mut ident.span), None)
+ self.hygienic_lexical_parent(module, &mut ident.span)
};
module = unwrap_or!(opt_module, break);
let orig_current_module = self.current_module;
}
return Some(LexicalScopeBinding::Item(binding))
}
- _ if poisoned.is_some() => break,
Err(Determined) => continue,
Err(Undetermined) =>
span_bug!(ident.span, "undetermined resolution during main resolution pass"),
None
}
- fn hygienic_lexical_parent_with_compatibility_fallback(
- &mut self, module: Module<'a>, span: &mut Span, node_id: NodeId
- ) -> (Option<Module<'a>>, /* poisoned */ Option<NodeId>)
- {
+ fn hygienic_lexical_parent_with_compatibility_fallback(&mut self, module: Module<'a>,
+ span: &mut Span, node_id: NodeId,
+ poisoned: &mut Option<NodeId>)
+ -> Option<Module<'a>> {
if let module @ Some(..) = self.hygienic_lexical_parent(module, span) {
- return (module, None);
+ return module;
}
// We need to support the next case under a deprecation warning
// The macro is a proc macro derive
if module.expansion.looks_like_proc_macro_derive() {
if parent.expansion.is_descendant_of(span.ctxt().outer()) {
- return (module.parent, Some(node_id));
+ *poisoned = Some(node_id);
+ return module.parent;
}
}
}
}
- (None, None)
+ None
}
fn resolve_ident_in_module(&mut self,
HasTypeParameters(generics, rib_kind) => {
let mut function_type_rib = Rib::new(rib_kind);
let mut seen_bindings = FxHashMap();
- generics.params.iter().for_each(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {}
- GenericParamKind::Type { .. } => {
- let ident = param.ident.modern();
- debug!("with_type_parameter_rib: {}", param.id);
-
- if seen_bindings.contains_key(&ident) {
- let span = seen_bindings.get(&ident).unwrap();
- let err = ResolutionError::NameAlreadyUsedInTypeParameterList(
- ident.name,
- span,
- );
- resolve_error(self, param.ident.span, err);
- }
- seen_bindings.entry(ident).or_insert(param.ident.span);
+ for param in &generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { .. } => {
+ let ident = param.ident.modern();
+ debug!("with_type_parameter_rib: {}", param.id);
+
+ if seen_bindings.contains_key(&ident) {
+ let span = seen_bindings.get(&ident).unwrap();
+ let err = ResolutionError::NameAlreadyUsedInTypeParameterList(
+ ident.name,
+ span,
+ );
+ resolve_error(self, param.ident.span, err);
+ }
+ seen_bindings.entry(ident).or_insert(param.ident.span);
- // Plain insert (no renaming).
- let def = Def::TyParam(self.definitions.local_def_id(param.id));
- function_type_rib.bindings.insert(ident, def);
- self.record_def(param.id, PathResolution::new(def));
+ // Plain insert (no renaming).
+ let def = Def::TyParam(self.definitions.local_def_id(param.id));
+ function_type_rib.bindings.insert(ident, def);
+ self.record_def(param.id, PathResolution::new(def));
+ }
}
- });
+ }
self.ribs[TypeNS].push(function_type_rib);
}
} else if opt_ns == Some(MacroNS) {
assert!(ns == TypeNS);
self.resolve_lexical_macro_path_segment(ident, ns, record_used, record_used,
- false, path_span).map(MacroBinding::binding)
+ false, path_span).map(|(b, _)| b)
} else {
let record_used_id =
if record_used { crate_lint.node_id().or(Some(CRATE_NODE_ID)) } else { None };
}
}
// Add primitive types to the mix
- if filter_fn(Def::PrimTy(TyBool)) {
+ if filter_fn(Def::PrimTy(Bool)) {
names.extend(
self.primitive_type_table.primitive_types.iter().map(|(name, _)| name)
)
vis.is_accessible_from(module.normal_ancestor_id, self)
}
+ fn report_ambiguity_error(
+ &self, name: Name, span: Span, _lexical: bool,
+ def1: Def, is_import1: bool, is_glob1: bool, from_expansion1: bool, span1: Span,
+ def2: Def, is_import2: bool, _is_glob2: bool, _from_expansion2: bool, span2: Span,
+ ) {
+ let participle = |is_import: bool| if is_import { "imported" } else { "defined" };
+ let msg1 = format!("`{}` could refer to the name {} here", name, participle(is_import1));
+ let msg2 =
+ format!("`{}` could also refer to the name {} here", name, participle(is_import2));
+ let note = if from_expansion1 {
+ Some(if let Def::Macro(..) = def1 {
+ format!("macro-expanded {} do not shadow",
+ if is_import1 { "macro imports" } else { "macros" })
+ } else {
+ format!("macro-expanded {} do not shadow when used in a macro invocation path",
+ if is_import1 { "imports" } else { "items" })
+ })
+ } else if is_glob1 {
+ Some(format!("consider adding an explicit import of `{}` to disambiguate", name))
+ } else {
+ None
+ };
+
+ let mut err = struct_span_err!(self.session, span, E0659, "`{}` is ambiguous", name);
+ err.span_note(span1, &msg1);
+ match def2 {
+ Def::Macro(..) if span2.is_dummy() =>
+ err.note(&format!("`{}` is also a builtin macro", name)),
+ _ => err.span_note(span2, &msg2),
+ };
+ if let Some(note) = note {
+            err.note(&note);
+ }
+ err.emit();
+ }
+
fn report_errors(&mut self, krate: &Crate) {
self.report_shadowing_errors();
self.report_with_use_injections(krate);
}
for &AmbiguityError { span, name, b1, b2, lexical } in &self.ambiguity_errors {
- if !reported_spans.insert(span) { continue }
- let participle = |binding: &NameBinding| {
- if binding.is_import() { "imported" } else { "defined" }
- };
- let msg1 = format!("`{}` could refer to the name {} here", name, participle(b1));
- let msg2 = format!("`{}` could also refer to the name {} here", name, participle(b2));
- let note = if b1.expansion == Mark::root() || !lexical && b1.is_glob_import() {
- format!("consider adding an explicit import of `{}` to disambiguate", name)
- } else if let Def::Macro(..) = b1.def() {
- format!("macro-expanded {} do not shadow",
- if b1.is_import() { "macro imports" } else { "macros" })
- } else {
- format!("macro-expanded {} do not shadow when used in a macro invocation path",
- if b1.is_import() { "imports" } else { "items" })
- };
-
- let mut err = struct_span_err!(self.session, span, E0659, "`{}` is ambiguous", name);
- err.span_note(b1.span, &msg1);
- match b2.def() {
- Def::Macro(..) if b2.span.is_dummy() =>
- err.note(&format!("`{}` is also a builtin macro", name)),
- _ => err.span_note(b2.span, &msg2),
- };
-            err.note(&note).emit();
+ if reported_spans.insert(span) {
+ self.report_ambiguity_error(
+ name, span, lexical,
+ b1.def(), b1.is_import(), b1.is_glob_import(),
+ b1.expansion != Mark::root(), b1.span,
+ b2.def(), b2.is_import(), b2.is_glob_import(),
+ b2.expansion != Mark::root(), b2.span,
+ );
+ }
}
for &PrivacyError(span, name, binding) in &self.privacy_errors {
use syntax::tokenstream::{TokenStream, TokenTree, Delimited};
use syntax::util::lev_distance::find_best_match_for_name;
use syntax_pos::{Span, DUMMY_SP};
+use errors::Applicability;
use std::cell::Cell;
use std::mem;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::small_vec::ExpectOne;
+
+crate struct FromPrelude(bool);
+crate struct FromExpansion(bool);
#[derive(Clone)]
pub struct InvocationData<'a> {
pub span: Span,
}
+impl<'a> LegacyBinding<'a> {
+ fn def(&self) -> Def {
+ Def::Macro(self.def_id, MacroKind::Bang)
+ }
+}
+
pub struct ProcMacError {
crate_name: Symbol,
name: Symbol,
warn_msg: &'static str,
}
-#[derive(Copy, Clone)]
-pub enum MacroBinding<'a> {
- Legacy(&'a LegacyBinding<'a>),
- Global(&'a NameBinding<'a>),
- Modern(&'a NameBinding<'a>),
-}
-
-impl<'a> MacroBinding<'a> {
- pub fn span(self) -> Span {
- match self {
- MacroBinding::Legacy(binding) => binding.span,
- MacroBinding::Global(binding) | MacroBinding::Modern(binding) => binding.span,
- }
- }
-
- pub fn binding(self) -> &'a NameBinding<'a> {
- match self {
- MacroBinding::Global(binding) | MacroBinding::Modern(binding) => binding,
- MacroBinding::Legacy(_) => panic!("unexpected MacroBinding::Legacy"),
- }
- }
-
- pub fn def_ignoring_ambiguity(self) -> Def {
- match self {
- MacroBinding::Legacy(binding) => Def::Macro(binding.def_id, MacroKind::Bang),
- MacroBinding::Global(binding) | MacroBinding::Modern(binding) =>
- binding.def_ignoring_ambiguity(),
- }
- }
-}
-
impl<'a, 'crateloader: 'a> base::Resolver for Resolver<'a, 'crateloader> {
fn next_node_id(&mut self) -> ast::NodeId {
self.session.next_node_id()
None
}
- fn resolve_invoc(&mut self, invoc: &Invocation, scope: Mark, force: bool)
- -> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
- let def = match invoc.kind {
- InvocationKind::Attr { attr: None, .. } => return Ok(None),
- _ => self.resolve_invoc_to_def(invoc, scope, force)?,
+ fn resolve_macro_invocation(&mut self, invoc: &Invocation, scope: Mark, force: bool)
+ -> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
+ let (path, kind, derives_in_scope) = match invoc.kind {
+ InvocationKind::Attr { attr: None, .. } =>
+ return Ok(None),
+ InvocationKind::Attr { attr: Some(ref attr), ref traits, .. } =>
+ (&attr.path, MacroKind::Attr, &traits[..]),
+ InvocationKind::Bang { ref mac, .. } =>
+ (&mac.node.path, MacroKind::Bang, &[][..]),
+ InvocationKind::Derive { ref path, .. } =>
+ (path, MacroKind::Derive, &[][..]),
};
- if let Def::Macro(_, MacroKind::ProcMacroStub) = def {
- self.report_proc_macro_stub(invoc.span());
- return Err(Determinacy::Determined);
- } else if let Def::NonMacroAttr(attr_kind) = def {
- // Note that not only attributes, but anything in macro namespace can result in a
- // `Def::NonMacroAttr` definition (e.g. `inline!()`), so we must report the error
- // below for these cases.
- let is_attr_invoc =
- if let InvocationKind::Attr { .. } = invoc.kind { true } else { false };
- let path = invoc.path().expect("no path for non-macro attr");
- match attr_kind {
- NonMacroAttrKind::Tool | NonMacroAttrKind::DeriveHelper |
- NonMacroAttrKind::Custom if is_attr_invoc => {
- let features = self.session.features_untracked();
- if attr_kind == NonMacroAttrKind::Tool &&
- !features.tool_attributes {
- feature_err(&self.session.parse_sess, "tool_attributes",
- invoc.span(), GateIssue::Language,
- "tool attributes are unstable").emit();
- }
- if attr_kind == NonMacroAttrKind::Custom {
- assert!(path.segments.len() == 1);
- let name = path.segments[0].ident.name.as_str();
- if name.starts_with("rustc_") {
- if !features.rustc_attrs {
- let msg = "unless otherwise specified, attributes with the prefix \
- `rustc_` are reserved for internal compiler diagnostics";
- feature_err(&self.session.parse_sess, "rustc_attrs", invoc.span(),
- GateIssue::Language, &msg).emit();
- }
- } else if name.starts_with("derive_") {
- if !features.custom_derive {
- feature_err(&self.session.parse_sess, "custom_derive", invoc.span(),
- GateIssue::Language, EXPLAIN_DERIVE_UNDERSCORE).emit();
- }
- } else if !features.custom_attribute {
- let msg = format!("The attribute `{}` is currently unknown to the \
- compiler and may have meaning added to it in the \
- future", path);
- feature_err(&self.session.parse_sess, "custom_attribute", invoc.span(),
- GateIssue::Language, &msg).emit();
- }
- }
- return Ok(Some(Lrc::new(SyntaxExtension::NonMacroAttr {
- mark_used: attr_kind == NonMacroAttrKind::Tool,
- })));
- }
- _ => {
- self.report_non_macro_attr(path.span, def);
- return Err(Determinacy::Determined);
- }
- }
+ let (def, ext) = self.resolve_macro_to_def(path, kind, scope, derives_in_scope, force)?;
+
+ if let Def::Macro(def_id, _) = def {
+ self.macro_defs.insert(invoc.expansion_data.mark, def_id);
+ let normal_module_def_id =
+ self.macro_def_scope(invoc.expansion_data.mark).normal_ancestor_id;
+ self.definitions.add_parent_module_of_macro_def(invoc.expansion_data.mark,
+ normal_module_def_id);
+ invoc.expansion_data.mark.set_default_transparency(ext.default_transparency());
+ invoc.expansion_data.mark.set_is_builtin(def_id.krate == BUILTIN_MACROS_CRATE);
}
- let def_id = def.def_id();
-
- self.macro_defs.insert(invoc.expansion_data.mark, def_id);
- let normal_module_def_id =
- self.macro_def_scope(invoc.expansion_data.mark).normal_ancestor_id;
- self.definitions.add_parent_module_of_macro_def(invoc.expansion_data.mark,
- normal_module_def_id);
-
- self.unused_macros.remove(&def_id);
- let ext = self.get_macro(def);
- invoc.expansion_data.mark.set_default_transparency(ext.default_transparency());
- invoc.expansion_data.mark.set_is_builtin(def_id.krate == BUILTIN_MACROS_CRATE);
+
Ok(Some(ext))
}
- fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, kind: MacroKind, force: bool)
- -> Result<Lrc<SyntaxExtension>, Determinacy> {
- self.resolve_macro_to_def(scope, path, kind, force).and_then(|def| {
- if let Def::Macro(_, MacroKind::ProcMacroStub) = def {
- self.report_proc_macro_stub(path.span);
- return Err(Determinacy::Determined);
- } else if let Def::NonMacroAttr(..) = def {
- self.report_non_macro_attr(path.span, def);
- return Err(Determinacy::Determined);
- }
- self.unused_macros.remove(&def.def_id());
- Ok(self.get_macro(def))
- })
+ fn resolve_macro_path(&mut self, path: &ast::Path, kind: MacroKind, scope: Mark,
+ derives_in_scope: &[ast::Path], force: bool)
+ -> Result<Lrc<SyntaxExtension>, Determinacy> {
+ Ok(self.resolve_macro_to_def(path, kind, scope, derives_in_scope, force)?.1)
}
fn check_unused_macros(&self) {
}
impl<'a, 'cl> Resolver<'a, 'cl> {
- fn report_proc_macro_stub(&self, span: Span) {
- self.session.span_err(span,
- "can't use a procedural macro from the same crate that defines it");
- }
+ fn resolve_macro_to_def(&mut self, path: &ast::Path, kind: MacroKind, scope: Mark,
+ derives_in_scope: &[ast::Path], force: bool)
+ -> Result<(Def, Lrc<SyntaxExtension>), Determinacy> {
+ let def = self.resolve_macro_to_def_inner(path, kind, scope, derives_in_scope, force);
- fn report_non_macro_attr(&self, span: Span, def: Def) {
- self.session.span_err(span, &format!("expected a macro, found {}", def.kind_name()));
- }
-
- fn resolve_invoc_to_def(&mut self, invoc: &Invocation, scope: Mark, force: bool)
- -> Result<Def, Determinacy> {
- let (attr, traits) = match invoc.kind {
- InvocationKind::Attr { ref attr, ref traits, .. } => (attr, traits),
- InvocationKind::Bang { ref mac, .. } => {
- return self.resolve_macro_to_def(scope, &mac.node.path, MacroKind::Bang, force);
- }
- InvocationKind::Derive { ref path, .. } => {
- return self.resolve_macro_to_def(scope, path, MacroKind::Derive, force);
+ // Report errors and enforce feature gates for the resolved macro.
+ if def != Err(Determinacy::Undetermined) {
+ // Do not report duplicated errors on every undetermined resolution.
+ for segment in &path.segments {
+ if let Some(args) = &segment.args {
+ self.session.span_err(args.span(), "generic arguments in macro path");
+ }
}
- };
-
- let path = attr.as_ref().unwrap().path.clone();
- let def = self.resolve_macro_to_def(scope, &path, MacroKind::Attr, force);
- if let Ok(Def::NonMacroAttr(NonMacroAttrKind::Custom)) = def {} else {
- return def;
}
- // At this point we've found that the `attr` is determinately unresolved and thus can be
- // interpreted as a custom attribute. Normally custom attributes are feature gated, but
- // it may be a custom attribute whitelisted by a derive macro and they do not require
- // a feature gate.
- //
- // So here we look through all of the derive annotations in scope and try to resolve them.
- // If they themselves successfully resolve *and* one of the resolved derive macros
- // whitelists this attribute's name, then this is a registered attribute and we can convert
- // it from a "generic custom attrite" into a "known derive helper attribute".
- enum ConvertToDeriveHelper { Yes, No, DontKnow }
- let mut convert_to_derive_helper = ConvertToDeriveHelper::No;
- let attr_name = path.segments[0].ident.name;
- for path in traits {
- match self.resolve_macro(scope, path, MacroKind::Derive, force) {
- Ok(ext) => if let SyntaxExtension::ProcMacroDerive(_, ref inert_attrs, _) = *ext {
- if inert_attrs.contains(&attr_name) {
- convert_to_derive_helper = ConvertToDeriveHelper::Yes;
- break
+ let def = def?;
+
+ match def {
+ Def::Macro(def_id, macro_kind) => {
+ self.unused_macros.remove(&def_id);
+ if macro_kind == MacroKind::ProcMacroStub {
+ let msg = "can't use a procedural macro from the same crate that defines it";
+ self.session.span_err(path.span, msg);
+ return Err(Determinacy::Determined);
+ }
+ }
+ Def::NonMacroAttr(attr_kind) => {
+ if kind == MacroKind::Attr {
+ let features = self.session.features_untracked();
+ if attr_kind == NonMacroAttrKind::Custom {
+ assert!(path.segments.len() == 1);
+ let name = path.segments[0].ident.name.as_str();
+ if name.starts_with("rustc_") {
+ if !features.rustc_attrs {
+ let msg = "unless otherwise specified, attributes with the prefix \
+ `rustc_` are reserved for internal compiler diagnostics";
+ feature_err(&self.session.parse_sess, "rustc_attrs", path.span,
+ GateIssue::Language, &msg).emit();
+ }
+ } else if name.starts_with("derive_") {
+ if !features.custom_derive {
+ feature_err(&self.session.parse_sess, "custom_derive", path.span,
+ GateIssue::Language, EXPLAIN_DERIVE_UNDERSCORE).emit();
+ }
+ } else if !features.custom_attribute {
+ let msg = format!("The attribute `{}` is currently unknown to the \
+ compiler and may have meaning added to it in the \
+ future", path);
+ feature_err(&self.session.parse_sess, "custom_attribute", path.span,
+ GateIssue::Language, &msg).emit();
+ }
}
- },
- Err(Determinacy::Undetermined) =>
- convert_to_derive_helper = ConvertToDeriveHelper::DontKnow,
- Err(Determinacy::Determined) => {}
+ } else {
+ // Not only attributes, but anything in macro namespace can result in
+ // `Def::NonMacroAttr` definition (e.g. `inline!()`), so we must report
+ // an error for those cases.
+ let msg = format!("expected a macro, found {}", def.kind_name());
+ self.session.span_err(path.span, &msg);
+ return Err(Determinacy::Determined);
+ }
}
+ _ => panic!("expected `Def::Macro` or `Def::NonMacroAttr`"),
}
- match convert_to_derive_helper {
- ConvertToDeriveHelper::Yes => Ok(Def::NonMacroAttr(NonMacroAttrKind::DeriveHelper)),
- ConvertToDeriveHelper::No => def,
- ConvertToDeriveHelper::DontKnow => Err(Determinacy::determined(force)),
- }
- }
-
- fn resolve_macro_to_def(&mut self, scope: Mark, path: &ast::Path, kind: MacroKind, force: bool)
- -> Result<Def, Determinacy> {
- let def = self.resolve_macro_to_def_inner(scope, path, kind, force);
- if def != Err(Determinacy::Undetermined) {
- // Do not report duplicated errors on every undetermined resolution.
- path.segments.iter().find(|segment| segment.args.is_some()).map(|segment| {
- self.session.span_err(segment.args.as_ref().unwrap().span(),
- "generic arguments in macro path");
- });
- }
- if kind != MacroKind::Bang && path.segments.len() > 1 &&
- def != Ok(Def::NonMacroAttr(NonMacroAttrKind::Tool)) {
- if !self.session.features_untracked().proc_macro_path_invoc {
- emit_feature_err(
- &self.session.parse_sess,
- "proc_macro_path_invoc",
- path.span,
- GateIssue::Language,
- "paths of length greater than one in macro invocations are \
- currently unstable",
- );
- }
- }
- def
+ Ok((def, self.get_macro(def)))
}
- pub fn resolve_macro_to_def_inner(&mut self, scope: Mark, path: &ast::Path,
- kind: MacroKind, force: bool)
- -> Result<Def, Determinacy> {
+ pub fn resolve_macro_to_def_inner(&mut self, path: &ast::Path, kind: MacroKind, scope: Mark,
+ derives_in_scope: &[ast::Path], force: bool)
+ -> Result<Def, Determinacy> {
let ast::Path { ref segments, span } = *path;
let mut path: Vec<_> = segments.iter().map(|seg| seg.ident).collect();
let invocation = self.invocations[&scope];
}
let legacy_resolution = self.resolve_legacy_scope(&invocation.legacy_scope, path[0], false);
- let result = if let Some(MacroBinding::Legacy(binding)) = legacy_resolution {
- Ok(Def::Macro(binding.def_id, MacroKind::Bang))
+ let result = if let Some((legacy_binding, _)) = legacy_resolution {
+ Ok(legacy_binding.def())
} else {
match self.resolve_lexical_macro_path_segment(path[0], MacroNS, false, force,
kind == MacroKind::Attr, span) {
- Ok(binding) => Ok(binding.binding().def_ignoring_ambiguity()),
+ Ok((binding, _)) => Ok(binding.def_ignoring_ambiguity()),
Err(Determinacy::Undetermined) => return Err(Determinacy::Undetermined),
Err(Determinacy::Determined) => {
self.found_unresolved_macro = true;
self.current_module.nearest_item_scope().legacy_macro_resolutions.borrow_mut()
.push((scope, path[0], kind, result.ok()));
- result
+ if let Ok(Def::NonMacroAttr(NonMacroAttrKind::Custom)) = result {} else {
+ return result;
+ }
+
+ // At this point we've found that the `attr` is determinately unresolved and thus can be
+ // interpreted as a custom attribute. Normally custom attributes are feature gated, but
+ // it may be a custom attribute whitelisted by a derive macro and they do not require
+ // a feature gate.
+ //
+ // So here we look through all of the derive annotations in scope and try to resolve them.
+ // If they themselves successfully resolve *and* one of the resolved derive macros
+ // whitelists this attribute's name, then this is a registered attribute and we can convert
+        // it from a "generic custom attribute" into a "known derive helper attribute".
+ assert!(kind == MacroKind::Attr);
+ enum ConvertToDeriveHelper { Yes, No, DontKnow }
+ let mut convert_to_derive_helper = ConvertToDeriveHelper::No;
+ for derive in derives_in_scope {
+ match self.resolve_macro_path(derive, MacroKind::Derive, scope, &[], force) {
+ Ok(ext) => if let SyntaxExtension::ProcMacroDerive(_, ref inert_attrs, _) = *ext {
+ if inert_attrs.contains(&path[0].name) {
+ convert_to_derive_helper = ConvertToDeriveHelper::Yes;
+ break
+ }
+ },
+ Err(Determinacy::Undetermined) =>
+ convert_to_derive_helper = ConvertToDeriveHelper::DontKnow,
+ Err(Determinacy::Determined) => {}
+ }
+ }
+
+ match convert_to_derive_helper {
+ ConvertToDeriveHelper::Yes => Ok(Def::NonMacroAttr(NonMacroAttrKind::DeriveHelper)),
+ ConvertToDeriveHelper::No => result,
+ ConvertToDeriveHelper::DontKnow => Err(Determinacy::determined(force)),
+ }
}
// Resolve the initial segment of a non-global macro path
// (e.g. `foo` in `foo::bar!(); or `foo!();`).
// This is a variation of `fn resolve_ident_in_lexical_scope` that can be run during
// expansion and import resolution (perhaps they can be merged in the future).
- pub fn resolve_lexical_macro_path_segment(&mut self,
- mut ident: Ident,
- ns: Namespace,
- record_used: bool,
- force: bool,
- is_attr: bool,
- path_span: Span)
- -> Result<MacroBinding<'a>, Determinacy> {
+ crate fn resolve_lexical_macro_path_segment(
+ &mut self,
+ mut ident: Ident,
+ ns: Namespace,
+ record_used: bool,
+ force: bool,
+ is_attr: bool,
+ path_span: Span
+ ) -> Result<(&'a NameBinding<'a>, FromPrelude), Determinacy> {
// General principles:
// 1. Not controlled (user-defined) names should have higher priority than controlled names
// built into the language or standard library. This way we can add new names into the
// m::mac!();
// }
// This includes names from globs and from macro expansions.
- let mut potentially_ambiguous_result: Option<MacroBinding> = None;
+ let mut potentially_ambiguous_result: Option<(&NameBinding, FromPrelude)> = None;
enum WhereToResolve<'a> {
Module(Module<'a>),
path_span,
);
self.current_module = orig_current_module;
- binding.map(MacroBinding::Modern)
+ binding.map(|binding| (binding, FromPrelude(false)))
}
WhereToResolve::MacroPrelude => {
match self.macro_prelude.get(&ident.name).cloned() {
- Some(binding) => Ok(MacroBinding::Global(binding)),
+ Some(binding) => Ok((binding, FromPrelude(true))),
None => Err(Determinacy::Determined),
}
}
let binding = (Def::NonMacroAttr(NonMacroAttrKind::Builtin),
ty::Visibility::Public, ident.span, Mark::root())
.to_name_binding(self.arenas);
- Ok(MacroBinding::Global(binding))
+ Ok((binding, FromPrelude(true)))
} else {
Err(Determinacy::Determined)
}
let binding = (crate_root, ty::Visibility::Public,
ident.span, Mark::root()).to_name_binding(self.arenas);
- Ok(MacroBinding::Global(binding))
+ Ok((binding, FromPrelude(true)))
} else {
Err(Determinacy::Determined)
}
if use_prelude && is_known_tool(ident.name) {
let binding = (Def::ToolMod, ty::Visibility::Public,
ident.span, Mark::root()).to_name_binding(self.arenas);
- Ok(MacroBinding::Global(binding))
+ Ok((binding, FromPrelude(true)))
} else {
Err(Determinacy::Determined)
}
false,
path_span,
) {
- result = Ok(MacroBinding::Global(binding));
+ result = Ok((binding, FromPrelude(true)));
}
}
}
self.primitive_type_table.primitive_types.get(&ident.name).cloned() {
let binding = (Def::PrimTy(prim_ty), ty::Visibility::Public,
ident.span, Mark::root()).to_name_binding(self.arenas);
- Ok(MacroBinding::Global(binding))
+ Ok((binding, FromPrelude(true)))
} else {
Err(Determinacy::Determined)
}
return Ok(result);
}
- let binding = result.binding();
-
// Found a solution that is ambiguous with a previously found solution.
// Push an ambiguity error for later reporting and
// return something for better recovery.
if let Some(previous_result) = potentially_ambiguous_result {
- if binding.def() != previous_result.binding().def() {
+ if result.0.def() != previous_result.0.def() {
self.ambiguity_errors.push(AmbiguityError {
span: path_span,
name: ident.name,
- b1: previous_result.binding(),
- b2: binding,
+ b1: previous_result.0,
+ b2: result.0,
lexical: true,
});
return Ok(previous_result);
// Found a solution that's not an ambiguity yet, but is "suspicious" and
// can participate in ambiguities later on.
// Remember it and go search for other solutions in outer scopes.
- if binding.is_glob_import() || binding.expansion != Mark::root() {
+ if result.0.is_glob_import() || result.0.expansion != Mark::root() {
potentially_ambiguous_result = Some(result);
continue_search!();
let binding = (Def::NonMacroAttr(NonMacroAttrKind::Custom),
ty::Visibility::Public, ident.span, Mark::root())
.to_name_binding(self.arenas);
- Ok(MacroBinding::Global(binding))
+ Ok((binding, FromPrelude(true)))
} else {
Err(determinacy)
}
}
- pub fn resolve_legacy_scope(&mut self,
- mut scope: &'a Cell<LegacyScope<'a>>,
- ident: Ident,
- record_used: bool)
- -> Option<MacroBinding<'a>> {
+ crate fn resolve_legacy_scope(&mut self,
+ mut scope: &'a Cell<LegacyScope<'a>>,
+ ident: Ident,
+ record_used: bool)
+ -> Option<(&'a LegacyBinding<'a>, FromExpansion)> {
let ident = ident.modern();
let mut relative_depth: u32 = 0;
- let mut binding = None;
loop {
match scope.get() {
LegacyScope::Empty => break,
if record_used && relative_depth > 0 {
self.disallowed_shadowing.push(potential_binding);
}
- binding = Some(potential_binding);
- break
+ return Some((potential_binding, FromExpansion(relative_depth > 0)));
}
scope = &potential_binding.parent;
}
};
}
- let binding = if let Some(binding) = binding {
- MacroBinding::Legacy(binding)
- } else if let Some(binding) = self.macro_prelude.get(&ident.name).cloned() {
- MacroBinding::Global(binding)
- } else {
- return None;
- };
-
- Some(binding)
+ None
}
pub fn finalize_current_module_macro_resolutions(&mut self) {
let resolution = self.resolve_lexical_macro_path_segment(ident, MacroNS, true, true,
kind == MacroKind::Attr, span);
- let check_consistency = |this: &Self, binding: MacroBinding| {
+ let check_consistency = |this: &Self, new_def: Def| {
if let Some(def) = def {
if this.ambiguity_errors.is_empty() && this.disallowed_shadowing.is_empty() &&
- binding.def_ignoring_ambiguity() != def {
+ new_def != def && new_def != Def::Err {
// Make sure compilation does not succeed if preferred macro resolution
// has changed after the macro had been expanded. In theory all such
// situations should be reported as ambiguity errors, so this is span-bug.
};
match (legacy_resolution, resolution) {
- (Some(MacroBinding::Legacy(legacy_binding)), Ok(MacroBinding::Modern(binding))) => {
- if legacy_binding.def_id != binding.def_ignoring_ambiguity().def_id() {
- let msg1 = format!("`{}` could refer to the macro defined here", ident);
- let msg2 =
- format!("`{}` could also refer to the macro imported here", ident);
- self.session.struct_span_err(span, &format!("`{}` is ambiguous", ident))
- .span_note(legacy_binding.span, &msg1)
- .span_note(binding.span, &msg2)
- .emit();
- }
- },
(None, Err(_)) => {
assert!(def.is_none());
let bang = if kind == MacroKind::Bang { "!" } else { "" };
self.suggest_macro_name(&ident.as_str(), kind, &mut err, span);
err.emit();
},
- (Some(MacroBinding::Modern(_)), _) | (_, Ok(MacroBinding::Legacy(_))) => {
- span_bug!(span, "impossible macro resolution result");
- }
+ (Some((legacy_binding, FromExpansion(from_expansion))),
+ Ok((binding, FromPrelude(false)))) |
+ (Some((legacy_binding, FromExpansion(from_expansion @ true))),
+ Ok((binding, FromPrelude(true)))) => {
+ if legacy_binding.def() != binding.def_ignoring_ambiguity() {
+ self.report_ambiguity_error(
+ ident.name, span, true,
+ legacy_binding.def(), false, false,
+ from_expansion, legacy_binding.span,
+ binding.def(), binding.is_import(), binding.is_glob_import(),
+ binding.expansion != Mark::root(), binding.span,
+ );
+ }
+ },
+ // OK, non-macro-expanded legacy wins over macro prelude even if defs are different
+ (Some((legacy_binding, FromExpansion(false))), Ok((_, FromPrelude(true)))) |
// OK, unambiguous resolution
- (Some(binding), Err(_)) | (None, Ok(binding)) |
- // OK, legacy wins over global even if their definitions are different
- (Some(binding @ MacroBinding::Legacy(_)), Ok(MacroBinding::Global(_))) |
- // OK, modern wins over global even if their definitions are different
- (Some(MacroBinding::Global(_)), Ok(binding @ MacroBinding::Modern(_))) => {
- check_consistency(self, binding);
+ (Some((legacy_binding, _)), Err(_)) => {
+ check_consistency(self, legacy_binding.def());
}
- (Some(MacroBinding::Global(binding1)), Ok(MacroBinding::Global(binding2))) => {
- if binding1.def() != binding2.def() {
- span_bug!(span, "mismatch between same global macro resolutions");
+ // OK, unambiguous resolution
+ (None, Ok((binding, FromPrelude(from_prelude)))) => {
+ check_consistency(self, binding.def_ignoring_ambiguity());
+ if from_prelude {
+ self.record_use(ident, MacroNS, binding, span);
+ self.err_if_macro_use_proc_macro(ident.name, span, binding);
}
- check_consistency(self, MacroBinding::Global(binding1));
-
- self.record_use(ident, MacroNS, binding1, span);
- self.err_if_macro_use_proc_macro(ident.name, span, binding1);
- },
+ }
};
}
}
if let Some(suggestion) = suggestion {
if suggestion != name {
if let MacroKind::Bang = kind {
- err.span_suggestion(span, "you could try the macro", suggestion.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ "you could try the macro",
+ suggestion.to_string(),
+ Applicability::MaybeIncorrect
+ );
} else {
- err.span_suggestion(span, "try", suggestion.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ "try",
+ suggestion.to_string(),
+ Applicability::MaybeIncorrect
+ );
}
} else {
err.help("have you added the `#[macro_use]` on the module/import?");
/// Error if `ext` is a Macros 1.1 procedural macro being imported by `#[macro_use]`
fn err_if_macro_use_proc_macro(&mut self, name: Name, use_span: Span,
binding: &NameBinding<'a>) {
- let krate = binding.def().def_id().krate;
+ let krate = match binding.def() {
+ Def::NonMacroAttr(..) | Def::Err => return,
+ Def::Macro(def_id, _) => def_id.krate,
+ _ => unreachable!(),
+ };
// Plugin-based syntax extensions are exempt from this check
if krate == BUILTIN_MACROS_CRATE { return; }
if let Some(span) = span {
let found_use = if found_use { "" } else { "\n" };
self.session.struct_span_err(err.use_span, err.warn_msg)
- .span_suggestion(
+ .span_suggestion_with_applicability(
span,
"instead, import the procedural macro like any other item",
format!("use {}::{};{}", err.crate_name, err.name, found_use),
+ Applicability::MachineApplicable
).emit();
} else {
self.session.struct_span_err(err.use_span, err.warn_msg)
};
match self.resolve_ident_in_module(module, ident, ns, false, path_span) {
Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, single_import.parent) => continue,
Ok(_) | Err(Undetermined) => return Err(Undetermined),
}
}
path_span,
);
self.current_module = orig_current_module;
+
match result {
Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, glob_import.parent) => continue,
Ok(_) | Err(Undetermined) => return Err(Undetermined),
}
}
let lev_suggestion =
match find_best_match_for_name(names, &ident.as_str(), None) {
Some(name) => format!(". Did you mean to use `{}`?", name),
- None => "".to_owned(),
+ None => String::new(),
};
let msg = match module {
ModuleOrUniformRoot::Module(module) => {
rustc_typeck = { path = "../librustc_typeck" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
-rls-data = "0.16"
+rls-data = "0.18"
rls-span = "0.4"
# FIXME(#40527) should move rustc serialize out of tree
rustc-serialize = "0.3"
if let Some(ref generic_args) = seg.args {
match **generic_args {
ast::GenericArgs::AngleBracketed(ref data) => {
- data.args.iter().for_each(|arg| match arg {
- ast::GenericArg::Type(ty) => self.visit_ty(ty),
- _ => {}
- });
+ for arg in &data.args {
+ match arg {
+ ast::GenericArg::Type(ty) => self.visit_ty(ty),
+ _ => {}
+ }
+ }
}
ast::GenericArgs::Parenthesized(ref data) => {
for t in &data.inputs {
// Explicit types in the turbo-fish.
if let Some(ref generic_args) = seg.args {
if let ast::GenericArgs::AngleBracketed(ref data) = **generic_args {
- data.args.iter().for_each(|arg| match arg {
- ast::GenericArg::Type(ty) => self.visit_ty(ty),
- _ => {}
- });
+ for arg in &data.args {
+ match arg {
+ ast::GenericArg::Type(ty) => self.visit_ty(ty),
+ _ => {}
+ }
+ }
}
}
}
fn visit_generics(&mut self, generics: &'l ast::Generics) {
- generics.params.iter().for_each(|param| match param.kind {
- ast::GenericParamKind::Lifetime { .. } => {}
- ast::GenericParamKind::Type { ref default, .. } => {
- for bound in ¶m.bounds {
- if let ast::GenericBound::Trait(ref trait_ref, _) = *bound {
- self.process_path(trait_ref.trait_ref.ref_id, &trait_ref.trait_ref.path)
+ for param in &generics.params {
+ match param.kind {
+ ast::GenericParamKind::Lifetime { .. } => {}
+ ast::GenericParamKind::Type { ref default, .. } => {
+ for bound in ¶m.bounds {
+ if let ast::GenericBound::Trait(ref trait_ref, _) = *bound {
+ self.process_path(trait_ref.trait_ref.ref_id, &trait_ref.trait_ref.path)
+ }
+ }
+ if let Some(ref ty) = default {
+ self.visit_ty(&ty);
}
- }
- if let Some(ref ty) = default {
- self.visit_ty(&ty);
}
}
- });
+ }
}
fn visit_ty(&mut self, t: &'l ast::Ty) {
pub fn get_expr_data(&self, expr: &ast::Expr) -> Option<Data> {
let hir_node = self.tcx.hir.expect_expr(expr.id);
let ty = self.tables.expr_ty_adjusted_opt(&hir_node);
- if ty.is_none() || ty.unwrap().sty == ty::TyError {
+ if ty.is_none() || ty.unwrap().sty == ty::Error {
return None;
}
match expr.node {
}
};
match self.tables.expr_ty_adjusted(&hir_node).sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
let variant = &def.non_enum_variant();
let index = self.tcx.find_field_index(ident, variant).unwrap();
let sub_span = self.span_utils.span_for_last_ident(expr.span);
ref_id: id_from_def_id(variant.fields[index].did),
}));
}
- ty::TyTuple(..) => None,
+ ty::Tuple(..) => None,
_ => {
debug!("Expected struct or union type, found {:?}", ty);
None
}
ast::ExprKind::Struct(ref path, ..) => {
match self.tables.expr_ty_adjusted(&hir_node).sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
let sub_span = self.span_utils.span_for_last_ident(path.span);
filter!(self.span_utils, sub_span, path.span, None);
let span = self.span_from_span(sub_span.unwrap());
hir::QPath::Resolved(_, ref path) => path.def,
hir::QPath::TypeRelative(..) => {
let ty = hir_ty_to_ty(self.tcx, ty);
- if let ty::TyProjection(proj) = ty.sty {
+ if let ty::Projection(proj) = ty.sty {
return HirDef::AssociatedTy(proj.item_def_id);
}
HirDef::Err
HirDef::Union(def_id) |
HirDef::Enum(def_id) |
HirDef::TyAlias(def_id) |
- HirDef::TyForeign(def_id) |
+ HirDef::ForeignTy(def_id) |
HirDef::TraitAlias(def_id) |
HirDef::AssociatedExistential(def_id) |
HirDef::AssociatedTy(def_id) |
.iter()
.any(|ct| *ct == CrateType::Executable);
let mut out_name = if executable {
- "".to_owned()
+ String::new()
} else {
"lib".to_owned()
};
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "fuchsia".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
abi_blacklist: super::arm_base::abi_blacklist(),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "hermit".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
arch: "arm".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
arch: "arm".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
arch: "asmjs".to_string(),
data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "macos".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "dragonfly".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "haiku".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
post_link_args: LinkArgs::new(),
asm_args: Vec::new(),
cpu: "generic".to_string(),
- features: "".to_string(),
+ features: String::new(),
dynamic_linking: false,
only_cdylib: false,
executables: false,
function_sections: true,
dll_prefix: "lib".to_string(),
dll_suffix: ".so".to_string(),
- exe_suffix: "".to_string(),
+ exe_suffix: String::new(),
staticlib_prefix: "lib".to_string(),
staticlib_suffix: ".a".to_string(),
target_family: None,
data_layout: "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16".to_string(),
arch: "msp430".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
arch: "powerpc".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
arch: "riscv32".to_string(),
linker_flavor: LinkerFlavor::Ld,
data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
arch: "sparc64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
// just be confusing.
arch: "sparc64".to_string(),
target_os: "solaris".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "sun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
arch: "wasm32".to_string(),
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
arch: "wasm32".to_string(),
// relatively self-explanatory!
exe_suffix: ".wasm".to_string(),
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".wasm".to_string(),
linker_is_gnu: false,
// This is basically guaranteed to change in the future, don't rely on
// this. Use `not(target_os = "emscripten")` for now.
target_os: "unknown".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
arch: "wasm32".to_string(),
linker: Some("gcc".to_string()),
dynamic_linking: true,
executables: true,
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
- staticlib_prefix: "".to_string(),
+ staticlib_prefix: String::new(),
staticlib_suffix: ".lib".to_string(),
no_default_libraries: true,
target_family: Some("windows".to_string()),
function_sections: true,
dynamic_linking: true,
executables: true,
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
- staticlib_prefix: "".to_string(),
+ staticlib_prefix: String::new(),
staticlib_suffix: ".lib".to_string(),
target_family: Some("windows".to_string()),
is_like_windows: true,
data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "macos".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "fuchsia".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "rumprun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "solaris".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "sun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "bitrig".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "dragonfly".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "haiku".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "hermit".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "redox".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
chalk-engine = { version = "0.7.0", default-features=false }
+smallvec = { version = "0.6.5", features = ["union"] }
use rustc::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use rustc::ty::subst::Kind;
use rustc::ty::{self, TyCtxt};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::fmt::{self, Debug};
use std::marker::PhantomData;
// _ => false,
// },
// Kind::Type(ty) => match ty.sty {
-// ty::TyInfer(ty::InferTy::CanonicalTy(cvar1)) => cvar == cvar1,
+// ty::Infer(ty::InferTy::CanonicalTy(cvar1)) => cvar == cvar1,
// _ => false,
// },
// })
// into the types of its fields `(B, Vec<A>)`. These will get
// pushed onto the stack. Eventually, expanding `Vec<A>` will
// lead to us trying to push `A` a second time -- to prevent
- // infinite recusion, we notice that `A` was already pushed
+ // infinite recursion, we notice that `A` was already pushed
// once and stop.
let mut ty_stack = vec![(for_ty, 0)];
match ty.sty {
// All parameters live for the duration of the
// function.
- ty::TyParam(..) => {}
+ ty::Param(..) => {}
// A projection that we couldn't resolve - it
// might have a destructor.
- ty::TyProjection(..) | ty::TyAnon(..) => {
+ ty::Projection(..) | ty::Anon(..) => {
result.kinds.push(ty.into());
}
}
let result = match ty.sty {
- ty::TyBool
- | ty::TyChar
- | ty::TyInt(_)
- | ty::TyUint(_)
- | ty::TyFloat(_)
- | ty::TyStr
- | ty::TyNever
- | ty::TyForeign(..)
- | ty::TyRawPtr(..)
- | ty::TyRef(..)
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyGeneratorWitness(..) => {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::GeneratorWitness(..) => {
// these types never have a destructor
Ok(DtorckConstraint::empty())
}
- ty::TyArray(ety, _) | ty::TySlice(ety) => {
+ ty::Array(ety, _) | ty::Slice(ety) => {
// single-element containers, behave like their element
dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ety)
}
- ty::TyTuple(tys) => tys
+ ty::Tuple(tys) => tys
.iter()
.map(|ty| dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty))
.collect(),
- ty::TyClosure(def_id, substs) => substs
+ ty::Closure(def_id, substs) => substs
.upvar_tys(def_id, tcx)
.map(|ty| dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty))
.collect(),
- ty::TyGenerator(def_id, substs, _movability) => {
+ ty::Generator(def_id, substs, _movability) => {
// rust-lang/rust#49918: types can be constructed, stored
// in the interior, and sit idle when generator yields
// (and is subsequently dropped).
// its interior).
//
// However, the interior's representation uses things like
- // TyGeneratorWitness that explicitly assume they are not
+ // GeneratorWitness that explicitly assume they are not
// traversed in such a manner. So instead, we will
// simplify things for now by treating all generators as
// if they were like trait objects, where its upvars must
Ok(constraint)
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let DtorckConstraint {
dtorck_types,
outlives,
// Objects must be alive in order for their destructor
// to be called.
- ty::TyDynamic(..) => Ok(DtorckConstraint {
+ ty::Dynamic(..) => Ok(DtorckConstraint {
outlives: vec![ty.into()],
dtorck_types: vec![],
overflows: vec![],
}),
// Types that can't be resolved. Pass them forward.
- ty::TyProjection(..) | ty::TyAnon(..) | ty::TyParam(..) => Ok(DtorckConstraint {
+ ty::Projection(..) | ty::Anon(..) | ty::Param(..) => Ok(DtorckConstraint {
outlives: vec![],
dtorck_types: vec![ty],
overflows: vec![],
}),
- ty::TyInfer(..) | ty::TyError => {
+ ty::Infer(..) | ty::Error => {
// By the time this code runs, all type variables ought to
// be fully resolved.
Err(NoSolution)
extern crate rustc_data_structures;
extern crate syntax;
extern crate syntax_pos;
+extern crate smallvec;
mod chalk_context;
mod dropck_outlives;
WhereClause,
};
use rustc::ty::query::Providers;
-use rustc::ty::{self, Slice, TyCtxt};
+use rustc::ty::{self, List, TyCtxt};
use rustc_data_structures::fx::FxHashSet;
use std::mem;
use syntax::ast;
DefPathData::AssocTypeInImpl(..) => program_clauses_for_associated_type_value(tcx, def_id),
DefPathData::AssocTypeInTrait(..) => program_clauses_for_associated_type_def(tcx, def_id),
DefPathData::TypeNs(..) => program_clauses_for_type_def(tcx, def_id),
- _ => Slice::empty(),
+ _ => List::empty(),
}
}
fn program_clauses_for_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Clauses<'tcx> {
if let ImplPolarity::Negative = tcx.impl_polarity(def_id) {
- return Slice::empty();
+ return List::empty();
}
// Rule Implemented-From-Impl (see rustc guide)
//! is parameterized by an instance of `AstConv`.
use rustc_data_structures::accumulate_vec::AccumulateVec;
-use hir::{self, GenericArg};
+use rustc_data_structures::array_vec::ArrayVec;
+use hir::{self, GenericArg, GenericArgs};
use hir::def::Def;
use hir::def_id::DefId;
+use hir::HirVec;
use middle::resolve_lifetime as rl;
use namespace::Namespace;
-use rustc::ty::subst::{Subst, Substs};
+use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, ToPredicate, TypeFoldable};
-use rustc::ty::GenericParamDefKind;
+use rustc::ty::{GenericParamDef, GenericParamDefKind};
use rustc::ty::wf::object_region_bounds;
use rustc_target::spec::abi;
use std::slice;
use require_c_abi_if_variadic;
use util::common::ErrorReported;
use util::nodemap::{FxHashSet, FxHashMap};
-use errors::FatalError;
+use errors::{FatalError, DiagnosticId};
+use lint;
use std::iter;
use syntax::ast;
+use syntax::ptr::P;
use syntax::feature_gate::{GateIssue, emit_feature_err};
-use syntax_pos::Span;
+use syntax_pos::{Span, MultiSpan};
pub trait AstConv<'gcx, 'tcx> {
fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>;
span: Span,
}
-struct ParamRange {
- required: usize,
- accepted: usize
+#[derive(PartialEq)]
+enum GenericArgPosition {
+ Type,
+ Value, // e.g. functions
+ MethodCall,
+}
+
+// FIXME(#53525): these error codes should all be unified.
+struct GenericArgMismatchErrorCode {
+ lifetimes: (&'static str, &'static str),
+ types: (&'static str, &'static str),
}
/// Dummy type used for the `Self` of a `TraitRef` created for converting
/// a trait object, and which gets removed in `ExistentialTraitRef`.
/// This type must not appear anywhere in other converted types.
-const TRAIT_OBJECT_DUMMY_SELF: ty::TypeVariants<'static> = ty::TyInfer(ty::FreshTy(0));
+const TRAIT_OBJECT_DUMMY_SELF: ty::TyKind<'static> = ty::Infer(ty::FreshTy(0));
impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o {
pub fn ast_region_to_region(&self,
-> &'tcx Substs<'tcx>
{
- let (substs, assoc_bindings) =
- item_segment.with_generic_args(|generic_args| {
- self.create_substs_for_ast_path(
- span,
- def_id,
- generic_args,
- item_segment.infer_types,
- None)
- });
+ let (substs, assoc_bindings) = item_segment.with_generic_args(|generic_args| {
+ self.create_substs_for_ast_path(
+ span,
+ def_id,
+ generic_args,
+ item_segment.infer_types,
+ None,
+ )
+ });
- assoc_bindings.first().map(|b| self.prohibit_projection(b.span));
+ assoc_bindings.first().map(|b| Self::prohibit_assoc_ty_binding(self.tcx(), b.span));
substs
}
+ /// Report error if there is an explicit type parameter when using `impl Trait`.
+ fn check_impl_trait(
+ tcx: TyCtxt,
+ span: Span,
+ seg: &hir::PathSegment,
+ generics: &ty::Generics,
+ ) -> bool {
+ let explicit = !seg.infer_types;
+ let impl_trait = generics.params.iter().any(|param| match param.kind {
+ ty::GenericParamDefKind::Type {
+ synthetic: Some(hir::SyntheticTyParamKind::ImplTrait), ..
+ } => true,
+ _ => false,
+ });
+
+ if explicit && impl_trait {
+ let mut err = struct_span_err! {
+ tcx.sess,
+ span,
+ E0632,
+ "cannot provide explicit type parameters when `impl Trait` is \
+ used in argument position."
+ };
+
+ err.emit();
+ }
+
+ impl_trait
+ }
+
+ /// Check that the correct number of generic arguments have been provided.
+ /// Used specifically for function calls.
+ pub fn check_generic_arg_count_for_call(
+ tcx: TyCtxt,
+ span: Span,
+ def: &ty::Generics,
+ seg: &hir::PathSegment,
+ is_method_call: bool,
+ ) -> bool {
+ let empty_args = P(hir::GenericArgs {
+ args: HirVec::new(), bindings: HirVec::new(), parenthesized: false,
+ });
+ let suppress_mismatch = Self::check_impl_trait(tcx, span, seg, &def);
+ Self::check_generic_arg_count(
+ tcx,
+ span,
+ def,
+ if let Some(ref args) = seg.args {
+ args
+ } else {
+ &empty_args
+ },
+ if is_method_call {
+ GenericArgPosition::MethodCall
+ } else {
+ GenericArgPosition::Value
+ },
+ def.parent.is_none() && def.has_self, // `has_self`
+ seg.infer_types || suppress_mismatch, // `infer_types`
+ GenericArgMismatchErrorCode {
+ lifetimes: ("E0090", "E0088"),
+ types: ("E0089", "E0087"),
+ },
+ )
+ }
+
+ /// Check that the correct number of generic arguments have been provided.
+ /// This is used both for datatypes and function calls.
+ fn check_generic_arg_count(
+ tcx: TyCtxt,
+ span: Span,
+ def: &ty::Generics,
+ args: &hir::GenericArgs,
+ position: GenericArgPosition,
+ has_self: bool,
+ infer_types: bool,
+ error_codes: GenericArgMismatchErrorCode,
+ ) -> bool {
+ // At this stage we are guaranteed that the generic arguments are in the correct order, e.g.
+ // that lifetimes will precede types. So it suffices to check the number of each generic
+ // arguments in order to validate them with respect to the generic parameters.
+ let param_counts = def.own_counts();
+ let arg_counts = args.own_counts();
+ let infer_lifetimes = position != GenericArgPosition::Type && arg_counts.lifetimes == 0;
+
+ let mut defaults: ty::GenericParamCount = Default::default();
+ for param in &def.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {}
+ GenericParamDefKind::Type { has_default, .. } => {
+ defaults.types += has_default as usize
+ }
+ };
+ }
+
+ if position != GenericArgPosition::Type && !args.bindings.is_empty() {
+ AstConv::prohibit_assoc_ty_binding(tcx, args.bindings[0].span);
+ }
+
+ // Prohibit explicit lifetime arguments if late-bound lifetime parameters are present.
+ if !infer_lifetimes {
+ if let Some(span_late) = def.has_late_bound_regions {
+ let msg = "cannot specify lifetime arguments explicitly \
+ if late bound lifetime parameters are present";
+ let note = "the late bound lifetime parameter is introduced here";
+ let span = args.args[0].span();
+ if position == GenericArgPosition::Value
+ && arg_counts.lifetimes != param_counts.lifetimes {
+ let mut err = tcx.sess.struct_span_err(span, msg);
+ err.span_note(span_late, note);
+ err.emit();
+ return true;
+ } else {
+ let mut multispan = MultiSpan::from_span(span);
+ multispan.push_span_label(span_late, note.to_string());
+ tcx.lint_node(lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS,
+ args.args[0].id(), multispan, msg);
+ return false;
+ }
+ }
+ }
+
+ let check_kind_count = |error_code: (&str, &str),
+ kind,
+ required,
+ permitted,
+ provided,
+ offset| {
+ // We enforce the following: `required` <= `provided` <= `permitted`.
+ // For kinds without defaults (i.e. lifetimes), `required == permitted`.
+ // For other kinds (i.e. types), `permitted` may be greater than `required`.
+ if required <= provided && provided <= permitted {
+ return false;
+ }
+
+ // Unfortunately lifetime and type parameter mismatches are typically styled
+ // differently in diagnostics, which means we have a few cases to consider here.
+ let (bound, quantifier) = if required != permitted {
+ if provided < required {
+ (required, "at least ")
+ } else { // provided > permitted
+ (permitted, "at most ")
+ }
+ } else {
+ (required, "")
+ };
+
+ let mut span = span;
+ let label = if required == permitted && provided > permitted {
+ let diff = provided - permitted;
+ if diff == 1 {
+ // In the case when the user has provided too many arguments,
+ // we want to point to the first unexpected argument.
+ let first_superfluous_arg: &GenericArg = &args.args[offset + permitted];
+ span = first_superfluous_arg.span();
+ }
+ format!(
+ "{}unexpected {} argument{}",
+ if diff != 1 { format!("{} ", diff) } else { String::new() },
+ kind,
+ if diff != 1 { "s" } else { "" },
+ )
+ } else {
+ format!(
+ "expected {}{} {} argument{}",
+ quantifier,
+ bound,
+ kind,
+ if bound != 1 { "s" } else { "" },
+ )
+ };
+
+ tcx.sess.struct_span_err_with_code(
+ span,
+ &format!(
+ "wrong number of {} arguments: expected {}{}, found {}",
+ kind,
+ quantifier,
+ bound,
+ provided,
+ ),
+ DiagnosticId::Error({
+ if provided <= permitted {
+ error_code.0
+ } else {
+ error_code.1
+ }
+ }.into())
+ ).span_label(span, label).emit();
+
+ provided > required // `suppress_error`
+ };
+
+ if !infer_lifetimes || arg_counts.lifetimes > param_counts.lifetimes {
+ check_kind_count(
+ error_codes.lifetimes,
+ "lifetime",
+ param_counts.lifetimes,
+ param_counts.lifetimes,
+ arg_counts.lifetimes,
+ 0,
+ );
+ }
+ if !infer_types
+ || arg_counts.types > param_counts.types - defaults.types - has_self as usize {
+ check_kind_count(
+ error_codes.types,
+ "type",
+ param_counts.types - defaults.types - has_self as usize,
+ param_counts.types - has_self as usize,
+ arg_counts.types,
+ arg_counts.lifetimes,
+ )
+ } else {
+ false
+ }
+ }
+
+ /// Creates the relevant generic argument substitutions
+ /// corresponding to a set of generic parameters.
+ pub fn create_substs_for_generic_args<'a, 'b, A, P, I>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ def_id: DefId,
+ parent_substs: &[Kind<'tcx>],
+ has_self: bool,
+ self_ty: Option<Ty<'tcx>>,
+ args_for_def_id: A,
+ provided_kind: P,
+ inferred_kind: I,
+ ) -> &'tcx Substs<'tcx> where
+ A: Fn(DefId) -> (Option<&'b GenericArgs>, bool),
+ P: Fn(&GenericParamDef, &GenericArg) -> Kind<'tcx>,
+ I: Fn(Option<&[Kind<'tcx>]>, &GenericParamDef, bool) -> Kind<'tcx>
+ {
+ // Collect the segments of the path: we need to substitute arguments
+ // for parameters throughout the entire path (wherever there are
+ // generic parameters).
+ let mut parent_defs = tcx.generics_of(def_id);
+ let count = parent_defs.count();
+ let mut stack = vec![(def_id, parent_defs)];
+ while let Some(def_id) = parent_defs.parent {
+ parent_defs = tcx.generics_of(def_id);
+ stack.push((def_id, parent_defs));
+ }
+
+ // We manually build up the substitution, rather than using convenience
+ // methods in subst.rs so that we can iterate over the arguments and
+ // parameters in lock-step linearly, rather than trying to match each pair.
+ let mut substs: AccumulateVec<[Kind<'tcx>; 8]> = if count <= 8 {
+ AccumulateVec::Array(ArrayVec::new())
+ } else {
+ AccumulateVec::Heap(Vec::with_capacity(count))
+ };
+
+ fn push_kind<'tcx>(substs: &mut AccumulateVec<[Kind<'tcx>; 8]>, kind: Kind<'tcx>) {
+ match substs {
+ AccumulateVec::Array(ref mut arr) => arr.push(kind),
+ AccumulateVec::Heap(ref mut vec) => vec.push(kind),
+ }
+ }
+
+ // Iterate over each segment of the path.
+ while let Some((def_id, defs)) = stack.pop() {
+ let mut params = defs.params.iter().peekable();
+
+ // If we have already computed substitutions for parents, we can use those directly.
+ while let Some(¶m) = params.peek() {
+ if let Some(&kind) = parent_substs.get(param.index as usize) {
+ push_kind(&mut substs, kind);
+ params.next();
+ } else {
+ break;
+ }
+ }
+
+ // (Unless it's been handled in `parent_substs`) `Self` is handled first.
+ if has_self {
+ if let Some(¶m) = params.peek() {
+ if param.index == 0 {
+ if let GenericParamDefKind::Type { .. } = param.kind {
+ push_kind(&mut substs, self_ty.map(|ty| ty.into())
+ .unwrap_or_else(|| inferred_kind(None, param, true)));
+ params.next();
+ }
+ }
+ }
+ }
+
+ // Check whether this segment takes generic arguments and the user has provided any.
+ let (generic_args, infer_types) = args_for_def_id(def_id);
+
+ let mut args = generic_args.iter().flat_map(|generic_args| generic_args.args.iter())
+ .peekable();
+
+ loop {
+ // We're going to iterate through the generic arguments that the user
+ // provided, matching them with the generic parameters we expect.
+ // Mismatches can occur as a result of elided lifetimes, or for malformed
+ // input. We try to handle both sensibly.
+ match (args.peek(), params.peek()) {
+ (Some(&arg), Some(¶m)) => {
+ match (arg, ¶m.kind) {
+ (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime)
+ | (GenericArg::Type(_), GenericParamDefKind::Type { .. }) => {
+ push_kind(&mut substs, provided_kind(param, arg));
+ args.next();
+ params.next();
+ }
+ (GenericArg::Lifetime(_), GenericParamDefKind::Type { .. }) => {
+ // We expected a type argument, but got a lifetime
+ // argument. This is an error, but we need to handle it
+ // gracefully so we can report sensible errors. In this
+ // case, we're simply going to infer this argument.
+ args.next();
+ }
+ (GenericArg::Type(_), GenericParamDefKind::Lifetime) => {
+ // We expected a lifetime argument, but got a type
+ // argument. That means we're inferring the lifetimes.
+ push_kind(&mut substs, inferred_kind(None, param, infer_types));
+ params.next();
+ }
+ }
+ }
+ (Some(_), None) => {
+ // We should never be able to reach this point with well-formed input.
+ // Getting to this point means the user supplied more arguments than
+ // there are parameters.
+ args.next();
+ }
+ (None, Some(¶m)) => {
+ // If there are fewer arguments than parameters, it means
+ // we're inferring the remaining arguments.
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Type { .. } => {
+ let kind = inferred_kind(Some(&substs), param, infer_types);
+ push_kind(&mut substs, kind);
+ }
+ }
+ args.next();
+ params.next();
+ }
+ (None, None) => break,
+ }
+ }
+ }
+
+ tcx.intern_substs(&substs)
+ }
+
/// Given the type/region arguments provided to some path (along with
/// an implicit Self, if this is a trait reference) returns the complete
/// set of substitutions. This may involve applying defaulted type parameters.
self_ty: Option<Ty<'tcx>>)
-> (&'tcx Substs<'tcx>, Vec<ConvertedBinding<'tcx>>)
{
- let tcx = self.tcx();
-
- debug!("create_substs_for_ast_path(def_id={:?}, self_ty={:?}, \
- generic_args={:?})",
- def_id, self_ty, generic_args);
-
// If the type is parameterized by this region, then replace this
// region with the current anon region binding (in other words,
// whatever & would get replaced with).
+ debug!("create_substs_for_ast_path(def_id={:?}, self_ty={:?}, \
+ generic_args={:?})",
+ def_id, self_ty, generic_args);
- // FIXME(varkor): Separating out the parameters is messy.
- let lifetimes: Vec<_> = generic_args.args.iter().filter_map(|arg| match arg {
- GenericArg::Lifetime(lt) => Some(lt),
- _ => None,
- }).collect();
- let types: Vec<_> = generic_args.args.iter().filter_map(|arg| match arg {
- GenericArg::Type(ty) => Some(ty),
- _ => None,
- }).collect();
- let lt_provided = lifetimes.len();
- let ty_provided = types.len();
-
- let decl_generics = tcx.generics_of(def_id);
- let mut lt_accepted = 0;
- let mut ty_params = ParamRange { required: 0, accepted: 0 };
- for param in &decl_generics.params {
- match param.kind {
- GenericParamDefKind::Lifetime => {
- lt_accepted += 1;
- }
- GenericParamDefKind::Type { has_default, .. } => {
- ty_params.accepted += 1;
- if !has_default {
- ty_params.required += 1;
- }
- }
- };
- }
- if self_ty.is_some() {
- ty_params.required -= 1;
- ty_params.accepted -= 1;
- }
-
- if lt_accepted != lt_provided {
- report_lifetime_number_error(tcx, span, lt_provided, lt_accepted);
- }
+ let tcx = self.tcx();
+ let generic_params = tcx.generics_of(def_id);
// If a self-type was declared, one should be provided.
- assert_eq!(decl_generics.has_self, self_ty.is_some());
+ assert_eq!(generic_params.has_self, self_ty.is_some());
- // Check the number of type parameters supplied by the user.
- if !infer_types || ty_provided > ty_params.required {
- check_type_argument_count(tcx, span, ty_provided, ty_params);
- }
+ let has_self = generic_params.has_self;
+ Self::check_generic_arg_count(
+ self.tcx(),
+ span,
+ &generic_params,
+ &generic_args,
+ GenericArgPosition::Type,
+ has_self,
+ infer_types,
+ GenericArgMismatchErrorCode {
+ lifetimes: ("E0107", "E0107"),
+ types: ("E0243", "E0244"),
+ },
+ );
let is_object = self_ty.map_or(false, |ty| ty.sty == TRAIT_OBJECT_DUMMY_SELF);
let default_needs_object_self = |param: &ty::GenericParamDef| {
false
};
- let own_self = self_ty.is_some() as usize;
- let substs = Substs::for_item(tcx, def_id, |param, substs| {
- match param.kind {
- GenericParamDefKind::Lifetime => {
- let i = param.index as usize - own_self;
- if let Some(lt) = lifetimes.get(i) {
- self.ast_region_to_region(lt, Some(param)).into()
- } else {
- tcx.types.re_static.into()
+ let substs = Self::create_substs_for_generic_args(
+ self.tcx(),
+ def_id,
+ &[][..],
+ self_ty.is_some(),
+ self_ty,
+ // Provide the generic args, and whether types should be inferred.
+ |_| (Some(generic_args), infer_types),
+ // Provide substitutions for parameters for which (valid) arguments have been provided.
+ |param, arg| {
+ match (¶m.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ self.ast_region_to_region(<, Some(param)).into()
}
- }
- GenericParamDefKind::Type { has_default, .. } => {
- let i = param.index as usize;
-
- // Handle Self first, so we can adjust the index to match the AST.
- if let (0, Some(ty)) = (i, self_ty) {
- return ty.into();
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.ast_ty_to_ty(&ty).into()
}
-
- let i = i - (lt_accepted + own_self);
- if i < ty_provided {
- // A provided type parameter.
- self.ast_ty_to_ty(&types[i]).into()
- } else if infer_types {
- // No type parameters were provided, we can infer all.
- if !default_needs_object_self(param) {
- self.ty_infer_for_def(param, span).into()
+ _ => unreachable!(),
+ }
+ },
+ // Provide substitutions for parameters for which arguments are inferred.
+ |substs, param, infer_types| {
+ match param.kind {
+ GenericParamDefKind::Lifetime => tcx.types.re_static.into(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_types && has_default {
+ // No type parameter provided, but a default exists.
+
+ // If we are converting an object type, then the
+ // `Self` parameter is unknown. However, some of the
+ // other type parameters may reference `Self` in their
+ // defaults. This will lead to an ICE if we are not
+ // careful!
+ if default_needs_object_self(param) {
+ struct_span_err!(tcx.sess, span, E0393,
+ "the type parameter `{}` must be explicitly \
+ specified",
+ param.name)
+ .span_label(span,
+ format!("missing reference to `{}`", param.name))
+ .note(&format!("because of the default `Self` reference, \
+ type parameters must be specified on object \
+ types"))
+ .emit();
+ tcx.types.err.into()
+ } else {
+ // This is a default type parameter.
+ self.normalize_ty(
+ span,
+ tcx.at(span).type_of(param.def_id)
+ .subst_spanned(tcx, substs.unwrap(), Some(span))
+ ).into()
+ }
+ } else if infer_types {
+ // No type parameters were provided, we can infer all.
+ if !default_needs_object_self(param) {
+ self.ty_infer_for_def(param, span).into()
+ } else {
+ self.ty_infer(span).into()
+ }
} else {
- self.ty_infer(span).into()
- }
- } else if has_default {
- // No type parameter provided, but a default exists.
-
- // If we are converting an object type, then the
- // `Self` parameter is unknown. However, some of the
- // other type parameters may reference `Self` in their
- // defaults. This will lead to an ICE if we are not
- // careful!
- if default_needs_object_self(param) {
- struct_span_err!(tcx.sess, span, E0393,
- "the type parameter `{}` must be explicitly \
- specified",
- param.name)
- .span_label(span,
- format!("missing reference to `{}`", param.name))
- .note(&format!("because of the default `Self` reference, \
- type parameters must be specified on object \
- types"))
- .emit();
+ // We've already errored above about the mismatch.
tcx.types.err.into()
- } else {
- // This is a default type parameter.
- self.normalize_ty(
- span,
- tcx.at(span).type_of(param.def_id)
- .subst_spanned(tcx, substs, Some(span))
- ).into()
}
- } else {
- // We've already errored above about the mismatch.
- tcx.types.err.into()
}
}
- }
- });
+ },
+ );
let assoc_bindings = generic_args.bindings.iter().map(|binding| {
ConvertedBinding {
}
}).collect();
- debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}",
- decl_generics, self_ty, substs);
+ debug!("create_substs_for_ast_path(generic_params={:?}, self_ty={:?}) -> {:?}",
+ generic_params, self_ty, substs);
(substs, assoc_bindings)
}
trait_def_id,
self_ty,
trait_segment);
- assoc_bindings.first().map(|b| self.prohibit_projection(b.span));
+ assoc_bindings.first().map(|b| AstConv::prohibit_assoc_ty_binding(self.tcx(), b.span));
ty::TraitRef::new(trait_def_id, substs)
}
Err(ErrorReported) => return (tcx.types.err, Def::Err),
}
}
- (&ty::TyParam(_), Def::SelfTy(Some(param_did), None)) |
- (&ty::TyParam(_), Def::TyParam(param_did)) => {
+ (&ty::Param(_), Def::SelfTy(Some(param_did), None)) |
+ (&ty::Param(_), Def::TyParam(param_did)) => {
match self.find_bound_for_assoc_item(param_did, assoc_name, span) {
Ok(bound) => bound,
Err(ErrorReported) => return (tcx.types.err, Def::Err),
self.normalize_ty(span, tcx.mk_projection(item_def_id, trait_ref.substs))
}
- pub fn prohibit_generics(&self, segments: &[hir::PathSegment]) {
+ pub fn prohibit_generics<'a, T: IntoIterator<Item = &'a hir::PathSegment>>(&self, segments: T) {
for segment in segments {
segment.with_generic_args(|generic_args| {
let (mut err_for_lt, mut err_for_ty) = (false, false);
}
}
for binding in &generic_args.bindings {
- self.prohibit_projection(binding.span);
+ Self::prohibit_assoc_ty_binding(self.tcx(), binding.span);
break;
}
})
}
}
- pub fn prohibit_projection(&self, span: Span) {
- let mut err = struct_span_err!(self.tcx().sess, span, E0229,
+ pub fn prohibit_assoc_ty_binding(tcx: TyCtxt, span: Span) {
+ let mut err = struct_span_err!(tcx.sess, span, E0229,
"associated type bindings are not allowed here");
err.span_label(span, "associated type not allowed here").emit();
}
)
}
Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) |
- Def::Union(did) | Def::TyForeign(did) => {
+ Def::Union(did) | Def::ForeignTy(did) => {
assert_eq!(opt_self_ty, None);
self.prohibit_generics(path.segments.split_last().unwrap().1);
self.ast_path_to_ty(span, did, path.segments.last().unwrap())
assert_eq!(opt_self_ty, None);
self.prohibit_generics(&path.segments);
match prim_ty {
- hir::TyBool => tcx.types.bool,
- hir::TyChar => tcx.types.char,
- hir::TyInt(it) => tcx.mk_mach_int(it),
- hir::TyUint(uit) => tcx.mk_mach_uint(uit),
- hir::TyFloat(ft) => tcx.mk_mach_float(ft),
- hir::TyStr => tcx.mk_str()
+ hir::Bool => tcx.types.bool,
+ hir::Char => tcx.types.char,
+ hir::Int(it) => tcx.mk_mach_int(it),
+ hir::Uint(uit) => tcx.mk_mach_uint(uit),
+ hir::Float(ft) => tcx.mk_mach_float(ft),
+ hir::Str => tcx.mk_str()
}
}
Def::Err => {
}
hir::TyKind::Rptr(ref region, ref mt) => {
let r = self.ast_region_to_region(region, None);
- debug!("TyRef r={:?}", r);
+ debug!("Ref r={:?}", r);
let t = self.ast_ty_to_ty(&mt.ty);
tcx.mk_ref(r, ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
}
let length_def_id = tcx.hir.local_def_id(length.id);
let substs = Substs::identity_for_item(tcx, length_def_id);
let length = ty::Const::unevaluated(tcx, length_def_id, substs, tcx.types.usize);
- let array_ty = tcx.mk_ty(ty::TyArray(self.ast_ty_to_ty(&ty), length));
+ let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(&ty), length));
self.normalize_ty(ast_ty.span, array_ty)
}
hir::TyKind::Typeof(ref _e) => {
tcx.types.err
}
hir::TyKind::Infer => {
- // TyInfer also appears as the type of arguments or return
+ // Infer also appears as the type of arguments or return
// values in a ExprKind::Closure, or as
// the type of local variables. Both of these cases are
// handled specially and will not descend into this routine.
/// we return `None`.
fn compute_object_lifetime_bound(&self,
span: Span,
- existential_predicates: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>)
+ existential_predicates: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>)
-> Option<ty::Region<'tcx>> // if None, use the default
{
let tcx = self.tcx();
(auto_traits, trait_bounds)
}
-fn check_type_argument_count(tcx: TyCtxt,
- span: Span,
- supplied: usize,
- ty_params: ParamRange)
-{
- let (required, accepted) = (ty_params.required, ty_params.accepted);
- if supplied < required {
- let expected = if required < accepted {
- "expected at least"
- } else {
- "expected"
- };
- let arguments_plural = if required == 1 { "" } else { "s" };
-
- struct_span_err!(tcx.sess, span, E0243,
- "wrong number of type arguments: {} {}, found {}",
- expected, required, supplied)
- .span_label(span,
- format!("{} {} type argument{}",
- expected,
- required,
- arguments_plural))
- .emit();
- } else if supplied > accepted {
- let expected = if required < accepted {
- format!("expected at most {}", accepted)
- } else {
- format!("expected {}", accepted)
- };
- let arguments_plural = if accepted == 1 { "" } else { "s" };
-
- struct_span_err!(tcx.sess, span, E0244,
- "wrong number of type arguments: {}, found {}",
- expected, supplied)
- .span_label(
- span,
- format!("{} type argument{}",
- if accepted == 0 { "expected no" } else { &expected },
- arguments_plural)
- )
- .emit();
- }
-}
-
-fn report_lifetime_number_error(tcx: TyCtxt, span: Span, number: usize, expected: usize) {
- let label = if number < expected {
- if expected == 1 {
- format!("expected {} lifetime parameter", expected)
- } else {
- format!("expected {} lifetime parameters", expected)
- }
- } else {
- let additional = number - expected;
- if additional == 1 {
- "unexpected lifetime parameter".to_string()
- } else {
- format!("{} unexpected lifetime parameters", additional)
- }
- };
- struct_span_err!(tcx.sess, span, E0107,
- "wrong number of lifetime parameters: expected {}, found {}",
- expected, number)
- .span_label(span, label)
- .emit();
-}
-
// A helper struct for conveniently grouping a set of bounds which we pass to
// and return from functions in multiple places.
#[derive(PartialEq, Eq, Clone, Debug)]
PatKind::Lit(ref lt) => {
let ty = self.check_expr(lt);
match ty.sty {
- ty::TypeVariants::TyRef(..) => false,
+ ty::Ref(..) => false,
_ => true,
}
}
// Peel off as many `&` or `&mut` from the discriminant as possible. For example,
// for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches
- // the `Some(5)` which is not of type TyRef.
+ // the `Some(5)` which is not of type Ref.
//
// For each ampersand peeled off, update the binding mode and push the original
// type into the adjustments vector.
expected = loop {
debug!("inspecting {:?} with type {:?}", exp_ty, exp_ty.sty);
match exp_ty.sty {
- ty::TypeVariants::TyRef(_, inner_ty, inner_mutability) => {
- debug!("current discriminant is TyRef, inserting implicit deref");
+ ty::Ref(_, inner_ty, inner_mutability) => {
+ debug!("current discriminant is Ref, inserting implicit deref");
// Preserve the reference type. We'll need it later during HAIR lowering.
pat_adjustments.push(exp_ty);
if let hir::ExprKind::Lit(ref lt) = lt.node {
if let ast::LitKind::ByteStr(_) = lt.node {
let expected_ty = self.structurally_resolved_type(pat.span, expected);
- if let ty::TyRef(_, r_ty, _) = expected_ty.sty {
- if let ty::TySlice(_) = r_ty.sty {
+ if let ty::Ref(_, r_ty, _) = expected_ty.sty {
+ if let ty::Slice(_) = r_ty.sty {
pat_ty = tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_slice(tcx.types.u8))
}
let mut expected_len = elements.len();
if ddpos.is_some() {
// Require known type only when `..` is present
- if let ty::TyTuple(ref tys) =
+ if let ty::Tuple(ref tys) =
self.structurally_resolved_type(pat.span, expected).sty {
expected_len = tys.len();
}
// from all tuple elements isn't trivial.
TypeVariableOrigin::TypeInference(pat.span)));
let element_tys = tcx.mk_type_list(element_tys_iter);
- let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys));
+ let pat_ty = tcx.mk_ty(ty::Tuple(element_tys));
self.demand_eqtype(pat.span, expected, pat_ty);
for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
self.check_pat_walk(elem, &element_tys[i], def_bm, true);
// hack detailed in (*) below.
debug!("check_pat_walk: expected={:?}", expected);
let (rptr_ty, inner_ty) = match expected.sty {
- ty::TyRef(_, r_ty, r_mutbl) if r_mutbl == mutbl => {
+ ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => {
(expected, r_ty)
}
_ => {
PatKind::Slice(ref before, ref slice, ref after) => {
let expected_ty = self.structurally_resolved_type(pat.span, expected);
let (inner_ty, slice_ty) = match expected_ty.sty {
- ty::TyArray(inner_ty, size) => {
+ ty::Array(inner_ty, size) => {
let size = size.unwrap_usize(tcx);
let min_len = before.len() as u64 + after.len() as u64;
if slice.is_none() {
(inner_ty, tcx.types.err)
}
}
- ty::TySlice(inner_ty) => (inner_ty, expected_ty),
+ ty::Slice(inner_ty) => (inner_ty, expected_ty),
_ => {
if !expected_ty.references_error() {
let mut err = struct_span_err!(
tcx.sess, pat.span, E0529,
"expected an array or slice, found `{}`",
expected_ty);
- if let ty::TyRef(_, ty, _) = expected_ty.sty {
+ if let ty::Ref(_, ty, _) = expected_ty.sty {
match ty.sty {
- ty::TyArray(..) | ty::TySlice(..) => {
+ ty::Array(..) | ty::Slice(..) => {
err.help("the semantics of slice patterns changed \
recently; see issue #23121");
}
pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool {
if let PatKind::Binding(..) = inner.node {
if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true) {
- if let ty::TyDynamic(..) = mt.ty.sty {
+ if let ty::Dynamic(..) = mt.ty.sty {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
let type_str = self.ty_to_string(expected);
if subpats.len() == variant.fields.len() ||
subpats.len() < variant.fields.len() && ddpos.is_some() {
let substs = match pat_ty.sty {
- ty::TyAdt(_, substs) => substs,
+ ty::Adt(_, substs) => substs,
ref ty => bug!("unexpected pattern type {:?}", ty),
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) {
let tcx = self.tcx;
let (substs, adt) = match adt_ty.sty {
- ty::TyAdt(adt, substs) => (substs, adt),
+ ty::Adt(adt, substs) => (substs, adt),
_ => span_bug!(span, "struct pattern is not an ADT")
};
let kind_name = adt.variant_descr();
self.fcx.try_overloaded_deref(self.span, source, needs)
.and_then(|InferOk { value: method, obligations: o }| {
obligations.extend(o);
- if let ty::TyRef(region, _, mutbl) = method.sig.output().sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.output().sty {
Some(OverloadedDeref {
region,
mutbl,
use rustc_target::spec::abi;
use syntax::ast::Ident;
use syntax_pos::Span;
+use errors::Applicability;
use rustc::hir;
// If the callee is a bare function or a closure, then we're all set.
match adjusted_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let adjustments = autoderef.adjust_steps(Needs::None);
self.apply_adjustments(callee_expr, adjustments);
return Some(CallStep::Builtin(adjusted_ty));
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
assert_eq!(def_id.krate, LOCAL_CRATE);
// Check whether this is a call to a closure where we
// over the top. The simplest fix by far is to just ignore
// this case and deref again, so we wind up with
// `FnMut::call_mut(&mut *x, ())`.
- ty::TyRef(..) if autoderef.step_count() == 0 => {
+ ty::Ref(..) if autoderef.step_count() == 0 => {
return None;
}
let method = self.register_infer_ok_obligations(ok);
let mut autoref = None;
if borrow {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
expected: Expectation<'tcx>)
-> Ty<'tcx> {
let (fn_sig, def_span) = match callee_ty.sty {
- ty::TyFnDef(def_id, _) => {
+ ty::FnDef(def_id, _) => {
(callee_ty.fn_sig(self.tcx), self.tcx.hir.span_if_local(def_id))
}
- ty::TyFnPtr(sig) => (sig, None),
+ ty::FnPtr(sig) => (sig, None),
ref t => {
let mut unit_variant = None;
- if let &ty::TyAdt(adt_def, ..) = t {
+ if let &ty::Adt(adt_def, ..) = t {
if adt_def.is_enum() {
if let hir::ExprKind::Call(ref expr, _) = call_expr.node {
unit_variant = Some(self.tcx.hir.node_to_pretty_string(expr.id))
err.span_label(call_expr.span, "not a function");
if let Some(ref path) = unit_variant {
- err.span_suggestion(call_expr.span,
- &format!("`{}` is a unit variant, you need to write it \
- without the parenthesis", path),
- path.to_string());
+ err.span_suggestion_with_applicability(
+ call_expr.span,
+ &format!("`{}` is a unit variant, you need to write it \
+ without the parenthesis", path),
+ path.to_string(),
+ Applicability::MachineApplicable
+ );
}
if let hir::ExprKind::Call(ref expr, _) = call_expr.node {
}
Ok(match t.sty {
- ty::TySlice(_) | ty::TyStr => Some(PointerKind::Length),
- ty::TyDynamic(ref tty, ..) =>
+ ty::Slice(_) | ty::Str => Some(PointerKind::Length),
+ ty::Dynamic(ref tty, ..) =>
Some(PointerKind::Vtable(tty.principal().map(|p| p.def_id()))),
- ty::TyAdt(def, substs) if def.is_struct() => {
+ ty::Adt(def, substs) if def.is_struct() => {
match def.non_enum_variant().fields.last() {
None => Some(PointerKind::Thin),
Some(f) => {
}
}
}
- ty::TyTuple(fields) => match fields.last() {
+ ty::Tuple(fields) => match fields.last() {
None => Some(PointerKind::Thin),
Some(f) => self.pointer_kind(f, span)?
},
// Pointers to foreign types are thin, despite being unsized
- ty::TyForeign(..) => Some(PointerKind::Thin),
+ ty::Foreign(..) => Some(PointerKind::Thin),
// We should really try to normalize here.
- ty::TyProjection(ref pi) => Some(PointerKind::OfProjection(pi)),
- ty::TyAnon(def_id, substs) => Some(PointerKind::OfAnon(def_id, substs)),
- ty::TyParam(ref p) => Some(PointerKind::OfParam(p)),
+ ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)),
+ ty::Anon(def_id, substs) => Some(PointerKind::OfAnon(def_id, substs)),
+ ty::Param(ref p) => Some(PointerKind::OfParam(p)),
// Insufficient type information.
- ty::TyInfer(_) => None,
+ ty::Infer(_) => None,
- ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
- ty::TyFloat(_) | ty::TyArray(..) | ty::TyGeneratorWitness(..) |
- ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyFnDef(..) |
- ty::TyFnPtr(..) | ty::TyClosure(..) | ty::TyGenerator(..) |
- ty::TyAdt(..) | ty::TyNever | ty::TyError => {
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(_) | ty::Array(..) | ty::GeneratorWitness(..) |
+ ty::RawPtr(_) | ty::Ref(..) | ty::FnDef(..) |
+ ty::FnPtr(..) | ty::Closure(..) | ty::Generator(..) |
+ ty::Adt(..) | ty::Never | ty::Error => {
self.tcx.sess.delay_span_bug(
span, &format!("`{:?}` should be sized but is not?", t));
return Err(ErrorReported);
// cases now. We do a more thorough check at the end, once
// inference is more completely known.
match cast_ty.sty {
- ty::TyDynamic(..) | ty::TySlice(..) => {
+ ty::Dynamic(..) | ty::Slice(..) => {
check.report_cast_to_unsized_type(fcx);
Err(ErrorReported)
}
fcx.resolve_type_vars_if_possible(&self.expr_ty),
tstr);
match self.expr_ty.sty {
- ty::TyRef(_, _, mt) => {
+ ty::Ref(_, _, mt) => {
let mtstr = match mt {
hir::MutMutable => "mut ",
hir::MutImmutable => "",
tstr);
}
}
- ty::TyAdt(def, ..) if def.is_box() => {
+ ty::Adt(def, ..) if def.is_box() => {
match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(self.cast_span,
(Some(t_from), Some(t_cast)) => (t_from, t_cast),
// Function item types may need to be reified before casts.
(None, Some(t_cast)) => {
- if let ty::TyFnDef(..) = self.expr_ty.sty {
+ if let ty::FnDef(..) = self.expr_ty.sty {
// Attempt a coercion to a fn pointer type.
let f = self.expr_ty.fn_sig(fcx.tcx);
let res = fcx.try_coerce(self.expr,
(RPtr(p), Int(_)) |
(RPtr(p), Float) => {
match p.ty.sty {
- ty::TypeVariants::TyInt(_) |
- ty::TypeVariants::TyUint(_) |
- ty::TypeVariants::TyFloat(_) => {
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) => {
Err(CastError::NeedDeref)
}
- ty::TypeVariants::TyInfer(t) => {
+ ty::Infer(t) => {
match t {
ty::InferTy::IntVar(_) |
ty::InferTy::FloatVar(_) => Err(CastError::NeedDeref),
// array-ptr-cast.
if m_expr.mutbl == hir::MutImmutable && m_cast.mutbl == hir::MutImmutable {
- if let ty::TyArray(ety, _) = m_expr.ty.sty {
+ if let ty::Array(ety, _) = m_expr.ty.sty {
// Due to the limitations of LLVM global constants,
// region pointers end up pointing at copies of
// vector elements instead of the original values.
);
match expected_ty.sty {
- ty::TyDynamic(ref object_type, ..) => {
+ ty::Dynamic(ref object_type, ..) => {
let sig = object_type
.projection_bounds()
.filter_map(|pb| {
.and_then(|p| self.tcx.lang_items().fn_trait_kind(p.def_id()));
(sig, kind)
}
- ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
- ty::TyFnPtr(sig) => {
+ ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
+ ty::FnPtr(sig) => {
let expected_sig = ExpectedSig {
cause_span: None,
sig: sig.skip_binder().clone(),
// NB: This predicate is created by breaking down a
// `ClosureType: FnFoo()` predicate, where
- // `ClosureType` represents some `TyClosure`. It can't
+ // `ClosureType` represents some `Closure`. It can't
// possibly be referring to the current closure,
- // because we haven't produced the `TyClosure` for
+ // because we haven't produced the `Closure` for
// this closure yet; this is exactly why the other
// code is looking for a self type of a unresolved
// inference variable.
);
let input_tys = match arg_param_ty.sty {
- ty::TyTuple(tys) => tys.into_iter(),
+ ty::Tuple(tys) => tys.into_iter(),
_ => {
return None;
}
trait_ref, self_ty
);
match self_ty.sty {
- ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
+ ty::Infer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
_ => None,
}
}
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
- ty::TyRawPtr(mt_b) => {
+ ty::RawPtr(mt_b) => {
return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
}
- ty::TyRef(r_b, ty, mutbl) => {
+ ty::Ref(r_b, ty, mutbl) => {
let mt_b = ty::TypeAndMut { ty, mutbl };
return self.coerce_borrowed_pointer(a, b, r_b, mt_b);
}
}
match a.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
// items to drop the unsafe qualifier.
self.coerce_from_fn_item(a, b)
}
- ty::TyFnPtr(a_f) => {
+ ty::FnPtr(a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
- ty::TyClosure(def_id_a, substs_a) => {
+ ty::Closure(def_id_a, substs_a) => {
// Non-capturing closures are coercible to
// function pointers
self.coerce_closure_to_fn(a, def_id_a, substs_a, b)
// yield.
let (r_a, mt_a) = match a.sty {
- ty::TyRef(r_a, ty, mutbl) => {
+ ty::Ref(r_a, ty, mutbl) => {
let mt_a = ty::TypeAndMut { ty, mutbl };
coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
(r_a, mt_a)
// Now apply the autoref. We have to extract the region out of
// the final ref type we got.
let r_borrow = match ty.sty {
- ty::TyRef(r_borrow, _, _) => r_borrow,
+ ty::Ref(r_borrow, _, _) => r_borrow,
_ => span_bug!(span, "expected a ref type, got {:?}", ty),
};
let mutbl = match mt_b.mutbl {
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let reborrow = match (&source.sty, &target.sty) {
- (&ty::TyRef(_, ty_a, mutbl_a), &ty::TyRef(_, _, mutbl_b)) => {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => {
coerce_mutbls(mutbl_a, mutbl_b)?;
let coercion = Coercion(self.cause.span);
})
}))
}
- (&ty::TyRef(_, ty_a, mt_a), &ty::TyRawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
+ (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
coerce_mutbls(mt_a, mt_b)?;
Some((Adjustment {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
if unsize_did == tr.def_id() {
let sty = &tr.skip_binder().input_types().nth(1).unwrap().sty;
- if let ty::TyTuple(..) = sty {
+ if let ty::Tuple(..) = sty {
debug!("coerce_unsized: found unsized tuple coercion");
has_unsized_tuple_coercion = true;
}
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
{
- if let ty::TyFnPtr(fn_ty_b) = b.sty {
+ if let ty::FnPtr(fn_ty_b) = b.sty {
match (fn_ty_a.unsafety(), fn_ty_b.unsafety()) {
(hir::Unsafety::Normal, hir::Unsafety::Unsafe) => {
let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
match b.sty {
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
let a_sig = a.fn_sig(self.tcx);
let InferOk { value: a_sig, mut obligations } =
self.normalize_associated_types_in_as_infer_ok(self.cause.span, &a_sig);
let node_id_a = self.tcx.hir.as_local_node_id(def_id_a).unwrap();
match b.sty {
- ty::TyFnPtr(_) if self.tcx.with_freevars(node_id_a, |v| v.is_empty()) => {
+ ty::FnPtr(_) if self.tcx.with_freevars(node_id_a, |v| v.is_empty()) => {
// We coerce the closure, which has fn type
// `extern "rust-call" fn((arg0,arg1,...)) -> _`
// to
debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
let (is_ref, mt_a) = match a.sty {
- ty::TyRef(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
- ty::TyRawPtr(mt) => (false, mt),
+ ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
+ ty::RawPtr(mt) => (false, mt),
_ => {
return self.unify_and(a, b, identity);
}
// Special-case that coercion alone cannot handle:
// Two function item types of differing IDs or Substs.
- if let (&ty::TyFnDef(..), &ty::TyFnDef(..)) = (&prev_ty.sty, &new_ty.sty) {
+ if let (&ty::FnDef(..), &ty::FnDef(..)) = (&prev_ty.sty, &new_ty.sty) {
// Don't reify if the function types have a LUB, i.e. they
// are the same function and their parameters have a LUB.
let lub_ty = self.commit_if_ok(|_| {
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }
] => {
match self.node_ty(expr.hir_id).sty {
- ty::TyRef(_, _, mt_orig) => {
+ ty::Ref(_, _, mt_orig) => {
let mutbl_adj: hir::Mutability = mutbl_adj.into();
// Reborrow that we can safely ignore, because
// the next adjustment can only be a Deref
use rustc::hir::{Item, ItemKind, print};
use rustc::ty::{self, Ty, AssociatedItem};
use rustc::ty::adjustment::AllowTwoPhase;
-use errors::{DiagnosticBuilder, SourceMapper};
+use errors::{Applicability, DiagnosticBuilder, SourceMapper};
use super::method::probe;
// If the expected type is an enum with any variants whose sole
// field is of the found type, suggest such variants. See Issue
// #42764.
- if let ty::TyAdt(expected_adt, substs) = expected.sty {
+ if let ty::Adt(expected_adt, substs) = expected.sty {
let mut compatible_variants = vec![];
for variant in &expected_adt.variants {
if variant.fields.len() == 1 {
}
match (&expected.sty, &checked_ty.sty) {
- (&ty::TyRef(_, exp, _), &ty::TyRef(_, check, _)) => match (&exp.sty, &check.sty) {
- (&ty::TyStr, &ty::TyArray(arr, _)) |
- (&ty::TyStr, &ty::TySlice(arr)) if arr == self.tcx.types.u8 => {
+ (&ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (&exp.sty, &check.sty) {
+ (&ty::Str, &ty::Array(arr, _)) |
+ (&ty::Str, &ty::Slice(arr)) if arr == self.tcx.types.u8 => {
if let hir::ExprKind::Lit(_) = expr.node {
if let Ok(src) = cm.span_to_snippet(sp) {
if src.starts_with("b\"") {
}
}
},
- (&ty::TyArray(arr, _), &ty::TyStr) |
- (&ty::TySlice(arr), &ty::TyStr) if arr == self.tcx.types.u8 => {
+ (&ty::Array(arr, _), &ty::Str) |
+ (&ty::Slice(arr), &ty::Str) if arr == self.tcx.types.u8 => {
if let hir::ExprKind::Lit(_) = expr.node {
if let Ok(src) = cm.span_to_snippet(sp) {
if src.starts_with("\"") {
}
_ => {}
},
- (&ty::TyRef(_, _, mutability), _) => {
+ (&ty::Ref(_, _, mutability), _) => {
// Check if it can work when put into a ref. For example:
//
// ```
}
}
}
- (_, &ty::TyRef(_, checked, _)) => {
+ (_, &ty::Ref(_, checked, _)) => {
// We have `&T`, check if what was expected was `T`. If so,
// we may want to suggest adding a `*`, or removing
// a `&`.
if needs_paren { ")" } else { "" });
match (&expected_ty.sty, &checked_ty.sty) {
- (&ty::TyInt(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Int(ref exp), &ty::Int(ref found)) => {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
(None, _) | (_, None) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
true
}
- (&ty::TyUint(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Uint(ref exp), &ty::Uint(ref found)) => {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
(None, _) | (_, None) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_zero_extend),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_zero_extend),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
true
}
- (&ty::TyInt(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Int(ref exp), &ty::Uint(ref found)) => {
if can_cast {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp - 1 => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, _) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(_, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_zero_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_zero_extend),
+ cast_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
}
true
}
- (&ty::TyUint(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Uint(ref exp), &ty::Int(ref found)) => {
if can_cast {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found - 1 > exp => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, _) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(_, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ cast_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyFloat(ref found)) => {
+ (&ty::Float(ref exp), &ty::Float(ref found)) => {
if found.bit_width() < exp.bit_width() {
- err.span_suggestion(expr.span,
- &format!("{} in a lossless way",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{} in a lossless way", msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the closest possible value",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the closest possible value", msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
- (&ty::TyUint(_), &ty::TyFloat(_)) | (&ty::TyInt(_), &ty::TyFloat(_)) => {
+ (&ty::Uint(_), &ty::Float(_)) | (&ty::Int(_), &ty::Float(_)) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, rounding the float towards zero",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, rounding the float towards zero", msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
err.warn("casting here will cause undefined behavior if the rounded value \
cannot be represented by the target integer type, including \
`Inf` and `NaN` (this is a bug and will be fixed)");
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Float(ref exp), &ty::Uint(ref found)) => {
// if `found` is `None` (meaning found is `usize`), don't suggest `.into()`
if exp.bit_width() > found.bit_width().unwrap_or(256) {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer",
+ msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer, rounded if \
- necessary",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer, rounded if necessary",
+ msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Float(ref exp), &ty::Int(ref found)) => {
// if `found` is `None` (meaning found is `isize`), don't suggest `.into()`
if exp.bit_width() > found.bit_width().unwrap_or(256) {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer",
+ msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer, rounded if \
- necessary",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer, rounded if necessary",
+ msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
let dtor_self_type = tcx.type_of(drop_impl_did);
let dtor_predicates = tcx.predicates_of(drop_impl_did);
match dtor_self_type.sty {
- ty::TyAdt(adt_def, self_to_impl_substs) => {
+ ty::Adt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(tcx,
drop_impl_did,
dtor_self_type,
// Replace all regions inside the generator interior with late bound regions
// Note that each region slot in the types gets a new fresh late bound region,
// which means that none of the regions inside relate to any other, even if
- // typeck had previously found contraints that would cause them to be related.
+ // typeck had previously found constraints that would cause them to be related.
let mut counter = 0;
let type_list = fcx.tcx.fold_regions(&type_list, &mut false, |_, current_depth| {
counter += 1;
match *expected {
Void => match t.sty {
- ty::TyTuple(ref v) if v.is_empty() => {},
+ ty::Tuple(ref v) if v.is_empty() => {},
_ => simple_error(&format!("`{}`", t), "()"),
},
// (The width we pass to LLVM doesn't concern the type checker.)
Integer(signed, bits, _llvm_width) => match (signed, bits, &t.sty) {
- (true, 8, &ty::TyInt(ast::IntTy::I8)) |
- (false, 8, &ty::TyUint(ast::UintTy::U8)) |
- (true, 16, &ty::TyInt(ast::IntTy::I16)) |
- (false, 16, &ty::TyUint(ast::UintTy::U16)) |
- (true, 32, &ty::TyInt(ast::IntTy::I32)) |
- (false, 32, &ty::TyUint(ast::UintTy::U32)) |
- (true, 64, &ty::TyInt(ast::IntTy::I64)) |
- (false, 64, &ty::TyUint(ast::UintTy::U64)) |
- (true, 128, &ty::TyInt(ast::IntTy::I128)) |
- (false, 128, &ty::TyUint(ast::UintTy::U128)) => {},
+ (true, 8, &ty::Int(ast::IntTy::I8)) |
+ (false, 8, &ty::Uint(ast::UintTy::U8)) |
+ (true, 16, &ty::Int(ast::IntTy::I16)) |
+ (false, 16, &ty::Uint(ast::UintTy::U16)) |
+ (true, 32, &ty::Int(ast::IntTy::I32)) |
+ (false, 32, &ty::Uint(ast::UintTy::U32)) |
+ (true, 64, &ty::Int(ast::IntTy::I64)) |
+ (false, 64, &ty::Uint(ast::UintTy::U64)) |
+ (true, 128, &ty::Int(ast::IntTy::I128)) |
+ (false, 128, &ty::Uint(ast::UintTy::U128)) => {},
_ => simple_error(&format!("`{}`", t),
&format!("`{}{n}`",
if signed {"i"} else {"u"},
n = bits)),
},
Float(bits) => match (bits, &t.sty) {
- (32, &ty::TyFloat(ast::FloatTy::F32)) |
- (64, &ty::TyFloat(ast::FloatTy::F64)) => {},
+ (32, &ty::Float(ast::FloatTy::F32)) |
+ (64, &ty::Float(ast::FloatTy::F64)) => {},
_ => simple_error(&format!("`{}`", t),
&format!("`f{n}`", n = bits)),
},
Pointer(ref inner_expected, ref _llvm_type, const_) => {
match t.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
if (mutbl == hir::MutImmutable) != const_ {
simple_error(&format!("`{}`", t),
if const_ {"const pointer"} else {"mut pointer"})
}
Aggregate(_flatten, ref expected_contents) => {
match t.sty {
- ty::TyTuple(contents) => {
+ ty::Tuple(contents) => {
if contents.len() != expected_contents.len() {
simple_error(&format!("tuple with length {}", contents.len()),
&format!("tuple with length {}", expected_contents.len()));
use rustc::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc::ty::fold::TypeFoldable;
use rustc::infer::{self, InferOk};
-use syntax_pos::Span;
use rustc::hir;
+use syntax_pos::Span;
use std::ops::Deref;
.include_raw_pointers()
.filter_map(|(ty, _)| {
match ty.sty {
- ty::TyDynamic(ref data, ..) => data.principal().map(|p| closure(self, ty, p)),
+ ty::Dynamic(ref data, ..) => data.principal().map(|p| closure(self, ty, p)),
_ => None,
}
})
fn instantiate_method_substs(
&mut self,
pick: &probe::Pick<'tcx>,
- segment: &hir::PathSegment,
+ seg: &hir::PathSegment,
parent_substs: &Substs<'tcx>,
) -> &'tcx Substs<'tcx> {
// Determine the values for the generic parameters of the method.
// If they were not explicitly supplied, just construct fresh
// variables.
- let method_generics = self.tcx.generics_of(pick.item.def_id);
- let mut fn_segment = Some((segment, method_generics));
- let supress_mismatch = self.fcx.check_impl_trait(self.span, fn_segment);
- self.fcx.check_generic_arg_count(self.span, &mut fn_segment, true, supress_mismatch);
+ let generics = self.tcx.generics_of(pick.item.def_id);
+ AstConv::check_generic_arg_count_for_call(
+ self.tcx,
+ self.span,
+ &generics,
+ &seg,
+ true, // `is_method_call`
+ );
// Create subst for early-bound lifetime parameters, combining
// parameters from the type and those from the method.
- assert_eq!(method_generics.parent_count, parent_substs.len());
- let provided = &segment.args;
- let own_counts = method_generics.own_counts();
- Substs::for_item(self.tcx, pick.item.def_id, |param, _| {
- let mut i = param.index as usize;
- if i < parent_substs.len() {
- parent_substs[i]
- } else {
- let (is_lt, is_ty) = match param.kind {
- GenericParamDefKind::Lifetime => (true, false),
- GenericParamDefKind::Type { .. } => (false, true),
- };
- provided.as_ref().and_then(|data| {
- for arg in &data.args {
- match arg {
- GenericArg::Lifetime(lt) if is_lt => {
- if i == parent_substs.len() {
- return Some(AstConv::ast_region_to_region(
- self.fcx, lt, Some(param)).into());
- }
- i -= 1;
- }
- GenericArg::Lifetime(_) => {}
- GenericArg::Type(ty) if is_ty => {
- if i == parent_substs.len() + own_counts.lifetimes {
- return Some(self.to_ty(ty).into());
- }
- i -= 1;
- }
- GenericArg::Type(_) => {}
- }
+ assert_eq!(generics.parent_count, parent_substs.len());
+
+ AstConv::create_substs_for_generic_args(
+ self.tcx,
+ pick.item.def_id,
+ parent_substs,
+ false,
+ None,
+ // Provide the generic args, and whether types should be inferred.
+ |_| {
+ // The last argument of the returned tuple here is unimportant.
+ if let Some(ref data) = seg.args {
+ (Some(data), false)
+ } else {
+ (None, false)
+ }
+ },
+ // Provide substitutions for parameters for which (valid) arguments have been provided.
+ |param, arg| {
+ match (¶m.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ AstConv::ast_region_to_region(self.fcx, lt, Some(param)).into()
}
- None
- }).unwrap_or_else(|| self.var_for_def(self.span, param))
- }
- })
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.to_ty(ty).into()
+ }
+ _ => unreachable!(),
+ }
+ },
+ // Provide substitutions for parameters for which arguments are inferred.
+ |_, param, _| self.var_for_def(self.span, param),
+ )
}
fn unify_receivers(&mut self, self_ty: Ty<'tcx>, method_self_ty: Ty<'tcx>) {
if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind {
if let Some(ok) = self.try_overloaded_deref(expr.span, source, needs) {
let method = self.register_infer_ok_obligations(ok);
- if let ty::TyRef(region, _, mutbl) = method.sig.output().sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.output().sty {
*deref = OverloadedDeref {
region,
mutbl,
debug!("convert_place_op_to_mutable: method={:?}", method);
self.write_method_call(expr.hir_id, method);
- let (region, mutbl) = if let ty::TyRef(r, _, mutbl) = method.sig.inputs()[0].sty {
+ let (region, mutbl) = if let ty::Ref(r, _, mutbl) = method.sig.inputs()[0].sty {
(r, mutbl)
} else {
span_bug!(expr.span, "input to place op is not a ref?");
})
.any(|trait_pred| {
match trait_pred.skip_binder().self_ty().sty {
- ty::TyDynamic(..) => true,
+ ty::Dynamic(..) => true,
_ => false,
}
})
from_unsafe_deref: reached_raw_pointer,
unsize: false,
};
- if let ty::TyRawPtr(_) = ty.sty {
+ if let ty::RawPtr(_) = ty.sty {
// all the subsequent steps will be from_unsafe_deref
reached_raw_pointer = true;
}
let final_ty = autoderef.maybe_ambiguous_final_ty();
match final_ty.sty {
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// Ended in an inference variable. If we are doing
// a real method lookup, this is a hard error because it's
// possible that there will be multiple applicable methods.
// just ignore it.
}
}
- ty::TyArray(elem_ty, _) => {
+ ty::Array(elem_ty, _) => {
let dereferences = steps.len() - 1;
steps.push(CandidateStep {
unsize: true,
});
}
- ty::TyError => return None,
+ ty::Error => return None,
_ => (),
}
let lang_items = self.tcx.lang_items();
match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
if let Some(p) = data.principal() {
self.assemble_inherent_candidates_from_object(self_ty, p);
self.assemble_inherent_impl_candidates_for_type(p.def_id());
}
}
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
self.assemble_inherent_impl_candidates_for_type(def.did);
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
self.assemble_inherent_impl_candidates_for_type(did);
}
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.assemble_inherent_candidates_from_param(self_ty, p);
}
- ty::TyChar => {
+ ty::Char => {
let lang_def_id = lang_items.char_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyStr => {
+ ty::Str => {
let lang_def_id = lang_items.str_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.str_alloc_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TySlice(_) => {
+ ty::Slice(_) => {
let lang_def_id = lang_items.slice_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.slice_u8_alloc_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
let lang_def_id = lang_items.const_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
let lang_def_id = lang_items.mut_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I8) => {
+ ty::Int(ast::IntTy::I8) => {
let lang_def_id = lang_items.i8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I16) => {
+ ty::Int(ast::IntTy::I16) => {
let lang_def_id = lang_items.i16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I32) => {
+ ty::Int(ast::IntTy::I32) => {
let lang_def_id = lang_items.i32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I64) => {
+ ty::Int(ast::IntTy::I64) => {
let lang_def_id = lang_items.i64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I128) => {
+ ty::Int(ast::IntTy::I128) => {
let lang_def_id = lang_items.i128_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::Isize) => {
+ ty::Int(ast::IntTy::Isize) => {
let lang_def_id = lang_items.isize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U8) => {
+ ty::Uint(ast::UintTy::U8) => {
let lang_def_id = lang_items.u8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U16) => {
let lang_def_id = lang_items.u16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U32) => {
+ ty::Uint(ast::UintTy::U32) => {
let lang_def_id = lang_items.u32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U64) => {
+ ty::Uint(ast::UintTy::U64) => {
let lang_def_id = lang_items.u64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U128) => {
+ ty::Uint(ast::UintTy::U128) => {
let lang_def_id = lang_items.u128_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::Usize) => {
+ ty::Uint(ast::UintTy::Usize) => {
let lang_def_id = lang_items.usize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
let lang_def_id = lang_items.f32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.f32_runtime_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
let lang_def_id = lang_items.f64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
match *predicate {
ty::Predicate::Trait(ref trait_predicate) => {
match trait_predicate.skip_binder().trait_ref.self_ty().sty {
- ty::TyParam(ref p) if *p == param_ty => {
+ ty::Param(ref p) if *p == param_ty => {
Some(trait_predicate.to_poly_trait_ref())
}
_ => None,
pick.autoderefs = step.autoderefs;
// Insert a `&*` or `&mut *` if this is a reference type:
- if let ty::TyRef(_, _, mutbl) = step.self_ty.sty {
+ if let ty::Ref(_, _, mutbl) = step.self_ty.sty {
pick.autoderefs += 1;
pick.autoref = Some(mutbl);
}
use syntax::ast;
use syntax::util::lev_distance::find_best_match_for_name;
-use errors::DiagnosticBuilder;
+use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::{Span, FileName};
use rustc::hir;
use rustc::hir::print;
use rustc::infer::type_variable::TypeVariableOrigin;
-use rustc::ty::TyAdt;
+use rustc::ty::Adt;
use std::cmp::Ordering;
match ty.sty {
// Not all of these (e.g. unsafe fns) implement FnOnce
// so we look for these beforehand
- ty::TyClosure(..) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => true,
+ ty::Closure(..) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) => true,
// If it's not a simple function, look for things which implement FnOnce
_ => {
let fn_once = match tcx.lang_items().require(FnOnceTraitLangItem) {
let item_kind = if is_method {
"method"
} else if actual.is_enum() {
- if let TyAdt(ref adt_def, _) = actual.sty {
+ if let Adt(ref adt_def, _) = actual.sty {
let names = adt_def.variants.iter().map(|s| &s.name);
suggestion = find_best_match_for_name(names,
&item_name.as_str(),
if let Some(expr) = rcvr_expr {
for (ty, _) in self.autoderef(span, rcvr_ty) {
match ty.sty {
- ty::TyAdt(def, substs) if !def.is_enum() => {
+ ty::Adt(def, substs) if !def.is_enum() => {
let variant = &def.non_enum_variant();
if let Some(index) = self.tcx.find_field_index(item_name, variant) {
let field = &variant.fields[index];
}
if static_sources.len() == 1 {
if let Some(expr) = rcvr_expr {
- err.span_suggestion(expr.span.to(span),
+ err.span_suggestion_with_applicability(expr.span.to(span),
"use associated function syntax instead",
format!("{}::{}",
self.ty_to_string(actual),
- item_name));
+ item_name),
+ Applicability::MachineApplicable);
} else {
err.help(&format!("try with `{}::{}`",
self.ty_to_string(actual), item_name));
-> bool {
fn is_local(ty: Ty) -> bool {
match ty.sty {
- ty::TyAdt(def, _) => def.did.is_local(),
- ty::TyForeign(did) => did.is_local(),
+ ty::Adt(def, _) => def.did.is_local(),
+ ty::Foreign(did) => did.is_local(),
- ty::TyDynamic(ref tr, ..) => tr.principal()
+ ty::Dynamic(ref tr, ..) => tr.principal()
.map_or(false, |p| p.def_id().is_local()),
- ty::TyParam(_) => true,
+ ty::Param(_) => true,
// everything else (primitive types etc.) is effectively
// non-local (there are "edge" cases, e.g. (LocalType,), but
use rustc::ty::fold::TypeFoldable;
use rustc::ty::query::Providers;
use rustc::ty::util::{Representability, IntTypeExt, Discr};
-use errors::{DiagnosticBuilder, DiagnosticId};
+use errors::{Applicability, DiagnosticBuilder, DiagnosticId};
use require_c_abi_if_variadic;
use session::{CompileIncomplete, config, Session};
use TypeAndSubsts;
use lint;
use util::common::{ErrorReported, indenter};
-use util::nodemap::{DefIdMap, DefIdSet, FxHashMap, NodeMap};
+use util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, NodeMap};
use std::cell::{Cell, RefCell, Ref, RefMut};
use rustc_data_structures::sync::Lrc;
// Anonymized types found in explicit return types and their
// associated fresh inference variable. Writeback resolves these
// variables to get the concrete type, which can be used to
- // deanonymize TyAnon, after typeck is done with all functions.
+ // deanonymize Anon, after typeck is done with all functions.
anon_types: RefCell<DefIdMap<AnonTypeDecl<'tcx>>>,
/// Each type parameter has an implicit region bound that
/// for examples of where this comes up,.
fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
match fcx.tcx.struct_tail(ty).sty {
- ty::TySlice(_) | ty::TyStr | ty::TyDynamic(..) => {
+ ty::Slice(_) | ty::Str | ty::Dynamic(..) => {
ExpectRvalueLikeUnsized(ty)
}
_ => ExpectHasType(ty)
}
}
+#[derive(Debug)]
+struct PathSeg(DefId, usize);
+
pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
body_id: ast::NodeId,
// backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
fcx.check_casts();
- // Closure and generater analysis may run after fallback
+ // Closure and generator analysis may run after fallback
// because they don't constrain other type variables.
fcx.closure_analyze(body);
assert!(fcx.deferred_call_resolutions.borrow().is_empty());
if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() {
if panic_impl_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() {
- if declared_ret_ty.sty != ty::TyNever {
+ if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_panic_info = match inputs[0].sty {
- ty::TyRef(region, ty, mutbl) => match ty.sty {
- ty::TyAdt(ref adt, _) => {
+ ty::Ref(region, ty, mutbl) => match ty.sty {
+ ty::Adt(ref adt, _) => {
adt.did == panic_info_did &&
mutbl == hir::Mutability::MutImmutable &&
*region != RegionKind::ReStatic
if let Some(alloc_error_handler_did) = fcx.tcx.lang_items().oom() {
if alloc_error_handler_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(alloc_layout_did) = fcx.tcx.lang_items().alloc_layout() {
- if declared_ret_ty.sty != ty::TyNever {
+ if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_alloc_layout = match inputs[0].sty {
- ty::TyAdt(ref adt, _) => {
+ ty::Adt(ref adt, _) => {
adt.did == alloc_layout_did
},
_ => false,
};
let param_env = ty::ParamEnv::reveal_all();
if let Ok(static_) = tcx.const_eval(param_env.and(cid)) {
- let alloc = tcx.const_value_to_allocation(static_);
+ let alloc = tcx.const_to_allocation(static_);
if alloc.relocations.len() != 0 {
let msg = "statics with a custom `#[link_section]` must be a \
simple list of bytes on the wasm target with no \
pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let t = tcx.type_of(def_id);
match t.sty {
- ty::TyAdt(def, substs) if def.is_struct() => {
+ ty::Adt(def, substs) if def.is_struct() => {
let fields = &def.non_enum_variant().fields;
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
return;
}
match e.sty {
- ty::TyParam(_) => { /* struct<T>(T, T, T, T) is ok */ }
+ ty::Param(_) => { /* struct<T>(T, T, T, T) is ok */ }
_ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
_ => {
span_err!(tcx.sess, sp, E0077,
return false;
}
match t.sty {
- ty::TyAdt(def, substs) if def.is_struct() || def.is_union() => {
+ ty::Adt(def, substs) if def.is_struct() || def.is_union() => {
if tcx.adt_def(def.did).repr.align > 0 {
return true;
}
for field in &def.non_enum_variant().fields {
let f = field.ty(tcx, substs);
match f.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if check_packed_inner(tcx, def.did, stack) {
return true;
}
fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("resolve_type_vars_with_obligations(ty={:?})", ty);
- // No TyInfer()? Nothing needs doing.
+ // No Infer()? Nothing needs doing.
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
// feature(never_type) is enabled, unconstrained ints with i32,
// unconstrained floats with f64.
// Fallback becomes very dubious if we have encountered type-checking errors.
- // In that case, fallback to TyError.
- // The return value indicates whether fallback has occured.
+ // In that case, fallback to Error.
+ // The return value indicates whether fallback has occurred.
fn fallback_if_possible(&self, ty: Ty<'tcx>) -> bool {
use rustc::ty::error::UnconstrainedNumeric::Neither;
use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
let mut self_ty = adjusted_ty;
if unsize {
// We only unsize arrays here.
- if let ty::TyArray(element_ty, _) = adjusted_ty.sty {
+ if let ty::Array(element_ty, _) = adjusted_ty.sty {
self_ty = self.tcx.mk_slice(element_ty);
} else {
continue;
let method = self.register_infer_ok_obligations(ok);
let mut adjustments = autoderef.adjust_steps(needs);
- if let ty::TyRef(region, _, r_mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, r_mutbl) = method.sig.inputs()[0].sty {
let mutbl = match r_mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
let sugg_span = tcx.sess.source_map().end_point(expr_sp);
// remove closing `)` from the span
let sugg_span = sugg_span.shrink_to_lo();
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
sugg_span,
"expected the unit value `()`; create it with empty parentheses",
- String::from("()"));
+ String::from("()"),
+ Applicability::MachineApplicable);
} else {
err.span_label(sp, format!("expected {}{} parameter{}",
if variadic {"at least "} else {""},
let formal_tys = if tuple_arguments == TupleArguments {
let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
match tuple_type.sty {
- ty::TyTuple(arg_types) if arg_types.len() != args.len() => {
+ ty::Tuple(arg_types) if arg_types.len() != args.len() => {
param_count_error(arg_types.len(), args.len(), "E0057", false, false);
expected_arg_tys = &[];
self.err_args(args.len())
}
- ty::TyTuple(arg_types) => {
+ ty::Tuple(arg_types) => {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
- ty::TyTuple(ref tys) => &tys,
+ ty::Tuple(ref tys) => &tys,
_ => &[]
},
None => &[]
// in C but we just error out instead and require explicit casts.
let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
match arg_ty.sty {
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
}
- ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => {
+ ty::Int(ast::IntTy::I8) | ty::Int(ast::IntTy::I16) | ty::Bool => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
}
- ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U8) | ty::Uint(ast::UintTy::U16) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
}
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty);
variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
- ty::TyInt(_) | ty::TyUint(_) => Some(ty),
- ty::TyChar => Some(tcx.types.u8),
- ty::TyRawPtr(..) => Some(tcx.types.usize),
- ty::TyFnDef(..) | ty::TyFnPtr(_) => Some(tcx.types.usize),
+ ty::Int(_) | ty::Uint(_) => Some(ty),
+ ty::Char => Some(tcx.types.u8),
+ ty::RawPtr(..) => Some(tcx.types.usize),
+ ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
_ => None
}
});
ast::LitKind::FloatUnsuffixed(_) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
- ty::TyFloat(_) => Some(ty),
+ ty::Float(_) => Some(ty),
_ => None
}
});
self.tcx.sess.source_map().span_to_snippet(lhs.span),
self.tcx.sess.source_map().span_to_snippet(rhs.span))
{
- err.span_suggestion(expr.span, msg, format!("{} == {}", left, right));
+ err.span_suggestion_with_applicability(
+ expr.span,
+ msg,
+ format!("{} == {}", left, right),
+ Applicability::MaybeIncorrect);
} else {
err.help(msg);
}
let mut autoderef = self.autoderef(expr.span, expr_t);
while let Some((base_t, _)) = autoderef.next() {
match base_t.sty {
- ty::TyAdt(base_def, substs) if !base_def.is_enum() => {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
debug!("struct named {:?}", base_t);
let (ident, def_scope) =
self.tcx.adjust_ident(field, base_def.did, self.body_id);
private_candidate = Some((base_def.did, field_ty));
}
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
let fstr = field.as_str();
if let Ok(index) = fstr.parse::<usize>() {
if fstr == index.to_string() {
let mut err = self.no_such_field_err(field.span, field, expr_t);
match expr_t.sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
if let Some(suggested_field_name) =
Self::suggest_field_name(def.non_enum_variant(),
&field.as_str(), vec![]) {
}
};
}
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
let base = self.tcx.hir.node_to_pretty_string(base.id);
let msg = format!("`{}` is a native pointer; try dereferencing it", base);
let suggestion = format!("(*{}).{}", base, field);
let mut err = self.type_error_struct_with_diag(
field.ident.span,
|actual| match ty.sty {
- ty::TyAdt(adt, ..) if adt.is_enum() => {
+ ty::Adt(adt, ..) if adt.is_enum() => {
struct_span_err!(self.tcx.sess, field.ident.span, E0559,
"{} `{}::{}` has no field named `{}`",
kind_name, actual, variant.name, field.ident)
format!("field does not exist - did you mean `{}`?", field_name));
} else {
match ty.sty {
- ty::TyAdt(adt, ..) => {
+ ty::Adt(adt, ..) => {
if adt.is_enum() {
err.span_label(field.ident.span,
format!("`{}::{}` does not have this field",
self.demand_eqtype(span, adt_ty_hint, adt_ty);
let (substs, adt_kind, kind_name) = match &adt_ty.sty{
- &ty::TyAdt(adt, substs) => {
+ &ty::Adt(adt, substs) => {
(substs, adt.adt_kind(), adt.variant_descr())
}
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
displayable_field_names.sort();
let truncated_fields_error = if len <= 3 {
- "".to_string()
+ String::new()
} else {
format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"})
};
}
Def::Variant(..) => {
match ty.sty {
- ty::TyAdt(adt, substs) => {
+ ty::Adt(adt, substs) => {
Some((adt.variant_of_def(def), adt.did, substs))
}
_ => bug!("unexpected type: {:?}", ty.sty)
Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) |
Def::AssociatedTy(..) | Def::SelfTy(..) => {
match ty.sty {
- ty::TyAdt(adt, substs) if !adt.is_enum() => {
+ ty::Adt(adt, substs) if !adt.is_enum() => {
Some((adt.non_enum_variant(), adt.did, substs))
}
_ => None,
};
// Prohibit struct expressions when non exhaustive flag is set.
- if let ty::TyAdt(adt, _) = struct_ty.sty {
+ if let ty::Adt(adt, _) = struct_ty.sty {
if !adt.did.is_local() && adt.is_non_exhaustive() {
span_err!(self.tcx.sess, expr.span, E0639,
"cannot create non-exhaustive {} using struct expression",
if !error_happened {
self.check_expr_has_type_or_error(base_expr, struct_ty);
match struct_ty.sty {
- ty::TyAdt(adt, substs) if adt.is_struct() => {
+ ty::Adt(adt, substs) if adt.is_struct() => {
let fru_field_types = adt.non_enum_variant().fields.iter().map(|f| {
self.normalize_associated_types_in(expr.span, &f.ty(self.tcx, substs))
}).collect();
/// strict, _|_ can appear in the type of an expression that does not,
/// itself, diverge: for example, fn() -> _|_.)
/// Note that inspecting a type's structure *directly* may expose the fact
- /// that there are actually multiple representations for `TyError`, so avoid
+ /// that there are actually multiple representations for `Error`, so avoid
/// that when err needs to be handled differently.
fn check_expr_with_expectation_and_needs(&self,
expr: &'gcx hir::Expr,
hir::ExprKind::Box(ref subexpr) => {
let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| {
match ty.sty {
- ty::TyAdt(def, _) if def.is_box()
+ ty::Adt(def, _) if def.is_box()
=> Expectation::rvalue_hint(self, ty.boxed_ty()),
_ => NoExpectation
}
} else if let Some(ok) = self.try_overloaded_deref(
expr.span, oprnd_t, needs) {
let method = self.register_infer_ok_obligations(ok);
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
hir::UnNot => {
let result = self.check_user_unop(expr, oprnd_t, unop);
// If it's builtin, we can reuse the type, this helps inference.
- if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
+ if !(oprnd_t.is_integral() || oprnd_t.sty == ty::Bool) {
oprnd_t = result;
}
}
hir::ExprKind::AddrOf(mutbl, ref oprnd) => {
let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
match ty.sty {
- ty::TyRef(_, ty, _) | ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
if self.is_place_expr(&oprnd) {
// Places may legitimately have unsized types.
// For example, dereferences of a fat pointer and
hir::ExprKind::Array(ref args) => {
let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
- ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
});
let uty = match expected {
ExpectHasType(uty) => {
match uty.sty {
- ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
}
if element_ty.references_error() {
tcx.types.err
} else if let Ok(count) = count {
- tcx.mk_ty(ty::TyArray(t, count))
+ tcx.mk_ty(ty::Array(t, count))
} else {
tcx.types.err
}
let flds = expected.only_has_type(self).and_then(|ty| {
let ty = self.resolve_type_vars_with_obligations(ty);
match ty.sty {
- ty::TyTuple(ref flds) => Some(&flds[..]),
+ ty::Tuple(ref flds) => Some(&flds[..]),
_ => None
}
});
"cannot index into a value of type `{}`",
base_t);
// Try to give some advice about indexing tuples.
- if let ty::TyTuple(..) = base_t.sty {
+ if let ty::Tuple(..) = base_t.sty {
let mut needs_note = true;
// If the index is an integer, we can show the actual
// fixed expression:
ast::LitIntType::Unsuffixed) = lit.node {
let snip = tcx.sess.source_map().span_to_snippet(base.span);
if let Ok(snip) = snip {
- err.span_suggestion(expr.span,
- "to access tuple elements, use",
- format!("{}.{}", snip, i));
+ err.span_suggestion_with_applicability(
+ expr.span,
+ "to access tuple elements, use",
+ format!("{}.{}", snip, i),
+ Applicability::MachineApplicable);
needs_note = false;
}
}
{
match *qpath {
hir::QPath::Resolved(ref maybe_qself, ref path) => {
- let opt_self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
- let ty = AstConv::def_to_ty(self, opt_self_ty, path, true);
+ let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
+ let ty = AstConv::def_to_ty(self, self_ty, path, true);
(path.def, ty)
}
hir::QPath::TypeRelative(ref qself, ref segment) => {
// In some cases, blocks have just one exit, but other blocks
// can be targeted by multiple breaks. This can happen both
// with labeled blocks as well as when we desugar
- // a `do catch { ... }` expression.
+ // a `try { ... }` expression.
//
// Example 1:
//
hir::ExprKind::Match(..) |
hir::ExprKind::Block(..) => {
let sp = self.tcx.sess.source_map().next_point(cause_span);
- err.span_suggestion(sp,
- "try adding a semicolon",
- ";".to_string());
+ err.span_suggestion_with_applicability(
+ sp,
+ "try adding a semicolon",
+ ";".to_string(),
+ Applicability::MachineApplicable);
}
_ => (),
}
// haven't set a return type at all (and aren't `fn main()` or an impl).
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_nil()) {
(&hir::FunctionRetTy::DefaultReturn(span), true, true, true) => {
- err.span_suggestion(span,
- "try adding a return type",
- format!("-> {} ",
- self.resolve_type_vars_with_obligations(found)));
+ err.span_suggestion_with_applicability(
+ span,
+ "try adding a return type",
+ format!("-> {} ", self.resolve_type_vars_with_obligations(found)),
+ Applicability::MachineApplicable);
}
(&hir::FunctionRetTy::DefaultReturn(span), false, true, true) => {
err.span_label(span, "possibly return type missing here?");
}
let original_span = original_sp(last_stmt.span, blk.span);
let span_semi = original_span.with_lo(original_span.hi() - BytePos(1));
- err.span_suggestion(span_semi, "consider removing this semicolon", "".to_string());
+ err.span_suggestion_with_applicability(
+ span_semi,
+ "consider removing this semicolon",
+ String::new(),
+ Applicability::MachineApplicable);
}
- // Instantiates the given path, which must refer to an item with the given
- // number of type parameters and type.
- pub fn instantiate_value_path(&self,
- segments: &[hir::PathSegment],
- opt_self_ty: Option<Ty<'tcx>>,
- def: Def,
- span: Span,
- node_id: ast::NodeId)
- -> Ty<'tcx> {
- debug!("instantiate_value_path(path={:?}, def={:?}, node_id={})",
- segments,
- def,
- node_id);
-
+ fn def_ids_for_path_segments(&self,
+ segments: &[hir::PathSegment],
+ def: Def)
+ -> Vec<PathSeg> {
// We need to extract the type parameters supplied by the user in
// the path `path`. Due to the current setup, this is a bit of a
// tricky-process; the problem is that resolve only tells us the
// The first step then is to categorize the segments appropriately.
assert!(!segments.is_empty());
+ let last = segments.len() - 1;
+
+ let mut path_segs = vec![];
- let mut ufcs_associated = None;
- let mut type_segment = None;
- let mut fn_segment = None;
match def {
// Case 1. Reference to a struct/variant constructor.
Def::StructCtor(def_id, ..) |
Def::VariantCtor(def_id, ..) => {
// Everything but the final segment should have no
// parameters at all.
- let mut generics = self.tcx.generics_of(def_id);
- if let Some(def_id) = generics.parent {
- // Variant and struct constructors use the
- // generics of their parent type definition.
- generics = self.tcx.generics_of(def_id);
- }
- type_segment = Some((segments.last().unwrap(), generics));
+ let generics = self.tcx.generics_of(def_id);
+ // Variant and struct constructors use the
+ // generics of their parent type definition.
+ let generics_def_id = generics.parent.unwrap_or(def_id);
+ path_segs.push(PathSeg(generics_def_id, last));
}
// Case 2. Reference to a top-level value.
Def::Fn(def_id) |
Def::Const(def_id) |
Def::Static(def_id, _) => {
- fn_segment = Some((segments.last().unwrap(), self.tcx.generics_of(def_id)));
+ path_segs.push(PathSeg(def_id, last));
}
// Case 3. Reference to a method or associated const.
+ Def::Method(def_id) |
+ Def::AssociatedConst(def_id) => {
+ if segments.len() >= 2 {
+ let generics = self.tcx.generics_of(def_id);
+ path_segs.push(PathSeg(generics.parent.unwrap(), last - 1));
+ }
+ path_segs.push(PathSeg(def_id, last));
+ }
+
+ // Case 4. Local variable, no generics.
+ Def::Local(..) | Def::Upvar(..) => {}
+
+ _ => bug!("unexpected definition: {:?}", def),
+ }
+
+ debug!("path_segs = {:?}", path_segs);
+
+ path_segs
+ }
+
+ // Instantiates the given path, which must refer to an item with the given
+ // number of type parameters and type.
+ pub fn instantiate_value_path(&self,
+ segments: &[hir::PathSegment],
+ self_ty: Option<Ty<'tcx>>,
+ def: Def,
+ span: Span,
+ node_id: ast::NodeId)
+ -> Ty<'tcx> {
+ debug!("instantiate_value_path(path={:?}, def={:?}, node_id={})",
+ segments,
+ def,
+ node_id);
+
+ let path_segs = self.def_ids_for_path_segments(segments, def);
+
+ let mut ufcs_associated = None;
+ match def {
Def::Method(def_id) |
Def::AssociatedConst(def_id) => {
let container = self.tcx.associated_item(def_id).container;
}
ty::ImplContainer(_) => {}
}
-
- let generics = self.tcx.generics_of(def_id);
- if segments.len() >= 2 {
- let parent_generics = self.tcx.generics_of(generics.parent.unwrap());
- type_segment = Some((&segments[segments.len() - 2], parent_generics));
- } else {
+ if segments.len() == 1 {
// `<T>::assoc` will end up here, and so can `T::assoc`.
- let self_ty = opt_self_ty.expect("UFCS sugared assoc missing Self");
+ let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
ufcs_associated = Some((container, self_ty));
}
- fn_segment = Some((segments.last().unwrap(), generics));
}
-
- // Case 4. Local variable, no generics.
- Def::Local(..) | Def::Upvar(..) => {}
-
- _ => bug!("unexpected definition: {:?}", def),
+ _ => {}
}
- debug!("type_segment={:?} fn_segment={:?}", type_segment, fn_segment);
-
// Now that we have categorized what space the parameters for each
// segment belong to, let's sort out the parameters that the user
// provided (if any) into their appropriate spaces. We'll also report
// errors if type parameters are provided in an inappropriate place.
- let poly_segments = type_segment.is_some() as usize +
- fn_segment.is_some() as usize;
- AstConv::prohibit_generics(self, &segments[..segments.len() - poly_segments]);
+
+ let mut generic_segs = FxHashSet::default();
+ for PathSeg(_, index) in &path_segs {
+ generic_segs.insert(index);
+ }
+ AstConv::prohibit_generics(self, segments.iter().enumerate().filter_map(|(index, seg)| {
+ if !generic_segs.contains(&index) {
+ Some(seg)
+ } else {
+ None
+ }
+ }));
match def {
Def::Local(nid) | Def::Upvar(nid, ..) => {
// variables. If the user provided some types, we may still need
// to add defaults. If the user provided *too many* types, that's
// a problem.
- let supress_mismatch = self.check_impl_trait(span, fn_segment);
- self.check_generic_arg_count(span, &mut type_segment, false, supress_mismatch);
- self.check_generic_arg_count(span, &mut fn_segment, false, supress_mismatch);
- let (fn_start, has_self) = match (type_segment, fn_segment) {
- (_, Some((_, generics))) => {
- (generics.parent_count, generics.has_self)
- }
- (Some((_, generics)), None) => {
- (generics.params.len(), generics.has_self)
- }
- (None, None) => (0, false)
- };
- // FIXME(varkor): Separating out the parameters is messy.
- let mut lifetimes_type_seg = vec![];
- let mut types_type_seg = vec![];
- let mut infer_types_type_seg = true;
- if let Some((seg, _)) = type_segment {
- if let Some(ref data) = seg.args {
- for arg in &data.args {
- match arg {
- GenericArg::Lifetime(lt) => lifetimes_type_seg.push(lt),
- GenericArg::Type(ty) => types_type_seg.push(ty),
- }
- }
+ let mut infer_args_for_err = FxHashSet::default();
+ for &PathSeg(def_id, index) in &path_segs {
+ let seg = &segments[index];
+ let generics = self.tcx.generics_of(def_id);
+ // Argument-position `impl Trait` is treated as a normal generic
+ // parameter internally, but we don't allow users to specify the
+ // parameter's value explicitly, so we have to do some error-
+ // checking here.
+ let suppress_errors = AstConv::check_generic_arg_count_for_call(
+ self.tcx,
+ span,
+ &generics,
+ &seg,
+ false, // `is_method_call`
+ );
+ if suppress_errors {
+ infer_args_for_err.insert(index);
+ self.set_tainted_by_errors(); // See issue #53251.
}
- infer_types_type_seg = seg.infer_types;
}
- let mut lifetimes_fn_seg = vec![];
- let mut types_fn_seg = vec![];
- let mut infer_types_fn_seg = true;
- if let Some((seg, _)) = fn_segment {
- if let Some(ref data) = seg.args {
- for arg in &data.args {
- match arg {
- GenericArg::Lifetime(lt) => lifetimes_fn_seg.push(lt),
- GenericArg::Type(ty) => types_fn_seg.push(ty),
- }
- }
- }
- infer_types_fn_seg = seg.infer_types;
- }
+ let has_self = path_segs.last().map(|PathSeg(def_id, _)| {
+ self.tcx.generics_of(*def_id).has_self
+ }).unwrap_or(false);
- let substs = Substs::for_item(self.tcx, def.def_id(), |param, substs| {
- let mut i = param.index as usize;
+ let def_id = def.def_id();
- let (segment, lifetimes, types, infer_types) = if i < fn_start {
- if let GenericParamDefKind::Type { .. } = param.kind {
- // Handle Self first, so we can adjust the index to match the AST.
- if has_self && i == 0 {
- return opt_self_ty.map(|ty| ty.into()).unwrap_or_else(|| {
- self.var_for_def(span, param)
- });
+ let substs = AstConv::create_substs_for_generic_args(
+ self.tcx,
+ def_id,
+ &[][..],
+ has_self,
+ self_ty,
+ // Provide the generic args, and whether types should be inferred.
+ |def_id| {
+ if let Some(&PathSeg(_, index)) = path_segs.iter().find(|&PathSeg(did, _)| {
+ *did == def_id
+ }) {
+ // If we've encountered an `impl Trait`-related error, we're just
+ // going to infer the arguments for better error messages.
+ if !infer_args_for_err.contains(&index) {
+ // Check whether the user has provided generic arguments.
+ if let Some(ref data) = segments[index].args {
+ return (Some(data), segments[index].infer_types);
+ }
}
+ return (None, segments[index].infer_types);
}
- i -= has_self as usize;
- (type_segment, &lifetimes_type_seg, &types_type_seg, infer_types_type_seg)
- } else {
- i -= fn_start;
- (fn_segment, &lifetimes_fn_seg, &types_fn_seg, infer_types_fn_seg)
- };
- match param.kind {
- GenericParamDefKind::Lifetime => {
- if let Some(lifetime) = lifetimes.get(i) {
- AstConv::ast_region_to_region(self, lifetime, Some(param)).into()
- } else {
- self.re_infer(span, Some(param)).unwrap().into()
+ (None, true)
+ },
+ // Provide substitutions for parameters for which (valid) arguments have been provided.
+ |param, arg| {
+ match (¶m.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ AstConv::ast_region_to_region(self, lt, Some(param)).into()
}
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.to_ty(ty).into()
+ }
+ _ => unreachable!(),
}
- GenericParamDefKind::Type { .. } => {
- // Skip over the lifetimes in the same segment.
- if let Some((_, generics)) = segment {
- i -= generics.own_counts().lifetimes;
+ },
+ // Provide substitutions for parameters for which arguments are inferred.
+ |substs, param, infer_types| {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ self.re_infer(span, Some(param)).unwrap().into()
}
-
- let has_default = match param.kind {
- GenericParamDefKind::Type { has_default, .. } => has_default,
- _ => unreachable!()
- };
-
- if let Some(ast_ty) = types.get(i) {
- // A provided type parameter.
- self.to_ty(ast_ty).into()
- } else if !infer_types && has_default {
- // No type parameter provided, but a default exists.
- let default = self.tcx.type_of(param.def_id);
- self.normalize_ty(
- span,
- default.subst_spanned(self.tcx, substs, Some(span))
- ).into()
- } else {
- // No type parameters were provided, we can infer all.
- // This can also be reached in some error cases:
- // We prefer to use inference variables instead of
- // TyError to let type inference recover somewhat.
- self.var_for_def(span, param)
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_types && has_default {
+ // If we have a default, then it doesn't matter that we're not
+ // inferring the type arguments: we provide the default where any
+ // is missing.
+ let default = self.tcx.type_of(param.def_id);
+ self.normalize_ty(
+ span,
+ default.subst_spanned(self.tcx, substs.unwrap(), Some(span))
+ ).into()
+ } else {
+ // If no type arguments were provided, we have to infer them.
+ // This case also occurs as a result of some malformed input, e.g.
+ // a lifetime argument being given instead of a type parameter.
+ // Using inference instead of `Error` gives better error messages.
+ self.var_for_def(span, param)
+ }
}
}
- }
- });
+ },
+ );
// The things we are substituting into the type should not contain
// escaping late-bound regions, and nor should the base type scheme.
- let ty = self.tcx.type_of(def.def_id());
+ let ty = self.tcx.type_of(def_id);
assert!(!substs.has_escaping_regions());
assert!(!ty.has_escaping_regions());
// Add all the obligations that are required, substituting and
// normalized appropriately.
- let bounds = self.instantiate_bounds(span, def.def_id(), &substs);
+ let bounds = self.instantiate_bounds(span, def_id, &substs);
self.add_obligations_for_parameters(
- traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def.def_id())),
+ traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def_id)),
&bounds);
// Substitute the values for the type parameters into the type of
}
}
- self.check_rustc_args_require_const(def.def_id(), node_id, span);
+ self.check_rustc_args_require_const(def_id, node_id, span);
debug!("instantiate_value_path: type of {:?} is {:?}",
node_id,
directly, not through a function pointer");
}
- /// Report errors if the provided parameters are too few or too many.
- fn check_generic_arg_count(&self,
- span: Span,
- segment: &mut Option<(&hir::PathSegment, &ty::Generics)>,
- is_method_call: bool,
- supress_mismatch_error: bool) {
- let (lifetimes, types, infer_types, bindings) = segment.map_or(
- (vec![], vec![], true, &[][..]),
- |(s, _)| {
- s.args.as_ref().map_or(
- (vec![], vec![], s.infer_types, &[][..]),
- |data| {
- let (mut lifetimes, mut types) = (vec![], vec![]);
- data.args.iter().for_each(|arg| match arg {
- GenericArg::Lifetime(lt) => lifetimes.push(lt),
- GenericArg::Type(ty) => types.push(ty),
- });
- (lifetimes, types, s.infer_types, &data.bindings[..])
- }
- )
- });
-
- // Check provided parameters.
- let ((ty_required, ty_accepted), lt_accepted) =
- segment.map_or(((0, 0), 0), |(_, generics)| {
- struct ParamRange {
- required: usize,
- accepted: usize
- };
-
- let mut lt_accepted = 0;
- let mut ty_params = ParamRange { required: 0, accepted: 0 };
- for param in &generics.params {
- match param.kind {
- GenericParamDefKind::Lifetime => lt_accepted += 1,
- GenericParamDefKind::Type { has_default, .. } => {
- ty_params.accepted += 1;
- if !has_default {
- ty_params.required += 1;
- }
- }
- };
- }
- if generics.parent.is_none() && generics.has_self {
- ty_params.required -= 1;
- ty_params.accepted -= 1;
- }
-
- ((ty_params.required, ty_params.accepted), lt_accepted)
- });
-
- let count_type_params = |n| {
- format!("{} type parameter{}", n, if n == 1 { "" } else { "s" })
- };
- let expected_text = count_type_params(ty_accepted);
- let actual_text = count_type_params(types.len());
- if let Some((mut err, span)) = if types.len() > ty_accepted {
- // To prevent derived errors to accumulate due to extra
- // type parameters, we force instantiate_value_path to
- // use inference variables instead of the provided types.
- *segment = None;
- let span = types[ty_accepted].span;
- Some((struct_span_err!(self.tcx.sess, span, E0087,
- "too many type parameters provided: \
- expected at most {}, found {}",
- expected_text, actual_text), span))
- } else if types.len() < ty_required && !infer_types && !supress_mismatch_error {
- Some((struct_span_err!(self.tcx.sess, span, E0089,
- "too few type parameters provided: \
- expected {}, found {}",
- expected_text, actual_text), span))
- } else {
- None
- } {
- self.set_tainted_by_errors(); // #53251
- err.span_label(span, format!("expected {}", expected_text)).emit();
- }
-
- if !bindings.is_empty() {
- AstConv::prohibit_projection(self, bindings[0].span);
- }
-
- let infer_lifetimes = lifetimes.len() == 0;
- // Prohibit explicit lifetime arguments if late bound lifetime parameters are present.
- let has_late_bound_lifetime_defs =
- segment.map_or(None, |(_, generics)| generics.has_late_bound_regions);
- if let (Some(span_late), false) = (has_late_bound_lifetime_defs, lifetimes.is_empty()) {
- // Report this as a lint only if no error was reported previously.
- let primary_msg = "cannot specify lifetime arguments explicitly \
- if late bound lifetime parameters are present";
- let note_msg = "the late bound lifetime parameter is introduced here";
- if !is_method_call && (lifetimes.len() > lt_accepted ||
- lifetimes.len() < lt_accepted && !infer_lifetimes) {
- let mut err = self.tcx.sess.struct_span_err(lifetimes[0].span, primary_msg);
- err.span_note(span_late, note_msg);
- err.emit();
- *segment = None;
- } else {
- let mut multispan = MultiSpan::from_span(lifetimes[0].span);
- multispan.push_span_label(span_late, note_msg.to_string());
- self.tcx.lint_node(lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS,
- lifetimes[0].id, multispan, primary_msg);
- }
- return;
- }
-
- let count_lifetime_params = |n| {
- format!("{} lifetime parameter{}", n, if n == 1 { "" } else { "s" })
- };
- let expected_text = count_lifetime_params(lt_accepted);
- let actual_text = count_lifetime_params(lifetimes.len());
- if let Some((mut err, span)) = if lifetimes.len() > lt_accepted {
- let span = lifetimes[lt_accepted].span;
- Some((struct_span_err!(self.tcx.sess, span, E0088,
- "too many lifetime parameters provided: \
- expected at most {}, found {}",
- expected_text, actual_text), span))
- } else if lifetimes.len() < lt_accepted && !infer_lifetimes {
- Some((struct_span_err!(self.tcx.sess, span, E0090,
- "too few lifetime parameters provided: \
- expected {}, found {}",
- expected_text, actual_text), span))
- } else {
- None
- } {
- err.span_label(span, format!("expected {}", expected_text)).emit();
- }
- }
-
- /// Report error if there is an explicit type parameter when using `impl Trait`.
- fn check_impl_trait(&self,
- span: Span,
- segment: Option<(&hir::PathSegment, &ty::Generics)>)
- -> bool {
- let segment = segment.map(|(path_segment, generics)| {
- let explicit = !path_segment.infer_types;
- let impl_trait = generics.params.iter().any(|param| match param.kind {
- ty::GenericParamDefKind::Type {
- synthetic: Some(hir::SyntheticTyParamKind::ImplTrait), ..
- } => true,
- _ => false,
- });
-
- if explicit && impl_trait {
- let mut err = struct_span_err! {
- self.tcx.sess,
- span,
- E0632,
- "cannot provide explicit type parameters when `impl Trait` is \
- used in argument position."
- };
-
- err.emit();
- }
-
- impl_trait
- });
-
- segment.unwrap_or(false)
- }
-
// Resolves `typ` by a single level if `typ` is a type variable.
// If no resolution is possible, then an error is reported.
// Numeric inference variables may be left unresolved.
let mut types_used = vec![false; own_counts.types];
for leaf_ty in ty.walk() {
- if let ty::TyParam(ty::ParamTy { idx, .. }) = leaf_ty.sty {
+ if let ty::Param(ty::ParamTy { idx, .. }) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
types_used[idx as usize - own_counts.lifetimes] = true;
- } else if let ty::TyError = leaf_ty.sty {
+ } else if let ty::Error = leaf_ty.sty {
// If there is already another error, do not emit
// an error for not using a type Parameter.
assert!(tcx.sess.err_count() > 0);
use super::{FnCtxt, Needs};
use super::method::MethodCallee;
use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::TypeVariants::{TyRef, TyAdt, TyStr, TyUint, TyNever, TyTuple, TyChar, TyArray};
+use rustc::ty::TyKind::{Ref, Adt, Str, Uint, Never, Tuple, Char, Array};
use rustc::ty::adjustment::{Adjustment, Adjust, AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc::infer::type_variable::TypeVariableOrigin;
use errors;
Ok(method) => {
let by_ref_binop = !op.node.is_by_value();
if is_assign == IsAssign::Yes || by_ref_binop {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
}
}
if by_ref_binop {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[1].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
format!("cannot use `{}=` on type `{}`",
op.node.as_str(), lhs_ty));
let mut suggested_deref = false;
- if let TyRef(_, mut rty, _) = lhs_ty.sty {
+ if let Ref(_, mut rty, _) = lhs_ty.sty {
if {
!self.infcx.type_moves_by_default(self.param_env,
rty,
.is_ok()
} {
if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
- while let TyRef(_, rty_inner, _) = rty.sty {
+ while let Ref(_, rty_inner, _) = rty.sty {
rty = rty_inner;
}
let msg = &format!(
// This has nothing here because it means we did string
// concatenation (e.g. "Hello " += "World!"). This means
// we don't want the note in the else clause to be emitted
- } else if let ty::TyParam(_) = lhs_ty.sty {
+ } else if let ty::Param(_) = lhs_ty.sty {
// FIXME: point to span of param
err.note(&format!(
"`{}` might need a bound for `{}`",
op.node.as_str(),
lhs_ty);
let mut suggested_deref = false;
- if let TyRef(_, mut rty, _) = lhs_ty.sty {
+ if let Ref(_, mut rty, _) = lhs_ty.sty {
if {
!self.infcx.type_moves_by_default(self.param_env,
rty,
.is_ok()
} {
if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
- while let TyRef(_, rty_inner, _) = rty.sty {
+ while let Ref(_, rty_inner, _) = rty.sty {
rty = rty_inner;
}
let msg = &format!(
// This has nothing here because it means we did string
// concatenation (e.g. "Hello " + "World!"). This means
// we don't want the note in the else clause to be emitted
- } else if let ty::TyParam(_) = lhs_ty.sty {
+ } else if let ty::Param(_) = lhs_ty.sty {
// FIXME: point to span of param
err.note(&format!(
"`{}` might need a bound for `{}`",
// If this function returns true it means a note was printed, so we don't need
// to print the normal "implementation of `std::ops::Add` might be missing" note
match (&lhs_ty.sty, &rhs_ty.sty) {
- (&TyRef(_, l_ty, _), &TyRef(_, r_ty, _))
- if l_ty.sty == TyStr && r_ty.sty == TyStr => {
+ (&Ref(_, l_ty, _), &Ref(_, r_ty, _))
+ if l_ty.sty == Str && r_ty.sty == Str => {
if !is_assign {
err.span_label(expr.span,
"`+` can't be used to concatenate two `&str` strings");
}
true
}
- (&TyRef(_, l_ty, _), &TyAdt(..))
- if l_ty.sty == TyStr && &format!("{:?}", rhs_ty) == "std::string::String" => {
+ (&Ref(_, l_ty, _), &Adt(..))
+ if l_ty.sty == Str && &format!("{:?}", rhs_ty) == "std::string::String" => {
err.span_label(expr.span,
"`+` can't be used to concatenate a `&str` with a `String`");
match (
err.span_label(ex.span, format!("cannot apply unary \
operator `{}`", op.as_str()));
match actual.sty {
- TyUint(_) if op == hir::UnNeg => {
+ Uint(_) if op == hir::UnNeg => {
err.note("unsigned values cannot be negated");
},
- TyStr | TyNever | TyChar | TyTuple(_) | TyArray(_,_) => {},
- TyRef(_, ref lty, _) if lty.sty == TyStr => {},
+ Str | Never | Char | Tuple(_) | Array(_,_) => {},
+ Ref(_, ref lty, _) if lty.sty == Str => {},
_ => {
let missing_trait = match op {
hir::UnNeg => "std::ops::Neg",
// For overloaded derefs, base_ty is the input to `Deref::deref`,
// but it's a reference type uing the same region as the output.
let base_ty = self.resolve_expr_type_adjusted(base);
- if let ty::TyRef(r_ptr, _, _) = base_ty.sty {
+ if let ty::Ref(r_ptr, _, _) = base_ty.sty {
self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr);
}
from_ty,
to_ty);
match (&from_ty.sty, &to_ty.sty) {
- /*From:*/ (&ty::TyRef(from_r, from_ty, _),
- /*To: */ &ty::TyRef(to_r, to_ty, _)) => {
+ /*From:*/ (&ty::Ref(from_r, from_ty, _),
+ /*To: */ &ty::Ref(to_r, to_ty, _)) => {
// Target cannot outlive source, naturally.
self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r);
self.walk_cast(cast_expr, from_ty, to_ty);
}
/*From:*/ (_,
- /*To: */ &ty::TyDynamic(.., r)) => {
+ /*To: */ &ty::Dynamic(.., r)) => {
// When T is existentially quantified as a trait
// `Foo+'to`, it must outlive the region bound `'to`.
self.type_must_outlive(infer::RelateObjectBound(cast_expr.span), from_ty, r);
}
- /*From:*/ (&ty::TyAdt(from_def, _),
- /*To: */ &ty::TyAdt(to_def, _)) if from_def.is_box() && to_def.is_box() => {
+ /*From:*/ (&ty::Adt(from_def, _),
+ /*To: */ &ty::Adt(to_def, _)) if from_def.is_box() && to_def.is_box() => {
self.walk_cast(cast_expr, from_ty.boxed_ty(), to_ty.boxed_ty());
}
fn constrain_callee(&mut self, callee_expr: &hir::Expr) {
let callee_ty = self.resolve_node_type(callee_expr.hir_id);
match callee_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
+ ty::FnDef(..) | ty::FnPtr(_) => { }
_ => {
// this should not happen, but it does if the program is
// erroneous
self.ty_to_string(indexed_ty));
let r_index_expr = ty::ReScope(region::Scope::Node(index_expr.hir_id.local_id));
- if let ty::TyRef(r_ptr, r_ty, _) = indexed_ty.sty {
+ if let ty::Ref(r_ptr, r_ty, _) = indexed_ty.sty {
match r_ty.sty {
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
self.sub_regions(infer::IndexSlice(index_expr.span),
self.tcx.mk_region(r_index_expr), r_ptr);
}
id, mutbl, cmt_borrowed);
let rptr_ty = self.resolve_node_type(id);
- if let ty::TyRef(r, _, _) = rptr_ty.sty {
+ if let ty::Ref(r, _, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl), cmt_borrowed);
}
// how all the types get adjusted.)
match ref_kind {
ty::ImmBorrow => {
- // The reference being reborrowed is a sharable ref of
+ // The reference being reborrowed is a shareable ref of
// type `&'a T`. In this case, it doesn't matter where we
// *found* the `&T` pointer, the memory it references will
// be valid and immutable for `'a`. So we can stop here.
// Extract the type of the closure.
let (closure_def_id, substs) = match self.node_ty(closure_hir_id).sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
- ty::TyGenerator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
- ty::TyError => {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Error => {
// #51714: skip analysis when we have already encountered type errors
return;
}
impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
match t.sty {
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.params.insert(p.idx);
t.super_visit_with(self)
}
ty.fold_with(&mut ty::fold::BottomUpFolder {
tcx: fcx.tcx,
fldop: |ty| {
- if let ty::TyAnon(def_id, substs) = ty.sty {
+ if let ty::Anon(def_id, substs) = ty.sty {
trace!("check_existential_types: anon_ty, {:?}, {:?}", def_id, substs);
let generics = tcx.generics_of(def_id);
// only check named existential types
for (subst, param) in substs.iter().zip(&generics.params) {
match subst.unpack() {
ty::subst::UnpackedKind::Type(ty) => match ty.sty {
- ty::TyParam(..) => {},
+ ty::Param(..) => {},
// prevent `fn foo() -> Foo<u32>` from being defining
_ => {
tcx
}
}
} // if is_named_existential_type
- } // if let TyAnon
+ } // if let Anon
ty
},
reg_op: |reg| reg,
use rustc::hir::def_id::{DefId, DefIndex};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::infer::InferCtxt;
+use rustc::ty::adjustment::{Adjust, Adjustment};
+use rustc::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder};
use rustc::ty::subst::UnpackedKind;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::adjustment::{Adjust, Adjustment};
-use rustc::ty::fold::{TypeFoldable, TypeFolder, BottomUpFolder};
use rustc::util::nodemap::DefIdSet;
+use rustc_data_structures::sync::Lrc;
+use std::mem;
use syntax::ast;
use syntax_pos::Span;
-use std::mem;
-use rustc_data_structures::sync::Lrc;
///////////////////////////////////////////////////////////////////////////
// Entry point
);
debug!(
"used_trait_imports({:?}) = {:?}",
- item_def_id,
- used_trait_imports
+ item_def_id, used_trait_imports
);
wbcx.tables.used_trait_imports = used_trait_imports;
debug!(
"writeback: tables for {:?} are {:#?}",
- item_def_id,
- wbcx.tables
+ item_def_id, wbcx.tables
);
self.tcx.alloc_tables(wbcx.tables)
// operating on scalars, we clear the overload.
fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr) {
match e.node {
- hir::ExprKind::Unary(hir::UnNeg, ref inner) |
- hir::ExprKind::Unary(hir::UnNot, ref inner) => {
+ hir::ExprKind::Unary(hir::UnNeg, ref inner)
+ | hir::ExprKind::Unary(hir::UnNot, ref inner) => {
let inner_ty = self.fcx.node_ty(inner.hir_id);
let inner_ty = self.fcx.resolve_type_vars_if_possible(&inner_ty);
match tables.expr_ty_adjusted(&base).sty {
// All valid indexing looks like this
- ty::TyRef(_, base_ty, _) => {
+ ty::Ref(_, base_ty, _) => {
let index_ty = tables.expr_ty_adjusted(&index);
let index_ty = self.fcx.resolve_type_vars_if_possible(&index_ty);
- if base_ty.builtin_index().is_some()
- && index_ty == self.fcx.tcx.types.usize {
+ if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize {
// Remove the method call record
tables.type_dependent_defs_mut().remove(e.hir_id);
tables.node_substs_mut().remove(e.hir_id);
// of size information - we need to get rid of it
// Since this is "after" the other adjustment to be
// discarded, we do an extra `pop()`
- Some(Adjustment { kind: Adjust::Unsize, .. }) => {
+ Some(Adjustment {
+ kind: Adjust::Unsize,
+ ..
+ }) => {
// So the borrow discard actually happens here
a.pop();
- },
+ }
_ => {}
}
});
}
- },
+ }
// Might encounter non-valid indexes at this point, so there
// has to be a fall-through
- _ => {},
+ _ => {}
}
}
}
}
-
///////////////////////////////////////////////////////////////////////////
// Impl of Visitor for Resolver
//
if let Some(&bm) = self.fcx.tables.borrow().pat_binding_modes().get(p.hir_id) {
self.tables.pat_binding_modes_mut().insert(p.hir_id, bm);
} else {
- self.tcx().sess.delay_span_bug(p.span, "missing binding mode");
+ self.tcx()
+ .sess
+ .delay_span_bug(p.span, "missing binding mode");
}
}
hir::PatKind::Struct(_, ref fields, _) => {
};
debug!(
"Upvar capture for {:?} resolved to {:?}",
- upvar_id,
- new_upvar_capture
+ upvar_id, new_upvar_capture
);
self.tables
.upvar_capture_map
fldop: |ty| {
trace!("checking type {:?}: {:#?}", ty, ty.sty);
// find a type parameter
- if let ty::TyParam(..) = ty.sty {
+ if let ty::Param(..) = ty.sty {
// look it up in the substitution list
assert_eq!(anon_defn.substs.len(), generics.params.len());
for (subst, param) in anon_defn.substs.iter().zip(&generics.params) {
if subst == ty {
// found it in the substitution list, replace with the
// parameter from the existential type
- return self
- .tcx()
+ return self.tcx()
.global_tcx()
.mk_ty_param(param.index, param.name);
}
name: p.name,
};
trace!("replace {:?} with {:?}", region, reg);
- return self.tcx().global_tcx()
+ return self.tcx()
+ .global_tcx()
.mk_region(ty::ReEarlyBound(reg));
}
}
}
trace!("anon_defn: {:#?}", anon_defn);
trace!("generics: {:#?}", generics);
- self.tcx().sess
+ self.tcx()
+ .sess
.struct_span_err(
span,
"non-defining existential type use in defining scope",
span,
format!(
"lifetime `{}` is part of concrete type but not used \
- in parameter list of existential type",
+ in parameter list of existential type",
region,
),
)
self.tcx().global_tcx().mk_region(ty::ReStatic)
}
}
- }
+ },
})
};
- let old = self.tables.concrete_existential_types.insert(def_id, definition_ty);
+ if let ty::Anon(defin_ty_def_id, _substs) = definition_ty.sty {
+ if def_id == defin_ty_def_id {
+ // Concrete type resolved to the existential type itself
+ // Force a cycle error
+ self.tcx().at(span).type_of(defin_ty_def_id);
+ }
+ }
+
+ let old = self.tables
+ .concrete_existential_types
+ .insert(def_id, definition_ty);
if let Some(old) = old {
if old != definition_ty {
span_bug!(
span,
"visit_anon_types tried to write \
- different types for the same existential type: {:?}, {:?}, {:?}",
+ different types for the same existential type: {:?}, {:?}, {:?}",
def_id,
definition_ty,
old,
fn visit_field_id(&mut self, node_id: ast::NodeId) {
let hir_id = self.tcx().hir.node_to_hir_id(node_id);
- if let Some(index) = self.fcx.tables.borrow_mut().field_indices_mut().remove(hir_id) {
+ if let Some(index) = self.fcx
+ .tables
+ .borrow_mut()
+ .field_indices_mut()
+ .remove(hir_id)
+ {
self.tables.field_indices_mut().insert(hir_id, index);
}
}
fn visit_node_id(&mut self, span: Span, hir_id: hir::HirId) {
- // Export associated path extensions and method resultions.
+ // Export associated path extensions and method resolutions.
if let Some(def) = self.fcx
.tables
.borrow_mut()
let resolved_adjustment = self.resolve(&adjustment, &span);
debug!(
"Adjustments for node {:?}: {:?}",
- hir_id,
- resolved_adjustment
+ hir_id, resolved_adjustment
);
self.tables
.adjustments_mut()
let resolved_adjustment = self.resolve(&adjustment, &span);
debug!(
"pat_adjustments for node {:?}: {:?}",
- hir_id,
- resolved_adjustment
+ hir_id, resolved_adjustment
);
self.tables
.pat_adjustments_mut()
fn report_error(&self, t: Ty<'tcx>) {
if !self.tcx.sess.has_errors() {
self.infcx
- .need_type_info_err(Some(self.body.id()), self.span.to_span(&self.tcx), t).emit();
+ .need_type_info_err(Some(self.body.id()), self.span.to_span(&self.tcx), t)
+ .emit();
}
}
}
use lint;
use rustc::ty::TyCtxt;
+use errors::Applicability;
use syntax::ast;
use syntax_pos::Span;
let id = tcx.hir.hir_to_node_id(hir_id);
let msg = "unused extern crate";
tcx.struct_span_lint_node(lint, id, span, msg)
- .span_suggestion_short(span, "remove it", "".to_string())
+ .span_suggestion_short_with_applicability(
+ span,
+ "remove it",
+ String::new(),
+ Applicability::MachineApplicable)
.emit();
continue;
}
fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: DefId) {
match tcx.type_of(impl_did).sty {
- ty::TyAdt(..) => {}
+ ty::Adt(..) => {}
_ => {
// Destructors only work on nominal types.
if let Some(impl_node_id) = tcx.hir.as_local_node_id(impl_did) {
(mt_a.ty, mt_b.ty, unsize_trait, None)
};
let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) {
- (&ty::TyRef(r_a, ty_a, mutbl_a), &ty::TyRef(r_b, ty_b, mutbl_b)) => {
+ (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => {
infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b };
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ref(r_b, ty))
}
- (&ty::TyRef(_, ty_a, mutbl_a), &ty::TyRawPtr(mt_b)) => {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => {
let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ptr(ty))
}
- (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => {
+ (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => {
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ptr(ty))
}
- (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) if def_a.is_struct() &&
+ (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) if def_a.is_struct() &&
def_b.is_struct() => {
if def_a != def_b {
let source_path = gcx.item_path_str(def_a.did);
let self_ty = self.tcx.type_of(def_id);
let lang_items = self.tcx.lang_items();
match self_ty.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
self.check_def_id(item, def.did);
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
self.check_def_id(item, did);
}
- ty::TyDynamic(ref data, ..) if data.principal().is_some() => {
+ ty::Dynamic(ref data, ..) if data.principal().is_some() => {
self.check_def_id(item, data.principal().unwrap().def_id());
}
- ty::TyChar => {
+ ty::Char => {
self.check_primitive_impl(def_id,
lang_items.char_impl(),
None,
"char",
item.span);
}
- ty::TyStr => {
+ ty::Str => {
self.check_primitive_impl(def_id,
lang_items.str_impl(),
lang_items.str_alloc_impl(),
"str",
item.span);
}
- ty::TySlice(slice_item) if slice_item == self.tcx.types.u8 => {
+ ty::Slice(slice_item) if slice_item == self.tcx.types.u8 => {
self.check_primitive_impl(def_id,
lang_items.slice_u8_impl(),
lang_items.slice_u8_alloc_impl(),
"[u8]",
item.span);
}
- ty::TySlice(_) => {
+ ty::Slice(_) => {
self.check_primitive_impl(def_id,
lang_items.slice_impl(),
lang_items.slice_alloc_impl(),
"[T]",
item.span);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
self.check_primitive_impl(def_id,
lang_items.const_ptr_impl(),
None,
"*const T",
item.span);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
self.check_primitive_impl(def_id,
lang_items.mut_ptr_impl(),
None,
"*mut T",
item.span);
}
- ty::TyInt(ast::IntTy::I8) => {
+ ty::Int(ast::IntTy::I8) => {
self.check_primitive_impl(def_id,
lang_items.i8_impl(),
None,
"i8",
item.span);
}
- ty::TyInt(ast::IntTy::I16) => {
+ ty::Int(ast::IntTy::I16) => {
self.check_primitive_impl(def_id,
lang_items.i16_impl(),
None,
"i16",
item.span);
}
- ty::TyInt(ast::IntTy::I32) => {
+ ty::Int(ast::IntTy::I32) => {
self.check_primitive_impl(def_id,
lang_items.i32_impl(),
None,
"i32",
item.span);
}
- ty::TyInt(ast::IntTy::I64) => {
+ ty::Int(ast::IntTy::I64) => {
self.check_primitive_impl(def_id,
lang_items.i64_impl(),
None,
"i64",
item.span);
}
- ty::TyInt(ast::IntTy::I128) => {
+ ty::Int(ast::IntTy::I128) => {
self.check_primitive_impl(def_id,
lang_items.i128_impl(),
None,
"i128",
item.span);
}
- ty::TyInt(ast::IntTy::Isize) => {
+ ty::Int(ast::IntTy::Isize) => {
self.check_primitive_impl(def_id,
lang_items.isize_impl(),
None,
"isize",
item.span);
}
- ty::TyUint(ast::UintTy::U8) => {
+ ty::Uint(ast::UintTy::U8) => {
self.check_primitive_impl(def_id,
lang_items.u8_impl(),
None,
"u8",
item.span);
}
- ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U16) => {
self.check_primitive_impl(def_id,
lang_items.u16_impl(),
None,
"u16",
item.span);
}
- ty::TyUint(ast::UintTy::U32) => {
+ ty::Uint(ast::UintTy::U32) => {
self.check_primitive_impl(def_id,
lang_items.u32_impl(),
None,
"u32",
item.span);
}
- ty::TyUint(ast::UintTy::U64) => {
+ ty::Uint(ast::UintTy::U64) => {
self.check_primitive_impl(def_id,
lang_items.u64_impl(),
None,
"u64",
item.span);
}
- ty::TyUint(ast::UintTy::U128) => {
+ ty::Uint(ast::UintTy::U128) => {
self.check_primitive_impl(def_id,
lang_items.u128_impl(),
None,
"u128",
item.span);
}
- ty::TyUint(ast::UintTy::Usize) => {
+ ty::Uint(ast::UintTy::Usize) => {
self.check_primitive_impl(def_id,
lang_items.usize_impl(),
None,
"usize",
item.span);
}
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
self.check_primitive_impl(def_id,
lang_items.f32_impl(),
lang_items.f32_runtime_impl(),
"f32",
item.span);
}
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
self.check_primitive_impl(def_id,
lang_items.f64_impl(),
lang_items.f64_runtime_impl(),
"f64",
item.span);
}
- ty::TyError => {
+ ty::Error => {
return;
}
_ => {
tcx.specialization_graph_of(trait_def_id);
// check for overlap with the automatic `impl Trait for Trait`
- if let ty::TyDynamic(ref data, ..) = trait_ref.self_ty().sty {
+ if let ty::Dynamic(ref data, ..) = trait_ref.self_ty().sty {
// This is something like impl Trait1 for Trait2. Illegal
// if Trait1 is a supertrait of Trait2 or Trait2 is not object safe.
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
- ty::TyAdt(self_def, _) => Some(self_def.did),
- ty::TyForeign(did) => Some(did),
+ ty::Adt(self_def, _) => Some(self_def.did),
+ ty::Foreign(did) => Some(did),
_ => None,
};
impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
match t.sty {
- ty::TyProjection(..) | ty::TyAnon(..) if !self.include_nonconstraining => {
+ ty::Projection(..) | ty::Anon(..) if !self.include_nonconstraining => {
// projections are not injective
return false;
}
- ty::TyParam(data) => {
+ ty::Param(data) => {
self.parameters.push(Parameter::from(data));
}
_ => {}
"##,
E0087: r##"
-Too many type parameters were supplied for a function. For example:
+Too many type arguments were supplied for a function. For example:
```compile_fail,E0087
fn foo<T>() {}
fn main() {
- foo::<f64, bool>(); // error, expected 1 parameter, found 2 parameters
+ foo::<f64, bool>(); // error: wrong number of type arguments:
+ // expected 1, found 2
}
```
-The number of supplied parameters must exactly match the number of defined type
+The number of supplied arguments must exactly match the number of defined type
parameters.
"##,
E0088: r##"
-You gave too many lifetime parameters. Erroneous code example:
+You gave too many lifetime arguments. Erroneous code example:
```compile_fail,E0088
fn f() {}
fn main() {
- f::<'static>() // error: too many lifetime parameters provided
+ f::<'static>() // error: wrong number of lifetime arguments:
+ // expected 0, found 1
}
```
-Please check you give the right number of lifetime parameters. Example:
+Please check you give the right number of lifetime arguments. Example:
```
fn f() {}
"##,
E0089: r##"
-Not enough type parameters were supplied for a function. For example:
+Too few type arguments were supplied for a function. For example:
```compile_fail,E0089
fn foo<T, U>() {}
fn main() {
- foo::<f64>(); // error, expected 2 parameters, found 1 parameter
+ foo::<f64>(); // error: wrong number of type arguments: expected 2, found 1
}
```
-Note that if a function takes multiple type parameters but you want the compiler
+Note that if a function takes multiple type arguments but you want the compiler
to infer some of them, you can use type placeholders:
```compile_fail,E0089
fn main() {
let x: bool = true;
- foo::<f64>(x); // error, expected 2 parameters, found 1 parameter
+ foo::<f64>(x); // error: wrong number of type arguments:
+ // expected 2, found 1
foo::<_, f64>(x); // same as `foo::<bool, f64>(x)`
}
```
"##,
E0090: r##"
-You gave too few lifetime parameters. Example:
+You gave too few lifetime arguments. Example:
```compile_fail,E0090
fn foo<'a: 'b, 'b: 'a>() {}
fn main() {
- foo::<'static>(); // error, expected 2 lifetime parameters
+ foo::<'static>(); // error: wrong number of lifetime arguments:
+ // expected 2, found 1
}
```
-Please check you give the right number of lifetime parameters. Example:
+Please check you give the right number of lifetime arguments. Example:
```
fn foo<'a: 'b, 'b: 'a>() {}
// }
// ```
//
- // In a concession to backwards compatbility, we continue to
+ // In a concession to backwards compatibility, we continue to
// permit those, so long as the lifetimes aren't used in
// associated types. I believe this is sound, because lifetimes
// used elsewhere are not projected back out.
let main_def_id = tcx.hir.local_def_id(main_id);
let main_t = tcx.type_of(main_def_id);
match main_t.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
match tcx.hir.find(main_id) {
Some(hir_map::NodeItem(it)) => {
match it.node {
let start_def_id = tcx.hir.local_def_id(start_id);
let start_t = tcx.type_of(start_def_id);
match start_t.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
match tcx.hir.find(start_id) {
Some(hir_map::NodeItem(it)) => {
match it.node {
// Calculating the predicate requirements necessary
// for item_did.
//
- // For field of type &'a T (reference) or TyAdt
+ // For field of type &'a T (reference) or Adt
// (struct/enum/union) there will be outlive
// requirements for adt_def.
let field_ty = self.tcx.type_of(field_def.did);
// a predicate requirement of T: 'a (T outlives 'a).
//
// We also want to calculate potential predicates for the T
- ty::TyRef(region, rty, _) => {
- debug!("TyRef");
+ ty::Ref(region, rty, _) => {
+ debug!("Ref");
insert_outlives_predicate(tcx, rty.into(), region, required_predicates);
}
- // For each TyAdt (struct/enum/union) type `Foo<'a, T>`, we
+ // For each Adt (struct/enum/union) type `Foo<'a, T>`, we
// can load the current set of inferred and explicit
// predicates from `global_inferred_outlives` and filter the
// ones that are TypeOutlives.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// First check the inferred predicates
//
// Example 1:
// round we will get `U: 'b`. We then apply the substitution
// `['b => 'a, U => T]` and thus get the requirement that `T:
// 'a` holds for `Foo`.
- debug!("TyAdt");
+ debug!("Adt");
if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did) {
for unsubstituted_predicate in unsubstituted_predicates {
// `unsubstituted_predicate` is `U: 'b` in the
);
}
- ty::TyDynamic(obj, ..) => {
+ ty::Dynamic(obj, ..) => {
// This corresponds to `dyn Trait<..>`. In this case, we should
// use the explicit predicates as well.
// `dyn Trait` at this stage. Therefore when checking explicit
// predicates in `check_explicit_predicates` we need to ignore
// checking the explicit_map for Self type.
- debug!("TyDynamic");
+ debug!("Dynamic");
debug!("field_ty = {}", &field_ty);
debug!("ty in field = {}", &ty);
if let Some(ex_trait_ref) = obj.principal() {
}
}
- ty::TyProjection(obj) => {
+ ty::Projection(obj) => {
// This corresponds to `<T as Foo<'a>>::Bar`. In this case, we should use the
// explicit predicates as well.
- debug!("TyProjection");
+ debug!("Projection");
check_explicit_predicates(
tcx,
&tcx.associated_item(obj.item_def_id).container.id(),
let inferred_start = self.terms_cx.inferred_starts[&id];
let current_item = &CurrentItem { inferred_start };
match tcx.type_of(def_id).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
// in comment at top of module.
}
}
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
self.add_constraints_from_sig(current_item,
tcx.fn_sig(def_id),
self.covariant);
variance);
match ty.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
- ty::TyStr | ty::TyNever | ty::TyForeign(..) => {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) |
+ ty::Str | ty::Never | ty::Foreign(..) => {
// leaf type -- noop
}
- ty::TyFnDef(..) |
- ty::TyGenerator(..) |
- ty::TyClosure(..) => {
+ ty::FnDef(..) |
+ ty::Generator(..) |
+ ty::Closure(..) => {
bug!("Unexpected closure type in variance computation");
}
- ty::TyRef(region, ty, mutbl) => {
+ ty::Ref(region, ty, mutbl) => {
let contra = self.contravariant(variance);
self.add_constraints_from_region(current, region, contra);
self.add_constraints_from_mt(current, &ty::TypeAndMut { ty, mutbl }, variance);
}
- ty::TyArray(typ, _) |
- ty::TySlice(typ) => {
+ ty::Array(typ, _) |
+ ty::Slice(typ) => {
self.add_constraints_from_ty(current, typ, variance);
}
- ty::TyRawPtr(ref mt) => {
+ ty::RawPtr(ref mt) => {
self.add_constraints_from_mt(current, mt, variance);
}
- ty::TyTuple(subtys) => {
+ ty::Tuple(subtys) => {
for &subty in subtys {
self.add_constraints_from_ty(current, subty, variance);
}
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
self.add_constraints_from_substs(current, def.did, substs, variance);
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
let tcx = self.tcx();
self.add_constraints_from_trait_ref(current, data.trait_ref(tcx), variance);
}
- ty::TyAnon(_, substs) => {
+ ty::Anon(_, substs) => {
self.add_constraints_from_invariant_substs(current, substs, variance);
}
- ty::TyDynamic(ref data, r) => {
+ ty::Dynamic(ref data, r) => {
// The type `Foo<T+'a>` is contravariant w/r/t `'a`:
let contra = self.contravariant(variance);
self.add_constraints_from_region(current, r, contra);
}
}
- ty::TyParam(ref data) => {
+ ty::Param(ref data) => {
self.add_constraint(current, data.idx, variance);
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
self.add_constraints_from_sig(current, sig, variance);
}
- ty::TyError => {
+ ty::Error => {
// we encounter this when walking the trait references for object
- // types, where we use TyError as the Self type
+ // types, where we use Error as the Self type
}
- ty::TyGeneratorWitness(..) |
- ty::TyInfer(..) => {
+ ty::GeneratorWitness(..) |
+ ty::Infer(..) => {
bug!("unexpected type encountered in \
variance inference: {}",
ty);
debug!("id={} variances={:?}", id, variances);
// Functions can have unused type parameters: make those invariant.
- if let ty::TyFnDef(..) = tcx.type_of(def_id).sty {
+ if let ty::FnDef(..) = tcx.type_of(def_id).sty {
for variance in &mut variances {
if *variance == ty::Bivariant {
*variance = ty::Invariant;
// In fact, the iteration of an FxHashMap can even vary between platforms,
// since FxHasher has different behavior for 32-bit and 64-bit platforms.
//
- // Obviously, it's extremely undesireable for documentation rendering
+ // Obviously, it's extremely undesirable for documentation rendering
    // to be dependent on the platform it's run on. Apart from being confusing
// to end users, it makes writing tests much more difficult, as predicates
// can appear in any order in the final result.
// predicates and bounds, however, we ensure that for a given codebase, all
// auto-trait impls always render in exactly the same way.
//
- // Using the Debug impementation for sorting prevents us from needing to
+ // Using the Debug implementation for sorting prevents us from needing to
// write quite a bit of almost entirely useless code (e.g. how should two
// Types be sorted relative to each other). It also allows us to solve the
// problem for both WherePredicates and GenericBounds at the same time. This
return impls;
}
let ty = self.cx.tcx.type_of(def_id);
- if self.cx.access_levels.borrow().is_doc_reachable(def_id) || ty.is_primitive() {
- let generics = self.cx.tcx.generics_of(def_id);
- let real_name = name.clone().map(|name| Ident::from_str(&name));
- let param_env = self.cx.tcx.param_env(def_id);
- for &trait_def_id in self.cx.all_traits.iter() {
- if !self.cx.access_levels.borrow().is_doc_reachable(trait_def_id) ||
- self.cx.generated_synthetics
- .borrow_mut()
- .get(&(def_id, trait_def_id))
- .is_some() {
- continue
- }
- self.cx.tcx.for_each_relevant_impl(trait_def_id, ty, |impl_def_id| {
- self.cx.tcx.infer_ctxt().enter(|infcx| {
- let t_generics = infcx.tcx.generics_of(impl_def_id);
- let trait_ref = infcx.tcx.impl_trait_ref(impl_def_id)
- .expect("Cannot get impl trait");
-
- match trait_ref.self_ty().sty {
- ty::TypeVariants::TyParam(_) => {},
- _ => return,
- }
-
- let substs = infcx.fresh_substs_for_item(DUMMY_SP, def_id);
- let ty = ty.subst(infcx.tcx, substs);
- let param_env = param_env.subst(infcx.tcx, substs);
-
- let impl_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
- let trait_ref = trait_ref.subst(infcx.tcx, impl_substs);
-
- // Require the type the impl is implemented on to match
- // our type, and ignore the impl if there was a mismatch.
- let cause = traits::ObligationCause::dummy();
- let eq_result = infcx.at(&cause, param_env)
- .eq(trait_ref.self_ty(), ty);
- if let Ok(InferOk { value: (), obligations }) = eq_result {
- // FIXME(eddyb) ignoring `obligations` might cause false positives.
- drop(obligations);
-
- let may_apply = infcx.predicate_may_hold(&traits::Obligation::new(
- cause.clone(),
- param_env,
- trait_ref.to_predicate(),
- ));
- if !may_apply {
- return
- }
- self.cx.generated_synthetics.borrow_mut()
- .insert((def_id, trait_def_id));
- let trait_ = hir::TraitRef {
- path: get_path_for_type(infcx.tcx,
- trait_def_id,
- hir::def::Def::Trait),
- ref_id: ast::DUMMY_NODE_ID,
- hir_ref_id: hir::DUMMY_HIR_ID,
- };
- let provided_trait_methods =
- infcx.tcx.provided_trait_methods(trait_def_id)
- .into_iter()
- .map(|meth| meth.ident.to_string())
- .collect();
-
- let ty = self.cx.get_real_ty(def_id, def_ctor, &real_name, generics);
- let predicates = infcx.tcx.predicates_of(impl_def_id);
-
- impls.push(Item {
- source: infcx.tcx.def_span(impl_def_id).clean(self.cx),
- name: None,
- attrs: Default::default(),
- visibility: None,
- def_id: self.cx.next_def_id(impl_def_id.krate),
- stability: None,
- deprecation: None,
- inner: ImplItem(Impl {
- unsafety: hir::Unsafety::Normal,
- generics: (t_generics, &predicates).clean(self.cx),
- provided_trait_methods,
- trait_: Some(trait_.clean(self.cx)),
- for_: ty.clean(self.cx),
- items: infcx.tcx.associated_items(impl_def_id)
- .collect::<Vec<_>>()
- .clean(self.cx),
- polarity: None,
- synthetic: false,
- blanket_impl: Some(infcx.tcx.type_of(impl_def_id)
- .clean(self.cx)),
- }),
- });
+ let generics = self.cx.tcx.generics_of(def_id);
+ let real_name = name.clone().map(|name| Ident::from_str(&name));
+ let param_env = self.cx.tcx.param_env(def_id);
+ for &trait_def_id in self.cx.all_traits.iter() {
+ if !self.cx.access_levels.borrow().is_doc_reachable(trait_def_id) ||
+ self.cx.generated_synthetics
+ .borrow_mut()
+ .get(&(def_id, trait_def_id))
+ .is_some() {
+ continue
+ }
+ self.cx.tcx.for_each_relevant_impl(trait_def_id, ty, |impl_def_id| {
+ self.cx.tcx.infer_ctxt().enter(|infcx| {
+ let t_generics = infcx.tcx.generics_of(impl_def_id);
+ let trait_ref = infcx.tcx.impl_trait_ref(impl_def_id)
+ .expect("Cannot get impl trait");
+
+ match trait_ref.self_ty().sty {
+ ty::Param(_) => {},
+ _ => return,
+ }
+
+ let substs = infcx.fresh_substs_for_item(DUMMY_SP, def_id);
+ let ty = ty.subst(infcx.tcx, substs);
+ let param_env = param_env.subst(infcx.tcx, substs);
+
+ let impl_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl_def_id);
+ let trait_ref = trait_ref.subst(infcx.tcx, impl_substs);
+
+ // Require the type the impl is implemented on to match
+ // our type, and ignore the impl if there was a mismatch.
+ let cause = traits::ObligationCause::dummy();
+ let eq_result = infcx.at(&cause, param_env)
+ .eq(trait_ref.self_ty(), ty);
+ if let Ok(InferOk { value: (), obligations }) = eq_result {
+ // FIXME(eddyb) ignoring `obligations` might cause false positives.
+ drop(obligations);
+
+ let may_apply = infcx.predicate_may_hold(&traits::Obligation::new(
+ cause.clone(),
+ param_env,
+ trait_ref.to_predicate(),
+ ));
+ if !may_apply {
+ return
}
- });
+ self.cx.generated_synthetics.borrow_mut()
+ .insert((def_id, trait_def_id));
+ let trait_ = hir::TraitRef {
+ path: get_path_for_type(infcx.tcx,
+ trait_def_id,
+ hir::def::Def::Trait),
+ ref_id: ast::DUMMY_NODE_ID,
+ hir_ref_id: hir::DUMMY_HIR_ID,
+ };
+ let provided_trait_methods =
+ infcx.tcx.provided_trait_methods(trait_def_id)
+ .into_iter()
+ .map(|meth| meth.ident.to_string())
+ .collect();
+
+ let ty = self.cx.get_real_ty(def_id, def_ctor, &real_name, generics);
+ let predicates = infcx.tcx.predicates_of(impl_def_id);
+
+ impls.push(Item {
+ source: infcx.tcx.def_span(impl_def_id).clean(self.cx),
+ name: None,
+ attrs: Default::default(),
+ visibility: None,
+ def_id: self.cx.next_def_id(impl_def_id.krate),
+ stability: None,
+ deprecation: None,
+ inner: ImplItem(Impl {
+ unsafety: hir::Unsafety::Normal,
+ generics: (t_generics, &predicates).clean(self.cx),
+ provided_trait_methods,
+ trait_: Some(trait_.clean(self.cx)),
+ for_: ty.clean(self.cx),
+ items: infcx.tcx.associated_items(impl_def_id)
+ .collect::<Vec<_>>()
+ .clean(self.cx),
+ polarity: None,
+ synthetic: false,
+ blanket_impl: Some(infcx.tcx.type_of(impl_def_id)
+ .clean(self.cx)),
+ }),
+ });
+ }
});
- }
+ });
}
impls
}
True,
/// Denies all configurations.
False,
- /// A generic configration option, e.g. `test` or `target_os = "linux"`.
+ /// A generic configuration option, e.g. `test` or `target_os = "linux"`.
Cfg(Symbol, Option<Symbol>),
/// Negate a configuration requirement, i.e. `not(x)`.
Not(Box<Cfg>),
let ty = cx.tcx.type_of(def_id);
match ty.sty {
- ty::TyAdt(adt, _) => callback(&match adt.adt_kind() {
+ ty::Adt(adt, _) => callback(&match adt.adt_kind() {
AdtKind::Struct => Def::Struct,
AdtKind::Enum => Def::Enum,
AdtKind::Union => Def::Union,
}),
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyStr |
- ty::TyBool |
- ty::TyChar => callback(&move |_: DefId| {
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Str |
+ ty::Bool |
+ ty::Char => callback(&move |_: DefId| {
match ty.sty {
- ty::TyInt(x) => Def::PrimTy(hir::TyInt(x)),
- ty::TyUint(x) => Def::PrimTy(hir::TyUint(x)),
- ty::TyFloat(x) => Def::PrimTy(hir::TyFloat(x)),
- ty::TyStr => Def::PrimTy(hir::TyStr),
- ty::TyBool => Def::PrimTy(hir::TyBool),
- ty::TyChar => Def::PrimTy(hir::TyChar),
+ ty::Int(x) => Def::PrimTy(hir::Int(x)),
+ ty::Uint(x) => Def::PrimTy(hir::Uint(x)),
+ ty::Float(x) => Def::PrimTy(hir::Float(x)),
+ ty::Str => Def::PrimTy(hir::Str),
+ ty::Bool => Def::PrimTy(hir::Bool),
+ ty::Char => Def::PrimTy(hir::Char),
_ => unreachable!(),
}
}),
ret.extend(build_impls(cx, did, true));
clean::EnumItem(build_enum(cx, did))
}
- Def::TyForeign(did) => {
+ Def::ForeignTy(did) => {
record_extern_fqn(cx, did, clean::TypeKind::Foreign);
ret.extend(build_impls(cx, did, false));
clean::ForeignTypeItem
let name = if self.name.is_some() {
self.name.expect("No name provided").clean(cx)
} else {
- "".to_string()
+ String::new()
};
// maintain a stack of mod ids, for doc comment path resolution
Some(did) if cx.tcx.lang_items().fn_trait_kind(did).is_some() => {
assert_eq!(types.len(), 1);
let inputs = match types[0].sty {
- ty::TyTuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
+ ty::Tuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
_ => {
return GenericArgs::AngleBracketed {
lifetimes,
let output = None;
// FIXME(#20299) return type comes from a projection now
// match types[1].sty {
- // ty::TyTuple(ref v) if v.is_empty() => None, // -> ()
+ // ty::Tuple(ref v) if v.is_empty() => None, // -> ()
// _ => Some(types[1].clean(cx))
// };
GenericArgs::Parenthesized {
// collect any late bound regions
let mut late_bounds = vec![];
for ty_s in trait_ref.input_types().skip(1) {
- if let ty::TyTuple(ts) = ty_s.sty {
+ if let ty::Tuple(ts) = ty_s.sty {
for &ty_s in ts {
- if let ty::TyRef(ref reg, _, _) = ty_s.sty {
+ if let ty::Ref(ref reg, _, _) = ty_s.sty {
if let &ty::RegionKind::ReLateBound(..) = *reg {
debug!(" hit an ReLateBound {:?}", reg);
if let Some(Lifetime(name)) = reg.clean(cx) {
values: sig.skip_binder().inputs().iter().map(|t| {
Argument {
type_: t.clean(cx),
- name: names.next().map_or("".to_string(), |name| name.to_string()),
+ name: names.next().map_or(String::new(), |name| name.to_string()),
}
}).collect(),
},
let self_arg_ty = *sig.input(0).skip_binder();
if self_arg_ty == self_ty {
decl.inputs.values[0].type_ = Generic(String::from("Self"));
- } else if let ty::TyRef(_, ty, _) = self_arg_ty.sty {
+ } else if let ty::Ref(_, ty, _) = self_arg_ty.sty {
if ty == self_ty {
match decl.inputs.values[0].type_ {
BorrowedRef{ref mut type_, ..} => {
impl Clean<Type> for hir::Ty {
fn clean(&self, cx: &DocContext) -> Type {
use rustc::hir::*;
+
match self.node {
TyKind::Never => Never,
TyKind::Ptr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)),
if let Some(bounds) = cx.impl_trait_bounds.borrow_mut().remove(&did) {
return ImplTrait(bounds);
}
+ } else if let Def::Existential(did) = path.def {
+ // This block is for returned impl trait only.
+ if let Some(node_id) = cx.tcx.hir.as_local_node_id(did) {
+ let item = cx.tcx.hir.expect_item(node_id);
+ if let hir::ItemKind::Existential(ref ty) = item.node {
+ return ImplTrait(ty.bounds.clean(cx));
+ }
+ }
}
let mut alias = None;
let mut ty_substs = FxHashMap();
let mut lt_substs = FxHashMap();
provided_params.with_generic_args(|generic_args| {
- let mut indices = ty::GenericParamCount {
- lifetimes: 0,
- types: 0
- };
+ let mut indices: GenericParamCount = Default::default();
for param in generics.params.iter() {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => {
TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => {
let mut def = Def::Err;
let ty = hir_ty_to_ty(cx.tcx, self);
- if let ty::TyProjection(proj) = ty.sty {
+ if let ty::Projection(proj) = ty.sty {
def = Def::Trait(proj.trait_ref(cx.tcx).def_id);
}
let trait_path = hir::Path {
impl<'tcx> Clean<Type> for Ty<'tcx> {
fn clean(&self, cx: &DocContext) -> Type {
match self.sty {
- ty::TyNever => Never,
- ty::TyBool => Primitive(PrimitiveType::Bool),
- ty::TyChar => Primitive(PrimitiveType::Char),
- ty::TyInt(int_ty) => Primitive(int_ty.into()),
- ty::TyUint(uint_ty) => Primitive(uint_ty.into()),
- ty::TyFloat(float_ty) => Primitive(float_ty.into()),
- ty::TyStr => Primitive(PrimitiveType::Str),
- ty::TySlice(ty) => Slice(box ty.clean(cx)),
- ty::TyArray(ty, n) => {
+ ty::Never => Never,
+ ty::Bool => Primitive(PrimitiveType::Bool),
+ ty::Char => Primitive(PrimitiveType::Char),
+ ty::Int(int_ty) => Primitive(int_ty.into()),
+ ty::Uint(uint_ty) => Primitive(uint_ty.into()),
+ ty::Float(float_ty) => Primitive(float_ty.into()),
+ ty::Str => Primitive(PrimitiveType::Str),
+ ty::Slice(ty) => Slice(box ty.clean(cx)),
+ ty::Array(ty, n) => {
let mut n = cx.tcx.lift(&n).expect("array lift failed");
if let ConstValue::Unevaluated(def_id, substs) = n.val {
let param_env = cx.tcx.param_env(def_id);
let n = print_const(cx, n);
Array(box ty.clean(cx), n)
}
- ty::TyRawPtr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
- ty::TyRef(r, ty, mutbl) => BorrowedRef {
+ ty::RawPtr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
+ ty::Ref(r, ty, mutbl) => BorrowedRef {
lifetime: r.clean(cx),
mutability: mutbl.clean(cx),
type_: box ty.clean(cx),
},
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => {
- let ty = cx.tcx.lift(self).expect("TyFnPtr lift failed");
+ ty::FnDef(..) |
+ ty::FnPtr(_) => {
+ let ty = cx.tcx.lift(self).expect("FnPtr lift failed");
let sig = ty.fn_sig(cx.tcx);
BareFunction(box BareFunctionDecl {
unsafety: sig.unsafety(),
abi: sig.abi(),
})
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let did = def.did;
let kind = match def.adt_kind() {
AdtKind::Struct => TypeKind::Struct,
is_generic: false,
}
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
inline::record_extern_fqn(cx, did, TypeKind::Foreign);
let path = external_path(cx, &cx.tcx.item_name(did).as_str(),
None, false, vec![], Substs::empty());
is_generic: false,
}
}
- ty::TyDynamic(ref obj, ref reg) => {
+ ty::Dynamic(ref obj, ref reg) => {
if let Some(principal) = obj.principal() {
let did = principal.def_id();
inline::record_extern_fqn(cx, did, TypeKind::Trait);
Never
}
}
- ty::TyTuple(ref t) => Tuple(t.clean(cx)),
+ ty::Tuple(ref t) => Tuple(t.clean(cx)),
- ty::TyProjection(ref data) => data.clean(cx),
+ ty::Projection(ref data) => data.clean(cx),
- ty::TyParam(ref p) => Generic(p.name.to_string()),
+ ty::Param(ref p) => Generic(p.name.to_string()),
- ty::TyAnon(def_id, substs) => {
+ ty::Anon(def_id, substs) => {
// Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
// by looking up the projections associated with the def_id.
let predicates_of = cx.tcx.predicates_of(def_id);
- let substs = cx.tcx.lift(&substs).expect("TyAnon lift failed");
+ let substs = cx.tcx.lift(&substs).expect("Anon lift failed");
let bounds = predicates_of.instantiate(cx.tcx, substs);
let mut regions = vec![];
let mut has_sized = false;
ImplTrait(bounds)
}
- ty::TyClosure(..) | ty::TyGenerator(..) => Tuple(vec![]), // FIXME(pcwalton)
+ ty::Closure(..) | ty::Generator(..) => Tuple(vec![]), // FIXME(pcwalton)
- ty::TyGeneratorWitness(..) => panic!("TyGeneratorWitness"),
- ty::TyInfer(..) => panic!("TyInfer"),
- ty::TyError => panic!("TyError"),
+ ty::GeneratorWitness(..) => panic!("GeneratorWitness"),
+ ty::Infer(..) => panic!("Infer"),
+ ty::Error => panic!("Error"),
}
}
}
ForeignStaticItem(Static {
type_: ty.clean(cx),
mutability: if mutbl {Mutable} else {Immutable},
- expr: "".to_string(),
+ expr: String::new(),
})
}
hir::ForeignItemKind::Type => {
debug!("converting span {:?} to snippet", self.clean(cx));
let sn = match cx.sess().source_map().span_to_snippet(*self) {
Ok(x) => x.to_string(),
- Err(_) => "".to_string()
+ Err(_) => String::new()
};
debug!("got snippet {}", sn);
sn
let is_generic = match path.def {
Def::PrimTy(p) => match p {
- hir::TyStr => return Primitive(PrimitiveType::Str),
- hir::TyBool => return Primitive(PrimitiveType::Bool),
- hir::TyChar => return Primitive(PrimitiveType::Char),
- hir::TyInt(int_ty) => return Primitive(int_ty.into()),
- hir::TyUint(uint_ty) => return Primitive(uint_ty.into()),
- hir::TyFloat(float_ty) => return Primitive(float_ty.into()),
+ hir::Str => return Primitive(PrimitiveType::Str),
+ hir::Bool => return Primitive(PrimitiveType::Bool),
+ hir::Char => return Primitive(PrimitiveType::Char),
+ hir::Int(int_ty) => return Primitive(int_ty.into()),
+ hir::Uint(uint_ty) => return Primitive(uint_ty.into()),
+ hir::Float(float_ty) => return Primitive(float_ty.into()),
},
Def::SelfTy(..) if path.segments.len() == 1 => {
return Generic(keywords::SelfType.name().to_string());
Def::Struct(i) => (i, TypeKind::Struct),
Def::Union(i) => (i, TypeKind::Union),
Def::Mod(i) => (i, TypeKind::Module),
- Def::TyForeign(i) => (i, TypeKind::Foreign),
+ Def::ForeignTy(i) => (i, TypeKind::Foreign),
Def::Const(i) => (i, TypeKind::Const),
Def::Static(i, _) => (i, TypeKind::Static),
Def::Variant(i) => (cx.tcx.parent_def_id(i).expect("cannot get parent def id"),
feature: self.feature.to_string(),
since: match self.level {
attr::Stable {ref since} => since.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
deprecated_since: match self.rustc_depr {
Some(attr::RustcDeprecation {ref since, ..}) => since.to_string(),
- _=> "".to_string(),
+ _=> String::new(),
},
deprecated_reason: match self.rustc_depr {
Some(ref depr) => depr.reason.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
unstable_reason: match self.level {
attr::Unstable { reason: Some(ref reason), .. } => reason.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
issue: match self.level {
attr::Unstable {issue, ..} => Some(issue),
impl Clean<Deprecation> for attr::Deprecation {
fn clean(&self, _: &DocContext) -> Deprecation {
Deprecation {
- since: self.since.as_ref().map_or("".to_string(), |s| s.to_string()),
- note: self.note.as_ref().map_or("".to_string(), |s| s.to_string()),
+ since: self.since.as_ref().map_or(String::new(), |s| s.to_string()),
+ note: self.note.as_ref().map_or(String::new(), |s| s.to_string()),
}
}
}
clean::BorrowedRef{ lifetime: ref l, mutability, type_: ref ty} => {
let lt = match *l {
Some(ref l) => format!("{} ", *l),
- _ => "".to_string(),
+ _ => String::new(),
};
let m = MutableSpace(mutability);
let amp = if f.alternate() {
root_path = page.root_path,
suffix=page.resource_suffix)
} else {
- "".to_owned()
+ String::new()
},
content = *t,
root_path = page.root_path,
css_class = page.css_class,
logo = if layout.logo.is_empty() {
- "".to_string()
+ String::new()
} else {
format!("<a href='{}{}/index.html'>\
<img src='{}' alt='logo' width='100'></a>",
description = page.description,
keywords = page.keywords,
favicon = if layout.favicon.is_empty() {
- "".to_string()
+ String::new()
} else {
format!(r#"<link rel="shortcut icon" href="{}">"#, layout.favicon)
},
// the access levels from crateanalysis.
pub access_levels: Arc<AccessLevels<DefId>>,
- /// The version of the crate being documented, if given fron the `--crate-version` flag.
+ /// The version of the crate being documented, if given from the `--crate-version` flag.
pub crate_version: Option<String>,
// Private fields only used when initially crawling a crate to build a cache
local_sources: FxHashMap(),
issue_tracker_base_url: None,
layout: layout::Layout {
- logo: "".to_string(),
- favicon: "".to_string(),
+ logo: String::new(),
+ favicon: String::new(),
external_html: external_html.clone(),
krate: krate.name.clone(),
},
!chr.is_whitespace()
})
}).collect::<Vec<_>>().join("\n"),
- None => "".to_string()
+ None => String::new()
}
}
stab_docs = stab_docs,
docs = MarkdownSummaryLine(doc_value, &myitem.links()),
class = myitem.type_(),
- stab = myitem.stability_class().unwrap_or("".to_string()),
+ stab = myitem.stability_class().unwrap_or(String::new()),
unsafety_flag = unsafety_flag,
href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
title_type = myitem.type_(),
var themesWidth = null;
+ var titleBeforeSearch = document.title;
+
if (!String.prototype.startsWith) {
String.prototype.startsWith = function(searchString, position) {
position = position || 0;
ev.preventDefault();
addClass(search, "hidden");
removeClass(document.getElementById("main"), "hidden");
+ document.title = titleBeforeSearch;
}
defocusSearchBar();
}
Ok((ty.def, Some(format!("{}.{}", out, item_name))))
} else {
match cx.tcx.type_of(did).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if let Some(item) = if def.is_enum() {
def.all_fields().find(|item| item.ident.name == item_name)
} else {
let path = ast::Path { segments: vec![segment], span: DUMMY_SP };
let mut resolver = cx.resolver.borrow_mut();
let mark = Mark::root();
- let res = resolver
- .resolve_macro_to_def_inner(mark, &path, MacroKind::Bang, false);
- if let Ok(def) = res {
+ if let Ok(def) = resolver.resolve_macro_to_def_inner(&path, MacroKind::Bang, mark, &[], false) {
if let SyntaxExtension::DeclMacro { .. } = *resolver.get_macro(def) {
return Some(def);
}
}
const PRIMITIVES: &[(&str, Def)] = &[
- ("u8", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U8))),
- ("u16", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U16))),
- ("u32", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U32))),
- ("u64", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U64))),
- ("u128", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U128))),
- ("usize", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::Usize))),
- ("i8", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I8))),
- ("i16", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I16))),
- ("i32", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I32))),
- ("i64", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I64))),
- ("i128", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I128))),
- ("isize", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::Isize))),
- ("f32", Def::PrimTy(hir::PrimTy::TyFloat(syntax::ast::FloatTy::F32))),
- ("f64", Def::PrimTy(hir::PrimTy::TyFloat(syntax::ast::FloatTy::F64))),
- ("str", Def::PrimTy(hir::PrimTy::TyStr)),
- ("bool", Def::PrimTy(hir::PrimTy::TyBool)),
- ("char", Def::PrimTy(hir::PrimTy::TyChar)),
+ ("u8", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U8))),
+ ("u16", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U16))),
+ ("u32", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U32))),
+ ("u64", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U64))),
+ ("u128", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U128))),
+ ("usize", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::Usize))),
+ ("i8", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I8))),
+ ("i16", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I16))),
+ ("i32", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I32))),
+ ("i64", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I64))),
+ ("i128", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I128))),
+ ("isize", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::Isize))),
+ ("f32", Def::PrimTy(hir::PrimTy::Float(syntax::ast::FloatTy::F32))),
+ ("f64", Def::PrimTy(hir::PrimTy::Float(syntax::ast::FloatTy::F64))),
+ ("str", Def::PrimTy(hir::PrimTy::Str)),
+ ("bool", Def::PrimTy(hir::PrimTy::Bool)),
+ ("char", Def::PrimTy(hir::PrimTy::Char)),
];
fn is_primitive(path_str: &str, is_val: bool) -> Option<Def> {
Def::Struct(did) |
Def::Union(did) |
Def::Enum(did) |
- Def::TyForeign(did) |
+ Def::ForeignTy(did) |
Def::TyAlias(did) if !self_is_hidden => {
self.cx.access_levels.borrow_mut().map.insert(did, AccessLevel::Public);
},
name = "serialize"
path = "lib.rs"
crate-type = ["dylib", "rlib"]
+
+[dependencies]
+smallvec = { version = "0.6.5", features = ["union"] }
use std::rc::Rc;
use std::sync::Arc;
-impl<
- T: Encodable
-> Encodable for LinkedList<T> {
+use smallvec::{Array, SmallVec};
+
+impl<A> Encodable for SmallVec<A>
+ where A: Array,
+ A::Item: Encodable
+{
+ fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+ s.emit_seq(self.len(), |s| {
+ for (i, e) in self.iter().enumerate() {
+ s.emit_seq_elt(i, |s| e.encode(s))?;
+ }
+ Ok(())
+ })
+ }
+}
+
+impl<A> Decodable for SmallVec<A>
+ where A: Array,
+ A::Item: Decodable
+{
+ fn decode<D: Decoder>(d: &mut D) -> Result<SmallVec<A>, D::Error> {
+ d.read_seq(|d, len| {
+ let mut vec = SmallVec::with_capacity(len);
+ // FIXME(#48994) - could just be collected into a Result<SmallVec, D::Error>
+ for i in 0..len {
+ vec.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+ }
+ Ok(vec)
+ })
+ }
+}
+
+impl<T: Encodable> Encodable for LinkedList<T> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_seq(self.len(), |s| {
for (i, e) in self.iter().enumerate() {
}
}
-impl<
- K: Encodable + PartialEq + Ord,
- V: Encodable
-> Encodable for BTreeMap<K, V> {
+impl<K, V> Encodable for BTreeMap<K, V>
+ where K: Encodable + PartialEq + Ord,
+ V: Encodable
+{
fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> {
e.emit_map(self.len(), |e| {
let mut i = 0;
}
}
-impl<
- K: Decodable + PartialEq + Ord,
- V: Decodable
-> Decodable for BTreeMap<K, V> {
+impl<K, V> Decodable for BTreeMap<K, V>
+ where K: Decodable + PartialEq + Ord,
+ V: Decodable
+{
fn decode<D: Decoder>(d: &mut D) -> Result<BTreeMap<K, V>, D::Error> {
d.read_map(|d, len| {
let mut map = BTreeMap::new();
}
}
-impl<
- T: Encodable + PartialEq + Ord
-> Encodable for BTreeSet<T> {
+impl<T> Encodable for BTreeSet<T>
+ where T: Encodable + PartialEq + Ord
+{
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_seq(self.len(), |s| {
let mut i = 0;
}
}
-impl<
- T: Decodable + PartialEq + Ord
-> Decodable for BTreeSet<T> {
+impl<T> Decodable for BTreeSet<T>
+ where T: Decodable + PartialEq + Ord
+{
fn decode<D: Decoder>(d: &mut D) -> Result<BTreeSet<T>, D::Error> {
d.read_seq(|d, len| {
let mut set = BTreeSet::new();
}
}
+#[inline]
pub fn write_signed_leb128(out: &mut Vec<u8>, value: i128) {
write_signed_leb128_to(value, |v| write_to_vec(out, v))
}
pub use self::serialize::{SpecializationError, SpecializedEncoder, SpecializedDecoder};
pub use self::serialize::{UseSpecializedEncodable, UseSpecializedDecodable};
+extern crate smallvec;
+
mod serialize;
mod collection_impls;
self.data
}
+ #[inline]
pub fn emit_raw_bytes(&mut self, s: &[u8]) {
self.data.extend_from_slice(s);
}
self.position += bytes;
}
+ #[inline]
pub fn read_raw_bytes(&mut self, s: &mut [u8]) -> Result<(), String> {
let start = self.position;
let end = start + s.len();
Ok(Cow::Borrowed(s))
}
+ #[inline]
fn error(&mut self, err: &str) -> Self::Error {
err.to_string()
}
self.emit_enum("Option", f)
}
+ #[inline]
fn emit_option_none(&mut self) -> Result<(), Self::Error> {
self.emit_enum_variant("None", 0, 0, |_| Ok(()))
}
}
impl<T:Encodable> Encodable for Rc<T> {
- #[inline]
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
(**self).encode(s)
}
}
impl<T:Decodable> Decodable for Rc<T> {
- #[inline]
fn decode<D: Decoder>(d: &mut D) -> Result<Rc<T>, D::Error> {
Ok(Rc::new(Decodable::decode(d)?))
}
}
}
-impl<T:Decodable+ToOwned> Decodable for Cow<'static, [T]> where [T]: ToOwned<Owned = Vec<T>> {
+impl<T:Decodable+ToOwned> Decodable for Cow<'static, [T]>
+ where [T]: ToOwned<Owned = Vec<T>>
+{
fn decode<D: Decoder>(d: &mut D) -> Result<Cow<'static, [T]>, D::Error> {
d.read_seq(|d, len| {
let mut v = Vec::with_capacity(len);
// make a RawBucket point to invalid memory using safe code.
impl<K, V> RawBucket<K, V> {
unsafe fn hash(&self) -> *mut HashUint {
- self.hash_start.offset(self.idx as isize)
+ self.hash_start.add(self.idx)
}
unsafe fn pair(&self) -> *mut (K, V) {
- self.pair_start.offset(self.idx as isize) as *mut (K, V)
+ self.pair_start.add(self.idx) as *mut (K, V)
}
unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
(self.hash(), self.pair())
/// This function acquires exclusive access to the task context.
///
/// Panics if no task has been set or if the task context has already been
-/// retrived by a surrounding call to get_task_cx.
+/// retrieved by a surrounding call to get_task_cx.
pub fn get_task_cx<F, R>(f: F) -> R
where
F: FnOnce(&mut task::Context) -> R
// Find the last newline character in the buffer provided. If found then
// we're going to write all the data up to that point and then flush,
- // otherewise we just write the whole block to the underlying writer.
+ // otherwise we just write the whole block to the underlying writer.
let i = match memchr::memrchr(b'\n', buf) {
Some(i) => i,
None => return self.inner.write(buf),
#![feature(libc)]
#![feature(link_args)]
#![feature(linkage)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(needs_panic_runtime)]
#![feature(never_type)]
#![cfg_attr(not(stage0), feature(nll))]
#[cfg(test)]
mod tests {
- // test the implementations for the current plattform
+ // test the implementations for the current platform
use super::{memchr, memrchr};
#[test]
/// # Examples
///
/// ```
- /// #![feature(ip_constructors)]
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::LOCALHOST;
/// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
/// ```
- #[unstable(feature = "ip_constructors",
- reason = "requires greater scrutiny before stabilization",
- issue = "44582")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);
/// An IPv4 address representing an unspecified address: 0.0.0.0
/// # Examples
///
/// ```
- /// #![feature(ip_constructors)]
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
/// ```
- #[unstable(feature = "ip_constructors",
- reason = "requires greater scrutiny before stabilization",
- issue = "44582")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);
/// An IPv4 address representing the broadcast address: 255.255.255.255
/// # Examples
///
/// ```
- /// #![feature(ip_constructors)]
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::BROADCAST;
/// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
/// ```
- #[unstable(feature = "ip_constructors",
- reason = "requires greater scrutiny before stabilization",
- issue = "44582")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);
/// Returns the four eight-bit integers that make up this address.
/// # Examples
///
/// ```
- /// #![feature(ip_constructors)]
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::LOCALHOST;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
/// ```
- #[unstable(feature = "ip_constructors",
- reason = "requires greater scrutiny before stabilization",
- issue = "44582")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// An IPv6 address representing the unspecified address: `::`
/// # Examples
///
/// ```
- /// #![feature(ip_constructors)]
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
/// ```
- #[unstable(feature = "ip_constructors",
- reason = "requires greater scrutiny before stabilization",
- issue = "44582")]
+ #[stable(feature = "ip_constructors", since = "1.30.0")]
pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
/// Returns the eight 16-bit segments that make up this address.
#[unstable(feature = "libstd_sys_internals",
reason = "used by the panic! macro",
issue = "0")]
+#[cfg_attr(not(any(stage0, test)), lang = "begin_panic")]
#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
pub fn begin_panic<M: Any + Send>(msg: M, file_line_col: &(&'static str, u32, u32)) -> ! {
// Note that this should be the only allocation performed in this code path.
/// happens-before relation between the closure and code executing after the
/// return).
///
- /// If the given closure recusively invokes `call_once` on the same `Once`
+ /// If the given closure recursively invokes `call_once` on the same `Once`
/// instance the exact behavior is not specified, allowed outcomes are
/// a panic or a deadlock.
///
use sync::atomic::{AtomicBool, Ordering};
// Kernel prior to 4.5 don't have copy_file_range
- // We store the availability in a global to avoid unneccessary syscalls
+ // We store the availability in a global to avoid unnecessary syscalls
static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true);
unsafe fn copy_file_range(
#[cfg(not(target_os = "linux"))]
const SOCK_CLOEXEC: c_int = 0;
-// Another conditional contant for name resolution: Macos et iOS use
+// Another conditional constant for name resolution: Macos et iOS use
// SO_NOSIGPIPE as a setsockopt flag to disable SIGPIPE emission on socket.
// Other platforms do otherwise.
#[cfg(target_vendor = "apple")]
impl Mutex {
pub const fn new() -> Mutex {
Mutex {
+ // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
+ // initializing an SRWLOCK here.
lock: AtomicUsize::new(0),
held: UnsafeCell::new(false),
}
if v.capacity() == v.len() {
v.reserve(1);
}
- slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
+ slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()),
v.capacity() - v.len())
}
pub unsafe fn slice_unchecked(s: &Wtf8, begin: usize, end: usize) -> &Wtf8 {
// memory layout of an &[u8] and &Wtf8 are the same
Wtf8::from_bytes_unchecked(slice::from_raw_parts(
- s.bytes.as_ptr().offset(begin as isize),
+ s.bytes.as_ptr().add(begin),
end - begin
))
}
rustc_errors = { path = "../librustc_errors" }
rustc_data_structures = { path = "../librustc_data_structures" }
rustc_target = { path = "../librustc_target" }
+smallvec = { version = "0.6.5", features = ["union"] }
ExprKind::Match(..) => ExprPrecedence::Match,
ExprKind::Closure(..) => ExprPrecedence::Closure,
ExprKind::Block(..) => ExprPrecedence::Block,
- ExprKind::Catch(..) => ExprPrecedence::Catch,
+ ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,
ExprKind::Async(..) => ExprPrecedence::Async,
ExprKind::Assign(..) => ExprPrecedence::Assign,
ExprKind::AssignOp(..) => ExprPrecedence::AssignOp,
/// created during lowering cannot be made the parent of any other
/// preexisting defs.
Async(CaptureBy, NodeId, P<Block>),
- /// A catch block (`catch { ... }`)
- Catch(P<Block>),
+ /// A try block (`try { ... }`)
+ TryBlock(P<Block>),
/// An assignment (`a = foo()`)
Assign(P<Expr>, P<Expr>),
```ignore (limited to a warning during 2018 edition development)
#![feature(rust_2018_preview)]
-#![feature(raw_identifiers)] // error: the feature `raw_identifiers` is
- // included in the Rust 2018 edition
+#![feature(impl_header_lifetime_elision)] // error: the feature
+ // `impl_header_lifetime_elision` is
+ // included in the Rust 2018 edition
```
"##,
let sym = Ident::with_empty_ctxt(Symbol::gensym(&format!(
"__register_diagnostic_{}", code
)));
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
ecx.item_mod(
span,
span,
),
);
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
P(ast::Item {
ident: *name,
attrs: Vec::new(),
// Use a macro because forwarding to a simple function has type system issues
macro_rules! make_stmts_default {
($me:expr) => {
- $me.make_expr().map(|e| OneVector::one(ast::Stmt {
+ $me.make_expr().map(|e| smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: ast::StmtKind::Expr(e),
- }))
+ }])
}
}
}
fn make_stmts(self: Box<DummyResult>) -> Option<OneVector<ast::Stmt>> {
- Some(OneVector::one(ast::Stmt {
+ Some(smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
node: ast::StmtKind::Expr(DummyResult::raw_expr(self.span)),
span: self.span,
- }))
+ }])
}
fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> {
fn find_legacy_attr_invoc(&mut self, attrs: &mut Vec<Attribute>, allow_derive: bool)
-> Option<Attribute>;
- fn resolve_invoc(&mut self, invoc: &Invocation, scope: Mark, force: bool)
- -> Result<Option<Lrc<SyntaxExtension>>, Determinacy>;
- fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, kind: MacroKind, force: bool)
- -> Result<Lrc<SyntaxExtension>, Determinacy>;
+ fn resolve_macro_invocation(&mut self, invoc: &Invocation, scope: Mark, force: bool)
+ -> Result<Option<Lrc<SyntaxExtension>>, Determinacy>;
+ fn resolve_macro_path(&mut self, path: &ast::Path, kind: MacroKind, scope: Mark,
+ derives_in_scope: &[ast::Path], force: bool)
+ -> Result<Lrc<SyntaxExtension>, Determinacy>;
+
fn check_unused_macros(&self);
}
fn resolve_imports(&mut self) {}
fn find_legacy_attr_invoc(&mut self, _attrs: &mut Vec<Attribute>, _allow_derive: bool)
-> Option<Attribute> { None }
- fn resolve_invoc(&mut self, _invoc: &Invocation, _scope: Mark, _force: bool)
- -> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
+ fn resolve_macro_invocation(&mut self, _invoc: &Invocation, _scope: Mark, _force: bool)
+ -> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
Err(Determinacy::Determined)
}
- fn resolve_macro(&mut self, _scope: Mark, _path: &ast::Path, _kind: MacroKind,
- _force: bool) -> Result<Lrc<SyntaxExtension>, Determinacy> {
+ fn resolve_macro_path(&mut self, _path: &ast::Path, _kind: MacroKind, _scope: Mark,
+ _derives_in_scope: &[ast::Path], _force: bool)
+ -> Result<Lrc<SyntaxExtension>, Determinacy> {
Err(Determinacy::Determined)
}
fn check_unused_macros(&self) {}
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
+use std::iter::FromIterator;
use std::{iter, mem};
use std::rc::Rc;
use std::path::PathBuf;
self.expand_fragment(AstFragment::$Kind(ast)).$make_ast()
})*)*
$($(fn $fold_ast_elt(&mut self, ast_elt: <$AstTy as IntoIterator>::Item) -> $AstTy {
- self.expand_fragment(AstFragment::$Kind(OneVector::one(ast_elt))).$make_ast()
+ self.expand_fragment(AstFragment::$Kind(smallvec![ast_elt])).$make_ast()
})*)*
}
InvocationKind::Derive { ref path, .. } => path.span,
}
}
-
- pub fn path(&self) -> Option<&Path> {
- match self.kind {
- InvocationKind::Bang { ref mac, .. } => Some(&mac.node.path),
- InvocationKind::Attr { attr: Some(ref attr), .. } => Some(&attr.path),
- InvocationKind::Attr { attr: None, .. } => None,
- InvocationKind::Derive { ref path, .. } => Some(path),
- }
- }
}
pub struct MacroExpander<'a, 'b:'a> {
let orig_mod_span = krate.module.inner;
- let krate_item = AstFragment::Items(OneVector::one(P(ast::Item {
+ let krate_item = AstFragment::Items(smallvec![P(ast::Item {
attrs: krate.attrs,
span: krate.span,
node: ast::ItemKind::Mod(krate.module),
id: ast::DUMMY_NODE_ID,
vis: respan(krate.span.shrink_to_lo(), ast::VisibilityKind::Public),
tokens: None,
- })));
+ })]);
match self.expand_fragment(krate_item).make_items().pop().map(P::into_inner) {
Some(ast::Item { attrs, node: ast::ItemKind::Mod(module), .. }) => {
// we'll be able to immediately resolve most of imported macros.
self.resolve_imports();
- // Resolve paths in all invocations and produce ouput expanded fragments for them, but
+ // Resolve paths in all invocations and produce output expanded fragments for them, but
// do not insert them into our input AST fragment yet, only store in `expanded_fragments`.
// The output fragments also go through expansion recursively until no invocations are left.
// Unresolved macros produce dummy outputs as a recovery measure.
let scope =
if self.monotonic { invoc.expansion_data.mark } else { orig_expansion_data.mark };
- let ext = match self.cx.resolver.resolve_invoc(&invoc, scope, force) {
+ let ext = match self.cx.resolver.resolve_macro_invocation(&invoc, scope, force) {
Ok(ext) => Some(ext),
Err(Determinacy::Determined) => None,
Err(Determinacy::Undetermined) => {
for path in &traits {
let mark = Mark::fresh(self.cx.current_expansion.mark);
derives.push(mark);
- let item = match self.cx.resolver.resolve_macro(
- Mark::root(), path, MacroKind::Derive, false) {
+ let item = match self.cx.resolver.resolve_macro_path(
+ path, MacroKind::Derive, Mark::root(), &[], false) {
Ok(ext) => match *ext {
BuiltinDerive(..) => item_with_markers.clone(),
_ => item.clone(),
None => return,
};
- fragment.visit_with(&mut DisallowModules {
+ fragment.visit_with(&mut DisallowMacros {
span,
parse_sess: self.cx.parse_sess,
});
- struct DisallowModules<'a> {
+ struct DisallowMacros<'a> {
span: Span,
parse_sess: &'a ParseSess,
}
- impl<'ast, 'a> Visitor<'ast> for DisallowModules<'a> {
+ impl<'ast, 'a> Visitor<'ast> for DisallowMacros<'a> {
fn visit_item(&mut self, i: &'ast ast::Item) {
- let name = match i.node {
- ast::ItemKind::Mod(_) => Some("modules"),
- ast::ItemKind::MacroDef(_) => Some("macro definitions"),
- _ => None,
- };
- if let Some(name) = name {
+ if let ast::ItemKind::MacroDef(_) = i.node {
emit_feature_err(
self.parse_sess,
"proc_macro_gen",
self.span,
GateIssue::Language,
- &format!("procedural macros cannot expand to {}", name),
+ &format!("procedural macros cannot expand to macro definitions"),
);
}
visit::walk_item(self, i);
ui
});
- OneVector::many(
+ OneVector::from_iter(
self.fold_unnameable(item).into_iter()
.chain(self.fold_unnameable(use_item)))
} else {
match kind {
AstFragmentKind::Expr => AstFragment::Expr(expr_placeholder()),
AstFragmentKind::OptExpr => AstFragment::OptExpr(Some(expr_placeholder())),
- AstFragmentKind::Items => AstFragment::Items(OneVector::one(P(ast::Item {
+ AstFragmentKind::Items => AstFragment::Items(smallvec![P(ast::Item {
id, span, ident, vis, attrs,
node: ast::ItemKind::Mac(mac_placeholder()),
tokens: None,
- }))),
- AstFragmentKind::TraitItems => AstFragment::TraitItems(OneVector::one(ast::TraitItem {
+ })]),
+ AstFragmentKind::TraitItems => AstFragment::TraitItems(smallvec![ast::TraitItem {
id, span, ident, attrs, generics,
node: ast::TraitItemKind::Macro(mac_placeholder()),
tokens: None,
- })),
- AstFragmentKind::ImplItems => AstFragment::ImplItems(OneVector::one(ast::ImplItem {
+ }]),
+ AstFragmentKind::ImplItems => AstFragment::ImplItems(smallvec![ast::ImplItem {
id, span, ident, vis, attrs, generics,
node: ast::ImplItemKind::Macro(mac_placeholder()),
defaultness: ast::Defaultness::Final,
tokens: None,
- })),
+ }]),
AstFragmentKind::ForeignItems =>
- AstFragment::ForeignItems(OneVector::one(ast::ForeignItem {
+ AstFragment::ForeignItems(smallvec![ast::ForeignItem {
id, span, ident, vis, attrs,
node: ast::ForeignItemKind::Macro(mac_placeholder()),
- })),
+ }]),
AstFragmentKind::Pat => AstFragment::Pat(P(ast::Pat {
id, span, node: ast::PatKind::Mac(mac_placeholder()),
})),
AstFragmentKind::Ty => AstFragment::Ty(P(ast::Ty {
id, span, node: ast::TyKind::Mac(mac_placeholder()),
})),
- AstFragmentKind::Stmts => AstFragment::Stmts(OneVector::one({
+ AstFragmentKind::Stmts => AstFragment::Stmts(smallvec![{
let mac = P((mac_placeholder(), ast::MacStmtStyle::Braces, ThinVec::new()));
ast::Stmt { id, span, node: ast::StmtKind::Mac(mac) }
- })),
+ }]),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> OneVector<P<ast::Item>> {
match item.node {
ast::ItemKind::Mac(_) => return self.remove(item.id).make_items(),
- ast::ItemKind::MacroDef(_) => return OneVector::one(item),
+ ast::ItemKind::MacroDef(_) => return smallvec![item],
_ => {}
}
Ok(..) => {
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
- cx.source_map().new_source_file(file.into(), "".to_string());
+ cx.source_map().new_source_file(file.into(), String::new());
base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::new(bytes))))
}
// A queue of possible matcher positions. We initialize it with the matcher position in which
// the "dot" is before the first token of the first token tree in `ms`. `inner_parse_loop` then
- // processes all of these possible matcher positions and produces posible next positions into
+ // processes all of these possible matcher positions and produces possible next positions into
// `next_items`. After some post-processing, the contents of `next_items` replenish `cur_items`
// and we start over again.
//
// This MatcherPos instance is allocated on the stack. All others -- and
// there are frequently *no* others! -- are allocated on the heap.
let mut initial = initial_matcher_pos(ms, parser.span.lo());
- let mut cur_items = OneVector::one(MatcherPosHandle::Ref(&mut initial));
+ let mut cur_items = smallvec![MatcherPosHandle::Ref(&mut initial)];
let mut next_items = Vec::new();
loop {
),
);
}
- // If there are no posible next positions AND we aren't waiting for the black-box parser,
+ // If there are no possible next positions AND we aren't waiting for the black-box parser,
// then their is a syntax error.
else if bb_items.is_empty() && next_items.is_empty() {
return Failure(parser.span, parser.token);
frag_span: Span) -> bool {
match frag_name {
"item" | "block" | "stmt" | "expr" | "pat" | "lifetime" |
- "path" | "ty" | "ident" | "meta" | "tt" | "" => true,
+ "path" | "ty" | "ident" | "meta" | "tt" | "vis" | "" => true,
"literal" => {
if !features.macro_literal_matcher &&
!attr::contains_name(attrs, "allow_internal_unstable") {
}
true
},
- "vis" => {
- if !features.macro_vis_matcher &&
- !attr::contains_name(attrs, "allow_internal_unstable") {
- let explain = feature_gate::EXPLAIN_VIS_MATCHER;
- emit_feature_err(sess,
- "macro_vis_matcher",
- frag_span,
- GateIssue::Language,
- explain);
- }
- true
- },
_ => false,
}
}
}
// `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate` special
- // metavariable that names the crate of the invokation.
+ // metavariable that names the crate of the invocation.
Some(tokenstream::TokenTree::Token(ident_span, ref token)) if token.is_ident() => {
let (ident, is_raw) = token.ident().unwrap();
let span = ident_span.with_lo(span.lo());
interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
src: Vec<quoted::TokenTree>)
-> TokenStream {
- let mut stack = OneVector::one(Frame::new(src));
+ let mut stack: OneVector<Frame> = smallvec![Frame::new(src)];
let interpolations = interp.unwrap_or_else(HashMap::new); /* just a convenience */
let mut repeats = Vec::new();
let mut result: Vec<TokenStream> = Vec::new();
// Allows comparing raw pointers during const eval
(active, const_compare_raw_pointers, "1.27.0", Some(53020), None),
+ // Allows panicking during const eval (produces compile-time errors)
+ (active, const_panic, "1.30.0", Some(51999), None),
+
// Allows using #[prelude_import] on glob `use` items.
//
// rustc internal
// `extern "x86-interrupt" fn()`
(active, abi_x86_interrupt, "1.17.0", Some(40180), None),
- // Allows the `catch {...}` expression
- (active, catch_expr, "1.17.0", Some(31436), None),
+ // Allows the `try {...}` expression
+ (active, try_blocks, "1.29.0", Some(31436), None),
// Used to preserve symbols (see llvm.used)
(active, used, "1.18.0", Some(40289), None),
// Allows overlapping impls of marker traits
(active, overlapping_marker_traits, "1.18.0", Some(29864), None),
- // Allows use of the :vis macro fragment specifier
- (active, macro_vis_matcher, "1.18.0", Some(41022), None),
-
// rustc internal
(active, abi_thiscall, "1.19.0", None, None),
// `use path as _;` and `extern crate c as _;`
(active, underscore_imports, "1.26.0", Some(48216), None),
- // Allows keywords to be escaped for use as identifiers
- (active, raw_identifiers, "1.26.0", Some(48589), Some(Edition::Edition2018)),
-
// Allows macro invocations in `extern {}` blocks
(active, macros_in_extern, "1.27.0", Some(49476), None),
(active, tbm_target_feature, "1.27.0", Some(44839), None),
(active, wasm_target_feature, "1.30.0", Some(44839), None),
- // Allows macro invocations of the form `#[foo::bar]`
- (active, proc_macro_path_invoc, "1.27.0", Some(38356), None),
-
// Allows macro invocations on modules expressions and statements and
// procedural macros to expand to non-items.
(active, proc_macro_mod, "1.27.0", Some(38356), None),
// Access to crate names passed via `--extern` through prelude
(active, extern_prelude, "1.27.0", Some(44660), Some(Edition::Edition2018)),
- // Scoped attributes
- (active, tool_attributes, "1.25.0", Some(44690), None),
// Scoped lints
(active, tool_lints, "1.28.0", Some(44690), None),
// 'a: { break 'a; }
(active, label_break_value, "1.28.0", Some(48594), None),
+ // Integer match exhaustiveness checking
+ (active, exhaustive_integer_patterns, "1.30.0", Some(50907), None),
+
// #[panic_implementation]
(active, panic_implementation, "1.28.0", Some(44489), None),
(accepted, repr_transparent, "1.28.0", Some(43036), None),
// Defining procedural macros in `proc-macro` crates
(accepted, proc_macro, "1.29.0", Some(38356), None),
+ // Allows use of the :vis macro fragment specifier
+ (accepted, macro_vis_matcher, "1.29.0", Some(41022), None),
// Allows importing and reexporting macros with `use`,
// enables macro modularization in general.
(accepted, use_extern_macros, "1.30.0", Some(35896), None),
+ // Allows keywords to be escaped for use as identifiers
+ (accepted, raw_identifiers, "1.30.0", Some(48589), None),
+ // Attributes scoped to tools
+ (accepted, tool_attributes, "1.30.0", Some(44690), None),
+ // Allows multi-segment paths in attributes and derives
+ (accepted, proc_macro_path_invoc, "1.30.0", Some(38356), None),
);
// If you change this, please modify src/doc/unstable-book as well. You must
pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
"attributes of the form `#[derive_*]` are reserved for the compiler";
-pub const EXPLAIN_VIS_MATCHER: &'static str =
- ":vis fragment specifier is experimental and subject to change";
-
pub const EXPLAIN_LITERAL_MATCHER: &'static str =
":literal fragment specifier is experimental and subject to change";
e.span,
"yield syntax is experimental");
}
- ast::ExprKind::Catch(_) => {
- gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental");
+ ast::ExprKind::TryBlock(_) => {
+ gate_feature_post!(&self, try_blocks, e.span, "`try` expression is experimental");
}
ast::ExprKind::IfLet(ref pats, ..) | ast::ExprKind::WhileLet(ref pats, ..) => {
if pats.len() > 1 {
"existential types are unstable"
);
}
-
- ast::ImplItemKind::Type(_) if !ii.generics.params.is_empty() => {
- gate_feature_post!(&self, generic_associated_types, ii.span,
- "generic associated types are unstable");
+ ast::ImplItemKind::Type(_) => {
+ if !ii.generics.params.is_empty() {
+ gate_feature_post!(&self, generic_associated_types, ii.span,
+ "generic associated types are unstable");
+ }
+ if !ii.generics.where_clause.predicates.is_empty() {
+ gate_feature_post!(&self, generic_associated_types, ii.span,
+ "where clauses on associated types are unstable");
+ }
}
_ => {}
}
plugin_attributes,
};
- if !features.raw_identifiers {
- for &span in sess.raw_identifier_spans.borrow().iter() {
- if !span.allows_unstable() {
- gate_feature!(&ctx, raw_identifiers, span,
- "raw identifiers are experimental and subject to change"
- );
- }
- }
- }
-
let visitor = &mut PostExpansionVisitor { context: &ctx };
visitor.whole_crate_feature_gates(krate);
visit::walk_crate(visitor, krate);
use util::move_map::MoveMap;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::small_vec::ExpectOne;
pub trait Folder : Sized {
// Any additions to this trait should happen in form
pub fn noop_fold_trait_item<T: Folder>(i: TraitItem, folder: &mut T)
-> OneVector<TraitItem> {
- OneVector::one(TraitItem {
+ smallvec![TraitItem {
id: folder.new_id(i.id),
ident: folder.fold_ident(i.ident),
attrs: fold_attrs(i.attrs, folder),
},
span: folder.new_span(i.span),
tokens: i.tokens,
- })
+ }]
}
pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T)
-> OneVector<ImplItem> {
- OneVector::one(ImplItem {
+ smallvec![ImplItem {
id: folder.new_id(i.id),
vis: folder.fold_vis(i.vis),
ident: folder.fold_ident(i.ident),
},
span: folder.new_span(i.span),
tokens: i.tokens,
- })
+ }]
}
pub fn noop_fold_fn_header<T: Folder>(mut header: FnHeader, folder: &mut T) -> FnHeader {
// fold one item into possibly many items
pub fn noop_fold_item<T: Folder>(i: P<Item>, folder: &mut T) -> OneVector<P<Item>> {
- OneVector::one(i.map(|i| folder.fold_item_simple(i)))
+ smallvec![i.map(|i| folder.fold_item_simple(i))]
}
// fold one item into exactly one item
pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T)
-> OneVector<ForeignItem> {
- OneVector::one(folder.fold_foreign_item_simple(ni))
+ smallvec![folder.fold_foreign_item_simple(ni)]
}
pub fn noop_fold_foreign_item_simple<T: Folder>(ni: ForeignItem, folder: &mut T) -> ForeignItem {
}
ExprKind::Yield(ex) => ExprKind::Yield(ex.map(|x| folder.fold_expr(x))),
ExprKind::Try(ex) => ExprKind::Try(folder.fold_expr(ex)),
- ExprKind::Catch(body) => ExprKind::Catch(folder.fold_block(body)),
+ ExprKind::TryBlock(body) => ExprKind::TryBlock(folder.fold_block(body)),
},
id: folder.new_id(id),
span: folder.new_span(span),
pub fn noop_fold_stmt_kind<T: Folder>(node: StmtKind, folder: &mut T) -> OneVector<StmtKind> {
match node {
- StmtKind::Local(local) => OneVector::one(StmtKind::Local(folder.fold_local(local))),
+ StmtKind::Local(local) => smallvec![StmtKind::Local(folder.fold_local(local))],
StmtKind::Item(item) => folder.fold_item(item).into_iter().map(StmtKind::Item).collect(),
StmtKind::Expr(expr) => {
folder.fold_opt_expr(expr).into_iter().map(StmtKind::Expr).collect()
StmtKind::Semi(expr) => {
folder.fold_opt_expr(expr).into_iter().map(StmtKind::Semi).collect()
}
- StmtKind::Mac(mac) => OneVector::one(StmtKind::Mac(mac.map(|(mac, semi, attrs)| {
+ StmtKind::Mac(mac) => smallvec![StmtKind::Mac(mac.map(|(mac, semi, attrs)| {
(folder.fold_mac(mac), semi, fold_attrs(attrs.into(), folder).into())
- }))),
+ }))],
}
}
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(str_escape)]
+#![feature(try_trait)]
#![feature(unicode_internals)]
-#![feature(catch_expr)]
#![recursion_limit="256"]
extern crate rustc_data_structures;
extern crate rustc_target;
#[macro_use] extern crate scoped_tls;
+#[macro_use]
+extern crate smallvec;
extern crate serialize as rustc_serialize; // used by deriving
ast::ExprKind::WhileLet(..) |
ast::ExprKind::Loop(..) |
ast::ExprKind::ForLoop(..) |
- ast::ExprKind::Catch(..) => false,
+ ast::ExprKind::TryBlock(..) => false,
_ => true,
}
}
if col < len {
(&s[col..len]).to_string()
} else {
- "".to_string()
+ String::new()
}
}
None => s,
// http://www.unicode.org/Public/security/10.0.0/confusables.txt
use syntax_pos::{Span, NO_EXPANSION};
-use errors::DiagnosticBuilder;
+use errors::{Applicability, DiagnosticBuilder};
use super::StringReader;
const UNICODE_ARRAY: &[(char, &str, char)] = &[
let msg =
format!("Unicode character '{}' ({}) looks like '{}' ({}), but it is not",
ch, u_name, ascii_char, ascii_name);
- err.span_suggestion(span, &msg, ascii_char.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ &msg,
+ ascii_char.to_string(),
+ Applicability::MaybeIncorrect);
true
},
None => {
fn byte_str_lit(lit: &str) -> Lrc<Vec<u8>> {
let mut res = Vec::with_capacity(lit.len());
- let error = |i| format!("lexer should have rejected {} at {}", lit, i);
+ let error = |i| panic!("lexer should have rejected {} at {}", lit, i);
/// Eat everything up to a non-whitespace
fn eat<I: Iterator<Item=(usize, u8)>>(it: &mut iter::Peekable<I>) {
loop {
match chars.next() {
Some((i, b'\\')) => {
- let em = error(i);
- match chars.peek().expect(&em).1 {
+ match chars.peek().unwrap_or_else(|| error(i)).1 {
b'\n' => eat(&mut chars),
b'\r' => {
chars.next();
- if chars.peek().expect(&em).1 != b'\n' {
+ if chars.peek().unwrap_or_else(|| error(i)).1 != b'\n' {
panic!("lexer accepted bare CR");
}
eat(&mut chars);
}
},
Some((i, b'\r')) => {
- let em = error(i);
- if chars.peek().expect(&em).1 != b'\n' {
+ if chars.peek().unwrap_or_else(|| error(i)).1 != b'\n' {
panic!("lexer accepted bare CR");
}
chars.next();
let mut i = tokens.iter();
// This might be a sign we need a connect method on Iterator.
let b = i.next()
- .map_or("".to_string(), |t| t.to_string());
+ .map_or(String::new(), |t| t.to_string());
i.enumerate().fold(b, |mut b, (i, a)| {
if tokens.len() > 2 && i == tokens.len() - 2 {
b.push_str(", or ");
} else {
err.span_label(self.span, "expected identifier");
if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
- err.span_suggestion(self.span, "remove this comma", "".into());
+ err.span_suggestion(self.span, "remove this comma", String::new());
}
}
err
self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
}
- fn parse_seq_to_before_tokens<T, F>(&mut self,
- kets: &[&token::Token],
- sep: SeqSep,
- expect: TokenExpectType,
- mut f: F)
- -> PResult<'a, Vec<T>>
+ fn parse_seq_to_before_tokens<T, F>(
+ &mut self,
+ kets: &[&token::Token],
+ sep: SeqSep,
+ expect: TokenExpectType,
+ mut f: F,
+ ) -> PResult<'a, Vec<T>>
where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
{
let mut first: bool = true;
let parser_snapshot_before_pat = self.clone();
+ // Once we can use edition 2018 in the compiler,
+ // replace this with real try blocks.
+ macro_rules! try_block {
+ ($($inside:tt)*) => (
+ (||{ ::std::ops::Try::from_ok({ $($inside)* }) })()
+ )
+ }
+
// We're going to try parsing the argument as a pattern (even though it's not
// allowed). This way we can provide better errors to the user.
- let pat_arg: PResult<'a, _> = do catch {
+ let pat_arg: PResult<'a, _> = try_block! {
let pat = self.parse_pat()?;
self.expect(&token::Colon)?;
(pat, self.parse_ty()?)
TokenExpectType::Expect,
|p| p.parse_ty())?;
self.bump(); // `)`
+ let span = lo.to(self.prev_span);
let output = if self.eat(&token::RArrow) {
Some(self.parse_ty_common(false, false)?)
} else {
None
};
- let span = lo.to(self.prev_span);
ParenthesisedArgs { inputs, output, span }.into()
};
BlockCheckMode::Unsafe(ast::UserProvided),
attrs);
}
- if self.is_catch_expr() {
+ if self.is_do_catch_block() {
+ let mut db = self.fatal("found removed `do catch` syntax");
+ db.help("Following RFC #2388, the new non-placeholder syntax is `try`");
+ return Err(db);
+ }
+ if self.is_try_block() {
let lo = self.span;
- assert!(self.eat_keyword(keywords::Do));
- assert!(self.eat_keyword(keywords::Catch));
- return self.parse_catch_expr(lo, attrs);
+ assert!(self.eat_keyword(keywords::Try));
+ return self.parse_try_block(lo, attrs);
}
if self.eat_keyword(keywords::Return) {
if self.token.can_begin_expr() {
err.span_suggestion_short_with_applicability(
self.span,
"remove this comma",
- "".to_owned(),
+ String::new(),
Applicability::MachineApplicable
);
err.note("the base struct must always be the last field");
ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs))
}
- /// Parse a `do catch {...}` expression (`do catch` token already eaten)
- fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>)
+ /// Parse a `try {...}` expression (`try` token already eaten)
+ fn parse_try_block(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>)
-> PResult<'a, P<Expr>>
{
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
- Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs))
+ Ok(self.mk_expr(span_lo.to(body.span), ExprKind::TryBlock(body), attrs))
}
// `match` token already eaten
e.span_suggestion_short_with_applicability(
match_span,
"try removing this `match`",
- "".to_owned(),
+ String::new(),
Applicability::MaybeIncorrect // speculative
);
}
if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col
&& expr_lines.lines.len() == 2
&& self.token == token::FatArrow => {
- // We check wether there's any trailing code in the parse span, if there
- // isn't, we very likely have the following:
+ // We check whether there's any trailing code in the parse span,
+ // if there isn't, we very likely have the following:
//
// X | &Y => "y"
// | -- - missing comma
if self.token == token::CloseDelim(token::Brace) {
// If the struct looks otherwise well formed, recover and continue.
if let Some(sp) = comma_sp {
- err.span_suggestion_short(sp, "remove this comma", "".into());
+ err.span_suggestion_short(sp, "remove this comma", String::new());
}
err.emit();
break;
err.multipart_suggestion(
"move the `..` to the end of the field list",
vec![
- (etc_span, "".into()),
+ (etc_span, String::new()),
(self.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
],
);
}
/// A wrapper around `parse_pat` with some special error handling for the
- /// "top-level" patterns in a match arm, `for` loop, `let`, &c. (in contast
+ /// "top-level" patterns in a match arm, `for` loop, `let`, &c. (in contrast
/// to subpatterns within such).
fn parse_top_level_pat(&mut self) -> PResult<'a, P<Pat>> {
let pat = self.parse_pat()?;
// If `break_on_semi` is `Break`, then we will stop consuming tokens after
// finding (and consuming) a `;` outside of `{}` or `[]` (note that this is
// approximate - it can mean we break too early due to macros, but that
- // shoud only lead to sub-optimal recovery, not inaccurate parsing).
+ // should only lead to sub-optimal recovery, not inaccurate parsing).
//
// If `break_on_block` is `Break`, then we will stop consuming tokens
// after finding (and consuming) a brace-delimited block.
)
}
- fn is_catch_expr(&mut self) -> bool {
+ fn is_do_catch_block(&mut self) -> bool {
self.token.is_keyword(keywords::Do) &&
self.look_ahead(1, |t| t.is_keyword(keywords::Catch)) &&
self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) &&
+ !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
+ }
+
+ fn is_try_block(&mut self) -> bool {
+ self.token.is_keyword(keywords::Try) &&
+ self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) &&
+
+ self.span.edition() >= Edition::Edition2018 &&
- // prevent `while catch {} {}`, `if catch {} {} else {}`, etc.
+ // prevent `while try {} {}`, `if try {} {} else {}`, etc.
!self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
fn parse_generic_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, GenericBounds> {
let mut bounds = Vec::new();
loop {
- // This needs to be syncronized with `Token::can_begin_bound`.
+ // This needs to be synchronized with `Token::can_begin_bound`.
let is_bound_start = self.check_path() || self.check_lifetime() ||
self.check(&token::Question) ||
self.check_keyword(keywords::For) ||
if token_str == ";" {
let msg = "consider removing this semicolon";
err.span_suggestion_short_with_applicability(
- self.span, msg, "".to_string(), Applicability::MachineApplicable
+ self.span, msg, String::new(), Applicability::MachineApplicable
);
if !items.is_empty() { // Issue #51603
let previous_item = &items[items.len()-1];
}
/// Parse one of the items allowed by the flags.
- /// NB: this function no longer parses the items inside an
- /// extern crate.
fn parse_item_implementation(
&mut self,
attrs: Vec<Attribute>,
self.print_expr_maybe_paren(e, parser::PREC_POSTFIX)?;
self.s.word("?")?
}
- ast::ExprKind::Catch(ref blk) => {
- self.head("do catch")?;
+ ast::ExprKind::TryBlock(ref blk) => {
+ self.head("try")?;
self.s.space()?;
self.print_block_with_attrs(blk, attrs)?
}
cm.new_source_file(PathBuf::from("blork.rs").into(),
"first line.\nsecond line".to_string());
cm.new_source_file(PathBuf::from("empty.rs").into(),
- "".to_string());
+ String::new());
cm.new_source_file(PathBuf::from("blork2.rs").into(),
"first line.\nsecond line".to_string());
cm
use OneVector;
use symbol::{self, Symbol, keywords};
use ThinVec;
+use rustc_data_structures::small_vec::ExpectOne;
enum ShouldPanic {
No,
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
- OneVector::one(P(item))
+ smallvec![P(item)]
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
EntryPointType::OtherMain => folded,
};
- OneVector::one(folded)
+ smallvec![folded]
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
Loop,
Match,
Block,
- Catch,
+ TryBlock,
Struct,
Async,
}
ExprPrecedence::Loop |
ExprPrecedence::Match |
ExprPrecedence::Block |
- ExprPrecedence::Catch |
+ ExprPrecedence::TryBlock |
ExprPrecedence::Async |
ExprPrecedence::Struct => PREC_PAREN,
}
ExprKind::Try(ref subexpression) => {
visitor.visit_expr(subexpression)
}
- ExprKind::Catch(ref body) => {
+ ExprKind::TryBlock(ref body) => {
visitor.visit_block(body)
}
}
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
rustc_data_structures = { path = "../librustc_data_structures" }
-rustc_target = { path = "../librustc_target" }
\ No newline at end of file
+rustc_target = { path = "../librustc_target" }
+smallvec = { version = "0.6.5", features = ["union"] }
use syntax::symbol::Symbol;
use syntax::tokenstream;
use syntax_pos::{MultiSpan, Span, DUMMY_SP};
+use errors::Applicability;
use std::borrow::Cow;
use std::collections::hash_map::Entry;
invalid_refs: Vec<(usize, usize)>,
/// Spans of all the formatting arguments, in order.
arg_spans: Vec<Span>,
- /// Wether this formatting string is a literal or it comes from a macro.
+ /// Whether this formatting string is a literal or it comes from a macro.
is_literal: bool,
}
0 => "{}".to_string(),
_ => format!("{}{{}}", "{} ".repeat(args.len())),
};
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
fmt_sp.shrink_to_lo(),
"you might be missing a string literal to format with",
format!("\"{}\", ", sugg_fmt),
+ Applicability::MaybeIncorrect,
);
err.emit();
return DummyResult::raw_expr(sp);
/// LLVM's `module asm "some assembly here"`. All of LLVM's caveats
/// therefore apply.
-use rustc_data_structures::small_vec::OneVector;
-
use syntax::ast;
use syntax::source_map::respan;
use syntax::ext::base;
None => return DummyResult::any(sp),
};
- MacEager::items(OneVector::one(P(ast::Item {
+ MacEager::items(smallvec![P(ast::Item {
ident: ast::Ident::with_empty_ctxt(Symbol::intern("")),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
vis: respan(sp.shrink_to_lo(), ast::VisibilityKind::Inherited),
span: sp,
tokens: None,
- })))
+ })])
}
extern crate rustc_data_structures;
extern crate rustc_errors as errors;
extern crate rustc_target;
+#[macro_use]
+extern crate smallvec;
mod diagnostics;
#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub enum CompilerDesugaringKind {
QuestionMark,
- Catch,
+ TryBlock,
/// Desugaring of an `impl Trait` in return type position
/// to an `existential type Foo: Trait;` + replacing the
/// `impl Trait` with `Foo`.
Symbol::intern(match self {
CompilerDesugaringKind::Async => "async",
CompilerDesugaringKind::QuestionMark => "?",
- CompilerDesugaringKind::Catch => "do catch",
+ CompilerDesugaringKind::TryBlock => "try block",
CompilerDesugaringKind::ExistentialReturnType => "existential type",
CompilerDesugaringKind::ForLoop => "for loop",
})
// Edition-specific keywords reserved for future use.
(51, Async, "async") // >= 2018 Edition Only
+ (52, Try, "try") // >= 2018 Edition Only
// Special lifetime names
- (52, UnderscoreLifetime, "'_")
- (53, StaticLifetime, "'static")
+ (53, UnderscoreLifetime, "'_")
+ (54, StaticLifetime, "'static")
// Weak keywords, have special meaning only in specific contexts.
- (54, Auto, "auto")
- (55, Catch, "catch")
- (56, Default, "default")
- (57, Dyn, "dyn")
- (58, Union, "union")
- (59, Existential, "existential")
+ (55, Auto, "auto")
+ (56, Catch, "catch")
+ (57, Default, "default")
+ (58, Dyn, "dyn")
+ (59, Union, "union")
+ (60, Existential, "existential")
}
impl Symbol {
fn is_unused_keyword_2018(self) -> bool {
- self == keywords::Async.name()
+ self >= keywords::Async.name() &&
+ self <= keywords::Try.name()
}
}
});
// This is safe because the interner keeps string alive until it is dropped.
// We can access it because we know the interner is still alive since we use a
- // scoped thread local to access it, and it was alive at the begining of this scope
+ // scoped thread local to access it, and it was alive at the beginning of this scope
unsafe { f(&*str) }
}
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
let mbps = if bs.mb_s == 0 {
- "".into()
+ String::new()
} else {
format!(r#", "mib_per_second": {}"#, bs.mb_s)
};
}
#if LLVM_VERSION_GE(7, 0)
- unwrap(Target)->addPassesToEmitFile(*PM, OS, nullptr, FileType, false);
+ buffer_ostream BOS(OS);
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
#else
unwrap(Target)->addPassesToEmitFile(*PM, OS, FileType, false);
#endif
let _: (char, u32) = Trait::without_default_impl(0);
// Currently, no object code is generated for trait methods with default
- // implemenations, unless they are actually called from somewhere. Therefore
+ // implementations, unless they are actually called from somewhere. Therefore
// we cannot import the implementations and have to create our own inline.
//~ MONO_ITEM fn cgu_export_trait_method::Trait[0]::with_default_impl[0]<u32>
let _ = Trait::with_default_impl(0u32);
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -O
+
+// A drop([...].clone()) sequence on an Rc should be a no-op
+// In particular, no call to __rust_dealloc should be emitted
+#![crate_type = "lib"]
+use std::rc::Rc;
+
+pub fn foo(t: &Rc<Vec<usize>>) {
+// CHECK-NOT: __rust_dealloc
+ drop(t.clone());
+}
#![feature(plugin_registrar, rustc_private)]
#![feature(box_syntax)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
#[macro_use] extern crate rustc;
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
// Load rustc as a plugin to get macros
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
extern crate syntax;
// except according to those terms.
// This crate attempts to enumerate the various scenarios for how a
-// type can define fields and methods with various visiblities and
+// type can define fields and methods with various visibilities and
// stabilities.
//
// The basic stability pattern in this file has four cases:
//
// However, since stability attributes can only be observed in
// cross-crate linkage scenarios, there is little reason to take the
-// cross-product (4 stability cases * 4 visiblity cases), because the
+// cross-product (4 stability cases * 4 visibility cases), because the
// first three visibility cases cannot be accessed outside this crate,
// and therefore stability is only relevant when the visibility is pub
// to the whole universe.
use proc_macro::*;
-#[proc_macro_attribute]
-pub fn attr2mod(_: TokenStream, _: TokenStream) -> TokenStream {
- "mod test {}".parse().unwrap()
-}
-
#[proc_macro_attribute]
pub fn attr2mac1(_: TokenStream, _: TokenStream) -> TokenStream {
"macro_rules! foo1 { (a) => (a) }".parse().unwrap()
"macro foo2(a) { a }".parse().unwrap()
}
-#[proc_macro]
-pub fn mac2mod(_: TokenStream) -> TokenStream {
- "mod test2 {}".parse().unwrap()
-}
-
#[proc_macro]
pub fn mac2mac1(_: TokenStream) -> TokenStream {
"macro_rules! foo3 { (a) => (a) }".parse().unwrap()
#[proc_macro]
pub fn tricky(_: TokenStream) -> TokenStream {
"fn foo() {
- mod test {}
macro_rules! foo { (a) => (a) }
}".parse().unwrap()
}
use foo::*;
-#[attr2mod]
-//~^ ERROR: cannot expand to modules
-pub fn a() {}
#[attr2mac1]
//~^ ERROR: cannot expand to macro definitions
pub fn a() {}
//~^ ERROR: cannot expand to macro definitions
pub fn a() {}
-mac2mod!(); //~ ERROR: cannot expand to modules
mac2mac1!(); //~ ERROR: cannot expand to macro definitions
mac2mac2!(); //~ ERROR: cannot expand to macro definitions
tricky!();
-//~^ ERROR: cannot expand to modules
-//~| ERROR: cannot expand to macro definitions
+//~^ ERROR: cannot expand to macro definitions
fn main() {}
// aux-build:proc-macro-gates.rs
// gate-test-proc_macro_non_items
-// gate-test-proc_macro_path_invoc
// gate-test-proc_macro_mod line
// gate-test-proc_macro_expr
// gate-test-proc_macro_mod
use foo::*;
-#[foo::a] //~ ERROR: paths of length greater than one
-fn _test() {}
-
fn _test_inner() {
#![a] // OK
}
#[a] //~ ERROR: custom attributes cannot be applied to modules
-//~| ERROR: procedural macros cannot expand to modules
mod _test2 {}
mod _test2_inner {
#![a] //~ ERROR: custom attributes cannot be applied to modules
- //~| ERROR: procedural macros cannot expand to modules
}
#[a = y] //~ ERROR: must only be followed by a delimiter token
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn main() {
+ let _: Option<()> = do catch {};
+ //~^ ERROR found removed `do catch` syntax
+ //~^^ HELP Following RFC #2388, the new non-placeholder syntax is `try`
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only --edition 2018
+
+fn main() {
+ let try = "foo"; //~ error: expected pattern, found reserved keyword `try`
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:runned an unexported test
+// error-pattern:ran an unexported test
// compile-flags:--test
// check-stdout
#[test]
fn unexported() {
- panic!("runned an unexported test");
+ panic!("ran an unexported test");
}
}
-include ../tools.mk
-# Test that hir-tree output doens't crash and includes
+# Test that hir-tree output doesn't crash and includes
# the string constant we would expect to see.
all:
-include ../tools.mk
-# Test that hir-tree output doens't crash and includes
+# Test that hir-tree output doesn't crash and includes
# the string constant we would expect to see.
all:
LOG := $(TMPDIR)/log.txt
# This test builds a shared object, then an executable that links it as a native
-# rust library (constrast to an rlib). The shared library and executable both
+# rust library (contrast to an rlib). The shared library and executable both
# are compiled with address sanitizer, and we assert that a fault in the cdylib
# is correctly detected.
LOG := $(TMPDIR)/log.txt
# This test builds a shared object, then an executable that links it as a native
-# rust library (constrast to an rlib). The shared library and executable both
+# rust library (contrast to an rlib). The shared library and executable both
# are compiled with address sanitizer, and we assert that a fault in the dylib
# is correctly detected.
fn expand(cx: &mut ExtCtxt, _: syntax_pos::Span, _: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
quote_item!(cx, struct Struct1;).unwrap(),
quote_item!(cx, struct Struct2;).unwrap()
]))
#![feature(plugin_registrar, rustc_private)]
#![feature(box_syntax)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
#[macro_use] extern crate rustc;
// except according to those terms.
#![feature(box_syntax, plugin, plugin_registrar, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
#![crate_type = "dylib"]
// aux-build:derive-b.rs
// ignore-stage1
-#![feature(proc_macro_path_invoc, unrestricted_attribute_tokens)]
+#![feature(unrestricted_attribute_tokens)]
extern crate derive_b;
// aux-build:issue-42708.rs
// ignore-stage1
-#![feature(decl_macro, proc_macro_path_invoc)]
+#![feature(decl_macro)]
#![allow(unused)]
extern crate issue_42708;
// aux-build:issue-50061.rs
// ignore-stage1
-#![feature(proc_macro_path_invoc, decl_macro)]
+#![feature(decl_macro)]
extern crate issue_50061;
// edition:2015
-#![feature(raw_identifiers)]
-
#[macro_export]
macro_rules! produces_async {
() => (pub fn async() {})
]
}
- // Tests TyBool
+ // Tests Bool
pub type FooBool = bool;
- // Tests TyChar
+ // Tests Char
pub type FooChar = char;
- // Tests TyInt (does not test all variants of IntTy)
+ // Tests Int (does not test all variants of IntTy)
pub type FooInt = isize;
- // Tests TyUint (does not test all variants of UintTy)
+ // Tests Uint (does not test all variants of UintTy)
pub type FooUint = usize;
- // Tests TyFloat (does not test all variants of FloatTy)
+ // Tests Float (does not test all variants of FloatTy)
pub type FooFloat = f64;
- // Tests TyStr
+ // Tests Str
pub type FooStr = str;
- // Tests TyArray
+ // Tests Array
pub type FooArray = [u8; 1];
- // Tests TySlice
+ // Tests Slice
pub type FooSlice = [u8];
// Tests Box (of u8)
pub type FooBox = Box<u8>;
- // Tests TyRawPtr
+ // Tests RawPtr
pub type FooPtr = *const u8;
- // Tests TyRef
+ // Tests Ref
pub type FooRef = &'static u8;
- // Tests TyFnPtr
+ // Tests FnPtr
pub type FooFnPtr = fn(u8) -> bool;
- // Tests TyDynamic
+ // Tests Dynamic
pub trait FooTrait {
fn foo_method(&self) -> usize;
}
VarB(usize, usize)
}
- // Tests TyTuple
+ // Tests Tuple
pub type FooNil = ();
pub type FooTuple = (u8, i8, bool);
- // Skipping TyParam
+ // Skipping Param
- // Skipping TyInfer
+ // Skipping Infer
- // Skipping TyError
+ // Skipping Error
}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-struct catch {}
-
-pub fn main() {
- let catch_result: Option<_> = do catch {
- let x = 5;
- x
- };
- assert_eq!(catch_result, Some(5));
-
- let mut catch = true;
- while catch { catch = false; }
- assert_eq!(catch, false);
-
- catch = if catch { false } else { true };
- assert_eq!(catch, true);
-
- match catch {
- _ => {}
- };
-
- let catch_err: Result<_, i32> = do catch {
- Err(22)?;
- 1
- };
- assert_eq!(catch_err, Err(22));
-
- let catch_okay: Result<i32, i32> = do catch {
- if false { Err(25)?; }
- Ok::<(), i32>(())?;
- 28
- };
- assert_eq!(catch_okay, Ok(28));
-
- let catch_from_loop: Result<i32, i32> = do catch {
- for i in 0..10 {
- if i < 5 { Ok::<i32, i32>(i)?; } else { Err(i)?; }
- }
- 22
- };
- assert_eq!(catch_from_loop, Err(5));
-
- let cfg_init;
- let _res: Result<(), ()> = do catch {
- cfg_init = 5;
- };
- assert_eq!(cfg_init, 5);
-
- let cfg_init_2;
- let _res: Result<(), ()> = do catch {
- cfg_init_2 = 6;
- Err(())?;
- };
- assert_eq!(cfg_init_2, 6);
-
- let my_string = "test".to_string();
- let res: Result<&str, ()> = do catch {
- // Unfortunately, deref doesn't fire here (#49356)
- &my_string[..]
- };
- assert_eq!(res, Ok("test"));
-
- let my_opt: Option<_> = do catch { () };
- assert_eq!(my_opt, Some(()));
-
- let my_opt: Option<_> = do catch { };
- assert_eq!(my_opt, Some(()));
-}
// RwLock (since we can grab the child pointers in read-only
// mode), but we cannot lock a std::sync::Mutex to guard reading
// from each node via the same pattern, since once you hit the
- // cycle, you'll be trying to acquring the same lock twice.
+ // cycle, you'll be trying to acquire the same lock twice.
// (We deal with this by exiting the traversal early if try_lock fails.)
// Cycle 12: { arc0 -> (arc1, arc2), arc1 -> (), arc2 -> arc0 }, refcells
// edition:2015
// aux-build:edition-kw-macro-2015.rs
-#![feature(raw_identifiers)]
-
#[macro_use]
extern crate edition_kw_macro_2015;
// edition:2015
// aux-build:edition-kw-macro-2018.rs
-#![feature(raw_identifiers)]
-
#[macro_use]
extern crate edition_kw_macro_2018;
}
fn test_once() {
- // Make sure each argument are evaluted only once even though it may be
+ // Make sure each argument is evaluated only once even though it may be
// formatted multiple times
fn foo() -> isize {
static mut FOO: isize = 0;
}
match 'c' {
'a'...'z' => {}
- _ => panic!("should suppport char ranges")
+ _ => panic!("should support char ranges")
}
match -3_isize {
-7...5 => {}
// trailing comma on lifetime bounds
type TypeE = TypeA<'static,>;
-// normal type arugment
+// normal type argument
type TypeF<T> = Box<T>;
// type argument with trailing comma
// Issue 33903:
// Built-in indexing should be used even when the index is not
// trivially an integer
-// Only built-in indexing can be used in constant expresssions
+// Only built-in indexing can be used in constant expressions
const FOO: i32 = [12, 34][0 + 1];
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(catch_expr)]
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
fn main() {
let mut a = 0;
let () = {
- let _: Result<(), ()> = do catch {
+ let _: Result<(), ()> = try {
let _ = Err(())?;
return
};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Test that we are able to reinitilize box with moved referent
+// Test that we are able to reinitialize box with moved referent
#![feature(nll)]
static mut ORDER: [usize; 3] = [0, 0, 0];
static mut INDEX: usize = 0;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
//{{{ issue 40569 ==============================================================
// except according to those terms.
#![allow(dead_code, unused_imports)]
-#![feature(macro_vis_matcher, crate_visibility_modifier)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
+#![feature(crate_visibility_modifier)]
/**
Ensure that `:vis` matches can be captured in existing positions, and passed
}
match 'c' {
'a'..='z' => {}
- _ => panic!("should suppport char ranges")
+ _ => panic!("should support char ranges")
}
match -3 {
-7..=5 => {}
// except according to those terms.
// Regression test for #23698: The reassignment checker only cared
-// about the last assigment in a match arm body
+// about the last assignment in a match arm body
// Use an extra function to make sure no extra assignments
// are introduced by macros in the match statement
let write_len = buf.len();
unsafe {
*self = slice::from_raw_parts_mut(
- self.as_mut_ptr().offset(write_len as isize),
+ self.as_mut_ptr().add(write_len),
self.len() - write_len
);
}
fn main() {
// This can fail if rustc and LLVM disagree on the size of a type.
- // In this case, `Option<Packed<(&(), u32)>>` was erronously not
+ // In this case, `Option<Packed<(&(), u32)>>` was erroneously not
// marked as packed despite needing alignment `1` and containing
// its `&()` discriminant, which has alignment larger than `1`.
sanity_check_size((Some(Packed((&(), 0))), true));
for i in 0..COUNT / 2 {
let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
for j in 0..size {
- assert_eq!(*p0.offset(j as isize), i as u8);
- assert_eq!(*p1.offset(j as isize), i as u8);
+ assert_eq!(*p0.add(j), i as u8);
+ assert_eq!(*p1.add(j), i as u8);
}
}
}
for i in 0..COUNT / 2 {
let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
for j in 0..size {
- *p0.offset(j as isize) = i as u8;
- *p1.offset(j as isize) = i as u8;
+ *p0.add(j) = i as u8;
+ *p1.add(j) = i as u8;
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(raw_identifiers)]
-
use std::mem;
#[r#repr(r#C, r#packed)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(raw_identifiers)]
-
fn r#fn(r#match: u32) -> u32 {
r#match
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(raw_identifiers)]
-
#[derive(Debug, PartialEq, Eq)]
struct IntWrapper(u32);
// except according to those terms.
#![feature(decl_macro)]
-#![feature(raw_identifiers)]
r#macro_rules! r#struct {
($r#struct:expr) => { $r#struct }
let args = unsafe {
(0..argc as usize).map(|i| {
- let ptr = *argv.offset(i as isize) as *const _;
+ let ptr = *argv.add(i) as *const _;
CStr::from_ptr(ptr).to_bytes().to_vec()
}).collect::<Vec<_>>()
};
fn main() {
unsafe {
- // Install signal hander that runs on alternate signal stack.
+ // Install signal handler that runs on alternate signal stack.
let mut action: sigaction = std::mem::zeroed();
action.sa_flags = (SA_ONSTACK | SA_SIGINFO) as _;
action.sa_sigaction = signal_handler as sighandler_t;
// Scoped attributes should not trigger an unused attributes lint.
-#![feature(tool_attributes)]
#![deny(unused_attributes)]
fn main() {
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+struct catch {}
+
+pub fn main() {
+ let catch_result: Option<_> = try {
+ let x = 5;
+ x
+ };
+ assert_eq!(catch_result, Some(5));
+
+ let mut catch = true;
+ while catch { catch = false; }
+ assert_eq!(catch, false);
+
+ catch = if catch { false } else { true };
+ assert_eq!(catch, true);
+
+ match catch {
+ _ => {}
+ };
+
+ let catch_err: Result<_, i32> = try {
+ Err(22)?;
+ 1
+ };
+ assert_eq!(catch_err, Err(22));
+
+ let catch_okay: Result<i32, i32> = try {
+ if false { Err(25)?; }
+ Ok::<(), i32>(())?;
+ 28
+ };
+ assert_eq!(catch_okay, Ok(28));
+
+ let catch_from_loop: Result<i32, i32> = try {
+ for i in 0..10 {
+ if i < 5 { Ok::<i32, i32>(i)?; } else { Err(i)?; }
+ }
+ 22
+ };
+ assert_eq!(catch_from_loop, Err(5));
+
+ let cfg_init;
+ let _res: Result<(), ()> = try {
+ cfg_init = 5;
+ };
+ assert_eq!(cfg_init, 5);
+
+ let cfg_init_2;
+ let _res: Result<(), ()> = try {
+ cfg_init_2 = 6;
+ Err(())?;
+ };
+ assert_eq!(cfg_init_2, 6);
+
+ let my_string = "test".to_string();
+ let res: Result<&str, ()> = try {
+ // Unfortunately, deref doesn't fire here (#49356)
+ &my_string[..]
+ };
+ assert_eq!(res, Ok("test"));
+
+ let my_opt: Option<_> = try { () };
+ assert_eq!(my_opt, Some(()));
+
+ let my_opt: Option<_> = try { };
+ assert_eq!(my_opt, Some(()));
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2015
+
+fn main() {
+ let try = 2;
+ struct try { try: u32 };
+ let try: try = try { try };
+ assert_eq!(try.try, 2);
+}
//
// error: internal compiler error: get_unique_type_id_of_type() -
// unexpected type: closure,
-// TyClosure(syntax::ast::DefId{krate: 0, node: 66},
+// Closure(syntax::ast::DefId{krate: 0, node: 66},
// ReScope(63))
//
// This is a regression test for issue #17021.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "foo"]
+
+// @has foo/struct.S.html '//h3[@id="impl-Into"]//code' 'impl<T, U> Into for T'
+pub struct S2 {}
+mod m {
+ pub struct S {}
+}
+pub use m::*;
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "foo"]
+
+pub trait Foo {}
+pub trait Foo2 {}
+
+pub struct Bar;
+
+impl Foo for Bar {}
+impl Foo2 for Bar {}
+
+// @!has foo/fn.foo.html '//section[@id="main"]//pre' "x: &\'x impl Foo"
+// @!has foo/fn.foo.html '//section[@id="main"]//pre' "-> &\'x impl Foo {"
+pub fn foo<'x>(x: &'x impl Foo) -> &'x impl Foo {
+ x
+}
+
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' "x: &\'x impl Foo"
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' '-> impl Foo2 {'
+pub fn foo2<'x>(_x: &'x impl Foo) -> impl Foo2 {
+ Bar
+}
+
+// @!has foo/fn.foo_foo.html '//section[@id="main"]//pre' '-> impl Foo + Foo2 {'
+pub fn foo_foo() -> impl Foo + Foo2 {
+ Bar
+}
+
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' "x: &'x (impl Foo + Foo2)"
+pub fn foo_foo_foo<'x>(_x: &'x (impl Foo + Foo2)) {
+}
#![crate_name = "qwop"]
-/// (writen on a spider's web) Some Macro
+/// (written on a spider's web) Some Macro
#[macro_export]
macro_rules! some_macro {
() => {
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
// Load rustc as a plugin to get macros
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
extern crate syntax;
#![feature(plugin_registrar)]
#![feature(box_syntax, rustc_private)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![feature(macro_at_most_once_rep)]
extern crate syntax;
// aux-build:generate-mod.rs
-#![feature(proc_macro_gen, proc_macro_path_invoc)]
-
extern crate generate_mod;
struct FromOutside;
//~| WARN this was previously accepted
struct Z;
+fn inner_block() {
+ #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ //~| WARN cannot find type `OuterDerive` in this scope
+ //~| WARN this was previously accepted
+ //~| WARN this was previously accepted
+ struct InnerZ;
+}
+
#[derive(generate_mod::CheckDeriveLint)] // OK, lint is suppressed
struct W;
error[E0412]: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:21:1
+ --> $DIR/generate-mod.rs:19:1
|
LL | generate_mod::check!(); //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `Outer` in this scope
- --> $DIR/generate-mod.rs:21:1
+ --> $DIR/generate-mod.rs:19:1
|
LL | generate_mod::check!(); //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:24:1
+ --> $DIR/generate-mod.rs:22:1
|
LL | #[generate_mod::check_attr] //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `OuterAttr` in this scope
- --> $DIR/generate-mod.rs:24:1
+ --> $DIR/generate-mod.rs:22:1
|
LL | #[generate_mod::check_attr] //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
warning: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:28:10
+ --> $DIR/generate-mod.rs:26:10
|
LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
= note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
warning: cannot find type `OuterDerive` in this scope
- --> $DIR/generate-mod.rs:28:10
+ --> $DIR/generate-mod.rs:26:10
|
LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+warning: cannot find type `FromOutside` in this scope
+ --> $DIR/generate-mod.rs:33:14
+ |
+LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+
+warning: cannot find type `OuterDerive` in this scope
+ --> $DIR/generate-mod.rs:33:14
+ |
+LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+
error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0412`.
--> $DIR/macro-namespace-reserved-2.rs:34:5
|
LL | my_macro!(); //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^^^
+ | ^^^^^^^^
error: can't use a procedural macro from the same crate that defines it
--> $DIR/macro-namespace-reserved-2.rs:37:5
|
LL | my_macro_attr!(); //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^
error: can't use a procedural macro from the same crate that defines it
--> $DIR/macro-namespace-reserved-2.rs:40:5
|
LL | MyTrait!(); //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^^
+ | ^^^^^^^
error: can't use a procedural macro from the same crate that defines it
- --> $DIR/macro-namespace-reserved-2.rs:43:1
+ --> $DIR/macro-namespace-reserved-2.rs:43:3
|
LL | #[my_macro] //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^^
+ | ^^^^^^^^
error: can't use a procedural macro from the same crate that defines it
- --> $DIR/macro-namespace-reserved-2.rs:45:1
+ --> $DIR/macro-namespace-reserved-2.rs:45:3
|
LL | #[my_macro_attr] //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^
error: can't use a procedural macro from the same crate that defines it
- --> $DIR/macro-namespace-reserved-2.rs:47:1
+ --> $DIR/macro-namespace-reserved-2.rs:47:3
|
LL | #[MyTrait] //~ ERROR can't use a procedural macro from the same crate that defines it
- | ^^^^^^^^^^
+ | ^^^^^^^
error: can't use a procedural macro from the same crate that defines it
--> $DIR/macro-namespace-reserved-2.rs:50:10
// compile-pass
-#![feature(raw_identifiers)]
-//~^ WARN the feature `raw_identifiers` is included in the Rust 2018 edition
+#![feature(impl_header_lifetime_elision)]
+//~^ WARN the feature `impl_header_lifetime_elision` is included in the Rust 2018 edition
#![feature(rust_2018_preview)]
-fn main() {
- let foo = 0;
- let bar = r#foo;
-}
+fn main() {}
-warning[E0705]: the feature `raw_identifiers` is included in the Rust 2018 edition
+warning[E0705]: the feature `impl_header_lifetime_elision` is included in the Rust 2018 edition
--> $DIR/E0705.rs:13:12
|
-LL | #![feature(raw_identifiers)]
- | ^^^^^^^^^^^^^^^
+LL | #![feature(impl_header_lifetime_elision)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Check that the user gets an errror if they omit a binding from an
+// Check that the user gets an error if they omit a binding from an
// object type.
pub trait Foo {
fn foo<'a>() {
let _ = S::new::<isize,f64>(1, 1.0);
- //~^ ERROR too many type parameters provided
+ //~^ ERROR wrong number of type arguments
let _ = S::<'a,isize>::new::<f64>(1, 1.0);
- //~^ ERROR wrong number of lifetime parameters
+ //~^ ERROR wrong number of lifetime arguments
let _: S2 = Trait::new::<isize,f64>(1, 1.0);
- //~^ ERROR too many type parameters provided
+ //~^ ERROR wrong number of type arguments
let _: S2 = Trait::<'a,isize>::new::<f64>(1, 1.0);
- //~^ ERROR too many lifetime parameters provided
+ //~^ ERROR wrong number of lifetime arguments
}
fn main() {}
-error[E0087]: too many type parameters provided: expected at most 1 type parameter, found 2 type parameters
+error[E0087]: wrong number of type arguments: expected 1, found 2
--> $DIR/bad-mid-path-type-params.rs:40:28
|
LL | let _ = S::new::<isize,f64>(1, 1.0);
- | ^^^ expected 1 type parameter
+ | ^^^ unexpected type argument
-error[E0107]: wrong number of lifetime parameters: expected 0, found 1
- --> $DIR/bad-mid-path-type-params.rs:43:13
+error[E0107]: wrong number of lifetime arguments: expected 0, found 1
+ --> $DIR/bad-mid-path-type-params.rs:43:17
|
LL | let _ = S::<'a,isize>::new::<f64>(1, 1.0);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ unexpected lifetime parameter
+ | ^^ unexpected lifetime argument
-error[E0087]: too many type parameters provided: expected at most 1 type parameter, found 2 type parameters
+error[E0087]: wrong number of type arguments: expected 1, found 2
--> $DIR/bad-mid-path-type-params.rs:46:36
|
LL | let _: S2 = Trait::new::<isize,f64>(1, 1.0);
- | ^^^ expected 1 type parameter
+ | ^^^ unexpected type argument
-error[E0088]: too many lifetime parameters provided: expected at most 0 lifetime parameters, found 1 lifetime parameter
+error[E0088]: wrong number of lifetime arguments: expected 0, found 1
--> $DIR/bad-mid-path-type-params.rs:49:25
|
LL | let _: S2 = Trait::<'a,isize>::new::<f64>(1, 1.0);
- | ^^ expected 0 lifetime parameters
+ | ^^ unexpected lifetime argument
error: aborting due to 4 previous errors
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+
+// Bastion of the Turbofish
+// ------------------------
+// Beware travellers, lest you venture into waters callous and unforgiving,
+// where hope must be abandoned, ere it is cruelly torn from you. For here
+// stands the bastion of the Turbofish: an impenetrable fortress holding
+// unshaking against those who would dare suggest the supererogation of the
+// Turbofish.
+//
+// Once I was young and foolish and had the impudence to imagine that I could
+// shake free from the coils by which that creature had us tightly bound. I
+// dared to suggest that there was a better way: a brighter future, in which
+// Rustaceans both new and old could be rid of that vile beast. But alas! In
+// my foolhardiness my ignorance was unveiled and my dreams were dashed
+// unforgivingly against the rock of syntactic ambiguity.
+//
+// This humble program, small and insignificant though it might seem,
+// demonstrates that to which we had previously cast a blind eye: an ambiguity
+// in permitting generic arguments to be provided without the consent of the
+// Great Turbofish. Should you be so naïve as to try to revolt against its
+// mighty clutches, here shall its wrath be indomitably displayed. This
+// program must pass for all eternity, fundamentally at odds with an impetuous
+// rebellion against the Turbofish.
+//
+// My heart aches in sorrow, for I know I am defeated. Let this be a warning
+// to all those who come after. Here stands the bastion of the Turbofish.
+
+// See https://github.com/rust-lang/rust/pull/53562
+// and https://github.com/rust-lang/rfcs/pull/2527
+// for context.
+
+fn main() {
+ let (oh, woe, is, me) = ("the", "Turbofish", "remains", "undefeated");
+ let _: (bool, bool) = (oh<woe, is>(me));
+}
// revisions: ast migrate nll
// Since we are testing nll (and migration) explicitly as a separate
-// revisions, dont worry about the --compare-mode=nll on this test.
+// revisions, don't worry about the --compare-mode=nll on this test.
// ignore-compare-mode-nll
+++ /dev/null
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:33:13
- |
-LL | let k = &mut i;
- | ------ borrow of `i` occurs here
-...
-LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-LL | };
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | - borrow later used here
-
-error[E0382]: use of moved value: `k`
- --> $DIR/catch-bad-lifetime.rs:35:26
- |
-LL | Err(k) ?;
- | - value moved here
-...
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | ^ value used here after move
- |
- = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:36:9
- |
-LL | let k = &mut i;
- | ------ borrow of `i` occurs here
-...
-LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-LL |
-LL | let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
- | - borrow later used here
-
-error: aborting due to 3 previous errors
-
-Some errors occurred: E0382, E0506.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-// This test checks that borrows made and returned inside catch blocks are properly constrained
-pub fn main() {
- {
- // Test that borrows returned from a catch block must be valid for the lifetime of the
- // result variable
- let _result: Result<(), &str> = do catch {
- let my_string = String::from("");
- let my_str: & str = & my_string;
- //~^ ERROR `my_string` does not live long enough
- Err(my_str) ?;
- Err("") ?;
- };
- }
-
- {
- // Test that borrows returned from catch blocks freeze their referent
- let mut i = 5;
- let k = &mut i;
- let mut j: Result<(), &mut i32> = do catch {
- Err(k) ?;
- i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- };
- ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- i = 40; //~ ERROR cannot assign to `i` because it is borrowed
-
- let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
- *i_ptr = 50;
- }
-}
-
+++ /dev/null
-error[E0597]: `my_string` does not live long enough
- --> $DIR/catch-bad-lifetime.rs:20:35
- |
-LL | let my_str: & str = & my_string;
- | ^^^^^^^^^ borrowed value does not live long enough
-...
-LL | };
- | - `my_string` dropped here while still borrowed
-LL | }
- | - borrowed value needs to live until here
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:33:13
- |
-LL | let k = &mut i;
- | - borrow of `i` occurs here
-...
-LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-
-error[E0382]: use of moved value: `k`
- --> $DIR/catch-bad-lifetime.rs:35:26
- |
-LL | Err(k) ?;
- | - value moved here
-...
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | ^ value used here after move
- |
- = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:36:9
- |
-LL | let k = &mut i;
- | - borrow of `i` occurs here
-...
-LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-
-error: aborting due to 4 previous errors
-
-Some errors occurred: E0382, E0506, E0597.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-pub fn main() {
- let res: Result<u32, i32> = do catch {
- Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
- 5
- };
-
- let res: Result<i32, i32> = do catch {
- "" //~ ERROR type mismatch
- };
-
- let res: Result<i32, i32> = do catch { }; //~ ERROR type mismatch
-
- let res: () = do catch { }; //~ the trait bound `(): std::ops::Try` is not satisfied
-
- let res: i32 = do catch { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
-}
+++ /dev/null
-error[E0277]: the trait bound `i32: std::convert::From<&str>` is not satisfied
- --> $DIR/catch-bad-type.rs:15:9
- |
-LL | Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
- | ^^^^^^^^ the trait `std::convert::From<&str>` is not implemented for `i32`
- |
- = help: the following implementations were found:
- <i32 as std::convert::From<bool>>
- <i32 as std::convert::From<i16>>
- <i32 as std::convert::From<i8>>
- <i32 as std::convert::From<u16>>
- <i32 as std::convert::From<u8>>
- = note: required by `std::convert::From::from`
-
-error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == &str`
- --> $DIR/catch-bad-type.rs:20:9
- |
-LL | "" //~ ERROR type mismatch
- | ^^ expected i32, found &str
- |
- = note: expected type `i32`
- found type `&str`
-
-error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == ()`
- --> $DIR/catch-bad-type.rs:23:44
- |
-LL | let res: Result<i32, i32> = do catch { }; //~ ERROR type mismatch
- | ^ expected i32, found ()
- |
- = note: expected type `i32`
- found type `()`
-
-error[E0277]: the trait bound `(): std::ops::Try` is not satisfied
- --> $DIR/catch-bad-type.rs:25:28
- |
-LL | let res: () = do catch { }; //~ the trait bound `(): std::ops::Try` is not satisfied
- | ^^^ the trait `std::ops::Try` is not implemented for `()`
- |
- = note: required by `std::ops::Try::from_ok`
-
-error[E0277]: the trait bound `i32: std::ops::Try` is not satisfied
- --> $DIR/catch-bad-type.rs:27:29
- |
-LL | let res: i32 = do catch { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
- | ^^^^^ the trait `std::ops::Try` is not implemented for `i32`
- |
- = note: required by `std::ops::Try::from_ok`
-
-error: aborting due to 5 previous errors
-
-Some errors occurred: E0271, E0277.
-For more information about an error, try `rustc --explain E0271`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn foo() -> Option<()> { Some(()) }
-
-fn main() {
- let _: Option<f32> = do catch {
- foo()?;
- 42
- //~^ ERROR type mismatch
- };
-
- let _: Option<i32> = do catch {
- foo()?;
- };
- //~^ ERROR type mismatch
-}
+++ /dev/null
-error[E0271]: type mismatch resolving `<std::option::Option<f32> as std::ops::Try>::Ok == {integer}`
- --> $DIR/catch-block-type-error.rs:18:9
- |
-LL | 42
- | ^^
- | |
- | expected f32, found integral variable
- | help: use a float literal: `42.0`
- |
- = note: expected type `f32`
- found type `{integer}`
-
-error[E0271]: type mismatch resolving `<std::option::Option<i32> as std::ops::Try>::Ok == ()`
- --> $DIR/catch-block-type-error.rs:24:5
- |
-LL | };
- | ^ expected i32, found ()
- |
- = note: expected type `i32`
- found type `()`
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0271`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn main() {
- match do catch { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `do`
-}
+++ /dev/null
-error: expected expression, found reserved keyword `do`
- --> $DIR/catch-in-match.rs:14:11
- |
-LL | match do catch { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `do`
- | ^^ expected expression
-
-error: aborting due to previous error
-
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn main() {
- while do catch { false } {} //~ ERROR expected expression, found reserved keyword `do`
-}
+++ /dev/null
-error: expected expression, found reserved keyword `do`
- --> $DIR/catch-in-while.rs:14:11
- |
-LL | while do catch { false } {} //~ ERROR expected expression, found reserved keyword `do`
- | ^^ expected expression
-
-error: aborting due to previous error
-
+++ /dev/null
-error[E0382]: borrow of moved value: `x`
- --> $DIR/catch-maybe-bad-lifetime.rs:33:24
- |
-LL | ::std::mem::drop(x);
- | - value moved here
-LL | };
-LL | println!("{}", x); //~ ERROR use of moved value: `x`
- | ^ value borrowed here after move
- |
- = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-// This test checks that borrows made and returned inside catch blocks are properly constrained
-pub fn main() {
- {
- // Test that a borrow which *might* be returned still freezes its referent
- let mut i = 222;
- let x: Result<&i32, ()> = do catch {
- Err(())?;
- &i
- };
- x.ok().cloned();
- i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- let _ = i;
- }
-
- {
- let x = String::new();
- let _y: Result<(), ()> = do catch {
- Err(())?;
- ::std::mem::drop(x);
- };
- println!("{}", x); //~ ERROR use of moved value: `x`
- }
-
- {
- // Test that a borrow which *might* be assigned to an outer variable still freezes
- // its referent
- let mut i = 222;
- let j;
- let x: Result<(), ()> = do catch {
- Err(())?;
- j = &i;
- };
- i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- let _ = i;
- }
-}
-
+++ /dev/null
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-maybe-bad-lifetime.rs:23:9
- |
-LL | &i
- | - borrow of `i` occurs here
-...
-LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^ assignment to borrowed `i` occurs here
-
-error[E0382]: use of moved value: `x`
- --> $DIR/catch-maybe-bad-lifetime.rs:33:24
- |
-LL | ::std::mem::drop(x);
- | - value moved here
-LL | };
-LL | println!("{}", x); //~ ERROR use of moved value: `x`
- | ^ value used here after move
- |
- = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-maybe-bad-lifetime.rs:45:9
- |
-LL | j = &i;
- | - borrow of `i` occurs here
-LL | };
-LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^ assignment to borrowed `i` occurs here
-
-error: aborting due to 3 previous errors
-
-Some errors occurred: E0382, E0506.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-error[E0381]: borrow of possibly uninitialized variable: `cfg_res`
- --> $DIR/catch-opt-init.rs:23:5
- |
-LL | assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
- | ^^^^^^^^^^^^^^^^^^^^^^^ use of possibly uninitialized `cfg_res`
- |
- = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0381`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn use_val<T: Sized>(_x: T) {}
-
-pub fn main() {
- let cfg_res;
- let _: Result<(), ()> = do catch {
- Err(())?;
- cfg_res = 5;
- Ok::<(), ()>(())?;
- use_val(cfg_res);
- };
- assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
-}
-
+++ /dev/null
-error[E0381]: use of possibly uninitialized variable: `cfg_res`
- --> $DIR/catch-opt-init.rs:23:16
- |
-LL | assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
- | ^^^^^^^ use of possibly uninitialized `cfg_res`
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0381`.
--> $DIR/cfg-attr-unknown-attribute-macro-expansion.rs:13:27
|
LL | #[cfg_attr(all(), unknown)] //~ ERROR `unknown` is currently unknown
- | ^^^^^^^^
+ | ^^^^^^^
...
LL | foo!();
| ------- in this macro invocation
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x = Some(1);
+ let y = x.or_else(4);
+ //~^ ERROR expected a `std::ops::FnOnce<()>` closure, found `{integer}`
+}
--- /dev/null
+error[E0277]: expected a `std::ops::FnOnce<()>` closure, found `{integer}`
+ --> $DIR/closure-expected.rs:13:15
+ |
+LL | let y = x.or_else(4);
+ | ^^^^^^^ expected an `FnOnce<()>` closure, found `{integer}`
+ |
+ = help: the trait `std::ops::FnOnce<()>` is not implemented for `{integer}`
+ = note: wrap the `{integer}` in a closure with no arguments: `|| { /* code */ }
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0277`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-pass
-
-macro_rules! m {
- () => {{
- fn f(_: impl Sized) {}
- f
- }}
-}
-
-fn main() {
- fn f() -> impl Sized {};
- m!()(f());
-}
fn main() {
S(&0, &0); // OK
S::<'static>(&0, &0);
- //~^ ERROR expected 2 lifetime parameters, found 1 lifetime parameter
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 1
S::<'static, 'static, 'static>(&0, &0);
- //~^ ERROR expected at most 2 lifetime parameters, found 3 lifetime parameters
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 3
E::V(&0); // OK
E::V::<'static>(&0);
- //~^ ERROR expected 2 lifetime parameters, found 1 lifetime parameter
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 1
E::V::<'static, 'static, 'static>(&0);
- //~^ ERROR expected at most 2 lifetime parameters, found 3 lifetime parameters
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 3
}
-error[E0090]: too few lifetime parameters provided: expected 2 lifetime parameters, found 1 lifetime parameter
+error[E0090]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/constructor-lifetime-args.rs:27:5
|
LL | S::<'static>(&0, &0);
- | ^^^^^^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^^^^^^ expected 2 lifetime arguments
-error[E0088]: too many lifetime parameters provided: expected at most 2 lifetime parameters, found 3 lifetime parameters
+error[E0088]: wrong number of lifetime arguments: expected 2, found 3
--> $DIR/constructor-lifetime-args.rs:29:27
|
LL | S::<'static, 'static, 'static>(&0, &0);
- | ^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^ unexpected lifetime argument
-error[E0090]: too few lifetime parameters provided: expected 2 lifetime parameters, found 1 lifetime parameter
+error[E0090]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/constructor-lifetime-args.rs:32:5
|
LL | E::V::<'static>(&0);
- | ^^^^^^^^^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^^^^^^^^^ expected 2 lifetime arguments
-error[E0088]: too many lifetime parameters provided: expected at most 2 lifetime parameters, found 3 lifetime parameters
+error[E0088]: wrong number of lifetime arguments: expected 2, found 3
--> $DIR/constructor-lifetime-args.rs:34:30
|
LL | E::V::<'static, 'static, 'static>(&0);
- | ^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^ unexpected lifetime argument
error: aborting due to 4 previous errors
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_panic)]
+
+fn main() {}
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:15:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic.rs:15:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:18:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic.rs:18:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:21:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic.rs:21:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![no_std]
+#![crate_type = "lib"]
+#![feature(const_panic)]
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:15:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic_libcore.rs:15:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:18:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic_libcore.rs:18:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:21:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic_libcore.rs:21:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "bin"]
+#![feature(lang_items)]
+#![feature(panic_implementation)]
+#![feature(const_panic)]
+#![no_main]
+#![no_std]
+
+use core::panic::PanicInfo;
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
+
+#[lang = "eh_personality"]
+fn eh() {}
+#[lang = "eh_unwind_resume"]
+fn eh_unwind_resume() {}
+
+#[panic_implementation]
+fn panic(_info: &PanicInfo) -> ! {
+ loop {}
+}
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:20:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic_libcore_main.rs:20:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:23:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic_libcore_main.rs:23:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:26:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic_libcore_main.rs:26:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
LL | const Z2: i32 = unsafe { *(42 as *const i32) }; //~ ERROR cannot be used
| ^^^^^^^^^^^^^^^^^^^^^^^^^-------------------^^^
| |
- | tried to access memory with alignment 2, but alignment 4 is required
+ | a memory access tried to interpret some bytes as a pointer
error: this constant cannot be used
--> $DIR/const_raw_ptr_ops.rs:27:1
LL | | Union { usize: &BAR }.foo,
LL | | Union { usize: &BAR }.bar,
LL | | )};
- | |___^ type validation failed: encountered 5 at (*.1).TAG, but expected something in the range 42..=99
+ | |___^ type validation failed: encountered 5 at .1.<deref>.<enum-tag>, but expected something in the range 42..=99
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {}
+
+const Z: () = panic!("cheese");
+//~^ ERROR panicking in constants is unstable
+
+const Y: () = unreachable!();
+//~^ ERROR panicking in constants is unstable
+
+const X: () = unimplemented!();
+//~^ ERROR panicking in constants is unstable
--- /dev/null
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:13:15
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:19:15
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:16:15
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+
+macro_rules! m {
+ () => {{
+ fn f(_: impl Sized) {}
+ f
+ }}
+}
+
+fn main() {
+ fn f() -> impl Sized {};
+ m!()(f());
+}
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[repr(usize)]
-#[derive(Copy, Clone)]
-enum Enum {
- A = 0,
-}
-
-union Foo {
- a: &'static u8,
- b: Enum,
-}
-
-// A pointer is guaranteed non-null
-const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b};
-//~^ ERROR this constant likely exhibits undefined behavior
-
-fn main() {
-}
+++ /dev/null
-error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/ub-enum-ptr.rs:23:1
- |
-LL | const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .TAG, but expected something in the range 0..=0
- |
- = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0080`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[repr(usize)]
+#[derive(Copy, Clone)]
+enum Enum {
+ A = 0,
+}
+union TransmuteEnum {
+ a: &'static u8,
+ b: Enum,
+}
+
+// A pointer is guaranteed non-null
+const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// Invalid enum discriminant
+#[repr(usize)]
+#[derive(Copy, Clone)]
+enum Enum2 {
+ A = 2,
+}
+union TransmuteEnum2 {
+ a: usize,
+ b: Enum2,
+}
+const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// Invalid enum field content (mostly to test printing of paths for enum tuple
+// variants and tuples).
+union TransmuteChar {
+ a: u32,
+ b: char,
+}
+// Need to create something which does not clash with enum layout optimizations.
+const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b }));
+//~^ ERROR this constant likely exhibits undefined behavior
+
+fn main() {
+}
--- /dev/null
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:22:1
+ |
+LL | const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .<enum-tag>, but expected something in the range 0..=0
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:35:1
+ |
+LL | const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 0 at .<enum-tag>, but expected something in the range 2..=2
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:45:1
+ |
+LL | const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b }));
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered character at .Some.0.1, but expected a valid unicode codepoint
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0080`.
|
note: ...which requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: [u8; _] }`...
note: ...which requires const-evaluating `Foo::bytes::{{constant}}`...
- --> $SRC_DIR/libcore/mem.rs:323:14
+ --> $SRC_DIR/libcore/mem.rs:LL:COL
|
LL | unsafe { intrinsics::size_of::<T>() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which again requires computing layout of `Foo`, completing the cycle
note: cycle used when const-evaluating `Foo::bytes::{{constant}}`
- --> $SRC_DIR/libcore/mem.rs:323:14
+ --> $SRC_DIR/libcore/mem.rs:LL:COL
|
LL | unsafe { intrinsics::size_of::<T>() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
// Unresolved multi-segment attributes are not treated as custom.
-#![feature(custom_attribute, proc_macro_path_invoc)]
+#![feature(custom_attribute)]
mod existent {}
error[E0658]: The attribute `foo` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/custom_attribute.rs:13:1
+ --> $DIR/custom_attribute.rs:13:3
|
LL | #[foo] //~ ERROR The attribute `foo`
- | ^^^^^^
+ | ^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `foo` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/custom_attribute.rs:15:5
+ --> $DIR/custom_attribute.rs:15:7
|
LL | #[foo] //~ ERROR The attribute `foo`
- | ^^^^^^
+ | ^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `foo` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/custom_attribute.rs:17:5
+ --> $DIR/custom_attribute.rs:17:7
|
LL | #[foo] //~ ERROR The attribute `foo`
- | ^^^^^^
+ | ^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// We need to opt inot the `!` feature in order to trigger the
+// We need to opt into the `!` feature in order to trigger the
// requirement that this is testing.
#![feature(never_type)]
// edition:2015
-#![feature(raw_identifiers)]
#![allow(async_idents)]
#[macro_export]
// aux-build:edition-kw-macro-2015.rs
// compile-pass
-#![feature(raw_identifiers)]
#![allow(async_idents)]
#[macro_use]
// edition:2015
// aux-build:edition-kw-macro-2015.rs
-#![feature(raw_identifiers)]
-
#[macro_use]
extern crate edition_kw_macro_2015;
error: no rules expected the token `r#async`
- --> $DIR/edition-keywords-2015-2015-parsing.rs:24:31
+ --> $DIR/edition-keywords-2015-2015-parsing.rs:22:31
|
LL | r#async = consumes_async!(r#async); //~ ERROR no rules expected the token `r#async`
| ^^^^^^^
error: no rules expected the token `async`
- --> $DIR/edition-keywords-2015-2015-parsing.rs:25:35
+ --> $DIR/edition-keywords-2015-2015-parsing.rs:23:35
|
LL | r#async = consumes_async_raw!(async); //~ ERROR no rules expected the token `async`
| ^^^^^
// edition:2015
// aux-build:edition-kw-macro-2018.rs
-#![feature(raw_identifiers)]
-
#[macro_use]
extern crate edition_kw_macro_2018;
error: expected identifier, found reserved keyword `async`
- --> $DIR/edition-keywords-2015-2018-expansion.rs:20:5
+ --> $DIR/edition-keywords-2015-2018-expansion.rs:18:5
|
LL | produces_async! {} //~ ERROR expected identifier, found reserved keyword
| ^^^^^^^^^^^^^^^^^^ expected identifier, found reserved keyword
// edition:2015
// aux-build:edition-kw-macro-2018.rs
-#![feature(raw_identifiers)]
-
#[macro_use]
extern crate edition_kw_macro_2018;
error: no rules expected the token `r#async`
- --> $DIR/edition-keywords-2015-2018-parsing.rs:24:31
+ --> $DIR/edition-keywords-2015-2018-parsing.rs:22:31
|
LL | r#async = consumes_async!(r#async); //~ ERROR no rules expected the token `r#async`
| ^^^^^^^
error: no rules expected the token `async`
- --> $DIR/edition-keywords-2015-2018-parsing.rs:25:35
+ --> $DIR/edition-keywords-2015-2018-parsing.rs:23:35
|
LL | r#async = consumes_async_raw!(async); //~ ERROR no rules expected the token `async`
| ^^^^^
fn bar<T>() {}
fn main() {
- foo::<f64>(); //~ ERROR expected at most 0 type parameters, found 1 type parameter [E0087]
+ foo::<f64>(); //~ ERROR wrong number of type arguments: expected 0, found 1 [E0087]
- bar::<f64, u64>(); //~ ERROR expected at most 1 type parameter, found 2 type parameters [E0087]
+ bar::<f64, u64>(); //~ ERROR wrong number of type arguments: expected 1, found 2 [E0087]
}
-error[E0087]: too many type parameters provided: expected at most 0 type parameters, found 1 type parameter
+error[E0087]: wrong number of type arguments: expected 0, found 1
--> $DIR/E0087.rs:15:11
|
-LL | foo::<f64>(); //~ ERROR expected at most 0 type parameters, found 1 type parameter [E0087]
- | ^^^ expected 0 type parameters
+LL | foo::<f64>(); //~ ERROR wrong number of type arguments: expected 0, found 1 [E0087]
+ | ^^^ unexpected type argument
-error[E0087]: too many type parameters provided: expected at most 1 type parameter, found 2 type parameters
+error[E0087]: wrong number of type arguments: expected 1, found 2
--> $DIR/E0087.rs:17:16
|
-LL | bar::<f64, u64>(); //~ ERROR expected at most 1 type parameter, found 2 type parameters [E0087]
- | ^^^ expected 1 type parameter
+LL | bar::<f64, u64>(); //~ ERROR wrong number of type arguments: expected 1, found 2 [E0087]
+ | ^^^ unexpected type argument
error: aborting due to 2 previous errors
-error[E0088]: too many lifetime parameters provided: expected at most 0 lifetime parameters, found 1 lifetime parameter
+error[E0088]: wrong number of lifetime arguments: expected 0, found 1
--> $DIR/E0088.rs:15:9
|
LL | f::<'static>(); //~ ERROR E0088
- | ^^^^^^^ expected 0 lifetime parameters
+ | ^^^^^^^ unexpected lifetime argument
-error[E0088]: too many lifetime parameters provided: expected at most 1 lifetime parameter, found 2 lifetime parameters
+error[E0088]: wrong number of lifetime arguments: expected 1, found 2
--> $DIR/E0088.rs:16:18
|
LL | g::<'static, 'static>(); //~ ERROR E0088
- | ^^^^^^^ expected 1 lifetime parameter
+ | ^^^^^^^ unexpected lifetime argument
error: aborting due to 2 previous errors
fn foo<T, U>() {}
fn main() {
- foo::<f64>(); //~ ERROR expected 2 type parameters, found 1 type parameter [E0089]
+ foo::<f64>(); //~ ERROR wrong number of type arguments: expected 2, found 1 [E0089]
}
-error[E0089]: too few type parameters provided: expected 2 type parameters, found 1 type parameter
+error[E0089]: wrong number of type arguments: expected 2, found 1
--> $DIR/E0089.rs:14:5
|
-LL | foo::<f64>(); //~ ERROR expected 2 type parameters, found 1 type parameter [E0089]
- | ^^^^^^^^^^ expected 2 type parameters
+LL | foo::<f64>(); //~ ERROR wrong number of type arguments: expected 2, found 1 [E0089]
+ | ^^^^^^^^^^ expected 2 type arguments
error: aborting due to previous error
fn foo<'a: 'b, 'b: 'a>() {}
fn main() {
- foo::<'static>(); //~ ERROR expected 2 lifetime parameters, found 1 lifetime parameter [E0090]
+ foo::<'static>(); //~ ERROR wrong number of lifetime arguments: expected 2, found 1 [E0090]
}
-error[E0090]: too few lifetime parameters provided: expected 2 lifetime parameters, found 1 lifetime parameter
+error[E0090]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/E0090.rs:14:5
|
-LL | foo::<'static>(); //~ ERROR expected 2 lifetime parameters, found 1 lifetime parameter [E0090]
- | ^^^^^^^^^^^^^^ expected 2 lifetime parameters
+LL | foo::<'static>(); //~ ERROR wrong number of lifetime arguments: expected 2, found 1 [E0090]
+ | ^^^^^^^^^^^^^^ expected 2 lifetime arguments
error: aborting due to previous error
struct Baz<'a, 'b, 'c> {
buzz: Buzz<'a>,
//~^ ERROR E0107
- //~| expected 2 lifetime parameters
+ //~| expected 2 lifetime arguments
bar: Bar<'a>,
//~^ ERROR E0107
- //~| unexpected lifetime parameter
+ //~| unexpected lifetime argument
foo2: Foo<'a, 'b, 'c>,
//~^ ERROR E0107
- //~| 2 unexpected lifetime parameters
+ //~| 2 unexpected lifetime arguments
}
-fn main() {
-}
+fn main() {}
-error[E0107]: wrong number of lifetime parameters: expected 2, found 1
+error[E0107]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/E0107.rs:21:11
|
LL | buzz: Buzz<'a>,
- | ^^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^^ expected 2 lifetime arguments
-error[E0107]: wrong number of lifetime parameters: expected 0, found 1
- --> $DIR/E0107.rs:24:10
+error[E0107]: wrong number of lifetime arguments: expected 0, found 1
+ --> $DIR/E0107.rs:24:14
|
LL | bar: Bar<'a>,
- | ^^^^^^^ unexpected lifetime parameter
+ | ^^ unexpected lifetime argument
-error[E0107]: wrong number of lifetime parameters: expected 1, found 3
+error[E0107]: wrong number of lifetime arguments: expected 1, found 3
--> $DIR/E0107.rs:27:11
|
LL | foo2: Foo<'a, 'b, 'c>,
- | ^^^^^^^^^^^^^^^ 2 unexpected lifetime parameters
+ | ^^^^^^^^^^^^^^^ 2 unexpected lifetime arguments
error: aborting due to 3 previous errors
--> $DIR/E0244.rs:12:23
|
LL | struct Bar<S, T> { x: Foo<S, T> }
- | ^^^^^^^^^ expected no type arguments
+ | ^^^^^^^^^ 2 unexpected type arguments
error: aborting due to previous error
--> $DIR/E0401.rs:32:25
|
LL | impl<T> Iterator for A<T> {
- | ---- `Self` type implicitely declared here, on the `impl`
+ | ---- `Self` type implicitly declared here, on the `impl`
...
LL | fn helper(sel: &Self) -> u8 { //~ ERROR E0401
| ------ ^^^^ use of type variable from outer function
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(exhaustive_integer_patterns)]
+#![feature(exclusive_range_pattern)]
+#![deny(unreachable_patterns)]
+
+use std::{char, usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128};
+
+fn main() {
+ let x: u8 = 0;
+
+ // A single range covering the entire domain.
+ match x {
+ 0 ..= 255 => {} // ok
+ }
+
+ // A combination of ranges and values.
+ // These are currently allowed to be overlapping.
+ match x {
+ 0 ..= 32 => {}
+ 33 => {}
+ 34 .. 128 => {}
+ 100 ..= 200 => {}
+ 200 => {} //~ ERROR unreachable pattern
+ 201 ..= 255 => {}
+ }
+
+ // An incomplete set of values.
+ match x { //~ ERROR non-exhaustive patterns
+ 0 .. 128 => {}
+ }
+
+ // A more incomplete set of values.
+ match x { //~ ERROR non-exhaustive patterns
+ 0 ..= 10 => {}
+ 20 ..= 30 => {}
+ 35 => {}
+ 70 .. 255 => {}
+ }
+
+ let x: i8 = 0;
+ match x { //~ ERROR non-exhaustive patterns
+ -7 => {}
+ -5..=120 => {}
+ -2..=20 => {} //~ ERROR unreachable pattern
+ 125 => {}
+ }
+
+ // Let's test other types too!
+ let c: char = '\u{0}';
+ match c {
+ '\u{0}' ..= char::MAX => {} // ok
+ }
+
+ // We can actually get away with just covering the
+ // following two ranges, which correspond to all
+ // valid Unicode Scalar Values.
+ match c {
+ '\u{0000}' ..= '\u{D7FF}' => {}
+ '\u{E000}' ..= '\u{10_FFFF}' => {}
+ }
+
+ match 0usize {
+ 0 ..= usize::MAX => {} // ok
+ }
+
+ match 0u16 {
+ 0 ..= u16::MAX => {} // ok
+ }
+
+ match 0u32 {
+ 0 ..= u32::MAX => {} // ok
+ }
+
+ match 0u64 {
+ 0 ..= u64::MAX => {} // ok
+ }
+
+ match 0u128 {
+ 0 ..= u128::MAX => {} // ok
+ }
+
+ match 0isize {
+ isize::MIN ..= isize::MAX => {} // ok
+ }
+
+ match 0i8 {
+ -128 ..= 127 => {} // ok
+ }
+
+ match 0i8 { //~ ERROR non-exhaustive patterns
+ -127 ..= 127 => {}
+ }
+
+ match 0i16 {
+ i16::MIN ..= i16::MAX => {} // ok
+ }
+
+ match 0i16 { //~ ERROR non-exhaustive patterns
+ i16::MIN ..= -1 => {}
+ 1 ..= i16::MAX => {}
+ }
+
+ match 0i32 {
+ i32::MIN ..= i32::MAX => {} // ok
+ }
+
+ match 0i64 {
+ i64::MIN ..= i64::MAX => {} // ok
+ }
+
+ match 0i128 {
+ i128::MIN ..= i128::MAX => {} // ok
+ }
+
+ // Make sure that guards don't factor into the exhaustiveness checks.
+ match 0u8 { //~ ERROR non-exhaustive patterns
+ 0 .. 128 => {}
+ 128 ..= 255 if true => {}
+ }
+
+ match 0u8 {
+ 0 .. 128 => {}
+ 128 ..= 255 if false => {}
+ 128 ..= 255 => {} // ok, because previous arm was guarded
+ }
+
+ // Now things start getting a bit more interesting. Testing products!
+ match (0u8, Some(())) { //~ ERROR non-exhaustive patterns
+ (1, _) => {}
+ (_, None) => {}
+ }
+
+ match (0u8, true) { //~ ERROR non-exhaustive patterns
+ (0 ..= 125, false) => {}
+ (128 ..= 255, false) => {}
+ (0 ..= 255, true) => {}
+ }
+
+ match (0u8, true) { // ok
+ (0 ..= 125, false) => {}
+ (128 ..= 255, false) => {}
+ (0 ..= 255, true) => {}
+ (125 .. 128, false) => {}
+ }
+
+ match 0u8 { // ok
+ 0 .. 2 => {}
+ 1 ..= 2 => {}
+ _ => {}
+ }
+
+ const LIM: u128 = u128::MAX - 1;
+ match 0u128 { //~ ERROR non-exhaustive patterns
+ 0 ..= LIM => {}
+ }
+
+ match 0u128 { //~ ERROR non-exhaustive patterns
+ 0 ..= 4 => {}
+ }
+
+ match 0u128 { //~ ERROR non-exhaustive patterns
+ 4 ..= u128::MAX => {}
+ }
+}
--- /dev/null
+error: unreachable pattern
+ --> $DIR/exhaustive_integer_patterns.rs:32:9
+ |
+LL | 200 => {} //~ ERROR unreachable pattern
+ | ^^^
+ |
+note: lint level defined here
+ --> $DIR/exhaustive_integer_patterns.rs:13:9
+ |
+LL | #![deny(unreachable_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^
+
+error[E0004]: non-exhaustive patterns: `128u8..=255u8` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:37:11
+ |
+LL | match x { //~ ERROR non-exhaustive patterns
+ | ^ pattern `128u8..=255u8` not covered
+
+error[E0004]: non-exhaustive patterns: `11u8..=19u8`, `31u8..=34u8`, `36u8..=69u8` and 1 more not covered
+ --> $DIR/exhaustive_integer_patterns.rs:42:11
+ |
+LL | match x { //~ ERROR non-exhaustive patterns
+ | ^ patterns `11u8..=19u8`, `31u8..=34u8`, `36u8..=69u8` and 1 more not covered
+
+error: unreachable pattern
+ --> $DIR/exhaustive_integer_patterns.rs:53:9
+ |
+LL | -2..=20 => {} //~ ERROR unreachable pattern
+ | ^^^^^^^
+
+error[E0004]: non-exhaustive patterns: `-128i8..=-8i8`, `-6i8`, `121i8..=124i8` and 1 more not covered
+ --> $DIR/exhaustive_integer_patterns.rs:50:11
+ |
+LL | match x { //~ ERROR non-exhaustive patterns
+ | ^ patterns `-128i8..=-8i8`, `-6i8`, `121i8..=124i8` and 1 more not covered
+
+error[E0004]: non-exhaustive patterns: `-128i8` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:99:11
+ |
+LL | match 0i8 { //~ ERROR non-exhaustive patterns
+ | ^^^ pattern `-128i8` not covered
+
+error[E0004]: non-exhaustive patterns: `0i16` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:107:11
+ |
+LL | match 0i16 { //~ ERROR non-exhaustive patterns
+ | ^^^^ pattern `0i16` not covered
+
+error[E0004]: non-exhaustive patterns: `128u8..=255u8` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:125:11
+ |
+LL | match 0u8 { //~ ERROR non-exhaustive patterns
+ | ^^^ pattern `128u8..=255u8` not covered
+
+error[E0004]: non-exhaustive patterns: `(0u8, Some(_))` and `(2u8..=255u8, Some(_))` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:137:11
+ |
+LL | match (0u8, Some(())) { //~ ERROR non-exhaustive patterns
+ | ^^^^^^^^^^^^^^^ patterns `(0u8, Some(_))` and `(2u8..=255u8, Some(_))` not covered
+
+error[E0004]: non-exhaustive patterns: `(126u8..=127u8, false)` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:142:11
+ |
+LL | match (0u8, true) { //~ ERROR non-exhaustive patterns
+ | ^^^^^^^^^^^ pattern `(126u8..=127u8, false)` not covered
+
+error[E0004]: non-exhaustive patterns: `340282366920938463463374607431768211455u128` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:162:11
+ |
+LL | match 0u128 { //~ ERROR non-exhaustive patterns
+ | ^^^^^ pattern `340282366920938463463374607431768211455u128` not covered
+
+error[E0004]: non-exhaustive patterns: `5u128..=340282366920938463463374607431768211455u128` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:166:11
+ |
+LL | match 0u128 { //~ ERROR non-exhaustive patterns
+ | ^^^^^ pattern `5u128..=340282366920938463463374607431768211455u128` not covered
+
+error[E0004]: non-exhaustive patterns: `0u128..=3u128` not covered
+ --> $DIR/exhaustive_integer_patterns.rs:170:11
+ |
+LL | match 0u128 { //~ ERROR non-exhaustive patterns
+ | ^^^^^ pattern `0u128..=3u128` not covered
+
+error: aborting due to 13 previous errors
+
+For more information about this error, try `rustc --explain E0004`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(existential_type)]
+// compile-pass
+mod my_mod {
+ use std::fmt::Debug;
+
+ pub existential type Foo: Debug;
+ pub existential type Foot: Debug;
+
+ pub fn get_foo() -> Foo {
+ 5i32
+ }
+
+ pub fn get_foot() -> Foot {
+ get_foo()
+ }
+}
+
+fn main() {
+ let _: my_mod::Foot = my_mod::get_foot();
+}
+
-error[E0391]: cycle detected when normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: Foo }`
+error[E0391]: cycle detected when processing `Foo`
--> $DIR/no_inferrable_concrete_type.rs:16:1
|
LL | existential type Foo: Copy; //~ cycle detected
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
- = note: ...which again requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: Foo }`, completing the cycle
+note: ...which requires processing `bar`...
+ --> $DIR/no_inferrable_concrete_type.rs:19:23
+ |
+LL | fn bar(x: Foo) -> Foo { x }
+ | ^^^^^
+ = note: ...which again requires processing `Foo`, completing the cycle
error: aborting due to previous error
// extern functions are extern "C" fn
let _x: extern "C" fn() = f; // OK
is_fn(f);
- //~^ ERROR `extern "C" fn() {f}: std::ops::Fn<()>` is not satisfied
+ //~^ ERROR expected a `std::ops::Fn<()>` closure, found `extern "C" fn() {f}`
}
-error[E0277]: the trait bound `extern "C" fn() {f}: std::ops::Fn<()>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<()>` closure, found `extern "C" fn() {f}`
--> $DIR/extern-wrong-value-type.rs:19:5
|
LL | is_fn(f);
- | ^^^^^ the trait `std::ops::Fn<()>` is not implemented for `extern "C" fn() {f}`
+ | ^^^^^ expected an `Fn<()>` closure, found `extern "C" fn() {f}`
|
+ = help: the trait `std::ops::Fn<()>` is not implemented for `extern "C" fn() {f}`
+ = note: wrap the `extern "C" fn() {f}` in a closure with no arguments: `|| { /* code */ }
note: required by `is_fn`
--> $DIR/extern-wrong-value-type.rs:14:1
|
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x: u8 = 0;
+ match x { //~ ERROR non-exhaustive patterns: `_` not covered
+ 0 ..= 255 => {}
+ }
+}
--- /dev/null
+error[E0004]: non-exhaustive patterns: `_` not covered
+ --> $DIR/feature-gate-exhaustive_integer_patterns.rs:13:11
+ |
+LL | match x { //~ ERROR non-exhaustive patterns: `_` not covered
+ | ^ pattern `_` not covered
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0004`.
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-
-// Test that `#[rustc_*]` attributes are gated by `rustc_attrs` feature gate.
-
-#[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
-#[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
-
-fn main() {}
+++ /dev/null
-error[E0658]: the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
- --> $DIR/feature-gate-rustc-attrs-1.rs:15:1
- |
-LL | #[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
- | ^^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(rustc_attrs)] to the crate attributes to enable
-
-error[E0658]: the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
- --> $DIR/feature-gate-rustc-attrs-1.rs:16:1
- |
-LL | #[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
- | ^^^^^^^^^^^^^^
- |
- = help: add #![feature(rustc_attrs)] to the crate attributes to enable
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub mod foo {
- pub use bar::Bar;
- //~^ ERROR unresolved import `bar`
-
- pub mod bar {
- pub struct Bar;
- }
-}
-
-fn main() {
- let _ = foo::Bar;
-}
+++ /dev/null
-error[E0432]: unresolved import `bar`
- --> $DIR/feature-gate-uniform-paths.rs:12:13
- |
-LL | pub use bar::Bar;
- | ^^^ Did you mean `self::bar`?
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0432`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(custom_attribute)]
-
-#[my_attr(a b c d)]
-//~^ ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
-//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
-//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
-fn main() {}
+++ /dev/null
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:13
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:15
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:17
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: aborting due to 3 previous errors
-
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub fn main() {
- let catch_result = do catch { //~ ERROR `catch` expression is experimental
- let x = 5;
- x
- };
- assert_eq!(catch_result, 5);
-}
+++ /dev/null
-error[E0658]: `catch` expression is experimental (see issue #31436)
- --> $DIR/feature-gate-catch_expr.rs:12:24
- |
-LL | let catch_result = do catch { //~ ERROR `catch` expression is experimental
- | ________________________^
-LL | | let x = 5;
-LL | | x
-LL | | };
- | |_____^
- |
- = help: add #![feature(catch_expr)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:17:1
+ --> $DIR/feature-gate-custom_attribute.rs:17:3
|
LL | #[fake_attr] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:18:1
+ --> $DIR/feature-gate-custom_attribute.rs:18:3
|
LL | #[fake_attr(100)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:19:1
+ --> $DIR/feature-gate-custom_attribute.rs:19:3
|
LL | #[fake_attr(1, 2, 3)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:20:1
+ --> $DIR/feature-gate-custom_attribute.rs:20:3
|
LL | #[fake_attr("hello")] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:21:1
+ --> $DIR/feature-gate-custom_attribute.rs:21:3
|
LL | #[fake_attr(name = "hello")] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:22:1
+ --> $DIR/feature-gate-custom_attribute.rs:22:3
|
LL | #[fake_attr(1, "hi", key = 12, true, false)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:23:1
+ --> $DIR/feature-gate-custom_attribute.rs:23:3
|
LL | #[fake_attr(key = "hello", val = 10)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:24:1
+ --> $DIR/feature-gate-custom_attribute.rs:24:3
|
LL | #[fake_attr(key("hello"), val(10))] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:25:1
+ --> $DIR/feature-gate-custom_attribute.rs:25:3
|
LL | #[fake_attr(enabled = true, disabled = false)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:26:1
+ --> $DIR/feature-gate-custom_attribute.rs:26:3
|
LL | #[fake_attr(true)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:27:1
+ --> $DIR/feature-gate-custom_attribute.rs:27:3
|
LL | #[fake_attr(pi = 3.14159)] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_attr` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:28:1
+ --> $DIR/feature-gate-custom_attribute.rs:28:3
|
LL | #[fake_attr(b"hi")] //~ ERROR attribute `fake_attr` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `fake_doc` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/feature-gate-custom_attribute.rs:29:1
+ --> $DIR/feature-gate-custom_attribute.rs:29:3
|
LL | #[fake_doc(r"doc")] //~ ERROR attribute `fake_doc` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: attributes of the form `#[derive_*]` are reserved for the compiler (see issue #29644)
- --> $DIR/feature-gate-custom_derive.rs:11:1
+ --> $DIR/feature-gate-custom_derive.rs:11:3
|
LL | #[derive_Clone]
- | ^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^
|
= help: add #![feature(custom_derive)] to the crate attributes to enable
}
struct Foo;
+
impl PointerFamily<u32> for Foo {
type Pointer<usize> = Box<usize>;
//~^ ERROR generic associated types are unstable
//~^ ERROR where clauses on associated types are unstable
}
+impl Bar for Foo {
+ type Assoc where Self: Sized = Foo;
+ //~^ ERROR where clauses on associated types are unstable
+}
fn main() {}
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: generic associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:23:5
+ --> $DIR/feature-gate-generic_associated_types.rs:24:5
|
LL | type Pointer<usize> = Box<usize>;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: generic associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:25:5
+ --> $DIR/feature-gate-generic_associated_types.rs:26:5
|
LL | type Pointer2<u32> = Box<u32>;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: where clauses on associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:30:5
+ --> $DIR/feature-gate-generic_associated_types.rs:31:5
|
LL | type Assoc where Self: Sized;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
-error: aborting due to 6 previous errors
+error[E0658]: where clauses on associated types are unstable (see issue #44265)
+ --> $DIR/feature-gate-generic_associated_types.rs:36:5
+ |
+LL | type Assoc where Self: Sized = Foo;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(generic_associated_types)] to the crate attributes to enable
+
+error: aborting due to 7 previous errors
For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Test that the MSP430 interrupt ABI cannot be used when msp430_interrupt
-// feature gate is not used.
-
-macro_rules! m { ($v:vis) => {} }
-//~^ ERROR :vis fragment specifier is experimental and subject to change
-
-fn main() {
- m!(pub);
-}
+++ /dev/null
-error[E0658]: :vis fragment specifier is experimental and subject to change (see issue #41022)
- --> $DIR/feature-gate-macro-vis-matcher.rs:14:19
- |
-LL | macro_rules! m { ($v:vis) => {} }
- | ^^^^^^
- |
- = help: add #![feature(macro_vis_matcher)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- let r#foo = 3; //~ ERROR raw identifiers are experimental and subject to change
- println!("{}", foo);
-}
+++ /dev/null
-error[E0658]: raw identifiers are experimental and subject to change (see issue #48589)
- --> $DIR/feature-gate-raw-identifiers.rs:12:9
- |
-LL | let r#foo = 3; //~ ERROR raw identifiers are experimental and subject to change
- | ^^^^^
- |
- = help: add #![feature(raw_identifiers)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+
+// Test that `#[rustc_*]` attributes are gated by `rustc_attrs` feature gate.
+
+#[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
+#[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
+
+fn main() {}
--- /dev/null
+error[E0658]: the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
+ --> $DIR/feature-gate-rustc-attrs-1.rs:15:1
+ |
+LL | #[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(rustc_attrs)] to the crate attributes to enable
+
+error[E0658]: the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
+ --> $DIR/feature-gate-rustc-attrs-1.rs:16:1
+ |
+LL | #[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
+ | ^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(rustc_attrs)] to the crate attributes to enable
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
error[E0658]: unless otherwise specified, attributes with the prefix `rustc_` are reserved for internal compiler diagnostics (see issue #29642)
- --> $DIR/feature-gate-rustc-attrs.rs:15:1
+ --> $DIR/feature-gate-rustc-attrs.rs:15:3
|
LL | #[rustc_foo]
- | ^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(rustc_attrs)] to the crate attributes to enable
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- #[rustfmt::skip] //~ ERROR tool attributes are unstable
- let x = 3
- ;
-}
+++ /dev/null
-error[E0658]: tool attributes are unstable (see issue #44690)
- --> $DIR/feature-gate-tool_attributes.rs:12:5
- |
-LL | #[rustfmt::skip] //~ ERROR tool attributes are unstable
- | ^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(tool_attributes)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+pub fn main() {
+ let try_result: Option<_> = try { //~ ERROR `try` expression is experimental
+ let x = 5;
+ x
+ };
+ assert_eq!(try_result, Some(5));
+}
--- /dev/null
+error[E0658]: `try` expression is experimental (see issue #31436)
+ --> $DIR/feature-gate-try_blocks.rs:14:33
+ |
+LL | let try_result: Option<_> = try { //~ ERROR `try` expression is experimental
+ | _________________________________^
+LL | | let x = 5;
+LL | | x
+LL | | };
+ | |_____^
+ |
+ = help: add #![feature(try_blocks)] to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub mod foo {
+ pub use bar::Bar;
+ //~^ ERROR unresolved import `bar`
+
+ pub mod bar {
+ pub struct Bar;
+ }
+}
+
+fn main() {
+ let _ = foo::Bar;
+}
--- /dev/null
+error[E0432]: unresolved import `bar`
+ --> $DIR/feature-gate-uniform-paths.rs:12:13
+ |
+LL | pub use bar::Bar;
+ | ^^^ Did you mean `self::bar`?
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0432`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(custom_attribute)]
+
+#[my_attr(a b c d)]
+//~^ ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
+//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
+//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
+fn main() {}
--- /dev/null
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:13
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:15
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:17
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: aborting due to 3 previous errors
+
//~| found type `std::boxed::Box<dyn std::ops::FnMut() -> isize>`
needs_fn(1);
- //~^ ERROR : std::ops::Fn<(isize,)>`
+ //~^ ERROR expected a `std::ops::Fn<(isize,)>` closure, found `{integer}`
}
= note: expected type `()`
found type `std::boxed::Box<dyn std::ops::FnMut() -> isize>`
-error[E0277]: the trait bound `{integer}: std::ops::Fn<(isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<(isize,)>` closure, found `{integer}`
--> $DIR/fn-trait-formatting.rs:29:5
|
LL | needs_fn(1);
- | ^^^^^^^^ the trait `std::ops::Fn<(isize,)>` is not implemented for `{integer}`
+ | ^^^^^^^^ expected an `Fn<(isize,)>` closure, found `{integer}`
|
- = help: the following implementations were found:
- <&'a F as std::ops::Fn<A>>
- <core::str::LinesAnyMap as std::ops::Fn<(&'a str,)>>
+ = help: the trait `std::ops::Fn<(isize,)>` is not implemented for `{integer}`
note: required by `needs_fn`
--> $DIR/fn-trait-formatting.rs:13:1
|
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a, T: 'a>(&'a T);
+
+struct Bar<'a>(&'a ());
+
+fn main() {
+ Foo::<'static, 'static, ()>(&0); //~ ERROR wrong number of lifetime arguments
+ //~^ ERROR mismatched types
+
+ Bar::<'static, 'static, ()>(&()); //~ ERROR wrong number of lifetime arguments
+ //~^ ERROR wrong number of type arguments
+}
--- /dev/null
+error[E0088]: wrong number of lifetime arguments: expected 1, found 2
+ --> $DIR/generic-arg-mismatch-recover.rs:16:20
+ |
+LL | Foo::<'static, 'static, ()>(&0); //~ ERROR wrong number of lifetime arguments
+ | ^^^^^^^ unexpected lifetime argument
+
+error[E0308]: mismatched types
+ --> $DIR/generic-arg-mismatch-recover.rs:16:33
+ |
+LL | Foo::<'static, 'static, ()>(&0); //~ ERROR wrong number of lifetime arguments
+ | ^^ expected (), found integral variable
+ |
+ = note: expected type `&'static ()`
+ found type `&{integer}`
+
+error[E0088]: wrong number of lifetime arguments: expected 1, found 2
+ --> $DIR/generic-arg-mismatch-recover.rs:19:20
+ |
+LL | Bar::<'static, 'static, ()>(&()); //~ ERROR wrong number of lifetime arguments
+ | ^^^^^^^ unexpected lifetime argument
+
+error[E0087]: wrong number of type arguments: expected 0, found 1
+ --> $DIR/generic-arg-mismatch-recover.rs:19:29
+ |
+LL | Bar::<'static, 'static, ()>(&()); //~ ERROR wrong number of lifetime arguments
+ | ^^ unexpected type argument
+
+error: aborting due to 4 previous errors
+
+Some errors occurred: E0087, E0088, E0308.
+For more information about an error, try `rustc --explain E0087`.
LL | fn hash(&self, hasher: &mut impl Hasher) {}
| ^^^^^^^^^^^ expected generic parameter, found `impl Trait`
|
- ::: $SRC_DIR/libcore/hash/mod.rs:185:13
+ ::: $SRC_DIR/libcore/hash/mod.rs:LL:COL
|
LL | fn hash<H: Hasher>(&self, state: &mut H);
| - declaration in trait here
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+
+mod m {
+ pub struct S(u8);
+
+ use S as Z;
+}
+
+use m::*;
+
+fn main() {}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Ambiguity between a `macro_rules` macro and a non-existent import recovered as `Def::Err`
+
+macro_rules! mac { () => () }
+
+mod m {
+ use nonexistent_module::mac; //~ ERROR unresolved import `nonexistent_module`
+
+ mac!(); //~ ERROR `mac` is ambiguous
+}
+
+fn main() {}
--- /dev/null
+error[E0432]: unresolved import `nonexistent_module`
+ --> $DIR/issue-53269.rs:16:9
+ |
+LL | use nonexistent_module::mac; //~ ERROR unresolved import `nonexistent_module`
+ | ^^^^^^^^^^^^^^^^^^ Maybe a missing `extern crate nonexistent_module;`?
+
+error[E0659]: `mac` is ambiguous
+ --> $DIR/issue-53269.rs:18:5
+ |
+LL | mac!(); //~ ERROR `mac` is ambiguous
+ | ^^^
+ |
+note: `mac` could refer to the name defined here
+ --> $DIR/issue-53269.rs:13:1
+ |
+LL | macro_rules! mac { () => () }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+note: `mac` could also refer to the name imported here
+ --> $DIR/issue-53269.rs:16:9
+ |
+LL | use nonexistent_module::mac; //~ ERROR unresolved import `nonexistent_module`
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 2 previous errors
+
+Some errors occurred: E0432, E0659.
+For more information about an error, try `rustc --explain E0432`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Macro from prelude is shadowed by non-existent import recovered as `Def::Err`.
+
+use std::assert; //~ ERROR unresolved import `std::assert`
+
+fn main() {
+ assert!(true);
+}
--- /dev/null
+error[E0432]: unresolved import `std::assert`
+ --> $DIR/issue-53512.rs:13:5
+ |
+LL | use std::assert; //~ ERROR unresolved import `std::assert`
+ | ^^^^^^^^^^^ no `assert` in the root
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0432`.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// `#[macro_export] macro_rules` that doen't originate from macro expansions can be placed
+// `#[macro_export] macro_rules` that doesn't originate from macro expansions can be placed
// into the root module soon enough to act as usual items and shadow globs and preludes.
#![feature(decl_macro)]
-error: `m` is ambiguous
+error[E0659]: `m` is ambiguous
--> $DIR/macros.rs:48:5
|
LL | m!(); //~ ERROR ambiguous
| ^
|
-note: `m` could refer to the macro defined here
+note: `m` could refer to the name defined here
--> $DIR/macros.rs:46:5
|
LL | macro_rules! m { () => {} }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-note: `m` could also refer to the macro imported here
+note: `m` could also refer to the name imported here
--> $DIR/macros.rs:47:9
|
LL | use two_macros::m;
mod m5 {
macro_rules! m { () => {
- macro_rules! panic { () => {} } //~ ERROR `panic` is already in scope
+ macro_rules! panic { () => {} }
} }
m!();
- panic!();
+ panic!(); //~ ERROR `panic` is ambiguous
}
#[macro_use(n)]
-error: `panic` is already in scope
+error[E0659]: `panic` is ambiguous
+ --> $DIR/shadow_builtin_macros.rs:43:5
+ |
+LL | panic!(); //~ ERROR `panic` is ambiguous
+ | ^^^^^
+ |
+note: `panic` could refer to the name defined here
--> $DIR/shadow_builtin_macros.rs:40:9
|
-LL | macro_rules! panic { () => {} } //~ ERROR `panic` is already in scope
+LL | macro_rules! panic { () => {} }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
LL | } }
LL | m!();
| ----- in this macro invocation
- |
- = note: macro-expanded `macro_rules!`s may not shadow existing macros (see RFC 1560)
+ = note: `panic` is also a builtin macro
+ = note: macro-expanded macros do not shadow
error[E0659]: `panic` is ambiguous
--> $DIR/shadow_builtin_macros.rs:25:14
$(
fn $n() {
S::f::<i64>();
- //~^ ERROR too many type parameters provided
+ //~^ ERROR wrong number of type arguments
}
)*
}
}
impl_add!(a b);
+
+fn main() {}
-error[E0601]: `main` function not found in crate `issue_53251`
- |
- = note: consider adding a `main` function to `$DIR/issue-53251.rs`
-
-error[E0087]: too many type parameters provided: expected at most 0 type parameters, found 1 type parameter
+error[E0087]: wrong number of type arguments: expected 0, found 1
--> $DIR/issue-53251.rs:21:24
|
LL | S::f::<i64>();
- | ^^^ expected 0 type parameters
+ | ^^^ unexpected type argument
...
LL | impl_add!(a b);
| --------------- in this macro invocation
-error: aborting due to 2 previous errors
+error: aborting due to previous error
-Some errors occurred: E0087, E0601.
-For more information about an error, try `rustc --explain E0087`.
+For more information about this error, try `rustc --explain E0087`.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Test that `Box` cannot be used with a lifetime parameter.
+// Test that `Box` cannot be used with a lifetime argument.
struct Foo<'a> {
- x: Box<'a, isize> //~ ERROR wrong number of lifetime parameters
+ x: Box<'a, isize> //~ ERROR wrong number of lifetime arguments
}
pub fn main() {
-error[E0107]: wrong number of lifetime parameters: expected 0, found 1
- --> $DIR/issue-18423.rs:14:8
+error[E0107]: wrong number of lifetime arguments: expected 0, found 1
+ --> $DIR/issue-18423.rs:14:12
|
-LL | x: Box<'a, isize> //~ ERROR wrong number of lifetime parameters
- | ^^^^^^^^^^^^^^ unexpected lifetime parameter
+LL | x: Box<'a, isize> //~ ERROR wrong number of lifetime arguments
+ | ^^ unexpected lifetime argument
error: aborting due to previous error
let ptr: *mut () = 0 as *mut _;
let _: &mut Fn() = unsafe {
&mut *(ptr as *mut Fn())
- //~^ ERROR `(): std::ops::Fn<()>` is not satisfied
+ //~^ ERROR expected a `std::ops::Fn<()>` closure, found `()`
};
}
-error[E0277]: the trait bound `(): std::ops::Fn<()>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<()>` closure, found `()`
--> $DIR/issue-22034.rs:18:16
|
LL | &mut *(ptr as *mut Fn())
- | ^^^ the trait `std::ops::Fn<()>` is not implemented for `()`
+ | ^^^ expected an `Fn<()>` closure, found `()`
|
+ = help: the trait `std::ops::Fn<()>` is not implemented for `()`
+ = note: wrap the `()` in a closure with no arguments: `|| { /* code */ }
= note: required for the cast to the object type `dyn std::ops::Fn()`
error: aborting due to previous error
-error[E0277]: the trait bound `(): std::ops::FnMut<(_, char)>` is not satisfied
+error[E0277]: expected a `std::ops::FnMut<(_, char)>` closure, found `()`
--> $DIR/issue-23966.rs:12:16
|
LL | "".chars().fold(|_, _| (), ());
- | ^^^^ the trait `std::ops::FnMut<(_, char)>` is not implemented for `()`
+ | ^^^^ expected an `FnMut<(_, char)>` closure, found `()`
+ |
+ = help: the trait `std::ops::FnMut<(_, char)>` is not implemented for `()`
error: aborting due to previous error
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Checks lexical scopes cannot see through normal module boundries
+// Checks lexical scopes cannot see through normal module boundaries
fn f() {
fn g() {}
| ^ use of type variable from outer function
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/issue-3214.rs:16:22
+ --> $DIR/issue-3214.rs:16:26
|
LL | impl<T> Drop for foo<T> {
- | ^^^^^^ expected no type arguments
+ | ^ unexpected type argument
error: aborting due to 2 previous errors
error[E0658]: attributes of the form `#[derive_*]` are reserved for the compiler (see issue #29644)
- --> $DIR/issue-32655.rs:16:9
+ --> $DIR/issue-32655.rs:16:11
|
LL | #[derive_Clone] //~ ERROR attributes of the form
- | ^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^
...
LL | foo!();
| ------- in this macro invocation
= help: add #![feature(custom_derive)] to the crate attributes to enable
error[E0658]: attributes of the form `#[derive_*]` are reserved for the compiler (see issue #29644)
- --> $DIR/issue-32655.rs:28:5
+ --> $DIR/issue-32655.rs:28:7
|
LL | #[derive_Clone] //~ ERROR attributes of the form
- | ^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^
|
= help: add #![feature(custom_derive)] to the crate attributes to enable
// except according to those terms.
static S : u64 = { { panic!("foo"); 0 } };
-//~^ ERROR calls in statics are limited
+//~^ ERROR panicking in statics is unstable
fn main() {
println!("{:?}", S);
-error[E0015]: calls in statics are limited to constant functions, tuple structs and tuple variants
+error[E0658]: panicking in statics is unstable (see issue #51999)
--> $DIR/issue-32829.rs:11:22
|
LL | static S : u64 = { { panic!("foo"); 0 } };
| ^^^^^^^^^^^^^^
|
+ = help: add #![feature(const_panic)] to the crate attributes to enable
= note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
error: aborting due to previous error
-For more information about this error, try `rustc --explain E0015`.
+For more information about this error, try `rustc --explain E0658`.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
macro_rules! foo {
($($p:vis)*) => {} //~ ERROR repetition matches empty token tree
// run-pass
-// This test has structs and functions that are by definiton unusable
+// This test has structs and functions that are by definition unusable
// all over the place, so just go ahead and allow dead_code
#![allow(dead_code)]
error[E0658]: The attribute `marco_use` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/issue-49074.rs:13:1
+ --> $DIR/issue-49074.rs:13:3
|
LL | #[marco_use] // typo
- | ^^^^^^^^^^^^
+ | ^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Confirm that we don't accidently divide or mod by zero in llvm_type
+// Confirm that we don't accidentally divide or mod by zero in llvm_type
// compile-pass
#![feature(label_break_value)]
-// These are forbidden occurences of label-break-value
+// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
// except according to those terms.
// FIXME: Change to UI Test
-// Check notes are placed on an assignment that can actually precede the current assigmnent
-// Don't emmit a first assignment for assignment in a loop.
+// Check notes are placed on an assignment that can actually precede the current assignment
+// Don't emit a first assignment for assignment in a loop.
// compile-flags: -Zborrowck=compare
use other::*;
mod foo {
- // Test that this is unused even though an earler `extern crate` is used.
+ // Test that this is unused even though an earlier `extern crate` is used.
extern crate lint_unused_extern_crate2; //~ ERROR unused extern crate
}
// aux-build:lints-in-foreign-macros.rs
// compile-pass
-#![warn(unused_imports)]
+#![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
+#![warn(missing_docs)]
#[macro_use]
extern crate lints_in_foreign_macros;
mod b { bar!(); }
mod c { baz!(use std::string::ToString;); } //~ WARN: unused import
mod d { baz2!(use std::string::ToString;); } //~ WARN: unused import
+baz!(pub fn undocumented() {}); //~ WARN: missing documentation for a function
+baz2!(pub fn undocumented2() {}); //~ WARN: missing documentation for a function
fn main() {}
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:20:16
+ --> $DIR/lints-in-foreign-macros.rs:21:16
|
LL | () => {use std::string::ToString;} //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
note: lint level defined here
--> $DIR/lints-in-foreign-macros.rs:14:9
|
-LL | #![warn(unused_imports)]
+LL | #![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
| ^^^^^^^^^^^^^^
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:25:18
+ --> $DIR/lints-in-foreign-macros.rs:26:18
|
LL | mod c { baz!(use std::string::ToString;); } //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:26:19
+ --> $DIR/lints-in-foreign-macros.rs:27:19
|
LL | mod d { baz2!(use std::string::ToString;); } //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
+warning: missing documentation for crate
+ --> $DIR/lints-in-foreign-macros.rs:14:1
+ |
+LL | / #![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
+LL | | #![warn(missing_docs)]
+LL | |
+LL | | #[macro_use]
+... |
+LL | |
+LL | | fn main() {}
+ | |____________^
+ |
+note: lint level defined here
+ --> $DIR/lints-in-foreign-macros.rs:15:9
+ |
+LL | #![warn(missing_docs)]
+ | ^^^^^^^^^^^^
+
+warning: missing documentation for a function
+ --> $DIR/lints-in-foreign-macros.rs:28:6
+ |
+LL | baz!(pub fn undocumented() {}); //~ WARN: missing documentation for a function
+ | ^^^^^^^^^^^^^^^^^^^^^
+
+warning: missing documentation for a function
+ --> $DIR/lints-in-foreign-macros.rs:29:7
+ |
+LL | baz2!(pub fn undocumented2() {}); //~ WARN: missing documentation for a function
+ | ^^^^^^^^^^^^^^^^^^^^^^
+
// compile-pass
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![allow(unused)]
#![warn(unreachable_pub)]
// compile-pass
#![feature(crate_visibility_modifier)]
-#![feature(macro_vis_matcher)]
+#![cfg_attr(stage0, feature(macro_vis_matcher))]
#![allow(unused)]
#![warn(unreachable_pub)]
| ^^^^^^^^^^^^^^
error[E0658]: The attribute `macro_reexport` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/macro-reexport-removed.rs:15:1
+ --> $DIR/macro-reexport-removed.rs:15:3
|
LL | #[macro_reexport(macro_one)] //~ ERROR attribute `macro_reexport` is currently unknown
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
fn method_call() {
S.early(); // OK
S.early::<'static>();
- //~^ ERROR expected 2 lifetime parameters, found 1 lifetime parameter
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 1
S.early::<'static, 'static, 'static>();
- //~^ ERROR expected at most 2 lifetime parameters, found 3 lifetime parameters
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 3
let _: &u8 = S.life_and_type::<'static>();
S.life_and_type::<u8>();
S.life_and_type::<'static, u8>();
S::early(S); // OK
S::early::<'static>(S);
- //~^ ERROR expected 2 lifetime parameters, found 1 lifetime parameter
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 1
S::early::<'static, 'static, 'static>(S);
- //~^ ERROR expected at most 2 lifetime parameters, found 3 lifetime parameters
+ //~^ ERROR wrong number of lifetime arguments: expected 2, found 3
let _: &u8 = S::life_and_type::<'static>(S);
S::life_and_type::<u8>(S);
S::life_and_type::<'static, u8>(S);
-error[E0090]: too few lifetime parameters provided: expected 2 lifetime parameters, found 1 lifetime parameter
+error[E0090]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/method-call-lifetime-args-fail.rs:26:7
|
LL | S.early::<'static>();
- | ^^^^^ expected 2 lifetime parameters
+ | ^^^^^ expected 2 lifetime arguments
-error[E0088]: too many lifetime parameters provided: expected at most 2 lifetime parameters, found 3 lifetime parameters
+error[E0088]: wrong number of lifetime arguments: expected 2, found 3
--> $DIR/method-call-lifetime-args-fail.rs:28:33
|
LL | S.early::<'static, 'static, 'static>();
- | ^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^ unexpected lifetime argument
error: cannot specify lifetime arguments explicitly if late bound lifetime parameters are present
--> $DIR/method-call-lifetime-args-fail.rs:37:15
LL | fn late_unused_early<'a, 'b>(self) -> &'b u8 { loop {} }
| ^^
-error[E0090]: too few lifetime parameters provided: expected 2 lifetime parameters, found 1 lifetime parameter
+error[E0090]: wrong number of lifetime arguments: expected 2, found 1
--> $DIR/method-call-lifetime-args-fail.rs:73:5
|
LL | S::early::<'static>(S);
- | ^^^^^^^^^^^^^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^^^^^^^^^^^^^ expected 2 lifetime arguments
-error[E0088]: too many lifetime parameters provided: expected at most 2 lifetime parameters, found 3 lifetime parameters
+error[E0088]: wrong number of lifetime arguments: expected 2, found 3
--> $DIR/method-call-lifetime-args-fail.rs:75:34
|
LL | S::early::<'static, 'static, 'static>(S);
- | ^^^^^^^ expected 2 lifetime parameters
+ | ^^^^^^^ unexpected lifetime argument
error: aborting due to 18 previous errors
// compile-flags: -Z parse-only
-#![feature(raw_identifiers)]
-
fn test_if() {
r#if true { } //~ ERROR found `true`
}
error: expected one of `!`, `.`, `::`, `;`, `?`, `{`, `}`, or an operator, found `true`
- --> $DIR/raw-literal-keywords.rs:16:10
+ --> $DIR/raw-literal-keywords.rs:14:10
|
LL | r#if true { } //~ ERROR found `true`
| ^^^^ expected one of 8 possible tokens here
error: expected one of `!`, `.`, `::`, `;`, `?`, `{`, `}`, or an operator, found `Test`
- --> $DIR/raw-literal-keywords.rs:20:14
+ --> $DIR/raw-literal-keywords.rs:18:14
|
LL | r#struct Test; //~ ERROR found `Test`
| ^^^^ expected one of 8 possible tokens here
error: expected one of `!`, `.`, `::`, `;`, `?`, `{`, `}`, or an operator, found `Test`
- --> $DIR/raw-literal-keywords.rs:24:13
+ --> $DIR/raw-literal-keywords.rs:22:13
|
LL | r#union Test; //~ ERROR found `Test`
| ^^^^ expected one of 8 possible tokens here
// compile-flags: -Z parse-only
-#![feature(raw_identifiers)]
-
fn self_test(r#self: u32) {
//~^ ERROR `r#self` is not currently supported.
}
error: `r#self` is not currently supported.
- --> $DIR/raw-literal-self.rs:15:14
+ --> $DIR/raw-literal-self.rs:13:14
|
LL | fn self_test(r#self: u32) {
| ^^^^^^
error[E0658]: unless otherwise specified, attributes with the prefix `rustc_` are reserved for internal compiler diagnostics (see issue #29642)
- --> $DIR/reserved-attr-on-macro.rs:11:1
+ --> $DIR/reserved-attr-on-macro.rs:11:3
|
LL | #[rustc_attribute_should_be_reserved] //~ ERROR attributes with the prefix `rustc_` are reserved
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(rustc_attrs)] to the crate attributes to enable
#![feature(generic_associated_types)]
//FIXME(#44265): The lifetime shadowing and type parameter shadowing
-// should cause an error. Now it compiles (errorneously) and this will be addressed
+// should cause an error. Now it compiles (erroneously) and this will be addressed
// by a future PR. Then remove the following:
// compile-pass
#![deny(rust_2018_compatibility)]
-// Don't make a suggestion for a raw identifer replacement unless raw
+// Don't make a suggestion for a raw identifier replacement unless raw
// identifiers are enabled.
fn main() {
--> $DIR/async-ident-allowed.rs:19:9
|
LL | let async = 3; //~ ERROR: is a keyword
- | ^^^^^
+ | ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
|
note: lint level defined here
--> $DIR/async-ident-allowed.rs:13:9
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(raw_identifiers)]
#![allow(dead_code, unused_variables, non_camel_case_types, non_upper_case_globals)]
#![deny(async_idents)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(raw_identifiers)]
#![allow(dead_code, unused_variables, non_camel_case_types, non_upper_case_globals)]
#![deny(async_idents)]
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:18:4
+ --> $DIR/async-ident.rs:17:4
|
LL | fn async() {} //~ ERROR async
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
|
note: lint level defined here
- --> $DIR/async-ident.rs:13:9
+ --> $DIR/async-ident.rs:12:9
|
LL | #![deny(async_idents)]
| ^^^^^^^^^^^^
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:23:7
+ --> $DIR/async-ident.rs:22:7
|
LL | ($async:expr, async) => {};
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:23:19
+ --> $DIR/async-ident.rs:22:19
|
LL | ($async:expr, async) => {};
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:37:11
+ --> $DIR/async-ident.rs:36:11
|
LL | trait async {}
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:41:10
+ --> $DIR/async-ident.rs:40:10
|
LL | impl async for MyStruct {}
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:47:12
+ --> $DIR/async-ident.rs:46:12
|
LL | static async: u32 = 0;
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:53:11
+ --> $DIR/async-ident.rs:52:11
|
LL | const async: u32 = 0;
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:59:15
+ --> $DIR/async-ident.rs:58:15
|
LL | impl Foo { fn async() {} }
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:64:12
+ --> $DIR/async-ident.rs:63:12
|
LL | struct async {}
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:67:9
+ --> $DIR/async-ident.rs:66:9
|
LL | let async: async = async {};
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:67:16
+ --> $DIR/async-ident.rs:66:16
|
LL | let async: async = async {};
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:67:24
+ --> $DIR/async-ident.rs:66:24
|
LL | let async: async = async {};
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:78:19
+ --> $DIR/async-ident.rs:77:19
|
LL | () => (pub fn async() {})
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
= note: for more information, see issue #49716 <https://github.com/rust-lang/rust/issues/49716>
error: `async` is a keyword in the 2018 edition
- --> $DIR/async-ident.rs:85:6
+ --> $DIR/async-ident.rs:84:6
|
LL | (async) => (1)
| ^^^^^ help: you can use a raw identifier to stay compatible: `r#async`
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/seq-args.rs:14:9
+ --> $DIR/seq-args.rs:14:13
|
LL | impl<T> seq<T> for Vec<T> { //~ ERROR wrong number of type arguments
- | ^^^^^^ expected no type arguments
+ | ^ unexpected type argument
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/seq-args.rs:17:6
+ --> $DIR/seq-args.rs:17:10
|
LL | impl seq<bool> for u32 { //~ ERROR wrong number of type arguments
- | ^^^^^^^^^ expected no type arguments
+ | ^^^^ unexpected type argument
error: aborting due to 2 previous errors
// except according to those terms.
// Test that we DO NOT warn when lifetime name is used multiple
-// argments, or more than once in a single argument.
+// arguments, or more than once in a single argument.
//
// compile-pass
error[E0658]: The attribute `foo` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/issue-36530.rs:11:1
+ --> $DIR/issue-36530.rs:11:3
|
LL | #[foo] //~ ERROR is currently unknown to the compiler
- | ^^^^^^
+ | ^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
error[E0658]: The attribute `foo` is currently unknown to the compiler and may have meaning added to it in the future (see issue #29642)
- --> $DIR/issue-36530.rs:13:5
+ --> $DIR/issue-36530.rs:13:8
|
LL | #![foo] //~ ERROR is currently unknown to the compiler
- | ^^^^^^^
+ | ^^^
|
= help: add #![feature(custom_attribute)] to the crate attributes to enable
found type `{integer}`
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/structure-constructor-type-mismatch.rs:58:15
+ --> $DIR/structure-constructor-type-mismatch.rs:58:24
|
LL | let pt3 = PointF::<i32> { //~ ERROR wrong number of type arguments
- | ^^^^^^^^^^^^^ expected no type arguments
+ | ^^^ unexpected type argument
error[E0308]: mismatched types
--> $DIR/structure-constructor-type-mismatch.rs:59:12
found type `{integer}`
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/structure-constructor-type-mismatch.rs:64:9
+ --> $DIR/structure-constructor-type-mismatch.rs:64:18
|
LL | PointF::<u32> { .. } => {} //~ ERROR wrong number of type arguments
- | ^^^^^^^^^^^^^ expected no type arguments
+ | ^^^ unexpected type argument
error[E0308]: mismatched types
--> $DIR/structure-constructor-type-mismatch.rs:64:9
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes, custom_attribute)]
+#![feature(custom_attribute)]
type A = rustfmt; //~ ERROR expected type, found tool module `rustfmt`
type B = rustfmt::skip; //~ ERROR expected type, found tool attribute `rustfmt::skip`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes)]
-
#[derive(rustfmt::skip)] //~ ERROR expected a macro, found tool attribute
struct S;
error: expected a macro, found tool attribute
- --> $DIR/tool-attributes-misplaced-2.rs:13:10
+ --> $DIR/tool-attributes-misplaced-2.rs:11:10
|
LL | #[derive(rustfmt::skip)] //~ ERROR expected a macro, found tool attribute
| ^^^^^^^^^^^^^
error: expected a macro, found tool attribute
- --> $DIR/tool-attributes-misplaced-2.rs:17:5
+ --> $DIR/tool-attributes-misplaced-2.rs:15:5
|
LL | rustfmt::skip!(); //~ ERROR expected a macro, found tool attribute
| ^^^^^^^^^^^^^
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes, proc_macro_path_invoc)]
-
mod rustfmt {}
#[rustfmt::skip] //~ ERROR failed to resolve. Could not find `skip` in `rustfmt`
error[E0433]: failed to resolve. Could not find `skip` in `rustfmt`
- --> $DIR/tool-attributes-shadowing.rs:15:12
+ --> $DIR/tool-attributes-shadowing.rs:13:12
|
LL | #[rustfmt::skip] //~ ERROR failed to resolve. Could not find `skip` in `rustfmt`
| ^^^^ Could not find `skip` in `rustfmt`
let _: S<'static, 'static +>;
//~^ at least one non-builtin trait is required for an object type
let _: S<'static, 'static>;
- //~^ ERROR wrong number of lifetime parameters: expected 1, found 2
+ //~^ ERROR wrong number of lifetime arguments: expected 1, found 2
//~| ERROR wrong number of type arguments: expected 1, found 0
let _: S<'static +, 'static>;
//~^ ERROR lifetime parameters must be declared prior to type parameters
LL | let _: S<'static, 'static +>;
| ^^^^^^^^^
-error[E0107]: wrong number of lifetime parameters: expected 1, found 2
- --> $DIR/trait-object-vs-lifetime.rs:23:12
+error[E0107]: wrong number of lifetime arguments: expected 1, found 2
+ --> $DIR/trait-object-vs-lifetime.rs:23:23
|
LL | let _: S<'static, 'static>;
- | ^^^^^^^^^^^^^^^^^^^ unexpected lifetime parameter
+ | ^^^^^^^ unexpected lifetime argument
error[E0243]: wrong number of type arguments: expected 1, found 0
--> $DIR/trait-object-vs-lifetime.rs:23:12
impl bar for u32 { fn dup(&self) -> u32 { *self } fn blah<X>(&self) {} }
fn main() {
- 10.dup::<i32>(); //~ ERROR expected at most 0 type parameters, found 1 type parameter
- 10.blah::<i32, i32>(); //~ ERROR expected at most 1 type parameter, found 2 type parameters
+ 10.dup::<i32>(); //~ ERROR wrong number of type arguments: expected 0, found 1
+ 10.blah::<i32, i32>(); //~ ERROR wrong number of type arguments: expected 1, found 2
(box 10 as Box<bar>).dup();
//~^ ERROR E0038
//~| ERROR E0038
-error[E0087]: too many type parameters provided: expected at most 0 type parameters, found 1 type parameter
+error[E0087]: wrong number of type arguments: expected 0, found 1
--> $DIR/trait-test-2.rs:18:14
|
-LL | 10.dup::<i32>(); //~ ERROR expected at most 0 type parameters, found 1 type parameter
- | ^^^ expected 0 type parameters
+LL | 10.dup::<i32>(); //~ ERROR wrong number of type arguments: expected 0, found 1
+ | ^^^ unexpected type argument
-error[E0087]: too many type parameters provided: expected at most 1 type parameter, found 2 type parameters
+error[E0087]: wrong number of type arguments: expected 1, found 2
--> $DIR/trait-test-2.rs:19:20
|
-LL | 10.blah::<i32, i32>(); //~ ERROR expected at most 1 type parameter, found 2 type parameters
- | ^^^ expected 1 type parameter
+LL | 10.blah::<i32, i32>(); //~ ERROR wrong number of type arguments: expected 1, found 2
+ | ^^^ unexpected type argument
error[E0277]: the trait bound `dyn bar: bar` is not satisfied
--> $DIR/trait-test-2.rs:20:26
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+#![inline(never)]
+fn do_something_with<T>(_x: T) {}
+
+// This test checks that borrows made and returned inside try blocks are properly constrained
+pub fn main() {
+ {
+ // Test that borrows returned from a try block must be valid for the lifetime of the
+ // result variable
+ let result: Result<(), &str> = try {
+ let my_string = String::from("");
+ let my_str: & str = & my_string;
+ //~^ ERROR `my_string` does not live long enough
+ Err(my_str) ?;
+ Err("") ?;
+ };
+ do_something_with(result);
+ }
+
+ {
+ // Test that borrows returned from try blocks freeze their referent
+ let mut i = 5;
+ let k = &mut i;
+ let mut j: Result<(), &mut i32> = try {
+ Err(k) ?;
+ i = 10; //~ ERROR cannot assign to `i` because it is borrowed
+ };
+ ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ i = 40; //~ ERROR cannot assign to `i` because it is borrowed
+
+ let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
+ *i_ptr = 50;
+ }
+}
+
--- /dev/null
+error[E0597]: `my_string` does not live long enough
+ --> $DIR/try-block-bad-lifetime.rs:25:33
+ |
+LL | let my_str: & str = & my_string;
+ | ^^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | };
+ | - `my_string` dropped here while still borrowed
+LL | do_something_with(result);
+ | ------ borrow later used here
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-bad-lifetime.rs:39:13
+ |
+LL | let k = &mut i;
+ | ------ borrow of `i` occurs here
+...
+LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^^ assignment to borrowed `i` occurs here
+LL | };
+LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ | - borrow later used here
+
+error[E0382]: use of moved value: `k`
+ --> $DIR/try-block-bad-lifetime.rs:41:26
+ |
+LL | Err(k) ?;
+ | - value moved here
+...
+LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ | ^ value used here after move
+ |
+ = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-bad-lifetime.rs:42:9
+ |
+LL | let k = &mut i;
+ | ------ borrow of `i` occurs here
+...
+LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^^ assignment to borrowed `i` occurs here
+LL |
+LL | let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
+ | - borrow later used here
+
+error: aborting due to 4 previous errors
+
+Some errors occurred: E0382, E0506, E0597.
+For more information about an error, try `rustc --explain E0382`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+pub fn main() {
+ let res: Result<u32, i32> = try {
+ Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
+ 5
+ };
+
+ let res: Result<i32, i32> = try {
+ "" //~ ERROR type mismatch
+ };
+
+ let res: Result<i32, i32> = try { }; //~ ERROR type mismatch
+
+ let res: () = try { }; //~ the trait bound `(): std::ops::Try` is not satisfied
+
+ let res: i32 = try { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
+}
--- /dev/null
+error[E0277]: the trait bound `i32: std::convert::From<&str>` is not satisfied
+ --> $DIR/try-block-bad-type.rs:17:9
+ |
+LL | Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
+ | ^^^^^^^^ the trait `std::convert::From<&str>` is not implemented for `i32`
+ |
+ = help: the following implementations were found:
+ <i32 as std::convert::From<bool>>
+ <i32 as std::convert::From<i16>>
+ <i32 as std::convert::From<i8>>
+ <i32 as std::convert::From<u16>>
+ <i32 as std::convert::From<u8>>
+ = note: required by `std::convert::From::from`
+
+error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == &str`
+ --> $DIR/try-block-bad-type.rs:22:9
+ |
+LL | "" //~ ERROR type mismatch
+ | ^^ expected i32, found &str
+ |
+ = note: expected type `i32`
+ found type `&str`
+
+error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == ()`
+ --> $DIR/try-block-bad-type.rs:25:39
+ |
+LL | let res: Result<i32, i32> = try { }; //~ ERROR type mismatch
+ | ^ expected i32, found ()
+ |
+ = note: expected type `i32`
+ found type `()`
+
+error[E0277]: the trait bound `(): std::ops::Try` is not satisfied
+ --> $DIR/try-block-bad-type.rs:27:23
+ |
+LL | let res: () = try { }; //~ the trait bound `(): std::ops::Try` is not satisfied
+ | ^^^ the trait `std::ops::Try` is not implemented for `()`
+ |
+ = note: required by `std::ops::Try::from_ok`
+
+error[E0277]: the trait bound `i32: std::ops::Try` is not satisfied
+ --> $DIR/try-block-bad-type.rs:29:24
+ |
+LL | let res: i32 = try { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
+ | ^^^^^ the trait `std::ops::Try` is not implemented for `i32`
+ |
+ = note: required by `std::ops::Try::from_ok`
+
+error: aborting due to 5 previous errors
+
+Some errors occurred: E0271, E0277.
+For more information about an error, try `rustc --explain E0271`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2015
+
+pub fn main() {
+ let try_result: Option<_> = try {
+ //~^ ERROR expected struct, variant or union type, found macro `try`
+ let x = 5; //~ ERROR expected identifier, found keyword
+ x
+ };
+ assert_eq!(try_result, Some(5));
+}
--- /dev/null
+error: expected identifier, found keyword `let`
+ --> $DIR/try-block-in-edition2015.rs:16:9
+ |
+LL | let try_result: Option<_> = try {
+ | --- while parsing this struct
+LL | //~^ ERROR expected struct, variant or union type, found macro `try`
+LL | let x = 5; //~ ERROR expected identifier, found keyword
+ | ^^^ expected identifier, found keyword
+
+error[E0574]: expected struct, variant or union type, found macro `try`
+ --> $DIR/try-block-in-edition2015.rs:14:33
+ |
+LL | let try_result: Option<_> = try {
+ | ^^^ did you mean `try!(...)`?
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0574`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn main() {
+ match try { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `try`
+}
--- /dev/null
+error: expected expression, found reserved keyword `try`
+ --> $DIR/try-block-in-match.rs:16:11
+ |
+LL | match try { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `try`
+ | ^^^ expected expression
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn main() {
+ while try { false } {} //~ ERROR expected expression, found reserved keyword `try`
+}
--- /dev/null
+error: expected expression, found reserved keyword `try`
+ --> $DIR/try-block-in-while.rs:16:11
+ |
+LL | while try { false } {} //~ ERROR expected expression, found reserved keyword `try`
+ | ^^^ expected expression
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+#![inline(never)]
+fn do_something_with<T>(_x: T) {}
+
+// This test checks that borrows made and returned inside try blocks are properly constrained
+pub fn main() {
+ {
+ // Test that a borrow which *might* be returned still freezes its referent
+ let mut i = 222;
+ let x: Result<&i32, ()> = try {
+ Err(())?;
+ &i
+ };
+ i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ let _ = i;
+ do_something_with(x);
+ }
+
+ {
+ let x = String::new();
+ let _y: Result<(), ()> = try {
+ Err(())?;
+ ::std::mem::drop(x);
+ };
+ println!("{}", x); //~ ERROR borrow of moved value: `x`
+ }
+
+ {
+ // Test that a borrow which *might* be assigned to an outer variable still freezes
+ // its referent
+ let mut i = 222;
+ let mut j = &-1;
+ let _x: Result<(), ()> = try {
+ Err(())?;
+ j = &i;
+ };
+ i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ let _ = i;
+ do_something_with(j);
+ }
+}
+
--- /dev/null
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-maybe-bad-lifetime.rs:27:9
+ |
+LL | &i
+ | -- borrow of `i` occurs here
+LL | };
+LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^ assignment to borrowed `i` occurs here
+LL | let _ = i;
+LL | do_something_with(x);
+ | - borrow later used here
+
+error[E0382]: borrow of moved value: `x`
+ --> $DIR/try-block-maybe-bad-lifetime.rs:38:24
+ |
+LL | ::std::mem::drop(x);
+ | - value moved here
+LL | };
+LL | println!("{}", x); //~ ERROR borrow of moved value: `x`
+ | ^ value borrowed here after move
+ |
+ = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-maybe-bad-lifetime.rs:50:9
+ |
+LL | j = &i;
+ | -- borrow of `i` occurs here
+LL | };
+LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^ assignment to borrowed `i` occurs here
+LL | let _ = i;
+LL | do_something_with(j);
+ | - borrow later used here
+
+error: aborting due to 3 previous errors
+
+Some errors occurred: E0382, E0506.
+For more information about an error, try `rustc --explain E0382`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn use_val<T: Sized>(_x: T) {}
+
+pub fn main() {
+ let cfg_res;
+ let _: Result<(), ()> = try {
+ Err(())?;
+ cfg_res = 5;
+ Ok::<(), ()>(())?;
+ use_val(cfg_res);
+ };
+ assert_eq!(cfg_res, 5); //~ ERROR borrow of possibly uninitialized variable: `cfg_res`
+}
+
--- /dev/null
+error[E0381]: borrow of possibly uninitialized variable: `cfg_res`
+ --> $DIR/try-block-opt-init.rs:25:5
+ |
+LL | assert_eq!(cfg_res, 5); //~ ERROR borrow of possibly uninitialized variable: `cfg_res`
+ | ^^^^^^^^^^^^^^^^^^^^^^^ use of possibly uninitialized `cfg_res`
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0381`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn foo() -> Option<()> { Some(()) }
+
+fn main() {
+ let _: Option<f32> = try {
+ foo()?;
+ 42
+ //~^ ERROR type mismatch
+ };
+
+ let _: Option<i32> = try {
+ foo()?;
+ };
+ //~^ ERROR type mismatch
+}
--- /dev/null
+error[E0271]: type mismatch resolving `<std::option::Option<f32> as std::ops::Try>::Ok == {integer}`
+ --> $DIR/try-block-type-error.rs:20:9
+ |
+LL | 42
+ | ^^
+ | |
+ | expected f32, found integral variable
+ | help: use a float literal: `42.0`
+ |
+ = note: expected type `f32`
+ found type `{integer}`
+
+error[E0271]: type mismatch resolving `<std::option::Option<i32> as std::ops::Try>::Ok == ()`
+ --> $DIR/try-block-type-error.rs:26:5
+ |
+LL | };
+ | ^ expected i32, found ()
+ |
+ = note: expected type `i32`
+ found type `()`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0271`.
//~^ ERROR wrong number of type arguments: expected 0, found 1 [E0244]
struct MyStruct2<'a, T: Copy<'a>>;
-//~^ ERROR: wrong number of lifetime parameters: expected 0, found 1
+//~^ ERROR: wrong number of lifetime arguments: expected 0, found 1
fn foo2<'a, T:Copy<'a, U>, U>(x: T) {}
//~^ ERROR wrong number of type arguments: expected 0, found 1 [E0244]
-//~| ERROR: wrong number of lifetime parameters: expected 0, found 1
+//~| ERROR: wrong number of lifetime arguments: expected 0, found 1
fn main() {
}
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:11:11
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:11:16
|
LL | fn foo1<T:Copy<U>, U>(x: T) {}
- | ^^^^^^^ expected no type arguments
+ | ^ unexpected type argument
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:14:14
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:14:19
|
LL | trait Trait: Copy<Send> {}
- | ^^^^^^^^^^ expected no type arguments
+ | ^^^^ unexpected type argument
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:17:21
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:17:26
|
LL | struct MyStruct1<T: Copy<T>>;
- | ^^^^^^^ expected no type arguments
+ | ^ unexpected type argument
-error[E0107]: wrong number of lifetime parameters: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:20:25
+error[E0107]: wrong number of lifetime arguments: expected 0, found 1
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:20:30
|
LL | struct MyStruct2<'a, T: Copy<'a>>;
- | ^^^^^^^^ unexpected lifetime parameter
+ | ^^ unexpected lifetime argument
-error[E0107]: wrong number of lifetime parameters: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:24:15
+error[E0107]: wrong number of lifetime arguments: expected 0, found 1
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:24:20
|
LL | fn foo2<'a, T:Copy<'a, U>, U>(x: T) {}
- | ^^^^^^^^^^^ unexpected lifetime parameter
+ | ^^ unexpected lifetime argument
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/typeck-builtin-bound-type-parameters.rs:24:15
+ --> $DIR/typeck-builtin-bound-type-parameters.rs:24:24
|
LL | fn foo2<'a, T:Copy<'a, U>, U>(x: T) {}
- | ^^^^^^^^^^^ expected no type arguments
+ | ^ unexpected type argument
error: aborting due to 6 previous errors
error[E0244]: wrong number of type arguments: expected 1, found 2
- --> $DIR/typeck_type_placeholder_lifetime_1.rs:19:12
+ --> $DIR/typeck_type_placeholder_lifetime_1.rs:19:19
|
LL | let c: Foo<_, _> = Foo { r: &5 };
- | ^^^^^^^^^ expected 1 type argument
+ | ^ unexpected type argument
error: aborting due to previous error
error[E0244]: wrong number of type arguments: expected 1, found 2
- --> $DIR/typeck_type_placeholder_lifetime_2.rs:19:12
+ --> $DIR/typeck_type_placeholder_lifetime_2.rs:19:19
|
LL | let c: Foo<_, usize> = Foo { r: &5 };
- | ^^^^^^^^^^^^^ expected 1 type argument
+ | ^^^^^ unexpected type argument
error: aborting due to previous error
fn main() {
<String as IntoCow>::into_cow("foo".to_string());
- //~^ ERROR too few type parameters provided: expected 1 type parameter
+ //~^ ERROR wrong number of type arguments: expected 1, found 0
}
-error[E0089]: too few type parameters provided: expected 1 type parameter, found 0 type parameters
+error[E0089]: wrong number of type arguments: expected 1, found 0
--> $DIR/ufcs-qpath-missing-params.rs:24:5
|
LL | <String as IntoCow>::into_cow("foo".to_string());
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected 1 type parameter
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected 1 type argument
error: aborting due to previous error
}
fn test2(x: &Foo<(isize,),Output=()>, y: &Foo(isize)) {
-//~^ ERROR wrong number of lifetime parameters: expected 1, found 0
+//~^ ERROR wrong number of lifetime arguments: expected 1, found 0
// Here, the omitted lifetimes are expanded to distinct things.
same_type(x, y)
}
-error[E0107]: wrong number of lifetime parameters: expected 1, found 0
+error[E0107]: wrong number of lifetime arguments: expected 1, found 0
--> $DIR/unboxed-closure-sugar-region.rs:40:43
|
LL | fn test2(x: &Foo<(isize,),Output=()>, y: &Foo(isize)) {
- | ^^^^^^^^^^ expected 1 lifetime parameter
+ | ^^^^^^^^^^ expected 1 lifetime argument
error: aborting due to previous error
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/unboxed-closure-sugar-wrong-number-number-type-parameters.rs:15:11
+ --> $DIR/unboxed-closure-sugar-wrong-number-number-type-parameters.rs:15:15
|
LL | fn foo(_: Zero())
- | ^^^^^^ expected no type arguments
+ | ^^ unexpected type argument
error[E0220]: associated type `Output` not found for `Zero`
--> $DIR/unboxed-closure-sugar-wrong-number-number-type-parameters.rs:15:15
error[E0244]: wrong number of type arguments: expected 0, found 1
- --> $DIR/unboxed-closure-sugar-wrong-trait.rs:15:8
+ --> $DIR/unboxed-closure-sugar-wrong-trait.rs:15:13
|
LL | fn f<F:Trait(isize) -> isize>(x: F) {}
- | ^^^^^^^^^^^^^^^^^^^^^ expected no type arguments
+ | ^^^^^^^ unexpected type argument
error[E0220]: associated type `Output` not found for `Trait`
--> $DIR/unboxed-closure-sugar-wrong-trait.rs:15:24
-error[E0277]: the trait bound `S: std::ops::Fn<(isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<(isize,)>` closure, found `S`
--> $DIR/unboxed-closures-fnmut-as-fn.rs:38:13
|
LL | let x = call_it(&S, 22);
- | ^^^^^^^ the trait `std::ops::Fn<(isize,)>` is not implemented for `S`
+ | ^^^^^^^ expected an `Fn<(isize,)>` closure, found `S`
|
+ = help: the trait `std::ops::Fn<(isize,)>` is not implemented for `S`
note: required by `call_it`
--> $DIR/unboxed-closures-fnmut-as-fn.rs:33:1
|
-error[E0277]: the trait bound `for<'r> for<'s> unsafe fn(&'s isize) -> isize {square}: std::ops::Fn<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:22:13
|
LL | let x = call_it(&square, 22);
- | ^^^^^^^ the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
+ | ^^^^^^^ expected an `Fn<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
note: required by `call_it`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:17:1
|
LL | fn call_it<F:Fn(&isize)->isize>(_: &F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> for<'s> unsafe fn(&'s isize) -> isize {square}: std::ops::FnMut<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnMut<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:27:13
|
LL | let y = call_it_mut(&mut square, 22);
- | ^^^^^^^^^^^ the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
+ | ^^^^^^^^^^^ expected an `FnMut<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
note: required by `call_it_mut`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:18:1
|
LL | fn call_it_mut<F:FnMut(&isize)->isize>(_: &mut F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> for<'s> unsafe fn(&'s isize) -> isize {square}: std::ops::FnOnce<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnOnce<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:32:13
|
LL | let z = call_it_once(square, 22);
- | ^^^^^^^^^^^^ the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
+ | ^^^^^^^^^^^^ expected an `FnOnce<(&isize,)>` closure, found `for<'r> unsafe fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `for<'r> unsafe fn(&'r isize) -> isize {square}`
note: required by `call_it_once`
--> $DIR/unboxed-closures-unsafe-extern-fn.rs:19:1
|
-error[E0277]: the trait bound `for<'r> for<'s> extern "C" fn(&'s isize) -> isize {square}: std::ops::Fn<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-abi.rs:22:13
|
LL | let x = call_it(&square, 22);
- | ^^^^^^^ the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
+ | ^^^^^^^ expected an `Fn<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
note: required by `call_it`
--> $DIR/unboxed-closures-wrong-abi.rs:17:1
|
LL | fn call_it<F:Fn(&isize)->isize>(_: &F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> for<'s> extern "C" fn(&'s isize) -> isize {square}: std::ops::FnMut<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnMut<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-abi.rs:27:13
|
LL | let y = call_it_mut(&mut square, 22);
- | ^^^^^^^^^^^ the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
+ | ^^^^^^^^^^^ expected an `FnMut<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
note: required by `call_it_mut`
--> $DIR/unboxed-closures-wrong-abi.rs:18:1
|
LL | fn call_it_mut<F:FnMut(&isize)->isize>(_: &mut F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> for<'s> extern "C" fn(&'s isize) -> isize {square}: std::ops::FnOnce<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnOnce<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-abi.rs:32:13
|
LL | let z = call_it_once(square, 22);
- | ^^^^^^^^^^^^ the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
+ | ^^^^^^^^^^^^ expected an `FnOnce<(&isize,)>` closure, found `for<'r> extern "C" fn(&'r isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `for<'r> extern "C" fn(&'r isize) -> isize {square}`
note: required by `call_it_once`
--> $DIR/unboxed-closures-wrong-abi.rs:19:1
|
-error[E0277]: the trait bound `for<'r> unsafe fn(isize) -> isize {square}: std::ops::Fn<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::Fn<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:23:13
|
LL | let x = call_it(&square, 22);
- | ^^^^^^^ the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
+ | ^^^^^^^ expected an `Fn<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::Fn<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
note: required by `call_it`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:18:1
|
LL | fn call_it<F:Fn(&isize)->isize>(_: &F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> unsafe fn(isize) -> isize {square}: std::ops::FnMut<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnMut<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:28:13
|
LL | let y = call_it_mut(&mut square, 22);
- | ^^^^^^^^^^^ the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
+ | ^^^^^^^^^^^ expected an `FnMut<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnMut<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
note: required by `call_it_mut`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:19:1
|
LL | fn call_it_mut<F:FnMut(&isize)->isize>(_: &mut F, _: isize) -> isize { 0 }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error[E0277]: the trait bound `for<'r> unsafe fn(isize) -> isize {square}: std::ops::FnOnce<(&'r isize,)>` is not satisfied
+error[E0277]: expected a `std::ops::FnOnce<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:33:13
|
LL | let z = call_it_once(square, 22);
- | ^^^^^^^^^^^^ the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
+ | ^^^^^^^^^^^^ expected an `FnOnce<(&isize,)>` closure, found `unsafe fn(isize) -> isize {square}`
|
+ = help: the trait `for<'r> std::ops::FnOnce<(&'r isize,)>` is not implemented for `unsafe fn(isize) -> isize {square}`
note: required by `call_it_once`
--> $DIR/unboxed-closures-wrong-arg-type-extern-fn.rs:20:1
|
// normalize-stderr-test "allocation \d+" -> "allocation N"
// normalize-stderr-test "size \d+" -> "size N"
+union BoolTransmute {
+ val: u8,
+ bl: bool,
+}
+
#[repr(C)]
#[derive(Copy, Clone)]
struct SliceRepr {
bad: BadSliceRepr,
slice: &'static [u8],
str: &'static str,
+ my_str: &'static Str,
}
#[repr(C)]
}
trait Trait {}
+impl Trait for bool {}
+
+struct Str(str);
// OK
const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str};
-// should lint
+// bad str
const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
-// bad
+//~^ ERROR this constant likely exhibits undefined behavior
+// bad str
const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
//~^ ERROR this constant likely exhibits undefined behavior
+// bad str in Str
+const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
+//~^ ERROR this constant likely exhibits undefined behavior
// OK
const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice};
-// should lint
+// bad slice
const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
-// bad
-const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
+//~^ ERROR this constant likely exhibits undefined behavior
+// bad slice
+const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
+// bad data *inside* the trait object
+const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// bad data *inside* the slice
+const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
+//~^ ERROR this constant likely exhibits undefined behavior
+
fn main() {
}
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:72:1
+ --> $DIR/union-ub-fat-ptr.rs:79:1
+ |
+LL | const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:82:1
|
LL | const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:80:1
+ --> $DIR/union-ub-fat-ptr.rs:85:1
|
-LL | const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer
+LL | const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:84:1
+ --> $DIR/union-ub-fat-ptr.rs:91:1
+ |
+LL | const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:94:1
+ |
+LL | const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:98:1
|
LL | const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ tried to access memory with alignment N, but alignment N is required
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:87:1
+ --> $DIR/union-ub-fat-ptr.rs:101:1
|
LL | const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ a memory access tried to interpret some bytes as a pointer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:90:1
+ --> $DIR/union-ub-fat-ptr.rs:104:1
|
LL | const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered vtable address is not a pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer vtable is not a valid pointer
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:108:1
+ |
+LL | const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .<deref>, but expected something in the range 0..=1
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:112:1
+ |
+LL | const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .<deref>[0], but expected something in the range 0..=1
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
-error: aborting due to 5 previous errors
+error: aborting due to 10 previous errors
For more information about this error, try `rustc --explain E0080`.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(proc_macro_path_invoc)]
-
#[foo::bar] //~ ERROR failed to resolve. Use of undeclared type or module `foo`
fn main() {}
error[E0433]: failed to resolve. Use of undeclared type or module `foo`
- --> $DIR/unknown-tool-name.rs:13:3
+ --> $DIR/unknown-tool-name.rs:11:3
|
LL | #[foo::bar] //~ ERROR failed to resolve. Use of undeclared type or module `foo`
| ^^^ Use of undeclared type or module `foo`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-arm stdcall isn't suppported
+// ignore-arm stdcall isn't supported
fn baz(f: extern "stdcall" fn(usize, ...)) {
//~^ ERROR: variadic function must have C or cdecl calling convention
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-arm stdcall isn't suppported
-// ignore-aarch64 stdcall isn't suppported
+// ignore-arm stdcall isn't supported
+// ignore-aarch64 stdcall isn't supported
extern "stdcall" {
fn printf(_: *const u8, ...); //~ ERROR: variadic function must have C or cdecl calling
// Test that we can quantify lifetimes outside a constraint (i.e., including
// the self type) in a where clause. Specifically, test that implementing for a
-// specific lifetime is not enough to satisify the `for<'a> ...` constraint, which
+// specific lifetime is not enough to satisfy the `for<'a> ...` constraint, which
// should require *all* lifetimes.
static X: &'static u32 = &42;
self.clippy_version = self.version("clippy", "x86_64-unknown-linux-gnu");
self.rustfmt_version = self.version("rustfmt", "x86_64-unknown-linux-gnu");
self.llvm_tools_version = self.version("llvm-tools", "x86_64-unknown-linux-gnu");
- self.lldb_version = self.version("lldb", "x86_64-unknown-linux-gnu");
+ // lldb is only built for macOS.
+ self.lldb_version = self.version("lldb", "x86_64-apple-darwin");
self.rust_git_commit_hash = self.git_commit_hash("rust", "x86_64-unknown-linux-gnu");
self.cargo_git_commit_hash = self.git_commit_hash("cargo", "x86_64-unknown-linux-gnu");
-Subproject commit f05a1038b59cd4217e58b3aef7a0751a0efd01e4
+Subproject commit dda656652e2e1a8d615a712d7f7482c25fa0a9c2
let mut strs: Vec<String> = nv.splitn(2, '=').map(str::to_owned).collect();
match strs.len() {
- 1 => (strs.pop().unwrap(), "".to_owned()),
+ 1 => (strs.pop().unwrap(), String::new()),
2 => {
let end = strs.pop().unwrap();
(strs.pop().unwrap(), end)
normalized = normalized.replace("\\n", "\n");
}
+ // If there are `$SRC_DIR` normalizations with line and column numbers, then replace them
+ // with placeholders as we do not want tests needing updated when compiler source code
+ // changes.
+ // eg. $SRC_DIR/libcore/mem.rs:323:14 becomes $SRC_DIR/libcore/mem.rs:LL:COL
+ normalized = Regex::new("SRC_DIR(.+):\\d+:\\d+").unwrap()
+ .replace_all(&normalized, "SRC_DIR$1:LL:COL").into_owned();
+
normalized = normalized.replace("\\\\", "\\") // denormalize for paths on windows
.replace("\\", "/") // normalize for paths on windows
.replace("\r\n", "\n") // normalize for linebreaks on windows
-Subproject commit f76ea3ca16ed22dde8ef929db74a4b4df6f2f899
+Subproject commit 813b3b952c07b6b85732c3fbdf3eb74f61a9fa96
// add it to the set of known library features so we can still generate docs.
lib_features.insert("compiler_builtins_lib".to_owned(), Feature {
level: Status::Unstable,
- since: "".to_owned(),
+ since: String::new(),
has_gate_test: false,
tracking_issue: None,
});
//! * No trailing whitespace
//! * No CR characters
//! * No `TODO` or `XXX` directives
-//! * A valid license header is at the top
//! * No unexplained ` ```ignore ` or ` ```rust,ignore ` doc tests
//!
//! A number of these checks can be opted-out of with various directives like
use std::path::Path;
const COLS: usize = 100;
-const LICENSE: &'static str = "\
-Copyright <year> The Rust Project Developers. See the COPYRIGHT
-file at the top-level directory of this distribution and at
-http://rust-lang.org/COPYRIGHT.
-
-Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-option. This file may not be copied, modified, or distributed
-except according to those terms.";
const UNEXPLAINED_IGNORE_DOCTEST_INFO: &str = r#"unexplained "```ignore" doctest; try one:
trailing_new_lines = 0;
}
}
- if !licenseck(file, &contents) {
- tidy_error!(bad, "{}: incorrect license", file.display());
- }
match trailing_new_lines {
0 => tidy_error!(bad, "{}: missing trailing newline", file.display()),
1 | 2 => {}
};
})
}
-
-fn licenseck(file: &Path, contents: &str) -> bool {
- if contents.contains("ignore-license") {
- return true
- }
- let exceptions = [
- "libstd/sync/mpsc/mpsc_queue.rs",
- "libstd/sync/mpsc/spsc_queue.rs",
- ];
- if exceptions.iter().any(|f| file.ends_with(f)) {
- return true
- }
-
- // Skip the BOM if it's there
- let bom = "\u{feff}";
- let contents = if contents.starts_with(bom) {&contents[3..]} else {contents};
-
- // See if the license shows up in the first 100 lines
- let lines = contents.lines().take(100).collect::<Vec<_>>();
- lines.windows(LICENSE.lines().count()).any(|window| {
- let offset = if window.iter().all(|w| w.starts_with("//")) {
- 2
- } else if window.iter().all(|w| w.starts_with('#')) {
- 1
- } else if window.iter().all(|w| w.starts_with(" *")) {
- 2
- } else {
- return false
- };
- window.iter().map(|a| a[offset..].trim())
- .zip(LICENSE.lines()).all(|(a, b)| {
- a == b || match b.find("<year>") {
- Some(i) => a.starts_with(&b[..i]) && a.ends_with(&b[i+6..]),
- None => false,
- }
- })
- })
-
-}