"crossbeam-utils 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
"serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "rls-data"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "rls-rustc"
version = "0.5.0"
"rustc_target 0.0.0",
"scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
"tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
"rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_cratesio_shim 0.0.0",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
version = "0.0.0"
dependencies = [
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-data 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc_data_structures 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
[[package]]
name = "serialize"
version = "0.0.0"
+dependencies = [
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "shell-escape"
[[package]]
name = "smallvec"
-version = "0.6.3"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_target 0.0.0",
"scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serialize 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax_pos 0.0.0",
]
"rustc_data_structures 0.0.0",
"rustc_errors 0.0.0",
"rustc_target 0.0.0",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
"checksum rls-analysis 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96f84d303dcbe1c1bdd41b10867d3399c38fbdac32c4e3645cdb6dbd7f82db1d"
"checksum rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e4a9cc2545ccb7e05b355bfe047b8039a6ec12270d5f3c996b766b340a50f7d2"
"checksum rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd20763e1c60ae8945384c8a8fa4ac44f8afa7b0a817511f5e8927e5d24f988"
+"checksum rls-data 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f81e838ecff6830ed33c2907fd236f38d441c206e983a2aa29fbce99295fab9"
"checksum rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9dba7390427aefa953608429701e3665192ca810ba8ae09301e001b7c7bed0"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
"checksum rls-vfs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ecbc8541b4c341d6271eae10f869dd9d36db871afe184f5b6f9bffbd6ed0373f"
"checksum shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "170a13e64f2a51b77a45702ba77287f5c6829375b04a69cf2222acd17d0cfab9"
"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
"checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537"
-"checksum smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "26df3bb03ca5eac2e64192b723d51f56c1b1e0860e7c766281f4598f181acdc8"
+"checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
"checksum socket2 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "962a516af4d3a7c272cb3a1d50a8cc4e5b41802e4ad54cfb7bee8ba61d37d703"
"checksum stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffbc596e092fe5f598b12ef46cc03754085ac2f4d8c739ad61c4ae266cc3b3fa"
"checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423"
+++ /dev/null
-# `catch_expr`
-
-The tracking issue for this feature is: [#31436]
-
-[#31436]: https://github.com/rust-lang/rust/issues/31436
-
-------------------------
-
-The `catch_expr` feature adds support for a `catch` expression. The `catch`
-expression creates a new scope one can use the `?` operator in.
-
-```rust
-#![feature(catch_expr)]
-
-use std::num::ParseIntError;
-
-let result: Result<i32, ParseIntError> = do catch {
- "1".parse::<i32>()?
- + "2".parse::<i32>()?
- + "3".parse::<i32>()?
-};
-assert_eq!(result, Ok(6));
-
-let result: Result<i32, ParseIntError> = do catch {
- "1".parse::<i32>()?
- + "foo".parse::<i32>()?
- + "3".parse::<i32>()?
-};
-assert!(result.is_err());
-```
+++ /dev/null
-# `tool_attributes`
-
-The tracking issue for this feature is: [#44690]
-
-[#44690]: https://github.com/rust-lang/rust/issues/44690
-
-------------------------
-
-Tool attributes let you use scoped attributes to control the behavior
-of certain tools.
-
-Currently tool names which can be appear in scoped attributes are restricted to
-`clippy` and `rustfmt`.
-
-## An example
-
-```rust
-#![feature(tool_attributes)]
-
-#[rustfmt::skip]
-fn foo() { println!("hello, world"); }
-
-fn main() {
- foo();
-}
-```
--- /dev/null
+# `try_blocks`
+
+The tracking issue for this feature is: [#31436]
+
+[#31436]: https://github.com/rust-lang/rust/issues/31436
+
+------------------------
+
+The `try_blocks` feature adds support for `try` blocks. A `try`
+block creates a new scope one can use the `?` operator in.
+
+```rust,ignore
+// This code needs the 2018 edition
+
+#![feature(try_blocks)]
+
+use std::num::ParseIntError;
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "2".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert_eq!(result, Ok(6));
+
+let result: Result<i32, ParseIntError> = try {
+ "1".parse::<i32>()?
+ + "foo".parse::<i32>()?
+ + "3".parse::<i32>()?
+};
+assert!(result.is_err());
+```
len);
}
- /// Returns a pair of slices which contain the contents of the buffer not used by the VecDeque.
- #[inline]
- unsafe fn unused_as_mut_slices<'a>(&'a mut self) -> (&'a mut [T], &'a mut [T]) {
- let head = self.head;
- let tail = self.tail;
- let buf = self.buffer_as_mut_slice();
- if head != tail {
- // In buf, head..tail contains the VecDeque and tail..head is unused.
- // So calling `ring_slices` with tail and head swapped returns unused slices.
- RingSlices::ring_slices(buf, tail, head)
- } else {
- // Swapping doesn't help when head == tail.
- let (before, after) = buf.split_at_mut(head);
- (after, before)
- }
- }
-
/// Copies a potentially wrapping block of memory len long from src to dest.
/// (abs(dst - src) + len) must be no larger than cap() (There must be at
/// most one continuous overlapping region between src and dest).
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
- // Copies all values from `src_slice` to the start of `dst_slice`.
- unsafe fn copy_whole_slice<T>(src_slice: &[T], dst_slice: &mut [T]) {
- let len = src_slice.len();
- ptr::copy_nonoverlapping(src_slice.as_ptr(), dst_slice[..len].as_mut_ptr(), len);
- }
-
- let src_total = other.len();
-
- // Guarantees there is space in `self` for `other`.
- self.reserve(src_total);
-
- self.head = {
- let original_head = self.head;
-
- // The goal is to copy all values from `other` into `self`. To avoid any
- // mismatch, all valid values in `other` are retrieved...
- let (src_high, src_low) = other.as_slices();
- // and unoccupied parts of self are retrieved.
- let (dst_high, dst_low) = unsafe { self.unused_as_mut_slices() };
-
- // Then all that is needed is to copy all values from
- // src (src_high and src_low) to dst (dst_high and dst_low).
- //
- // other [o o o . . . . . o o o o]
- // [5 6 7] [1 2 3 4]
- // src_low src_high
- //
- // self [. . . . . . o o o o . .]
- // [3 4 5 6 7 .] [1 2]
- // dst_low dst_high
- //
- // Values are not copied one by one but as slices in `copy_whole_slice`.
- // What slices are used depends on various properties of src and dst.
- // There are 6 cases in total:
- // 1. `src` is contiguous and fits in dst_high
- // 2. `src` is contiguous and does not fit in dst_high
- // 3. `src` is discontiguous and fits in dst_high
- // 4. `src` is discontiguous and does not fit in dst_high
- // + src_high is smaller than dst_high
- // 5. `src` is discontiguous and does not fit in dst_high
- // + dst_high is smaller than src_high
- // 6. `src` is discontiguous and does not fit in dst_high
- // + dst_high is the same size as src_high
- let src_contiguous = src_low.is_empty();
- let dst_high_fits_src = dst_high.len() >= src_total;
- match (src_contiguous, dst_high_fits_src) {
- (true, true) => {
- // 1.
- // other [. . . o o o . . . . . .]
- // [] [1 1 1]
- //
- // self [. o o o o o . . . . . .]
- // [.] [1 1 1 . . .]
-
- unsafe {
- copy_whole_slice(src_high, dst_high);
- }
- original_head + src_total
- }
- (true, false) => {
- // 2.
- // other [. . . o o o o o . . . .]
- // [] [1 1 2 2 2]
- //
- // self [. . . . . . . o o o . .]
- // [2 2 2 . . . .] [1 1]
-
- let (src_1, src_2) = src_high.split_at(dst_high.len());
- unsafe {
- copy_whole_slice(src_1, dst_high);
- copy_whole_slice(src_2, dst_low);
- }
- src_total - dst_high.len()
- }
- (false, true) => {
- // 3.
- // other [o o . . . . . . . o o o]
- // [2 2] [1 1 1]
- //
- // self [. o o . . . . . . . . .]
- // [.] [1 1 1 2 2 . . . .]
-
- let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len());
- unsafe {
- copy_whole_slice(src_high, dst_1);
- copy_whole_slice(src_low, dst_2);
- }
- original_head + src_total
- }
- (false, false) => {
- if src_high.len() < dst_high.len() {
- // 4.
- // other [o o o . . . . . . o o o]
- // [2 3 3] [1 1 1]
- //
- // self [. . . . . . o o . . . .]
- // [3 3 . . . .] [1 1 1 2]
-
- let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len());
- let (src_2, src_3) = src_low.split_at(dst_2.len());
- unsafe {
- copy_whole_slice(src_high, dst_1);
- copy_whole_slice(src_2, dst_2);
- copy_whole_slice(src_3, dst_low);
- }
- src_3.len()
- } else if src_high.len() > dst_high.len() {
- // 5.
- // other [o o o . . . . . o o o o]
- // [3 3 3] [1 1 2 2]
- //
- // self [. . . . . . o o o o . .]
- // [2 2 3 3 3 .] [1 1]
-
- let (src_1, src_2) = src_high.split_at(dst_high.len());
- let (dst_2, dst_3) = dst_low.split_at_mut(src_2.len());
- unsafe {
- copy_whole_slice(src_1, dst_high);
- copy_whole_slice(src_2, dst_2);
- copy_whole_slice(src_low, dst_3);
- }
- dst_2.len() + src_low.len()
- } else {
- // 6.
- // other [o o . . . . . . . o o o]
- // [2 2] [1 1 1]
- //
- // self [. . . . . . . o o . . .]
- // [2 2 . . . . .] [1 1 1]
-
- unsafe {
- copy_whole_slice(src_high, dst_high);
- copy_whole_slice(src_low, dst_low);
- }
- src_low.len()
- }
- }
- }
- };
-
- // Some values now exist in both `other` and `self` but are made inaccessible in `other`.
- other.tail = other.head;
+ // naive impl
+ self.extend(other.drain(..));
}
/// Retains only the elements specified by the predicate.
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
f.debug_tuple("Iter")
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
- .finish()
+ .field(&front)
+ .field(&back)
+ .finish()
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(&*self.ring, self.head, self.tail);
f.debug_tuple("IterMut")
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
- .finish()
+ .field(&front)
+ .field(&back)
+ .finish()
}
}
}
}
+ #[test]
+ fn issue_53529() {
+ use boxed::Box;
+
+ let mut dst = VecDeque::new();
+ dst.push_front(Box::new(1));
+ dst.push_front(Box::new(2));
+ assert_eq!(*dst.pop_back().unwrap(), 1);
+
+ let mut src = VecDeque::new();
+ src.push_front(Box::new(2));
+ dst.append(&mut src);
+ for a in dst {
+ assert_eq!(*a, 2);
+ }
+ }
+
}
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::transmute;
-/// Leaks a value: takes ownership and "forgets" about the value **without running
-/// its destructor**.
+/// Takes ownership and "forgets" about the value **without running its destructor**.
///
/// Any resources the value manages, such as heap memory or a file handle, will linger
-/// forever in an unreachable state.
+/// forever in an unreachable state. However, it does not guarantee that pointers
+/// to this memory will remain valid.
///
-/// If you want to dispose of a value properly, running its destructor, see
+/// * If you want to leak memory, see [`Box::leak`][leak].
+/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`][into_raw].
+/// * If you want to dispose of a value properly, running its destructor, see
/// [`mem::drop`][drop].
///
/// # Safety
///
/// # Examples
///
-/// Leak some heap memory by never deallocating it:
-///
-/// ```
-/// use std::mem;
-///
-/// let heap_memory = Box::new(3);
-/// mem::forget(heap_memory);
-/// ```
-///
/// Leak an I/O object, never closing the file:
///
/// ```no_run
/// }
/// ```
///
-/// ## Use case 3
-///
-/// You are transferring ownership across a [FFI] boundary to code written in
-/// another language. You need to `forget` the value on the Rust side because Rust
-/// code is no longer responsible for it.
-///
-/// ```no_run
-/// use std::mem;
-///
-/// extern "C" {
-/// fn my_c_function(x: *const u32);
-/// }
-///
-/// let x: Box<u32> = Box::new(3);
-///
-/// // Transfer ownership into C code.
-/// unsafe {
-/// my_c_function(&*x);
-/// }
-/// mem::forget(x);
-/// ```
-///
-/// In this case, C code must call back into Rust to free the object. Calling C's `free`
-/// function on a [`Box`][box] is *not* safe! Also, `Box` provides an [`into_raw`][into_raw]
-/// method which is the preferred way to do this in practice.
-///
/// [drop]: fn.drop.html
/// [uninit]: fn.uninitialized.html
/// [clone]: ../clone/trait.Clone.html
/// [swap]: fn.swap.html
/// [FFI]: ../../book/first-edition/ffi.html
/// [box]: ../../std/boxed/struct.Box.html
+/// [leak]: ../../std/boxed/struct.Box.html#method.leak
/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw
/// [ub]: ../../reference/behavior-considered-undefined.html
#[inline]
/// }
/// }
/// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *const u8 = &10u8 as *const u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
/// }
/// }
/// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
byteorder = { version = "1.1", features = ["i128"]}
chalk-engine = { version = "0.7.0", default-features=false }
rustc_fs_util = { path = "../librustc_fs_util" }
+smallvec = { version = "0.6.5", features = ["union"] }
# Note that these dependencies are a lie, they're just here to get linkage to
# work.
// queries). Making them anonymous avoids hashing the result, which
// may save a bit of time.
[anon] EraseRegionsTy { ty: Ty<'tcx> },
- [anon] ConstValueToAllocation { val: &'tcx ty::Const<'tcx> },
+ [anon] ConstToAllocation { val: &'tcx ty::Const<'tcx> },
[input] Freevars(DefId),
[input] MaybeUnusedTraitImport(DefId),
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::{Lrc, Lock};
use std::env;
use std::hash::Hash;
} = task {
debug_assert_eq!(node, key);
let krate_idx = self.node_to_node_index[&DepNode::new_no_params(DepKind::Krate)];
- self.alloc_node(node, SmallVec::one(krate_idx))
+ self.alloc_node(node, smallvec![krate_idx])
} else {
bug!("complete_eval_always_task() - Expected eval always task to be popped");
}
Existential(DefId),
/// `type Foo = Bar;`
TyAlias(DefId),
- TyForeign(DefId),
+ ForeignTy(DefId),
TraitAlias(DefId),
AssociatedTy(DefId),
/// `existential type Foo: Bar;`
Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) |
Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) |
Def::AssociatedConst(id) | Def::Macro(id, ..) |
- Def::Existential(id) | Def::AssociatedExistential(id) | Def::TyForeign(id) => {
+ Def::Existential(id) | Def::AssociatedExistential(id) | Def::ForeignTy(id) => {
id
}
Def::StructCtor(.., CtorKind::Fictive) => bug!("impossible struct constructor"),
Def::Union(..) => "union",
Def::Trait(..) => "trait",
- Def::TyForeign(..) => "foreign type",
+ Def::ForeignTy(..) => "foreign type",
Def::Method(..) => "method",
Def::Const(..) => "constant",
Def::AssociatedConst(..) => "associated constant",
}
ImplTraitContext::Universal(in_band_ty_params) => {
self.lower_node_id(def_node_id);
- // Add a definition for the in-band TyParam
+ // Add a definition for the in-band Param
let def_index = self
.resolver
.definitions()
fn lower_item_id(&mut self, i: &Item) -> OneVector<hir::ItemId> {
match i.node {
ItemKind::Use(ref use_tree) => {
- let mut vec = OneVector::one(hir::ItemId { id: i.id });
+ let mut vec = smallvec![hir::ItemId { id: i.id }];
self.lower_item_id_use_tree(use_tree, i.id, &mut vec);
vec
}
ItemKind::MacroDef(..) => OneVector::new(),
ItemKind::Fn(ref decl, ref header, ..) => {
- let mut ids = OneVector::one(hir::ItemId { id: i.id });
+ let mut ids = smallvec![hir::ItemId { id: i.id }];
self.lower_impl_trait_ids(decl, header, &mut ids);
ids
},
ItemKind::Impl(.., None, _, ref items) => {
- let mut ids = OneVector::one(hir::ItemId { id: i.id });
+ let mut ids = smallvec![hir::ItemId { id: i.id }];
for item in items {
if let ImplItemKind::Method(ref sig, _) = item.node {
self.lower_impl_trait_ids(&sig.decl, &sig.header, &mut ids);
}
ids
},
- _ => OneVector::one(hir::ItemId { id: i.id }),
+ _ => smallvec![hir::ItemId { id: i.id }],
}
}
hir::LoopSource::Loop,
)
}),
- ExprKind::Catch(ref body) => {
+ ExprKind::TryBlock(ref body) => {
self.with_catch_scope(body.id, |this| {
let unstable_span =
- this.allow_internal_unstable(CompilerDesugaringKind::Catch, body.span);
+ this.allow_internal_unstable(CompilerDesugaringKind::TryBlock, body.span);
let mut block = this.lower_block(body, true).into_inner();
let tail = block.expr.take().map_or_else(
|| {
}
fn lower_stmt(&mut self, s: &Stmt) -> OneVector<hir::Stmt> {
- OneVector::one(match s.node {
+ smallvec![match s.node {
StmtKind::Local(ref l) => Spanned {
node: hir::StmtKind::Decl(
P(Spanned {
span: s.span,
},
StmtKind::Mac(..) => panic!("Shouldn't exist here"),
- })
+ }]
}
fn lower_capture_clause(&mut self, c: CaptureBy) -> hir::CaptureClause {
match item.node {
ForeignItemKind::Fn(..) => Some(Def::Fn(def_id)),
ForeignItemKind::Static(_, m) => Some(Def::Static(def_id, m)),
- ForeignItemKind::Type => Some(Def::TyForeign(def_id)),
+ ForeignItemKind::Type => Some(Def::ForeignTy(def_id)),
}
}
NodeTraitItem(item) => {
/// Not represented directly in the AST, referred to by name through a ty_path.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum PrimTy {
- TyInt(IntTy),
- TyUint(UintTy),
- TyFloat(FloatTy),
- TyStr,
- TyBool,
- TyChar,
+ Int(IntTy),
+ Uint(UintTy),
+ Float(FloatTy),
+ Str,
+ Bool,
+ Char,
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
}
impl_stable_hash_for!(enum hir::PrimTy {
- TyInt(int_ty),
- TyUint(uint_ty),
- TyFloat(float_ty),
- TyStr,
- TyBool,
- TyChar
+ Int(int_ty),
+ Uint(uint_ty),
+ Float(float_ty),
+ Str,
+ Bool,
+ Char
});
impl_stable_hash_for!(struct hir::BareFnTy {
PrimTy(prim_ty),
TyParam(def_id),
SelfTy(trait_def_id, impl_def_id),
- TyForeign(def_id),
+ ForeignTy(def_id),
Fn(def_id),
Const(def_id),
Static(def_id, is_mutbl),
QuestionMark,
ExistentialReturnType,
ForLoop,
- Catch
+ TryBlock
});
impl_stable_hash_for!(enum ::syntax_pos::FileName {
use mir;
impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>>
-for &'gcx ty::Slice<T>
+for &'gcx ty::List<T>
where T: HashStable<StableHashingContext<'a>> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
}
}
-impl<'a, 'gcx, T> ToStableHashKey<StableHashingContext<'a>> for &'gcx ty::Slice<T>
+impl<'a, 'gcx, T> ToStableHashKey<StableHashingContext<'a>> for &'gcx ty::List<T>
where T: HashStable<StableHashingContext<'a>>
{
type KeyType = Fingerprint;
Undef
});
-impl_stable_hash_for!(enum mir::interpret::Value {
- Scalar(v),
- ScalarPair(a, b),
- ByRef(ptr, align)
-});
-
impl_stable_hash_for!(struct mir::interpret::Pointer {
alloc_id,
offset
DeallocateNonBasePtr |
HeapAllocZeroBytes |
Unreachable |
- Panic |
ReadFromReturnPointer |
UnimplementedTraitSelection |
TypeckError |
GeneratorResumedAfterReturn |
GeneratorResumedAfterPanic |
InfiniteLoop => {}
+ Panic { ref msg, ref file, line, col } => {
+ msg.hash_stable(hcx, hasher);
+ file.hash_stable(hcx, hasher);
+ line.hash_stable(hcx, hasher);
+ col.hash_stable(hcx, hasher);
+ },
ReferencedConstant(ref err) => err.hash_stable(hcx, hasher),
MachineError(ref err) => err.hash_stable(hcx, hasher),
FunctionPointerTyMismatch(a, b) => {
});
impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
-for ty::TypeVariants<'gcx>
+for ty::TyKind<'gcx>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
- use ty::TypeVariants::*;
+ use ty::TyKind::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- TyBool |
- TyChar |
- TyStr |
- TyError |
- TyNever => {
+ Bool |
+ Char |
+ Str |
+ Error |
+ Never => {
// Nothing more to hash.
}
- TyInt(int_ty) => {
+ Int(int_ty) => {
int_ty.hash_stable(hcx, hasher);
}
- TyUint(uint_ty) => {
+ Uint(uint_ty) => {
uint_ty.hash_stable(hcx, hasher);
}
- TyFloat(float_ty) => {
+ Float(float_ty) => {
float_ty.hash_stable(hcx, hasher);
}
- TyAdt(adt_def, substs) => {
+ Adt(adt_def, substs) => {
adt_def.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyArray(inner_ty, len) => {
+ Array(inner_ty, len) => {
inner_ty.hash_stable(hcx, hasher);
len.hash_stable(hcx, hasher);
}
- TySlice(inner_ty) => {
+ Slice(inner_ty) => {
inner_ty.hash_stable(hcx, hasher);
}
- TyRawPtr(pointee_ty) => {
+ RawPtr(pointee_ty) => {
pointee_ty.hash_stable(hcx, hasher);
}
- TyRef(region, pointee_ty, mutbl) => {
+ Ref(region, pointee_ty, mutbl) => {
region.hash_stable(hcx, hasher);
pointee_ty.hash_stable(hcx, hasher);
mutbl.hash_stable(hcx, hasher);
}
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyFnPtr(ref sig) => {
+ FnPtr(ref sig) => {
sig.hash_stable(hcx, hasher);
}
- TyDynamic(ref existential_predicates, region) => {
+ Dynamic(ref existential_predicates, region) => {
existential_predicates.hash_stable(hcx, hasher);
region.hash_stable(hcx, hasher);
}
- TyClosure(def_id, closure_substs) => {
+ Closure(def_id, closure_substs) => {
def_id.hash_stable(hcx, hasher);
closure_substs.hash_stable(hcx, hasher);
}
- TyGenerator(def_id, generator_substs, movability) => {
+ Generator(def_id, generator_substs, movability) => {
def_id.hash_stable(hcx, hasher);
generator_substs.hash_stable(hcx, hasher);
movability.hash_stable(hcx, hasher);
}
- TyGeneratorWitness(types) => {
+ GeneratorWitness(types) => {
types.hash_stable(hcx, hasher)
}
- TyTuple(inner_tys) => {
+ Tuple(inner_tys) => {
inner_tys.hash_stable(hcx, hasher);
}
- TyProjection(ref projection_ty) => {
+ Projection(ref projection_ty) => {
projection_ty.hash_stable(hcx, hasher);
}
- TyAnon(def_id, substs) => {
+ Anon(def_id, substs) => {
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- TyParam(param_ty) => {
+ Param(param_ty) => {
param_ty.hash_stable(hcx, hasher);
}
- TyForeign(def_id) => {
+ Foreign(def_id) => {
def_id.hash_stable(hcx, hasher);
}
- TyInfer(infer_ty) => {
+ Infer(infer_ty) => {
infer_ty.hash_stable(hcx, hasher);
}
}
_hasher: &mut StableHasher<W>) {
// TyVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash a TyVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash a TyVid {:?}.", *self)
}
}
_hasher: &mut StableHasher<W>) {
// IntVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash an IntVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash an IntVid {:?}.", *self)
}
}
_hasher: &mut StableHasher<W>) {
// FloatVid values are confined to an inference context and hence
// should not be hashed.
- bug!("ty::TypeVariants::hash_stable() - can't hash a FloatVid {:?}.", *self)
+ bug!("ty::TyKind::hash_stable() - can't hash a FloatVid {:?}.", *self)
}
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
// I am a horrible monster and I pray for death. When
// we encounter a closure here, it is always a closure
// from within the function that we are currently
tcx,
reg_op: |reg| reg,
fldop: |ty| {
- if let ty::TyAnon(def_id, substs) = ty.sty {
+ if let ty::Anon(def_id, substs) = ty.sty {
// Check that this is `impl Trait` type is
// declared by `parent_def_id` -- i.e., one whose
// value we are inferring. At present, this is
// ```
//
// Here, the return type of `foo` references a
- // `TyAnon` indeed, but not one whose value is
+ // `Anon` indeed, but not one whose value is
// presently being inferred. You can get into a
// similar situation with closure return types
// today:
let tcx = infcx.tcx;
debug!(
- "instantiate_anon_types: TyAnon(def_id={:?}, substs={:?})",
+ "instantiate_anon_types: Anon(def_id={:?}, substs={:?})",
def_id, substs
);
- // Use the same type variable if the exact same TyAnon appears more
+ // Use the same type variable if the exact same Anon appears more
// than once in the return type (e.g. if it's passed to a type alias).
if let Some(anon_defn) = self.anon_types.get(&def_id) {
return anon_defn.concrete_ty;
for predicate in bounds.predicates {
// Change the predicate to refer to the type variable,
- // which will be the concrete type, instead of the TyAnon.
+ // which will be the concrete type, instead of the Anon.
// This also instantiates nested `impl Trait`.
let predicate = self.instantiate_anon_types_in_map(&predicate);
use std::sync::atomic::Ordering;
use ty::fold::{TypeFoldable, TypeFolder};
use ty::subst::Kind;
-use ty::{self, CanonicalVar, Lift, Slice, Ty, TyCtxt, TypeFlags};
+use ty::{self, CanonicalVar, Lift, List, Ty, TyCtxt, TypeFlags};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Canonicalizes a query value `V`. When we canonicalize a query,
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t),
+ ty::Infer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t),
- ty::TyInfer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t),
+ ty::Infer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t),
- ty::TyInfer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t),
+ ty::Infer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t),
- ty::TyInfer(ty::FreshTy(_))
- | ty::TyInfer(ty::FreshIntTy(_))
- | ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::FreshTy(_))
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_)) => {
bug!("encountered a fresh type during canonicalization")
}
- ty::TyInfer(ty::CanonicalTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) => {
bug!("encountered a canonical type during canonicalization")
}
- ty::TyClosure(..)
- | ty::TyGenerator(..)
- | ty::TyGeneratorWitness(..)
- | ty::TyBool
- | ty::TyChar
- | ty::TyInt(..)
- | ty::TyUint(..)
- | ty::TyFloat(..)
- | ty::TyAdt(..)
- | ty::TyStr
- | ty::TyError
- | ty::TyArray(..)
- | ty::TySlice(..)
- | ty::TyRawPtr(..)
- | ty::TyRef(..)
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyDynamic(..)
- | ty::TyNever
- | ty::TyTuple(..)
- | ty::TyProjection(..)
- | ty::TyForeign(..)
- | ty::TyParam(..)
- | ty::TyAnon(..) => {
+ ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Error
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Dynamic(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::Projection(..)
+ | ty::Foreign(..)
+ | ty::Param(..)
+ | ty::Anon(..) => {
if t.flags.intersects(self.needs_canonical_flags) {
t.super_fold_with(self)
} else {
if !value.has_type_flags(needs_canonical_flags) {
let out_value = gcx.lift(value).unwrap();
let canon_value = Canonical {
- variables: Slice::empty(),
+ variables: List::empty(),
value: out_value,
};
return canon_value;
// avoid allocations in those cases. We also don't use `indices` to
// determine if a kind has been seen before until the limit of 8 has
// been exceeded, to also avoid allocations for `indices`.
- if var_values.is_array() {
+ if !var_values.spilled() {
// `var_values` is stack-allocated. `indices` isn't used yet. Do a
// direct linear search of `var_values`.
if let Some(idx) = var_values.iter().position(|&k| k == kind) {
// If `var_values` has become big enough to be heap-allocated,
// fill up `indices` to facilitate subsequent lookups.
- if !var_values.is_array() {
+ if var_values.spilled() {
assert!(indices.is_empty());
*indices =
var_values.iter()
use infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin};
use rustc_data_structures::indexed_vec::IndexVec;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::Lrc;
use serialize::UseSpecializedDecodable;
use std::ops::Index;
use syntax::source_map::Span;
use ty::fold::TypeFoldable;
use ty::subst::Kind;
-use ty::{self, CanonicalVar, Lift, Region, Slice, TyCtxt};
+use ty::{self, CanonicalVar, Lift, Region, List, TyCtxt};
mod canonicalizer;
pub value: V,
}
-pub type CanonicalVarInfos<'gcx> = &'gcx Slice<CanonicalVarInfo>;
+pub type CanonicalVarInfos<'gcx> = &'gcx List<CanonicalVarInfo>;
impl<'gcx> UseSpecializedDecodable for CanonicalVarInfos<'gcx> {}
fn fresh_inference_vars_for_canonical_vars(
&self,
span: Span,
- variables: &Slice<CanonicalVarInfo>,
+ variables: &List<CanonicalVarInfo>,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<CanonicalVar, Kind<'tcx>> = variables
.iter()
match result_value.unpack() {
UnpackedKind::Type(result_value) => {
// e.g., here `result_value` might be `?0` in the example above...
- if let ty::TyInfer(ty::InferTy::CanonicalTy(index)) = result_value.sty {
+ if let ty::Infer(ty::InferTy::CanonicalTy(index)) = result_value.sty {
// in which case we would set `canonical_vars[0]` to `Some(?U)`.
opt_values[index] = Some(*original_value);
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::InferTy::CanonicalTy(c)) => {
+ ty::Infer(ty::InferTy::CanonicalTy(c)) => {
match self.var_values.var_values[c].unpack() {
UnpackedKind::Type(ty) => ty,
r => bug!("{:?} is a type but value is {:?}", c, r),
match (&a.sty, &b.sty) {
// Relate integral variables to other types
- (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
+ (&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => {
self.int_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e))?;
Ok(a)
}
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Int(v)) => {
self.unify_integral_variable(a_is_expected, v_id, IntType(v))
}
- (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ (&ty::Int(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
}
- (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Uint(v)) => {
self.unify_integral_variable(a_is_expected, v_id, UintType(v))
}
- (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
+ (&ty::Uint(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
- (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
+ (&ty::Infer(ty::FloatVar(a_id)), &ty::Infer(ty::FloatVar(b_id))) => {
self.float_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
Ok(a)
}
- (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
+ (&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
self.unify_float_variable(a_is_expected, v_id, v)
}
- (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
+ (&ty::Float(v), &ty::Infer(ty::FloatVar(v_id))) => {
self.unify_float_variable(!a_is_expected, v_id, v)
}
// All other cases of inference are errors
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) => {
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) => {
Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
}
// subtyping. This is basically our "occurs check", preventing
// us from creating infinitely sized types.
match t.sty {
- ty::TyInfer(ty::TyVar(vid)) => {
+ ty::Infer(ty::TyVar(vid)) => {
let mut variables = self.infcx.type_variables.borrow_mut();
let vid = variables.root_var(vid);
let sub_vid = variables.sub_root_var(vid);
}
}
}
- ty::TyInfer(ty::IntVar(_)) |
- ty::TyInfer(ty::FloatVar(_)) => {
+ ty::Infer(ty::IntVar(_)) |
+ ty::Infer(ty::FloatVar(_)) => {
// No matter what mode we are in,
// integer/floating-point types must be equal to be
// relatable.
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
+ (&ty::Infer(TyVar(a_id)), &ty::Infer(TyVar(b_id))) => {
infcx.type_variables.borrow_mut().equate(a_id, b_id);
Ok(a)
}
- (&ty::TyInfer(TyVar(a_id)), _) => {
+ (&ty::Infer(TyVar(a_id)), _) => {
self.fields.instantiate(b, RelationDir::EqTo, a_id, self.a_is_expected)?;
Ok(a)
}
- (_, &ty::TyInfer(TyVar(b_id))) => {
+ (_, &ty::Infer(TyVar(b_id))) => {
self.fields.instantiate(a, RelationDir::EqTo, b_id, self.a_is_expected)?;
Ok(a)
}
use hir::def_id::DefId;
use middle::region;
use traits::{ObligationCause, ObligationCauseCode};
-use ty::{self, subst::Subst, Region, Ty, TyCtxt, TypeFoldable, TypeVariants};
+use ty::{self, subst::Subst, Region, Ty, TyCtxt, TypeFoldable, TyKind};
use ty::error::TypeError;
use syntax::ast::DUMMY_NODE_ID;
use syntax_pos::{Pos, Span};
// if they are both "path types", there's a chance of ambiguity
// due to different versions of the same crate
match (&exp_found.expected.sty, &exp_found.found.sty) {
- (&ty::TyAdt(exp_adt, _), &ty::TyAdt(found_adt, _)) => {
+ (&ty::Adt(exp_adt, _), &ty::Adt(found_adt, _)) => {
report_path_match(err, exp_adt.did, found_adt.did);
}
_ => (),
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
return Some(());
}
- if let &ty::TyAdt(def, _) = &ta.sty {
+ if let &ty::Adt(def, _) = &ta.sty {
let path_ = self.tcx.item_path_str(def.did.clone());
if path_ == other_path {
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
fn equals<'tcx>(a: &Ty<'tcx>, b: &Ty<'tcx>) -> bool {
match (&a.sty, &b.sty) {
(a, b) if *a == *b => true,
- (&ty::TyInt(_), &ty::TyInfer(ty::InferTy::IntVar(_)))
- | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInt(_))
- | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInfer(ty::InferTy::IntVar(_)))
- | (&ty::TyFloat(_), &ty::TyInfer(ty::InferTy::FloatVar(_)))
- | (&ty::TyInfer(ty::InferTy::FloatVar(_)), &ty::TyFloat(_))
+ (&ty::Int(_), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (&ty::Infer(ty::InferTy::IntVar(_)), &ty::Int(_))
+ | (&ty::Infer(ty::InferTy::IntVar(_)), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
+ | (&ty::Infer(ty::InferTy::FloatVar(_)), &ty::Float(_))
| (
- &ty::TyInfer(ty::InferTy::FloatVar(_)),
- &ty::TyInfer(ty::InferTy::FloatVar(_)),
+ &ty::Infer(ty::InferTy::FloatVar(_)),
+ &ty::Infer(ty::InferTy::FloatVar(_)),
) => true,
_ => false,
}
}
match (&t1.sty, &t2.sty) {
- (&ty::TyAdt(def1, sub1), &ty::TyAdt(def2, sub2)) => {
+ (&ty::Adt(def1, sub1), &ty::Adt(def2, sub2)) => {
let sub_no_defaults_1 = self.strip_generic_default_params(def1.did, sub1);
let sub_no_defaults_2 = self.strip_generic_default_params(def2.did, sub2);
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
}
// When finding T != &T, highlight only the borrow
- (&ty::TyRef(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => {
+ (&ty::Ref(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
values.1.push_normal(t2.to_string());
values
}
- (_, &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => {
+ (_, &ty::Ref(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
values.0.push_normal(t1.to_string());
push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
}
// When encountering &T != &mut T, highlight only the borrow
- (&ty::TyRef(r1, ref_ty1, mutbl1),
- &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&ref_ty1, &ref_ty2) => {
+ (&ty::Ref(r1, ref_ty1, mutbl1),
+ &ty::Ref(r2, ref_ty2, mutbl2)) if equals(&ref_ty1, &ref_ty2) => {
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
(_, false, _) => {
if let Some(exp_found) = exp_found {
let (def_id, ret_ty) = match exp_found.found.sty {
- TypeVariants::TyFnDef(def, _) => {
+ TyKind::FnDef(def, _) => {
(Some(def), Some(self.tcx.fn_sig(def).output()))
}
_ => (None, None),
};
let exp_is_struct = match exp_found.expected.sty {
- TypeVariants::TyAdt(def, _) => def.is_struct(),
+ TyKind::Adt(def, _) => def.is_struct(),
_ => false,
};
let type_param = generics.type_param(param, self.tcx);
let hir = &self.tcx.hir;
hir.as_local_node_id(type_param.def_id).map(|id| {
- // Get the `hir::TyParam` to verify whether it already has any bounds.
+ // Get the `hir::Param` to verify whether it already has any bounds.
// We do this to avoid suggesting code that ends up as `T: 'a'b`,
// instead we suggest `T: 'a + 'b` in that case.
let mut has_bounds = false;
s
};
let var_description = match var_origin {
- infer::MiscVariable(_) => "".to_string(),
+ infer::MiscVariable(_) => String::new(),
infer::PatternRegion(_) => " for pattern".to_string(),
infer::AddrOfRegion(_) => " for borrow expression".to_string(),
infer::Autoref(_) => " for autoref".to_string(),
use hir::intravisit::{self, Visitor, NestedVisitorMap};
use infer::InferCtxt;
use infer::type_variable::TypeVariableOrigin;
-use ty::{self, Ty, TyInfer, TyVar};
+use ty::{self, Ty, Infer, TyVar};
use syntax::source_map::CompilerDesugaringKind;
use syntax_pos::Span;
use errors::DiagnosticBuilder;
let ty = self.infcx.resolve_type_vars_if_possible(&ty);
ty.walk().any(|inner_ty| {
inner_ty == *self.target_ty || match (&inner_ty.sty, &self.target_ty.sty) {
- (&TyInfer(TyVar(a_vid)), &TyInfer(TyVar(b_vid))) => {
+ (&Infer(TyVar(a_vid)), &Infer(TyVar(b_vid))) => {
self.infcx
.type_variables
.borrow_mut()
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
pub fn extract_type_name(&self, ty: &'a Ty<'tcx>) -> String {
- if let ty::TyInfer(ty::TyVar(ty_vid)) = (*ty).sty {
+ if let ty::Infer(ty::TyVar(ty_vid)) = (*ty).sty {
let ty_vars = self.type_variables.borrow();
if let TypeVariableOrigin::TypeParameterDefinition(_, name) =
*ty_vars.var_origin(ty_vid) {
) -> Option<Span> {
let ret_ty = self.tcx.type_of(scope_def_id);
match ret_ty.sty {
- ty::TyFnDef(_, _) => {
+ ty::FnDef(_, _) => {
let sig = ret_ty.fn_sig(self.tcx);
let late_bound_regions = self.tcx
.collect_referenced_late_bound_regions(&sig.output());
) -> bool {
let ret_ty = self.tcx.type_of(scope_def_id);
match ret_ty.sty {
- ty::TyFnDef(_, _) => {
+ ty::FnDef(_, _) => {
let sig = ret_ty.fn_sig(self.tcx);
let output = self.tcx.erase_late_bound_regions(&sig.output());
return output.is_impl_trait();
let tcx = self.infcx.tcx;
match t.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
let opt_ty = self.infcx.type_variables.borrow_mut().probe(v).known();
self.freshen(
opt_ty,
ty::FreshTy)
}
- ty::TyInfer(ty::IntVar(v)) => {
+ ty::Infer(ty::IntVar(v)) => {
self.freshen(
self.infcx.int_unification_table.borrow_mut()
.probe_value(v)
ty::FreshIntTy)
}
- ty::TyInfer(ty::FloatVar(v)) => {
+ ty::Infer(ty::FloatVar(v)) => {
self.freshen(
self.infcx.float_unification_table.borrow_mut()
.probe_value(v)
ty::FreshFloatTy)
}
- ty::TyInfer(ty::FreshTy(c)) |
- ty::TyInfer(ty::FreshIntTy(c)) |
- ty::TyInfer(ty::FreshFloatTy(c)) => {
+ ty::Infer(ty::FreshTy(c)) |
+ ty::Infer(ty::FreshIntTy(c)) |
+ ty::Infer(ty::FreshFloatTy(c)) => {
if c >= self.freshen_count {
bug!("Encountered a freshend type with id {} \
but our counter is only at {}",
t
}
- ty::TyInfer(ty::CanonicalTy(..)) =>
+ ty::Infer(ty::CanonicalTy(..)) =>
bug!("encountered canonical ty during freshening"),
- ty::TyGenerator(..) |
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyAdt(..) |
- ty::TyStr |
- ty::TyError |
- ty::TyArray(..) |
- ty::TySlice(..) |
- ty::TyRawPtr(..) |
- ty::TyRef(..) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyDynamic(..) |
- ty::TyNever |
- ty::TyTuple(..) |
- ty::TyProjection(..) |
- ty::TyForeign(..) |
- ty::TyParam(..) |
- ty::TyClosure(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyAnon(..) => {
+ ty::Generator(..) |
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Adt(..) |
+ ty::Str |
+ ty::Error |
+ ty::Array(..) |
+ ty::Slice(..) |
+ ty::RawPtr(..) |
+ ty::Ref(..) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Dynamic(..) |
+ ty::Never |
+ ty::Tuple(..) |
+ ty::Projection(..) |
+ ty::Foreign(..) |
+ ty::Param(..) |
+ ty::Closure(..) |
+ ty::GeneratorWitness(..) |
+ ty::Anon(..) => {
t.super_fold_with(self)
}
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty::TyInfer(ty::InferTy::TyVar(vid)) => {
+ ty::Infer(ty::InferTy::TyVar(vid)) => {
match self.type_variables.get(&vid) {
None => {
// This variable was created before the
// is (e.g.) `Box<i32>`. A more obvious solution might be to
// iterate on the subtype obligations that are returned, but I
// think this suffices. -nmatsakis
- (&ty::TyInfer(TyVar(..)), _) => {
+ (&ty::Infer(TyVar(..)), _) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, b, a)?;
Ok(v)
}
- (_, &ty::TyInfer(TyVar(..))) => {
+ (_, &ty::Infer(TyVar(..))) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, a, b)?;
Ok(v)
pub fn type_var_diverges(&'a self, ty: Ty) -> bool {
match ty.sty {
- ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
+ ty::Infer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid),
_ => false
}
}
use ty::error::UnconstrainedNumeric::Neither;
use ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
match ty.sty {
- ty::TyInfer(ty::IntVar(vid)) => {
+ ty::Infer(ty::IntVar(vid)) => {
if self.int_unification_table.borrow_mut().probe_value(vid).is_some() {
Neither
} else {
UnconstrainedInt
}
},
- ty::TyInfer(ty::FloatVar(vid)) => {
+ ty::Infer(ty::FloatVar(vid)) => {
if self.float_unification_table.borrow_mut().probe_value(vid).is_some() {
Neither
} else {
pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> {
match typ.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
// Not entirely obvious: if `typ` is a type variable,
// it can be resolved to an int/float variable, which
// can then be recursively resolved, hence the
.unwrap_or(typ)
}
- ty::TyInfer(ty::IntVar(v)) => {
+ ty::Infer(ty::IntVar(v)) => {
self.int_unification_table
.borrow_mut()
.probe_value(v)
.unwrap_or(typ)
}
- ty::TyInfer(ty::FloatVar(v)) => {
+ ty::Infer(ty::FloatVar(v)) => {
self.float_unification_table
.borrow_mut()
.probe_value(v)
}
// [Note-Type-error-reporting]
- // An invariant is that anytime the expected or actual type is TyError (the special
+ // An invariant is that anytime the expected or actual type is Error (the special
// error type, meaning that an error occurred when typechecking this expression),
// this is a derived error. The error cascaded from another error (that was already
// reported), so it's not useful to display it to the user.
// The following methods implement this logic.
- // They check if either the actual or expected type is TyError, and don't print the error
+ // They check if either the actual or expected type is Error, and don't print the error
// in this case. The typechecker should only ever report type errors involving mismatched
// types using one of these methods, and should not call span_err directly for such
// errors.
let actual_ty = self.resolve_type_vars_if_possible(&actual_ty);
debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty);
- // Don't report an error if actual type is TyError.
+ // Don't report an error if actual type is Error.
if actual_ty.references_error() {
return self.tcx.sess.diagnostic().struct_dummy();
}
fn type_bound(&self, ty: Ty<'tcx>) -> VerifyBound<'tcx> {
match ty.sty {
- ty::TyParam(p) => self.param_bound(p),
- ty::TyProjection(data) => {
+ ty::Param(p) => self.param_bound(p),
+ ty::Projection(data) => {
let declared_bounds = self.projection_declared_bounds(data);
self.projection_bound(declared_bounds, data)
}
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
let t = self.infcx.shallow_resolve(t);
if t.has_infer_types() {
- if let ty::TyInfer(_) = t.sty {
+ if let ty::Infer(_) = t.sty {
// Since we called `shallow_resolve` above, this must
// be an (as yet...) unresolved inference variable.
true
} else {
let t = self.infcx.shallow_resolve(t);
match t.sty {
- ty::TyInfer(ty::TyVar(vid)) => {
+ ty::Infer(ty::TyVar(vid)) => {
self.err = Some(FixupError::UnresolvedTy(vid));
self.tcx().types.err
}
- ty::TyInfer(ty::IntVar(vid)) => {
+ ty::Infer(ty::IntVar(vid)) => {
self.err = Some(FixupError::UnresolvedIntTy(vid));
self.tcx().types.err
}
- ty::TyInfer(ty::FloatVar(vid)) => {
+ ty::Infer(ty::FloatVar(vid)) => {
self.err = Some(FixupError::UnresolvedFloatTy(vid));
self.tcx().types.err
}
- ty::TyInfer(_) => {
+ ty::Infer(_) => {
bug!("Unexpected type in full type resolver: {:?}", t);
}
_ => {
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_vid)), &ty::TyInfer(TyVar(b_vid))) => {
+ (&ty::Infer(TyVar(a_vid)), &ty::Infer(TyVar(b_vid))) => {
// Shouldn't have any LBR here, so we can safely put
// this under a binder below without fear of accidental
// capture.
Ok(a)
}
- (&ty::TyInfer(TyVar(a_id)), _) => {
+ (&ty::Infer(TyVar(a_id)), _) => {
self.fields
.instantiate(b, RelationDir::SupertypeOf, a_id, !self.a_is_expected)?;
Ok(a)
}
- (_, &ty::TyInfer(TyVar(b_id))) => {
+ (_, &ty::Infer(TyVar(b_id))) => {
self.fields.instantiate(a, RelationDir::SubtypeOf, b_id, self.a_is_expected)?;
Ok(a)
}
- (&ty::TyError, _) | (_, &ty::TyError) => {
+ (&ty::Error, _) | (_, &ty::Error) => {
infcx.set_tainted_by_errors();
Ok(self.tcx().types.err)
}
/// instantiated. Otherwise, returns `t`.
pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
- ty::TyInfer(ty::TyVar(v)) => {
+ ty::Infer(ty::TyVar(v)) => {
match self.probe(v) {
TypeVariableValue::Unknown { .. } => t,
TypeVariableValue::Known { value } => value,
#![feature(trace_macros)]
#![feature(trusted_len)]
#![feature(vec_remove_item)]
-#![feature(catch_expr)]
#![feature(step_trait)]
#![feature(integer_atomics)]
#![feature(test)]
extern crate byteorder;
extern crate backtrace;
+#[macro_use]
+extern crate smallvec;
+
// Note that librustc doesn't actually depend on these crates, see the note in
// `Cargo.toml` for this crate about why these are here.
#[allow(unused_extern_crates)]
fn handle_field_access(&mut self, lhs: &hir::Expr, node_id: ast::NodeId) {
match self.tables.expr_ty_adjusted(lhs).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
let index = self.tcx.field_index(node_id, self.tables);
self.insert_def_id(def.non_enum_variant().fields[index].did);
}
- ty::TyTuple(..) => {}
+ ty::Tuple(..) => {}
_ => span_bug!(lhs.span, "named field access on non-ADT"),
}
}
fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, def: Def,
pats: &[source_map::Spanned<hir::FieldPat>]) {
let variant = match self.tables.node_id_to_type(lhs.hir_id).sty {
- ty::TyAdt(adt, _) => adt.variant_of_def(def),
+ ty::Adt(adt, _) => adt.variant_of_def(def),
_ => span_bug!(lhs.span, "non-ADT in struct pattern")
};
for pat in pats {
self.handle_field_access(&lhs, expr.id);
}
hir::ExprKind::Struct(_, ref fields, _) => {
- if let ty::TypeVariants::TyAdt(ref adt, _) = self.tables.expr_ty(expr).sty {
+ if let ty::Adt(ref adt, _) = self.tables.expr_ty(expr).sty {
self.mark_as_used_if_union(adt, fields);
}
}
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
let expr_ty = return_if_err!(self.mc.expr_ty(expr));
- if let ty::TyRef(r, _, _) = expr_ty.sty {
+ if let ty::Ref(r, _, _) = expr_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
self.borrow_expr(&base, r, bk, AddrOf);
}
debug!("walk_callee: callee={:?} callee_ty={:?}",
callee, callee_ty);
match callee_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
self.consume_expr(callee);
}
- ty::TyError => { }
+ ty::Error => { }
_ => {
if let Some(def) = self.mc.tables.type_dependent_defs().get(call.hir_id) {
let def_id = def.def_id();
// Select just those fields of the `with`
// expression that will actually be used
match with_cmt.ty.sty {
- ty::TyAdt(adt, substs) if adt.is_struct() => {
+ ty::Adt(adt, substs) if adt.is_struct() => {
// Consume those fields of the with expression that are needed.
for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() {
let is_mentioned = fields.iter().any(|f| {
// It is also a borrow or copy/move of the value being matched.
match bm {
ty::BindByReference(m) => {
- if let ty::TyRef(r, _, _) = pat_ty.sty {
+ if let ty::Ref(r, _, _) = pat_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
delegate.borrow(pat.id, pat.span, &cmt_pat, r, bk, RefBinding);
}
ty: Ty<'tcx>)
-> Ty<'tcx> {
let (def, substs) = match ty.sty {
- ty::TyAdt(def, substs) => (def, substs),
+ ty::Adt(def, substs) => (def, substs),
_ => return ty
};
// Special-case transmutting from `typeof(function)` and
// `Option<typeof(function)>` to present a clearer error.
let from = unpack_option_like(self.tcx.global_tcx(), from);
- if let (&ty::TyFnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) {
if size_to == Pointer.size(self.tcx) {
struct_span_err!(self.tcx.sess, span, E0591,
"can't transmute zero-sized type")
PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn;
PanicInfoLangItem, "panic_info", panic_info;
PanicImplLangItem, "panic_impl", panic_impl;
+ // Libstd panic entry point. Necessary for const eval to be able to catch it
+ BeginPanicFnLangItem, "begin_panic", begin_panic_fn;
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
BoxFreeFnLangItem, "box_free", box_free_fn;
fn resolve_field(&self, field_index: usize) -> Option<(&'tcx ty::AdtDef, &'tcx ty::FieldDef)>
{
let adt_def = match self.ty.sty {
- ty::TyAdt(def, _) => def,
- ty::TyTuple(..) => return None,
+ ty::Adt(def, _) => def,
+ ty::Tuple(..) => return None,
// closures get `Categorization::Upvar` rather than `Categorization::Interior`
_ => bug!("interior cmt {:?} is not an ADT", self)
};
// FnOnce | copied | upvar -> &'up bk
let kind = match self.node_ty(fn_hir_id)?.sty {
- ty::TyGenerator(..) => ty::ClosureKind::FnOnce,
- ty::TyClosure(closure_def_id, closure_substs) => {
+ ty::Generator(..) => ty::ClosureKind::FnOnce,
+ ty::Closure(closure_def_id, closure_substs) => {
match self.infcx {
// During upvar inference we may not know the
// closure kind, just use the LATTICE_BOTTOM value.
// that the above is actually immutable and
// has a ref type. However, nothing should
// actually look at the type, so we can get
- // away with stuffing a `TyError` in there
+ // away with stuffing an `Error` in there
// instead of bothering to construct a proper
// one.
let cmt_result = cmt_ {
// Always promote `[T; 0]` (even when e.g. borrowed mutably).
let promotable = match expr_ty.sty {
- ty::TyArray(_, len) if len.assert_usize(self.tcx) == Some(0) => true,
+ ty::Array(_, len) if len.assert_usize(self.tcx) == Some(0) => true,
_ => promotable,
};
let base_ty = self.expr_ty_adjusted(base)?;
let (region, mutbl) = match base_ty.sty {
- ty::TyRef(region, _, mutbl) => (region, mutbl),
+ ty::Ref(region, _, mutbl) => (region, mutbl),
_ => {
span_bug!(expr.span, "cat_overloaded_place: base is not a reference")
}
};
let ptr = match base_cmt.ty.sty {
- ty::TyAdt(def, ..) if def.is_box() => Unique,
- ty::TyRawPtr(ref mt) => UnsafePtr(mt.mutbl),
- ty::TyRef(r, _, mutbl) => {
+ ty::Adt(def, ..) if def.is_box() => Unique,
+ ty::RawPtr(ref mt) => UnsafePtr(mt.mutbl),
+ ty::Ref(r, _, mutbl) => {
let bk = ty::BorrowKind::from_mutbl(mutbl);
BorrowedPtr(bk, r)
}
}
Def::StructCtor(_, CtorKind::Fn) => {
match self.pat_ty_unadjusted(&pat)?.sty {
- ty::TyAdt(adt_def, _) => {
+ ty::Adt(adt_def, _) => {
(cmt, adt_def.non_enum_variant().fields.len())
}
ref ty => {
PatKind::Tuple(ref subpats, ddpos) => {
// (p1, ..., pN)
let expected_len = match self.pat_ty_unadjusted(&pat)?.sty {
- ty::TyTuple(ref tys) => tys.len(),
+ ty::Tuple(ref tys) => tys.len(),
ref ty => span_bug!(pat.span, "tuple pattern unexpected type {:?}", ty),
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) {
use syntax_pos::Span;
use syntax::ast;
+use syntax::symbol::Symbol;
pub type ConstEvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, Lrc<ConstEvalErr<'tcx>>>;
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
Unreachable,
- Panic,
+ Panic {
+ msg: Symbol,
+ line: u32,
+ col: u32,
+ file: Symbol,
+ },
ReadFromReturnPointer,
PathNotFound(Vec<String>),
UnimplementedTraitSelection,
"tried to re-, de-, or allocate heap memory with alignment that is not a power of two",
Unreachable =>
"entered unreachable code",
- Panic =>
+ Panic { .. } =>
"the evaluated program panicked",
ReadFromReturnPointer =>
"tried to read from the return pointer",
write!(f, "{}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()),
+ Panic { ref msg, line, col, ref file } =>
+ write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
_ => write!(f, "{}", self.description()),
}
}
FrameInfo, ConstEvalResult,
};
-pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef};
+pub use self::value::{Scalar, ConstValue, ScalarMaybeUndef};
use std::fmt;
use mir;
Pointer { alloc_id, offset }
}
- pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+ pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
Pointer::new(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
}
- pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
}
}
-pub fn write_target_int(
- endianness: layout::Endian,
- mut target: &mut [u8],
- data: i128,
-) -> Result<(), io::Error> {
- let len = target.len();
- match endianness {
- layout::Endian::Little => target.write_int128::<LittleEndian>(data, len),
- layout::Endian::Big => target.write_int128::<BigEndian>(data, len),
- }
-}
-
pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
match endianness {
layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
}
}
+////////////////////////////////////////////////////////////////////////////////
+// Methods to facilitate working with signed integers stored in a u128
+////////////////////////////////////////////////////////////////////////////////
+
+pub fn sign_extend(value: u128, size: Size) -> u128 {
+ let size = size.bits();
+ // sign extend
+ let shift = 128 - size;
+ // shift the unsigned value to the left
+ // and back to the right as signed (essentially fills with FF on the left)
+ (((value << shift) as i128) >> shift) as u128
+}
+
+pub fn truncate(value: u128, size: Size) -> u128 {
+ let size = size.bits();
+ let shift = 128 - size;
+ // truncate (shift left to drop out leftover values, shift right to fill with zeroes)
+ (value << shift) >> shift
+}
+
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////
#![allow(unknown_lints)]
-use ty::layout::{Align, HasDataLayout, Size};
-use ty;
+use ty::layout::{HasDataLayout, Size};
use ty::subst::Substs;
use hir::def_id::DefId;
use super::{EvalResult, Pointer, PointerArithmetic, Allocation};
/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which
-/// matches Value's optimizations for easy conversions between these two types
+/// matches the LocalValue optimizations for easy conversions between Value and ConstValue.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)]
pub enum ConstValue<'tcx> {
/// Never returned from the `const_eval` query, but the HIR contains these frequently in order
/// evaluation
Unevaluated(DefId, &'tcx Substs<'tcx>),
/// Used only for types with layout::abi::Scalar ABI and ZSTs
+ ///
+ /// Not using the enum `Value` to encode that this must not be `Undef`
Scalar(Scalar),
/// Used only for types with layout::abi::ScalarPair
///
}
impl<'tcx> ConstValue<'tcx> {
- #[inline]
- pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> {
- Ok(match val {
- Value::ByRef(..) => bug!(),
- Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b),
- Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?),
- })
- }
-
- #[inline]
- pub fn to_byval_value(&self) -> Option<Value> {
- match *self {
- ConstValue::Unevaluated(..) |
- ConstValue::ByRef(..) => None,
- ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b)),
- ConstValue::Scalar(val) => Some(Value::Scalar(val.into())),
- }
- }
-
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar> {
match *self {
}
#[inline]
- pub fn to_bits(&self, size: Size) -> Option<u128> {
+ pub fn try_to_bits(&self, size: Size) -> Option<u128> {
self.try_to_scalar()?.to_bits(size).ok()
}
#[inline]
- pub fn to_ptr(&self) -> Option<Pointer> {
+ pub fn try_to_ptr(&self) -> Option<Pointer> {
self.try_to_scalar()?.to_ptr().ok()
}
-}
-/// A `Value` represents a single self-contained Rust value.
-///
-/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive
-/// value held directly, outside of any allocation (`Scalar`). For `ByRef`-values, we remember
-/// whether the pointer is supposed to be aligned or not (also see Place).
-///
-/// For optimization of a few very common cases, there is also a representation for a pair of
-/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
-/// operations and fat pointers. This idea was taken from rustc's codegen.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
-pub enum Value {
- ByRef(Scalar, Align),
- Scalar(ScalarMaybeUndef),
- ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef),
-}
-
-impl<'tcx> ty::TypeFoldable<'tcx> for Value {
- fn super_fold_with<'gcx: 'tcx, F: ty::fold::TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self {
- *self
+ pub fn new_slice(
+ val: Scalar,
+ len: u64,
+ cx: impl HasDataLayout
+ ) -> Self {
+ ConstValue::ScalarPair(val, Scalar::Bits {
+ bits: len as u128,
+ size: cx.data_layout().pointer_size.bytes() as u8,
+ }.into())
}
- fn super_visit_with<V: ty::fold::TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
- false
+
+ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
+ ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into())
}
}
impl<'tcx> Scalar {
- pub fn ptr_null<C: HasDataLayout>(cx: C) -> Self {
+ pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits {
bits: 0,
size: cx.data_layout().pointer_size.bytes() as u8,
}
}
- pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
- ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx)
- }
-
- pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
- ScalarMaybeUndef::Scalar(self).to_value_with_vtable(vtable)
+ pub fn zst() -> Self {
+ Scalar::Bits { bits: 0, size: 0 }
}
- pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
+ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
let layout = cx.data_layout();
match self {
Scalar::Bits { bits, size } => {
}
}
- pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> bool {
+ pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
match self {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
}
}
- pub fn to_value(self) -> Value {
- Value::Scalar(ScalarMaybeUndef::Scalar(self))
+ pub fn from_bool(b: bool) -> Self {
+ Scalar::Bits { bits: b as u128, size: 1 }
+ }
+
+ pub fn from_char(c: char) -> Self {
+ Scalar::Bits { bits: c as u128, size: 4 }
+ }
+
+ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ match self {
+ Scalar::Bits { bits, size } => {
+ assert_eq!(target_size.bytes(), size as u64);
+ assert_ne!(size, 0, "to_bits cannot be used with zsts");
+ Ok(bits)
+ }
+ Scalar::Ptr(_) => err!(ReadPointerAsBytes),
+ }
+ }
+
+ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
+ match self {
+ Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
+ Scalar::Bits { .. } => err!(ReadBytesAsPointer),
+ Scalar::Ptr(p) => Ok(p),
+ }
+ }
+
+ pub fn is_bits(self) -> bool {
+ match self {
+ Scalar::Bits { .. } => true,
+ _ => false,
+ }
+ }
+
+ pub fn is_ptr(self) -> bool {
+ match self {
+ Scalar::Ptr(_) => true,
+ _ => false,
+ }
+ }
+
+ pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ match self {
+ Scalar::Bits { bits: 0, size: 1 } => Ok(false),
+ Scalar::Bits { bits: 1, size: 1 } => Ok(true),
+ _ => err!(InvalidBool),
+ }
}
}
impl From<Pointer> for Scalar {
+ #[inline(always)]
fn from(ptr: Pointer) -> Self {
Scalar::Ptr(ptr)
}
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
- /// Do not try to read less or more bytes that that
+ /// Do not try to read less or more bytes that that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
}
impl From<Scalar> for ScalarMaybeUndef {
+ #[inline(always)]
fn from(s: Scalar) -> Self {
ScalarMaybeUndef::Scalar(s)
}
}
-impl ScalarMaybeUndef {
- pub fn unwrap_or_err(self) -> EvalResult<'static, Scalar> {
+impl<'tcx> ScalarMaybeUndef {
+ pub fn not_undef(self) -> EvalResult<'static, Scalar> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes),
}
}
- pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value {
- Value::ScalarPair(self, Scalar::Bits {
- bits: len as u128,
- size: cx.data_layout().pointer_size.bytes() as u8,
- }.into())
- }
-
- pub fn to_value_with_vtable(self, vtable: Pointer) -> Value {
- Value::ScalarPair(self, Scalar::Ptr(vtable).into())
- }
-
- pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
- match self {
- ScalarMaybeUndef::Scalar(scalar) => {
- scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar)
- },
- ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef)
- }
- }
-}
-
-impl<'tcx> Scalar {
- pub fn from_bool(b: bool) -> Self {
- Scalar::Bits { bits: b as u128, size: 1 }
- }
-
- pub fn from_char(c: char) -> Self {
- Scalar::Bits { bits: c as u128, size: 4 }
- }
-
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
- match self {
- Scalar::Bits { bits, size } => {
- assert_eq!(target_size.bytes(), size as u64);
- assert_ne!(size, 0, "to_bits cannot be used with zsts");
- Ok(bits)
- }
- Scalar::Ptr(_) => err!(ReadPointerAsBytes),
- }
- }
-
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
- match self {
- Scalar::Bits {..} => err!(ReadBytesAsPointer),
- Scalar::Ptr(p) => Ok(p),
- }
+ self.not_undef()?.to_ptr()
}
- pub fn is_bits(self) -> bool {
- match self {
- Scalar::Bits { .. } => true,
- _ => false,
- }
- }
-
- pub fn is_ptr(self) -> bool {
- match self {
- Scalar::Ptr(_) => true,
- _ => false,
- }
+ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ self.not_undef()?.to_bits(target_size)
}
pub fn to_bool(self) -> EvalResult<'tcx, bool> {
- match self {
- Scalar::Bits { bits: 0, size: 1 } => Ok(false),
- Scalar::Bits { bits: 1, size: 1 } => Ok(true),
- _ => err!(InvalidBool),
- }
+ self.not_undef()?.to_bool()
}
}
use hir::def_id::DefId;
use hir::{self, HirId, InlineAsm};
use middle::region;
-use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef};
+use mir::interpret::{EvalErrorKind, Scalar, ScalarMaybeUndef, ConstValue};
use mir::visit::MirVisitable;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::sync::ReadGuard;
use rustc_serialize as serialize;
.iter()
.map(|&u| {
let mut s = String::new();
- print_miri_value(
- Scalar::Bits {
- bits: u,
- size: size.bytes() as u8,
- }.to_value(),
- switch_ty,
- &mut s,
- ).unwrap();
+ let c = ty::Const {
+ val: ConstValue::Scalar(Scalar::Bits {
+ bits: u,
+ size: size.bytes() as u8,
+ }.into()),
+ ty: switch_ty,
+ };
+ fmt_const_val(&mut s, &c).unwrap();
s.into()
})
.chain(iter::once(String::from("otherwise").into()))
region
} else {
// Do not even print 'static
- "".to_owned()
+ String::new()
};
write!(fmt, "&{}{}{:?}", region, kind_str, place)
}
}
/// Write a `ConstValue` in a way closer to the original source code than the `Debug` output.
-pub fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ty::Const) -> fmt::Result {
- if let Some(value) = const_val.to_byval_value() {
- print_miri_value(value, const_val.ty, fmt)
- } else {
- write!(fmt, "{:?}:{}", const_val.val, const_val.ty)
- }
-}
-
-pub fn print_miri_value<'tcx, W: Write>(value: Value, ty: Ty<'tcx>, f: &mut W) -> fmt::Result {
- use ty::TypeVariants::*;
+pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const) -> fmt::Result {
+ use ty::TyKind::*;
+ let value = const_val.val;
+ let ty = const_val.ty;
// print some primitives
- if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value {
+ if let ConstValue::Scalar(Scalar::Bits { bits, .. }) = value {
match ty.sty {
- TyBool if bits == 0 => return write!(f, "false"),
- TyBool if bits == 1 => return write!(f, "true"),
- TyFloat(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)),
- TyFloat(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)),
- TyUint(ui) => return write!(f, "{:?}{}", bits, ui),
- TyInt(i) => {
+ Bool if bits == 0 => return write!(f, "false"),
+ Bool if bits == 1 => return write!(f, "true"),
+ Float(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)),
+ Float(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)),
+ Uint(ui) => return write!(f, "{:?}{}", bits, ui),
+ Int(i) => {
let bit_width = ty::tls::with(|tcx| {
let ty = tcx.lift_to_global(&ty).unwrap();
tcx.layout_of(ty::ParamEnv::empty().and(ty))
let shift = 128 - bit_width;
return write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i);
}
- TyChar => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()),
+ Char => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()),
_ => {},
}
}
// print function definitons
- if let TyFnDef(did, _) = ty.sty {
+ if let FnDef(did, _) = ty.sty {
return write!(f, "{}", item_path_str(did));
}
// print string literals
- if let Value::ScalarPair(ptr, len) = value {
- if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr {
+ if let ConstValue::ScalarPair(ptr, len) = value {
+ if let Scalar::Ptr(ptr) = ptr {
if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len {
- if let TyRef(_, &ty::TyS { sty: TyStr, .. }, _) = ty.sty {
+ if let Ref(_, &ty::TyS { sty: Str, .. }, _) = ty.sty {
return ty::tls::with(|tcx| {
let alloc = tcx.alloc_map.lock().get(ptr.alloc_id);
if let Some(interpret::AllocType::Memory(alloc)) = alloc {
let ty = self.to_ty(tcx);
PlaceTy::Ty {
ty: match ty.sty {
- ty::TyArray(inner, size) => {
+ ty::Array(inner, size) => {
let size = size.unwrap_usize(tcx);
let len = size - (from as u64) - (to as u64);
tcx.mk_array(inner, len)
}
- ty::TySlice(..) => ty,
+ ty::Slice(..) => ty,
_ => {
bug!("cannot subslice non-array type: `{:?}`", self)
}
}
ProjectionElem::Downcast(adt_def1, index) =>
match self.to_ty(tcx).sty {
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
assert!(adt_def.is_enum());
assert!(index < adt_def.variants.len());
assert_eq!(adt_def, adt_def1);
}
Rvalue::Discriminant(ref place) => {
let ty = place.ty(local_decls, tcx).to_ty(tcx);
- if let ty::TyAdt(adt_def, _) = ty.sty {
+ if let ty::Adt(adt_def, _) = ty.sty {
adt_def.repr.discr_type().to_ty(tcx)
} else {
// This can only be `0`, for now, so `u8` will suffice.
"perform LLVM link-time optimizations"),
target_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
"select target processor (rustc --print target-cpus for details)"),
- target_feature: String = ("".to_string(), parse_string, [TRACKED],
+ target_feature: String = (String::new(), parse_string, [TRACKED],
"target specific attributes (rustc --print target-features for details)"),
passes: Vec<String> = (Vec::new(), parse_list, [TRACKED],
"a list of extra LLVM passes to run (space separated)"),
"choose the code model to use (rustc --print code-models for details)"),
metadata: Vec<String> = (Vec::new(), parse_list, [TRACKED],
"metadata to mangle symbol names with"),
- extra_filename: String = ("".to_string(), parse_string, [UNTRACKED],
+ extra_filename: String = (String::new(), parse_string, [UNTRACKED],
"extra data to put in each output filename"),
codegen_units: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
"divide crate into N units to optimize in parallel"),
};
if cg.target_feature == "help" {
prints.push(PrintRequest::TargetFeatures);
- cg.target_feature = "".to_string();
+ cg.target_feature = String::new();
}
if cg.relocation_model.as_ref().map_or(false, |s| s == "help") {
prints.push(PrintRequest::RelocationModels);
}
return match substs.type_at(0).sty {
- ty::TyParam(_) => true,
- ty::TyProjection(p) => self.is_of_param(p.substs),
+ ty::Param(_) => true,
+ ty::Projection(p) => self.is_of_param(p.substs),
_ => false,
};
}
fn is_possibly_remote_type(ty: Ty, _in_crate: InCrate) -> bool {
match ty.sty {
- ty::TyProjection(..) | ty::TyParam(..) => true,
+ ty::Projection(..) | ty::Param(..) => true,
_ => false,
}
}
fn fundamental_ty(tcx: TyCtxt, ty: Ty) -> bool {
match ty.sty {
- ty::TyRef(..) => true,
- ty::TyAdt(def, _) => def.is_fundamental(),
- ty::TyDynamic(ref data, ..) => {
+ ty::Ref(..) => true,
+ ty::Adt(def, _) => def.is_fundamental(),
+ ty::Dynamic(ref data, ..) => {
data.principal().map_or(false, |p| tcx.has_attr(p.def_id(), "fundamental"))
}
_ => false
debug!("ty_is_local_constructor({:?})", ty);
match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyStr |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyArray(..) |
- ty::TySlice(..) |
- ty::TyRawPtr(..) |
- ty::TyRef(..) |
- ty::TyNever |
- ty::TyTuple(..) |
- ty::TyParam(..) |
- ty::TyProjection(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Str |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Array(..) |
+ ty::Slice(..) |
+ ty::RawPtr(..) |
+ ty::Ref(..) |
+ ty::Never |
+ ty::Tuple(..) |
+ ty::Param(..) |
+ ty::Projection(..) => {
false
}
- ty::TyInfer(..) => match in_crate {
+ ty::Infer(..) => match in_crate {
InCrate::Local => false,
// The inference variable might be unified with a local
// type in that remote crate.
InCrate::Remote => true,
},
- ty::TyAdt(def, _) => def_id_is_local(def.did, in_crate),
- ty::TyForeign(did) => def_id_is_local(did, in_crate),
+ ty::Adt(def, _) => def_id_is_local(def.did, in_crate),
+ ty::Foreign(did) => def_id_is_local(did, in_crate),
- ty::TyDynamic(ref tt, ..) => {
+ ty::Dynamic(ref tt, ..) => {
tt.principal().map_or(false, |p| {
def_id_is_local(p.def_id(), in_crate)
})
}
- ty::TyError => {
+ ty::Error => {
true
}
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyAnon(..) => {
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::GeneratorWitness(..) |
+ ty::Anon(..) => {
bug!("ty_is_local invoked on unexpected type: {:?}", ty)
}
}
/// if the type can be equated to any type.
fn type_category<'tcx>(t: Ty<'tcx>) -> Option<u32> {
match t.sty {
- ty::TyBool => Some(0),
- ty::TyChar => Some(1),
- ty::TyStr => Some(2),
- ty::TyInt(..) | ty::TyUint(..) | ty::TyInfer(ty::IntVar(..)) => Some(3),
- ty::TyFloat(..) | ty::TyInfer(ty::FloatVar(..)) => Some(4),
- ty::TyRef(..) | ty::TyRawPtr(..) => Some(5),
- ty::TyArray(..) | ty::TySlice(..) => Some(6),
- ty::TyFnDef(..) | ty::TyFnPtr(..) => Some(7),
- ty::TyDynamic(..) => Some(8),
- ty::TyClosure(..) => Some(9),
- ty::TyTuple(..) => Some(10),
- ty::TyProjection(..) => Some(11),
- ty::TyParam(..) => Some(12),
- ty::TyAnon(..) => Some(13),
- ty::TyNever => Some(14),
- ty::TyAdt(adt, ..) => match adt.adt_kind() {
+ ty::Bool => Some(0),
+ ty::Char => Some(1),
+ ty::Str => Some(2),
+ ty::Int(..) | ty::Uint(..) | ty::Infer(ty::IntVar(..)) => Some(3),
+ ty::Float(..) | ty::Infer(ty::FloatVar(..)) => Some(4),
+ ty::Ref(..) | ty::RawPtr(..) => Some(5),
+ ty::Array(..) | ty::Slice(..) => Some(6),
+ ty::FnDef(..) | ty::FnPtr(..) => Some(7),
+ ty::Dynamic(..) => Some(8),
+ ty::Closure(..) => Some(9),
+ ty::Tuple(..) => Some(10),
+ ty::Projection(..) => Some(11),
+ ty::Param(..) => Some(12),
+ ty::Anon(..) => Some(13),
+ ty::Never => Some(14),
+ ty::Adt(adt, ..) => match adt.adt_kind() {
AdtKind::Struct => Some(15),
AdtKind::Union => Some(16),
AdtKind::Enum => Some(17),
},
- ty::TyGenerator(..) => Some(18),
- ty::TyForeign(..) => Some(19),
- ty::TyGeneratorWitness(..) => Some(20),
- ty::TyInfer(..) | ty::TyError => None
+ ty::Generator(..) => Some(18),
+ ty::Foreign(..) => Some(19),
+ ty::GeneratorWitness(..) => Some(20),
+ ty::Infer(..) | ty::Error => None
}
}
match (type_category(a), type_category(b)) {
(Some(cat_a), Some(cat_b)) => match (&a.sty, &b.sty) {
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => def_a == def_b,
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => def_a == def_b,
_ => cat_a == cat_b
},
// infer and error can be equated to all types
if len > 5 {
format!("\nand {} others", len - 4)
} else {
- "".to_owned()
+ String::new()
}
));
}
let found_trait_ty = found_trait_ref.self_ty();
let found_did = match found_trait_ty.sty {
- ty::TyClosure(did, _) |
- ty::TyForeign(did) |
- ty::TyFnDef(did, _) => Some(did),
- ty::TyAdt(def, _) => Some(def.did),
+ ty::Closure(did, _) |
+ ty::Foreign(did) |
+ ty::FnDef(did, _) => Some(did),
+ ty::Adt(def, _) => Some(def.did),
_ => None,
};
let found_span = found_did.and_then(|did| {
}).map(|sp| self.tcx.sess.source_map().def_span(sp)); // the sp could be an fn def
let found = match found_trait_ref.skip_binder().substs.type_at(1).sty {
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.map(|_| ArgKind::empty()).collect::<Vec<_>>(),
_ => vec![ArgKind::empty()],
};
let expected = match expected_trait_ref.skip_binder().substs.type_at(1).sty {
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.map(|t| match t.sty {
- ty::TypeVariants::TyTuple(ref tys) => ArgKind::Tuple(
+ ty::Tuple(ref tys) => ArgKind::Tuple(
Some(span),
tys.iter()
.map(|ty| ("_".to_owned(), ty.sty.to_string()))
let mut trait_type = trait_ref.self_ty();
for refs_remaining in 0..refs_number {
- if let ty::TypeVariants::TyRef(_, t_type, _) = trait_type.sty {
+ if let ty::Ref(_, t_type, _) = trait_type.sty {
trait_type = t_type;
let substs = self.tcx.mk_substs_trait(trait_type, &[]);
remove_refs);
err.span_suggestion_short_with_applicability(
- sp, &format_str, String::from(""), Applicability::MachineApplicable
+ sp, &format_str, String::new(), Applicability::MachineApplicable
);
break;
}
.collect::<Vec<String>>()
.join(", "))
} else {
- "".to_owned()
+ String::new()
},
);
err.span_suggestion_with_applicability(
fn build_fn_sig_string<'a, 'gcx, 'tcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: &ty::TraitRef<'tcx>) -> String {
let inputs = trait_ref.substs.type_at(1);
- let sig = if let ty::TyTuple(inputs) = inputs.sty {
+ let sig = if let ty::Tuple(inputs) = inputs.sty {
tcx.mk_fn_sig(
inputs.iter().map(|&x| x),
tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })),
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx }
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- if let ty::TyParam(ty::ParamTy {name, ..}) = ty.sty {
+ if let ty::Param(ty::ParamTy {name, ..}) = ty.sty {
let infcx = self.infcx;
self.var_map.entry(ty).or_insert_with(||
infcx.next_ty_var(
/// argument. This has no name (`_`) and no source spans..
pub fn from_expected_ty(t: Ty<'_>) -> ArgKind {
match t.sty {
- ty::TyTuple(ref tys) => ArgKind::Tuple(
+ ty::Tuple(ref tys) => ArgKind::Tuple(
None,
tys.iter()
.map(|ty| ("_".to_owned(), ty.sty.to_string()))
.map(|t| selcx.infcx().resolve_type_vars_if_possible(&t))
.filter(|t| t.has_infer_types())
.flat_map(|t| t.walk())
- .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
+ .filter(|t| match t.sty { ty::Infer(_) => true, _ => false })
.collect()
}
use middle::region;
use mir::interpret::ConstEvalErr;
use ty::subst::Substs;
-use ty::{self, AdtKind, Slice, Ty, TyCtxt, GenericParamDefKind, ToPredicate};
+use ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate};
use ty::error::{ExpectedFound, TypeError};
use ty::fold::{TypeFolder, TypeFoldable, TypeVisitor};
use infer::{InferCtxt};
CannotProve,
}
-pub type Goals<'tcx> = &'tcx Slice<Goal<'tcx>>;
+pub type Goals<'tcx> = &'tcx List<Goal<'tcx>>;
impl<'tcx> DomainGoal<'tcx> {
pub fn into_goal(self) -> Goal<'tcx> {
}
/// Multiple clauses.
-pub type Clauses<'tcx> = &'tcx Slice<Clause<'tcx>>;
+pub type Clauses<'tcx> = &'tcx List<Clause<'tcx>>;
/// A "program clause" has the form `D :- G1, ..., Gn`. It is saying
/// that the domain goal `D` is true if `G1...Gn` are provable. This
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
- ty::TyParam(ref param_ty) => {
+ ty::Param(ref param_ty) => {
if param_ty.is_self() {
error = true;
}
false // no contained types to walk
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
+ ty::Anon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
Reveal::UserFacing => ty,
}
}
- ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*)
+ ty::Projection(ref data) if !data.has_escaping_regions() => { // (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
/// return an associated obligation that, when fulfilled, will lead to
/// an error.
///
-/// Note that we used to return `TyError` here, but that was quite
+/// Note that we used to return `Error` here, but that was quite
/// dubious -- the premise was that an error would *eventually* be
/// reported, when the obligation was processed. But in general once
-/// you see a `TyError` you are supposed to be able to assume that an
+/// you see a `Error` you are supposed to be able to assume that an
/// error *has been* reported, so that you can take whatever heuristic
/// paths you want to take. To make things worse, it was possible for
/// cycles to arise, where you basically had a setup like `<MyType<$0>
let tcx = selcx.tcx();
// Check whether the self-type is itself a projection.
let (def_id, substs) = match obligation_trait_ref.self_ty().sty {
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
(data.trait_ref(tcx).def_id, data.substs)
}
- ty::TyAnon(def_id, substs) => (def_id, substs),
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Anon(def_id, substs) => (def_id, substs),
+ ty::Infer(ty::TyVar(_)) => {
// If the self-type is an inference variable, then it MAY wind up
// being a projected type, so induce an ambiguity.
candidate_set.mark_ambiguous();
debug!("confirm_object_candidate(object_ty={:?})",
object_ty);
let data = match object_ty.sty {
- ty::TyDynamic(ref data, ..) => data,
+ ty::Dynamic(ref data, ..) => data,
_ => {
span_bug!(
obligation.cause.span,
// This means that the impl is missing a definition for the
// associated type. This error will be reported by the type
// checker method `check_impl_items_against_trait`, so here we
- // just return TyError.
+ // just return Error.
debug!("confirm_impl_candidate: no associated type {:?} for {:?}",
assoc_ty.item.ident,
obligation.predicate);
use infer::at::At;
use infer::InferOk;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::iter::FromIterator;
use syntax::source_map::Span;
use ty::subst::Kind;
// None of these types have a destructor and hence they do not
// require anything in particular to outlive the dtor's
// execution.
- ty::TyInfer(ty::FreshIntTy(_))
- | ty::TyInfer(ty::FreshFloatTy(_))
- | ty::TyBool
- | ty::TyInt(_)
- | ty::TyUint(_)
- | ty::TyFloat(_)
- | ty::TyNever
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyChar
- | ty::TyGeneratorWitness(..)
- | ty::TyRawPtr(_)
- | ty::TyRef(..)
- | ty::TyStr
- | ty::TyForeign(..)
- | ty::TyError => true,
+ ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Bool
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Char
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Str
+ | ty::Foreign(..)
+ | ty::Error => true,
// [T; N] and [T] have same properties as T.
- ty::TyArray(ty, _) | ty::TySlice(ty) => trivial_dropck_outlives(tcx, ty),
+ ty::Array(ty, _) | ty::Slice(ty) => trivial_dropck_outlives(tcx, ty),
// (T1..Tn) and closures have same properties as T1..Tn --
// check if *any* of those are trivial.
- ty::TyTuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)),
- ty::TyClosure(def_id, ref substs) => substs
+ ty::Tuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)),
+ ty::Closure(def_id, ref substs) => substs
.upvar_tys(def_id, tcx)
.all(|t| trivial_dropck_outlives(tcx, t)),
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if Some(def.did) == tcx.lang_items().manually_drop() {
// `ManuallyDrop` never has a dtor.
true
}
// The following *might* require a destructor: it would deeper inspection to tell.
- ty::TyDynamic(..)
- | ty::TyProjection(..)
- | ty::TyParam(_)
- | ty::TyAnon(..)
- | ty::TyInfer(_)
- | ty::TyGenerator(..) => false,
+ ty::Dynamic(..)
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Anon(..)
+ | ty::Infer(_)
+ | ty::Generator(..) => false,
}
}
// except according to those terms.
use infer::InferCtxt;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::{EvaluationResult, PredicateObligation, SelectionContext,
TraitQueryMode, OverflowError};
use infer::at::At;
use infer::{InferCtxt, InferOk};
use mir::interpret::{ConstValue, GlobalId};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::project::Normalized;
use traits::{Obligation, ObligationCause, PredicateObligation, Reveal};
use ty::fold::{TypeFoldable, TypeFolder};
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty = ty.super_fold_with(self);
match ty.sty {
- ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => {
+ ty::Anon(def_id, substs) if !substs.has_escaping_regions() => {
// (*)
// Only normalize `impl Trait` after type-checking, usually in codegen.
match self.param_env.reveal {
let concrete_ty = generic_ty.subst(self.tcx(), substs);
self.anon_depth += 1;
if concrete_ty == ty {
- // The type in question can only be inferred in terms of itself. This
- // is likely a user code issue, not a compiler issue. Thus, we will
- // induce a cycle error by calling the parent query again on the type.
- //
- // FIXME: Perhaps a better solution would be to have fold_ty()
- // itself be a query. Then, a type fold cycle would be detected
- // and reported more naturally as part of the query system, rather
- // than forcing it here.
- //
- // FIXME: Need a better span than just one pointing to the type def.
- // Should point to a defining use of the type that results in this
- // un-normalizable state.
- if let Some(param_env_lifted) =
- self.tcx().lift_to_global(&self.param_env)
- {
- if let Some(ty_lifted) = self.tcx().lift_to_global(&concrete_ty) {
- let span = self.tcx().def_span(def_id);
- self.tcx()
- .global_tcx()
- .at(span)
- .normalize_ty_after_erasing_regions(
- param_env_lifted.and(ty_lifted),
- );
- self.tcx().sess.abort_if_errors();
- }
- }
- // If a cycle error can't be emitted, indicate a NoSolution error
- // and let the caller handle it.
- self.error = true;
- return concrete_ty;
+ bug!(
+ "infinite recursion generic_ty: {:#?}, substs: {:#?}, \
+ concrete_ty: {:#?}, ty: {:#?}",
+ generic_ty,
+ substs,
+ concrete_ty,
+ ty
+ );
}
let folded_ty = self.fold_ty(concrete_ty);
self.anon_depth -= 1;
}
}
- ty::TyProjection(ref data) if !data.has_escaping_regions() => {
+ ty::Projection(ref data) if !data.has_escaping_regions() => {
// (*)
// (*) This is kind of hacky -- we need to be able to
// handle normalization within binders because
use infer::InferCtxt;
use syntax::ast;
use syntax::source_map::Span;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use traits::{FulfillmentContext, ObligationCause, TraitEngine, TraitEngineExt};
use traits::query::NoSolution;
use ty::{self, Ty, TyCtxt};
use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryRegionConstraint,
QueryResult};
use infer::{InferCtxt, InferOk};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::fmt;
use std::rc::Rc;
use traits::query::Fallible;
&IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
- } else { "".to_string() };
+ } else { String::new() };
format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
}
&IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
- } else { "".to_string() };
+ } else { String::new() };
format!("upstream crates may add new impl of trait `{}`{} \
in future versions",
trait_desc, self_desc)
-> SelectionResult<'tcx, SelectionCandidate<'tcx>>
{
if stack.obligation.predicate.references_error() {
- // If we encounter a `TyError`, we generally prefer the
+ // If we encounter a `Error`, we generally prefer the
// most "optimistic" result in response -- that is, the
// one least likely to report downstream errors. But
// because this routine is shared by coherence and by
// before we go into the whole skolemization thing, just
// quickly check if the self-type is a projection at all.
match obligation.predicate.skip_binder().trait_ref.self_ty().sty {
- ty::TyProjection(_) | ty::TyAnon(..) => {}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Projection(_) | ty::Anon(..) => {}
+ ty::Infer(ty::TyVar(_)) => {
span_bug!(obligation.cause.span,
"Self=_ should have been handled by assemble_candidates");
}
skol_map);
let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty {
- ty::TyProjection(ref data) =>
+ ty::Projection(ref data) =>
(data.trait_ref(self.tcx()).def_id, data.substs),
- ty::TyAnon(def_id, substs) => (def_id, substs),
+ ty::Anon(def_id, substs) => (def_id, substs),
_ => {
span_bug!(
obligation.cause.span,
// type/region parameters
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
debug!("assemble_generator_candidates: self_ty={:?} obligation={:?}",
self_ty,
obligation);
candidates.vec.push(GeneratorCandidate);
Ok(())
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_generator_candidates: ambiguous self-type");
candidates.ambiguous = true;
return Ok(());
// touch bound regions, they just capture the in-scope
// type/region parameters
match obligation.self_ty().skip_binder().sty {
- ty::TyClosure(closure_def_id, closure_substs) => {
+ ty::Closure(closure_def_id, closure_substs) => {
debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}",
kind, obligation);
match self.infcx.closure_kind(closure_def_id, closure_substs) {
};
Ok(())
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
return Ok(());
// ok to skip binder because what we are inspecting doesn't involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_fn_pointer_candidates: ambiguous self-type");
candidates.ambiguous = true; // could wind up being a fn() type
}
// provide an impl, but only for suitable `fn` pointers
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
if let ty::FnSig {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
if self.tcx().trait_is_auto(def_id) {
match self_ty.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
// For object types, we don't know what the closed
// over types are. This means we conservatively
// say nothing; a candidate may be added by
// `assemble_candidates_from_object_ty`.
}
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
// Since the contents of foreign types is unknown,
// we don't add any `..` impl. Default traits could
// still be provided by a manual implementation for
// this trait and type.
}
- ty::TyParam(..) |
- ty::TyProjection(..) => {
+ ty::Param(..) |
+ ty::Projection(..) => {
// In these cases, we don't know what the actual
// type is. Therefore, we cannot break it down
// into its constituent types. So we don't
// for an example of a test case that exercises
// this path.
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// the auto impl might apply, we don't know
candidates.ambiguous = true;
}
// any LBR.
let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
let poly_trait_ref = match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
if data.auto_traits().any(|did| did == obligation.predicate.def_id()) {
debug!("assemble_candidates_from_object_ty: matched builtin bound, \
pushing candidate");
None => return,
}
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
debug!("assemble_candidates_from_object_ty: ambiguous");
candidates.ambiguous = true; // could wind up being an object type
return;
let may_apply = match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
- (&ty::TyDynamic(ref data_a, ..), &ty::TyDynamic(ref data_b, ..)) => {
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
// Upcasts permit two things:
//
// 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
}
// T -> Trait.
- (_, &ty::TyDynamic(..)) => true,
+ (_, &ty::Dynamic(..)) => true,
// Ambiguous handling is below T -> Trait, because inference
// variables can still implement Unsize<Trait> and nested
// obligations will have the final say (likely deferred).
- (&ty::TyInfer(ty::TyVar(_)), _) |
- (_, &ty::TyInfer(ty::TyVar(_))) => {
+ (&ty::Infer(ty::TyVar(_)), _) |
+ (_, &ty::Infer(ty::TyVar(_))) => {
debug!("assemble_candidates_for_unsizing: ambiguous");
candidates.ambiguous = true;
false
}
// [T; n] -> [T].
- (&ty::TyArray(..), &ty::TySlice(_)) => true,
+ (&ty::Array(..), &ty::Slice(_)) => true,
// Struct<T> -> Struct<U>.
- (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => {
+ (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
def_id_a == def_id_b
}
// (.., T) -> (.., U).
- (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
tys_a.len() == tys_b.len()
}
obligation.predicate.skip_binder().self_ty());
match self_ty.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
- ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) |
- ty::TyNever | ty::TyError => {
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) |
+ ty::Char | ty::Ref(..) | ty::Generator(..) |
+ ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) |
+ ty::Never | ty::Error => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
- ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) => None,
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
Where(ty::Binder::bind(tys.last().into_iter().cloned().collect()))
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder::bind(
))
}
- ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None,
- ty::TyInfer(ty::TyVar(_)) => Ambiguous,
+ ty::Projection(_) | ty::Param(_) | ty::Anon(..) => None,
+ ty::Infer(ty::TyVar(_)) => Ambiguous,
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}",
self_ty);
}
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match self_ty.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyError => {
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Error => {
Where(ty::Binder::dummy(Vec::new()))
}
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyChar | ty::TyRawPtr(..) | ty::TyNever |
- ty::TyRef(_, _, hir::MutImmutable) => {
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::Char | ty::RawPtr(..) | ty::Never |
+ ty::Ref(_, _, hir::MutImmutable) => {
// Implementations provided in libcore
None
}
- ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) |
- ty::TyGenerator(..) | ty::TyGeneratorWitness(..) | ty::TyForeign(..) |
- ty::TyRef(_, _, hir::MutMutable) => {
+ ty::Dynamic(..) | ty::Str | ty::Slice(..) |
+ ty::Generator(..) | ty::GeneratorWitness(..) | ty::Foreign(..) |
+ ty::Ref(_, _, hir::MutMutable) => {
None
}
- ty::TyArray(element_ty, _) => {
+ ty::Array(element_ty, _) => {
// (*) binder moved here
Where(ty::Binder::bind(vec![element_ty]))
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
// (*) binder moved here
Where(ty::Binder::bind(tys.to_vec()))
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let trait_id = obligation.predicate.def_id();
let is_copy_trait = Some(trait_id) == self.tcx().lang_items().copy_trait();
let is_clone_trait = Some(trait_id) == self.tcx().lang_items().clone_trait();
}
}
- ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => {
+ ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Anon(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}",
self_ty);
}
/// ```
fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
match t.sty {
- ty::TyUint(_) |
- ty::TyInt(_) |
- ty::TyBool |
- ty::TyFloat(_) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) |
- ty::TyStr |
- ty::TyError |
- ty::TyInfer(ty::IntVar(_)) |
- ty::TyInfer(ty::FloatVar(_)) |
- ty::TyNever |
- ty::TyChar => {
+ ty::Uint(_) |
+ ty::Int(_) |
+ ty::Bool |
+ ty::Float(_) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) |
+ ty::Str |
+ ty::Error |
+ ty::Infer(ty::IntVar(_)) |
+ ty::Infer(ty::FloatVar(_)) |
+ ty::Never |
+ ty::Char => {
Vec::new()
}
- ty::TyDynamic(..) |
- ty::TyParam(..) |
- ty::TyForeign(..) |
- ty::TyProjection(..) |
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::TyVar(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) => {
+ ty::Dynamic(..) |
+ ty::Param(..) |
+ ty::Foreign(..) |
+ ty::Projection(..) |
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::TyVar(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) => {
bug!("asked to assemble constituent types of unexpected type: {:?}",
t);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
- ty::TyRef(_, element_ty, _) => {
+ ty::RawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
+ ty::Ref(_, element_ty, _) => {
vec![element_ty]
},
- ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
+ ty::Array(element_ty, _) | ty::Slice(element_ty) => {
vec![element_ty]
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
tys.to_vec()
}
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
substs.upvar_tys(def_id, self.tcx()).collect()
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
let witness = substs.witness(def_id, self.tcx());
substs.upvar_tys(def_id, self.tcx()).chain(iter::once(witness)).collect()
}
- ty::TyGeneratorWitness(types) => {
+ ty::GeneratorWitness(types) => {
// This is sound because no regions in the witness can refer to
// the binder outside the witness. So we'll effectivly reuse
// the implicit binder around the witness.
}
// for `PhantomData<T>`, we pass `T`
- ty::TyAdt(def, substs) if def.is_phantom_data() => {
+ ty::Adt(def, substs) if def.is_phantom_data() => {
substs.types().collect()
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
def.all_fields()
.map(|f| f.ty(self.tcx(), substs))
.collect()
}
- ty::TyAnon(def_id, substs) => {
+ ty::Anon(def_id, substs) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
// case that results. -nmatsakis
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let poly_trait_ref = match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
data.principal().unwrap().with_self_ty(self.tcx(), self_ty)
}
_ => {
// type/region parameters
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
let (generator_def_id, substs) = match self_ty.sty {
- ty::TyGenerator(id, substs, _) => (id, substs),
+ ty::Generator(id, substs, _) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation)
};
// type/region parameters
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.sty {
- ty::TyClosure(id, substs) => (id, substs),
+ ty::Closure(id, substs) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation)
};
let mut nested = vec![];
match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
- (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => {
+ (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See assemble_candidates_for_unsizing for more info.
let existential_predicates = data_a.map_bound(|data_a| {
let principal = data_a.principal();
}
// T -> Trait.
- (_, &ty::TyDynamic(ref data, r)) => {
+ (_, &ty::Dynamic(ref data, r)) => {
let mut object_dids =
data.auto_traits().chain(data.principal().map(|p| p.def_id()));
if let Some(did) = object_dids.find(|did| {
}
// [T; n] -> [T].
- (&ty::TyArray(a, _), &ty::TySlice(b)) => {
+ (&ty::Array(a, _), &ty::Slice(b)) => {
let InferOk { obligations, .. } =
self.infcx.at(&obligation.cause, obligation.param_env)
.eq(b, a)
}
// Struct<T> -> Struct<U>.
- (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => {
+ (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
let fields = def
.all_fields()
.map(|f| tcx.type_of(f.did))
let mut ty_params = BitArray::new(substs_a.types().count());
let mut found = false;
for ty in field.walk() {
- if let ty::TyParam(p) = ty.sty {
+ if let ty::Param(p) = ty.sty {
ty_params.insert(p.idx as usize);
found = true;
}
}
// Replace type parameters used in unsizing with
- // TyError and ensure they do not affect any other fields.
+ // Error and ensure they do not affect any other fields.
// This could be checked after type collection for any struct
// with a potentially unsized trailing field.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
}
// (.., T) -> (.., U).
- (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
assert_eq!(tys_a.len(), tys_b.len());
// The last field of the tuple has to exist.
.unwrap()
.subst(infcx.tcx, &source_substs);
- // translate the Self and TyParam parts of the substitution, since those
+ // translate the Self and Param parts of the substitution, since those
// vary across impls
let target_substs = match target_node {
specialization_graph::Node::Impl(target_impl) => {
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<traits::Goal<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<traits::Goal<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter()
.map(|t| t.fold_with(folder))
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<traits::Clause<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<traits::Clause<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter()
.map(|t| t.fold_with(folder))
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
- (_, &ty::TyInfer(ty::FreshTy(_))) |
- (_, &ty::TyInfer(ty::FreshIntTy(_))) |
- (_, &ty::TyInfer(ty::FreshFloatTy(_))) => {
+ (_, &ty::Infer(ty::FreshTy(_))) |
+ (_, &ty::Infer(ty::FreshIntTy(_))) |
+ (_, &ty::Infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) => {
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) => {
Err(TypeError::Sorts(relate::expected_found(self, &a, &b)))
}
- (&ty::TyError, _) | (_, &ty::TyError) => {
+ (&ty::Error, _) | (_, &ty::Error) => {
Ok(self.tcx().types.err)
}
impl<'tcx> CastTy<'tcx> {
pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
match t.sty {
- ty::TyBool => Some(CastTy::Int(IntTy::Bool)),
- ty::TyChar => Some(CastTy::Int(IntTy::Char)),
- ty::TyInt(_) => Some(CastTy::Int(IntTy::I)),
- ty::TyInfer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
- ty::TyInfer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
- ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))),
- ty::TyFloat(_) => Some(CastTy::Float),
- ty::TyAdt(d,_) if d.is_enum() && d.is_payloadfree() =>
+ ty::Bool => Some(CastTy::Int(IntTy::Bool)),
+ ty::Char => Some(CastTy::Int(IntTy::Char)),
+ ty::Int(_) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
+ ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))),
+ ty::Float(_) => Some(CastTy::Float),
+ ty::Adt(d,_) if d.is_enum() && d.is_payloadfree() =>
Some(CastTy::Int(IntTy::CEnum)),
- ty::TyRawPtr(mt) => Some(CastTy::Ptr(mt)),
- ty::TyRef(_, ty, mutbl) => Some(CastTy::RPtr(ty::TypeAndMut { ty, mutbl })),
- ty::TyFnPtr(..) => Some(CastTy::FnPtr),
+ ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
+ ty::Ref(_, ty, mutbl) => Some(CastTy::RPtr(ty::TypeAndMut { ty, mutbl })),
+ ty::FnPtr(..) => Some(CastTy::FnPtr),
_ => None,
}
}
}
impl<'tcx> EncodableWithShorthand for Ty<'tcx> {
- type Variant = ty::TypeVariants<'tcx>;
+ type Variant = ty::TyKind<'tcx>;
fn variant(&self) -> &Self::Variant {
&self.sty
}
})
} else {
let tcx = decoder.tcx();
- Ok(tcx.mk_ty(ty::TypeVariants::decode(decoder)?))
+ Ok(tcx.mk_ty(ty::TyKind::decode(decoder)?))
}
}
#[inline]
pub fn decode_ty_slice<'a, 'tcx, D>(decoder: &mut D)
- -> Result<&'tcx ty::Slice<Ty<'tcx>>, D::Error>
+ -> Result<&'tcx ty::List<Ty<'tcx>>, D::Error>
where D: TyDecoder<'a, 'tcx>,
'tcx: 'a,
{
#[inline]
pub fn decode_existential_predicate_slice<'a, 'tcx, D>(decoder: &mut D)
- -> Result<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, D::Error>
+ -> Result<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>, D::Error>
where D: TyDecoder<'a, 'tcx>,
'tcx: 'a,
{
}
}
- impl<$($typaram),*> SpecializedDecoder<&'tcx ty::Slice<ty::Ty<'tcx>>>
+ impl<$($typaram),*> SpecializedDecoder<&'tcx ty::List<ty::Ty<'tcx>>>
for $DecoderName<$($typaram),*> {
fn specialized_decode(&mut self)
- -> Result<&'tcx ty::Slice<ty::Ty<'tcx>>, Self::Error> {
+ -> Result<&'tcx ty::List<ty::Ty<'tcx>>, Self::Error> {
decode_ty_slice(self)
}
}
}
}
- impl<$($typaram),*> SpecializedDecoder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>
+ impl<$($typaram),*> SpecializedDecoder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>
for $DecoderName<$($typaram),*> {
fn specialized_decode(&mut self)
- -> Result<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, Self::Error> {
+ -> Result<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>, Self::Error> {
decode_existential_predicate_slice(self)
}
}
use traits;
use traits::{Clause, Clauses, Goal, Goals};
use ty::{self, Ty, TypeAndMut};
-use ty::{TyS, TypeVariants, Slice};
+use ty::{TyS, TyKind, List};
use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const};
use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate};
use ty::RegionKind;
use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
-use ty::TypeVariants::*;
+use ty::TyKind::*;
use ty::GenericParamDefKind;
use ty::layout::{LayoutDetails, TargetDataLayout};
use ty::query;
/// Specifically use a speedy hash algorithm for these hash sets,
/// they're accessed quite often.
type_: InternedSet<'tcx, TyS<'tcx>>,
- type_list: InternedSet<'tcx, Slice<Ty<'tcx>>>,
+ type_list: InternedSet<'tcx, List<Ty<'tcx>>>,
substs: InternedSet<'tcx, Substs<'tcx>>,
- canonical_var_infos: InternedSet<'tcx, Slice<CanonicalVarInfo>>,
+ canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo>>,
region: InternedSet<'tcx, RegionKind>,
- existential_predicates: InternedSet<'tcx, Slice<ExistentialPredicate<'tcx>>>,
- predicates: InternedSet<'tcx, Slice<Predicate<'tcx>>>,
+ existential_predicates: InternedSet<'tcx, List<ExistentialPredicate<'tcx>>>,
+ predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
const_: InternedSet<'tcx, Const<'tcx>>,
- clauses: InternedSet<'tcx, Slice<Clause<'tcx>>>,
- goals: InternedSet<'tcx, Slice<Goal<'tcx>>>,
+ clauses: InternedSet<'tcx, List<Clause<'tcx>>>,
+ goals: InternedSet<'tcx, List<Goal<'tcx>>>,
}
impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> {
fn intern_ty(
local: &CtxtInterners<'tcx>,
global: &CtxtInterners<'gcx>,
- st: TypeVariants<'tcx>
+ st: TyKind<'tcx>
) -> Ty<'tcx> {
let flags = super::flags::FlagComputation::for_sty(&st);
fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
// Ensure our type representation does not grow
#[cfg(target_pointer_width = "64")]
- assert!(mem::size_of::<ty::TypeVariants>() <= 24);
+ assert!(mem::size_of::<ty::TyKind>() <= 24);
#[cfg(target_pointer_width = "64")]
assert!(mem::size_of::<ty::TyS>() <= 32);
&*r
};
CommonTypes {
- bool: mk(TyBool),
- char: mk(TyChar),
- never: mk(TyNever),
- err: mk(TyError),
- isize: mk(TyInt(ast::IntTy::Isize)),
- i8: mk(TyInt(ast::IntTy::I8)),
- i16: mk(TyInt(ast::IntTy::I16)),
- i32: mk(TyInt(ast::IntTy::I32)),
- i64: mk(TyInt(ast::IntTy::I64)),
- i128: mk(TyInt(ast::IntTy::I128)),
- usize: mk(TyUint(ast::UintTy::Usize)),
- u8: mk(TyUint(ast::UintTy::U8)),
- u16: mk(TyUint(ast::UintTy::U16)),
- u32: mk(TyUint(ast::UintTy::U32)),
- u64: mk(TyUint(ast::UintTy::U64)),
- u128: mk(TyUint(ast::UintTy::U128)),
- f32: mk(TyFloat(ast::FloatTy::F32)),
- f64: mk(TyFloat(ast::FloatTy::F64)),
+ bool: mk(Bool),
+ char: mk(Char),
+ never: mk(Never),
+ err: mk(Error),
+ isize: mk(Int(ast::IntTy::Isize)),
+ i8: mk(Int(ast::IntTy::I8)),
+ i16: mk(Int(ast::IntTy::I16)),
+ i32: mk(Int(ast::IntTy::I32)),
+ i64: mk(Int(ast::IntTy::I64)),
+ i128: mk(Int(ast::IntTy::I128)),
+ usize: mk(Uint(ast::UintTy::Usize)),
+ u8: mk(Uint(ast::UintTy::U8)),
+ u16: mk(Uint(ast::UintTy::U16)),
+ u32: mk(Uint(ast::UintTy::U32)),
+ u64: mk(Uint(ast::UintTy::U64)),
+ u128: mk(Uint(ast::UintTy::U128)),
+ f32: mk(Float(ast::FloatTy::F32)),
+ f64: mk(Float(ast::FloatTy::F64)),
re_empty: mk_region(RegionKind::ReEmpty),
re_static: mk_region(RegionKind::ReStatic),
/// None is returned if the value or one of the components is not part
/// of the provided context.
/// For Ty, None can be returned if either the type interner doesn't
-/// contain the TypeVariants key or if the address of the interned
+/// contain the TyKind key or if the address of the interned
/// pointer differs. The latter case is possible if a primitive type,
/// e.g. `()` or `u8`, was interned in a different context.
pub trait Lift<'tcx>: fmt::Debug {
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Goal<'a>> {
- type Lifted = &'tcx Slice<Goal<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Goal<'a>> {
+ type Lifted = &'tcx List<Goal<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'tcx>,
- ) -> Option<&'tcx Slice<Goal<'tcx>>> {
+ ) -> Option<&'tcx List<Goal<'tcx>>> {
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Clause<'a>> {
- type Lifted = &'tcx Slice<Clause<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Clause<'a>> {
+ type Lifted = &'tcx List<Clause<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'tcx>,
- ) -> Option<&'tcx Slice<Clause<'tcx>>> {
+ ) -> Option<&'tcx List<Clause<'tcx>>> {
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
type Lifted = &'tcx Substs<'tcx>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(&self[..] as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Ty<'a>> {
- type Lifted = &'tcx Slice<Ty<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Ty<'a>> {
+ type Lifted = &'tcx List<Ty<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<Ty<'tcx>>> {
+ -> Option<&'tcx List<Ty<'tcx>>> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<ExistentialPredicate<'a>> {
- type Lifted = &'tcx Slice<ExistentialPredicate<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<ExistentialPredicate<'a>> {
+ type Lifted = &'tcx List<ExistentialPredicate<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<ExistentialPredicate<'tcx>>> {
+ -> Option<&'tcx List<ExistentialPredicate<'tcx>>> {
if self.is_empty() {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<Predicate<'a>> {
- type Lifted = &'tcx Slice<Predicate<'tcx>>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Predicate<'a>> {
+ type Lifted = &'tcx List<Predicate<'tcx>>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
- -> Option<&'tcx Slice<Predicate<'tcx>>> {
+ -> Option<&'tcx List<Predicate<'tcx>>> {
if self.is_empty() {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
}
}
-impl<'a, 'tcx> Lift<'tcx> for &'a Slice<CanonicalVarInfo> {
- type Lifted = &'tcx Slice<CanonicalVarInfo>;
+impl<'a, 'tcx> Lift<'tcx> for &'a List<CanonicalVarInfo> {
+ type Lifted = &'tcx List<CanonicalVarInfo>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
if self.len() == 0 {
- return Some(Slice::empty());
+ return Some(List::empty());
}
if tcx.interners.arena.in_arena(*self as *const _) {
return Some(unsafe { mem::transmute(*self) });
for &Interned(t) in tcx.interners.type_.borrow().iter() {
let variant = match t.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
- ty::TyFloat(..) | ty::TyStr | ty::TyNever => continue,
- ty::TyError => /* unimportant */ continue,
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(..) | ty::Str | ty::Never => continue,
+ ty::Error => /* unimportant */ continue,
$(ty::$variant(..) => &mut $variant,)*
};
let region = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
pub fn print_debug_stats(self) {
sty_debug_print!(
self,
- TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr,
- TyGenerator, TyGeneratorWitness, TyDynamic, TyClosure, TyTuple,
- TyParam, TyInfer, TyProjection, TyAnon, TyForeign);
+ Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr,
+ Generator, GeneratorWitness, Dynamic, Closure, Tuple,
+ Param, Infer, Projection, Anon, Foreign);
println!("Substs interner: #{}", self.interners.substs.borrow().len());
println!("Region interner: #{}", self.interners.region.borrow().len());
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<TypeVariants<'lcx>> for Interned<'tcx, TyS<'tcx>> {
- fn borrow<'a>(&'a self) -> &'a TypeVariants<'lcx> {
+impl<'tcx: 'lcx, 'lcx> Borrow<TyKind<'lcx>> for Interned<'tcx, TyS<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a TyKind<'lcx> {
&self.0.sty
}
}
-// NB: An Interned<Slice<T>> compares and hashes as its elements.
-impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, Slice<T>> {
- fn eq(&self, other: &Interned<'tcx, Slice<T>>) -> bool {
+// NB: An Interned<List<T>> compares and hashes as its elements.
+impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, List<T>> {
+ fn eq(&self, other: &Interned<'tcx, List<T>>) -> bool {
self.0[..] == other.0[..]
}
}
-impl<'tcx, T: Eq> Eq for Interned<'tcx, Slice<T>> {}
+impl<'tcx, T: Eq> Eq for Interned<'tcx, List<T>> {}
-impl<'tcx, T: Hash> Hash for Interned<'tcx, Slice<T>> {
+impl<'tcx, T: Hash> Hash for Interned<'tcx, List<T>> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0[..].hash(s)
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, Slice<Ty<'tcx>>> {
+impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, List<Ty<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] {
&self.0[..]
}
}
-impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, Slice<CanonicalVarInfo>> {
+impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, List<CanonicalVarInfo>> {
fn borrow<'a>(&'a self) -> &'a [CanonicalVarInfo] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[ExistentialPredicate<'lcx>]>
- for Interned<'tcx, Slice<ExistentialPredicate<'tcx>>> {
+ for Interned<'tcx, List<ExistentialPredicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [ExistentialPredicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Predicate<'lcx>]>
- for Interned<'tcx, Slice<Predicate<'tcx>>> {
+ for Interned<'tcx, List<Predicate<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Predicate<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Clause<'lcx>]>
-for Interned<'tcx, Slice<Clause<'tcx>>> {
+for Interned<'tcx, List<Clause<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Clause<'lcx>] {
&self.0[..]
}
}
impl<'tcx: 'lcx, 'lcx> Borrow<[Goal<'lcx>]>
-for Interned<'tcx, Slice<Goal<'tcx>>> {
+for Interned<'tcx, List<Goal<'tcx>>> {
fn borrow<'a>(&'a self) -> &'a [Goal<'lcx>] {
&self.0[..]
}
($($field:ident: $method:ident($ty:ident)),+) => (
$(intern_method!( 'tcx, $field: $method(
&[$ty<'tcx>],
- |a, v| Slice::from_arena(a, v),
+ |a, v| List::from_arena(a, v),
Deref::deref,
- |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+
+ |xs: &[$ty]| xs.iter().any(keep_local)) -> List<$ty<'tcx>>);)+
)
}
'tcx,
canonical_var_infos: _intern_canonical_var_infos(
&[CanonicalVarInfo],
- |a, v| Slice::from_arena(a, v),
+ |a, v| List::from_arena(a, v),
Deref::deref,
|_xs: &[CanonicalVarInfo]| -> bool { false }
- ) -> Slice<CanonicalVarInfo>
+ ) -> List<CanonicalVarInfo>
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
let converted_sig = sig.map_bound(|s| {
let params_iter = match s.inputs()[0].sty {
- ty::TyTuple(params) => {
+ ty::Tuple(params) => {
params.into_iter().cloned()
}
_ => bug!(),
self.mk_fn_ptr(converted_sig)
}
- pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> {
+ pub fn mk_ty(&self, st: TyKind<'tcx>) -> Ty<'tcx> {
CtxtInterners::intern_ty(&self.interners, &self.global_interners, st)
}
}
pub fn mk_str(self) -> Ty<'tcx> {
- self.mk_ty(TyStr)
+ self.mk_ty(Str)
}
pub fn mk_static_str(self) -> Ty<'tcx> {
pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// take a copy of substs so that we own the vectors inside
- self.mk_ty(TyAdt(def, substs))
+ self.mk_ty(Adt(def, substs))
}
pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
- self.mk_ty(TyForeign(def_id))
+ self.mk_ty(Foreign(def_id))
}
pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
}
}
});
- self.mk_ty(TyAdt(adt_def, substs))
+ self.mk_ty(Adt(adt_def, substs))
}
pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyRawPtr(tm))
+ self.mk_ty(RawPtr(tm))
}
pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyRef(r, tm.ty, tm.mutbl))
+ self.mk_ty(Ref(r, tm.ty, tm.mutbl))
}
pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
}
pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
- self.mk_ty(TyArray(ty, ty::Const::from_usize(self, n)))
+ self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
}
pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TySlice(ty))
+ self.mk_ty(Slice(ty))
}
pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
- self.mk_ty(TyTuple(self.intern_type_list(ts)))
+ self.mk_ty(Tuple(self.intern_type_list(ts)))
}
pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
- iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts))))
+ iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(ts))))
}
pub fn mk_nil(self) -> Ty<'tcx> {
}
pub fn mk_bool(self) -> Ty<'tcx> {
- self.mk_ty(TyBool)
+ self.mk_ty(Bool)
}
pub fn mk_fn_def(self, def_id: DefId,
substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnDef(def_id, substs))
+ self.mk_ty(FnDef(def_id, substs))
}
pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyFnPtr(fty))
+ self.mk_ty(FnPtr(fty))
}
pub fn mk_dynamic(
self,
- obj: ty::Binder<&'tcx Slice<ExistentialPredicate<'tcx>>>,
+ obj: ty::Binder<&'tcx List<ExistentialPredicate<'tcx>>>,
reg: ty::Region<'tcx>
) -> Ty<'tcx> {
- self.mk_ty(TyDynamic(obj, reg))
+ self.mk_ty(Dynamic(obj, reg))
}
pub fn mk_projection(self,
item_def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> Ty<'tcx> {
- self.mk_ty(TyProjection(ProjectionTy {
+ self.mk_ty(Projection(ProjectionTy {
item_def_id,
substs,
}))
pub fn mk_closure(self, closure_id: DefId, closure_substs: ClosureSubsts<'tcx>)
-> Ty<'tcx> {
- self.mk_ty(TyClosure(closure_id, closure_substs))
+ self.mk_ty(Closure(closure_id, closure_substs))
}
pub fn mk_generator(self,
generator_substs: GeneratorSubsts<'tcx>,
movability: hir::GeneratorMovability)
-> Ty<'tcx> {
- self.mk_ty(TyGenerator(id, generator_substs, movability))
+ self.mk_ty(Generator(id, generator_substs, movability))
}
- pub fn mk_generator_witness(self, types: ty::Binder<&'tcx Slice<Ty<'tcx>>>) -> Ty<'tcx> {
- self.mk_ty(TyGeneratorWitness(types))
+ pub fn mk_generator_witness(self, types: ty::Binder<&'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
+ self.mk_ty(GeneratorWitness(types))
}
pub fn mk_var(self, v: TyVid) -> Ty<'tcx> {
}
pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> {
- self.mk_ty(TyInfer(it))
+ self.mk_ty(Infer(it))
}
pub fn mk_ty_param(self,
index: u32,
name: InternedString) -> Ty<'tcx> {
- self.mk_ty(TyParam(ParamTy { idx: index, name: name }))
+ self.mk_ty(Param(ParamTy { idx: index, name: name }))
}
pub fn mk_self_type(self) -> Ty<'tcx> {
}
pub fn mk_anon(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
- self.mk_ty(TyAnon(def_id, substs))
+ self.mk_ty(Anon(def_id, substs))
}
pub fn intern_existential_predicates(self, eps: &[ExistentialPredicate<'tcx>])
- -> &'tcx Slice<ExistentialPredicate<'tcx>> {
+ -> &'tcx List<ExistentialPredicate<'tcx>> {
assert!(!eps.is_empty());
assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater));
self._intern_existential_predicates(eps)
}
pub fn intern_predicates(self, preds: &[Predicate<'tcx>])
- -> &'tcx Slice<Predicate<'tcx>> {
+ -> &'tcx List<Predicate<'tcx>> {
// FIXME consider asking the input slice to be sorted to avoid
// re-interning permutations, in which case that would be asserted
// here.
if preds.len() == 0 {
// The macro-generated method below asserts we don't intern an empty slice.
- Slice::empty()
+ List::empty()
} else {
self._intern_predicates(preds)
}
}
- pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx Slice<Ty<'tcx>> {
+ pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_type_list(ts)
}
}
- pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx Slice<Kind<'tcx>> {
+ pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx List<Kind<'tcx>> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_substs(ts)
}
pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'gcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self.global_tcx()._intern_canonical_var_infos(ts)
}
pub fn intern_clauses(self, ts: &[Clause<'tcx>]) -> Clauses<'tcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_clauses(ts)
}
pub fn intern_goals(self, ts: &[Goal<'tcx>]) -> Goals<'tcx> {
if ts.len() == 0 {
- Slice::empty()
+ List::empty()
} else {
self._intern_goals(ts)
}
}
pub fn mk_existential_predicates<I: InternAs<[ExistentialPredicate<'tcx>],
- &'tcx Slice<ExistentialPredicate<'tcx>>>>(self, iter: I)
+ &'tcx List<ExistentialPredicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_existential_predicates(xs))
}
pub fn mk_predicates<I: InternAs<[Predicate<'tcx>],
- &'tcx Slice<Predicate<'tcx>>>>(self, iter: I)
+ &'tcx List<Predicate<'tcx>>>>(self, iter: I)
-> I::Output {
iter.intern_with(|xs| self.intern_predicates(xs))
}
pub fn mk_type_list<I: InternAs<[Ty<'tcx>],
- &'tcx Slice<Ty<'tcx>>>>(self, iter: I) -> I::Output {
+ &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_type_list(xs))
}
pub fn mk_substs<I: InternAs<[Kind<'tcx>],
- &'tcx Slice<Kind<'tcx>>>>(self, iter: I) -> I::Output {
+ &'tcx List<Kind<'tcx>>>>(self, iter: I) -> I::Output {
iter.intern_with(|xs| self.intern_substs(xs))
}
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
- ExistentialMismatch(ExpectedFound<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>),
+ ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
OldStyleLUB(Box<TypeError<'tcx>>),
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String {
match self.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(),
- ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
+ ty::Bool | ty::Char | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string(),
+ ty::Tuple(ref tys) if tys.is_empty() => self.to_string(),
- ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)),
- ty::TyForeign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)),
- ty::TyArray(_, n) => {
+ ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)),
+ ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)),
+ ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n),
None => "array".to_string(),
}
}
- ty::TySlice(_) => "slice".to_string(),
- ty::TyRawPtr(_) => "*-ptr".to_string(),
- ty::TyRef(region, ty, mutbl) => {
+ ty::Slice(_) => "slice".to_string(),
+ ty::RawPtr(_) => "*-ptr".to_string(),
+ ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
format!("&{}", tymut_string)
}
}
- ty::TyFnDef(..) => "fn item".to_string(),
- ty::TyFnPtr(_) => "fn pointer".to_string(),
- ty::TyDynamic(ref inner, ..) => {
+ ty::FnDef(..) => "fn item".to_string(),
+ ty::FnPtr(_) => "fn pointer".to_string(),
+ ty::Dynamic(ref inner, ..) => {
inner.principal().map_or_else(|| "trait".to_string(),
|p| format!("trait {}", tcx.item_path_str(p.def_id())))
}
- ty::TyClosure(..) => "closure".to_string(),
- ty::TyGenerator(..) => "generator".to_string(),
- ty::TyGeneratorWitness(..) => "generator witness".to_string(),
- ty::TyTuple(..) => "tuple".to_string(),
- ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(),
- ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(),
- ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(),
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) => "skolemized type".to_string(),
- ty::TyInfer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
- ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
- ty::TyProjection(_) => "associated type".to_string(),
- ty::TyParam(ref p) => {
+ ty::Closure(..) => "closure".to_string(),
+ ty::Generator(..) => "generator".to_string(),
+ ty::GeneratorWitness(..) => "generator witness".to_string(),
+ ty::Tuple(..) => "tuple".to_string(),
+ ty::Infer(ty::TyVar(_)) => "inferred type".to_string(),
+ ty::Infer(ty::IntVar(_)) => "integral variable".to_string(),
+ ty::Infer(ty::FloatVar(_)) => "floating-point variable".to_string(),
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) => "skolemized type".to_string(),
+ ty::Infer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(),
+ ty::Infer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(),
+ ty::Projection(_) => "associated type".to_string(),
+ ty::Param(ref p) => {
if p.is_self() {
"Self".to_string()
} else {
"type parameter".to_string()
}
}
- ty::TyAnon(..) => "anonymized type".to_string(),
- ty::TyError => "type error".to_string(),
+ ty::Anon(..) => "anonymized type".to_string(),
+ ty::Error => "type error".to_string(),
}
}
}
db.help("consider boxing your closure and/or using it as a trait object");
}
match (&values.found.sty, &values.expected.sty) { // Issue #53280
- (ty::TyInfer(ty::IntVar(_)), ty::TyFloat(_)) => {
+ (ty::Infer(ty::IntVar(_)), ty::Float(_)) => {
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
-> Option<SimplifiedType>
{
match ty.sty {
- ty::TyBool => Some(BoolSimplifiedType),
- ty::TyChar => Some(CharSimplifiedType),
- ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)),
- ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)),
- ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)),
- ty::TyAdt(def, _) => Some(AdtSimplifiedType(def.did)),
- ty::TyStr => Some(StrSimplifiedType),
- ty::TyArray(..) | ty::TySlice(_) => Some(ArraySimplifiedType),
- ty::TyRawPtr(_) => Some(PtrSimplifiedType),
- ty::TyDynamic(ref trait_info, ..) => {
+ ty::Bool => Some(BoolSimplifiedType),
+ ty::Char => Some(CharSimplifiedType),
+ ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
+ ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
+ ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
+ ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)),
+ ty::Str => Some(StrSimplifiedType),
+ ty::Array(..) | ty::Slice(_) => Some(ArraySimplifiedType),
+ ty::RawPtr(_) => Some(PtrSimplifiedType),
+ ty::Dynamic(ref trait_info, ..) => {
trait_info.principal().map(|p| TraitSimplifiedType(p.def_id()))
}
- ty::TyRef(_, ty, _) => {
+ ty::Ref(_, ty, _) => {
// since we introduce auto-refs during method lookup, we
// just treat &T and T as equivalent from the point of
// view of possibly unifying
simplify_type(tcx, ty, can_simplify_params)
}
- ty::TyFnDef(def_id, _) |
- ty::TyClosure(def_id, _) => {
+ ty::FnDef(def_id, _) |
+ ty::Closure(def_id, _) => {
Some(ClosureSimplifiedType(def_id))
}
- ty::TyGenerator(def_id, _, _) => {
+ ty::Generator(def_id, _, _) => {
Some(GeneratorSimplifiedType(def_id))
}
- ty::TyGeneratorWitness(ref tys) => {
+ ty::GeneratorWitness(ref tys) => {
Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len()))
}
- ty::TyNever => Some(NeverSimplifiedType),
- ty::TyTuple(ref tys) => {
+ ty::Never => Some(NeverSimplifiedType),
+ ty::Tuple(ref tys) => {
Some(TupleSimplifiedType(tys.len()))
}
- ty::TyFnPtr(ref f) => {
+ ty::FnPtr(ref f) => {
Some(FunctionSimplifiedType(f.skip_binder().inputs().len()))
}
- ty::TyProjection(_) | ty::TyParam(_) => {
+ ty::Projection(_) | ty::Param(_) => {
if can_simplify_params {
// In normalized types, projections don't unify with
// anything. when lazy normalization happens, this
None
}
}
- ty::TyAnon(def_id, _) => {
+ ty::Anon(def_id, _) => {
Some(AnonSimplifiedType(def_id))
}
- ty::TyForeign(def_id) => {
+ ty::Foreign(def_id) => {
Some(ForeignSimplifiedType(def_id))
}
- ty::TyInfer(_) | ty::TyError => None,
+ ty::Infer(_) | ty::Error => None,
}
}
}
}
- pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation {
+ pub fn for_sty(st: &ty::TyKind) -> FlagComputation {
let mut result = FlagComputation::new();
result.add_sty(st);
result
}
}
- fn add_sty(&mut self, st: &ty::TypeVariants) {
+ fn add_sty(&mut self, st: &ty::TyKind) {
match st {
- &ty::TyBool |
- &ty::TyChar |
- &ty::TyInt(_) |
- &ty::TyFloat(_) |
- &ty::TyUint(_) |
- &ty::TyNever |
- &ty::TyStr |
- &ty::TyForeign(..) => {
+ &ty::Bool |
+ &ty::Char |
+ &ty::Int(_) |
+ &ty::Float(_) |
+ &ty::Uint(_) |
+ &ty::Never |
+ &ty::Str |
+ &ty::Foreign(..) => {
}
- // You might think that we could just return TyError for
- // any type containing TyError as a component, and get
+ // You might think that we could just return Error for
+ // any type containing Error as a component, and get
// rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with
// the exception of function types that return bot).
// But doing so caused sporadic memory corruption, and
// neither I (tjc) nor nmatsakis could figure out why,
// so we're doing it this way.
- &ty::TyError => {
+ &ty::Error => {
self.add_flags(TypeFlags::HAS_TY_ERR)
}
- &ty::TyParam(ref p) => {
+ &ty::Param(ref p) => {
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
if p.is_self() {
self.add_flags(TypeFlags::HAS_SELF);
}
}
- &ty::TyGenerator(_, ref substs, _) => {
+ &ty::Generator(_, ref substs, _) => {
self.add_flags(TypeFlags::HAS_TY_CLOSURE);
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
self.add_substs(&substs.substs);
}
- &ty::TyGeneratorWitness(ref ts) => {
+ &ty::GeneratorWitness(ref ts) => {
let mut computation = FlagComputation::new();
computation.add_tys(&ts.skip_binder()[..]);
self.add_bound_computation(&computation);
}
- &ty::TyClosure(_, ref substs) => {
+ &ty::Closure(_, ref substs) => {
self.add_flags(TypeFlags::HAS_TY_CLOSURE);
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES);
self.add_substs(&substs.substs);
}
- &ty::TyInfer(infer) => {
+ &ty::Infer(infer) => {
self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); // it might, right?
self.add_flags(TypeFlags::HAS_TY_INFER);
match infer {
}
}
- &ty::TyAdt(_, substs) => {
+ &ty::Adt(_, substs) => {
self.add_substs(substs);
}
- &ty::TyProjection(ref data) => {
+ &ty::Projection(ref data) => {
// currently we can't normalize projections that
// include bound regions, so track those separately.
if !data.has_escaping_regions() {
self.add_projection_ty(data);
}
- &ty::TyAnon(_, substs) => {
+ &ty::Anon(_, substs) => {
self.add_flags(TypeFlags::HAS_PROJECTION);
self.add_substs(substs);
}
- &ty::TyDynamic(ref obj, r) => {
+ &ty::Dynamic(ref obj, r) => {
let mut computation = FlagComputation::new();
for predicate in obj.skip_binder().iter() {
match *predicate {
self.add_region(r);
}
- &ty::TyArray(tt, len) => {
+ &ty::Array(tt, len) => {
self.add_ty(tt);
self.add_const(len);
}
- &ty::TySlice(tt) => {
+ &ty::Slice(tt) => {
self.add_ty(tt)
}
- &ty::TyRawPtr(ref m) => {
+ &ty::RawPtr(ref m) => {
self.add_ty(m.ty);
}
- &ty::TyRef(r, ty, _) => {
+ &ty::Ref(r, ty, _) => {
self.add_region(r);
self.add_ty(ty);
}
- &ty::TyTuple(ref ts) => {
+ &ty::Tuple(ref ts) => {
self.add_tys(&ts[..]);
}
- &ty::TyFnDef(_, substs) => {
+ &ty::FnDef(_, substs) => {
self.add_substs(substs);
}
- &ty::TyFnPtr(f) => {
+ &ty::FnPtr(f) => {
self.add_fn_sig(f);
}
}
// in the normalized form
if self.just_constrained {
match t.sty {
- ty::TyProjection(..) | ty::TyAnon(..) => { return false; }
+ ty::Projection(..) | ty::Anon(..) => { return false; }
_ => { }
}
}
// except according to those terms.
use std::mem;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
- for id in ret.root_ids.drain(..) {
+ for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else {
old_ret.push(id);
}
}
- ret.root_ids.extend(old_ret.drain(..));
+ ret.root_ids.extend(old_ret.drain());
for id in next_forest.root_ids {
if ret.contains(tcx, id) {
}
mem::swap(&mut next_ret, &mut ret.root_ids);
- next_ret.drain(..);
+ next_ret.drain();
}
ret
}
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
- for id in ret.root_ids.drain(..) {
+ for id in ret.root_ids.drain() {
if !next_forest.contains(tcx, id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
- next_ret.drain(..);
+ next_ret.drain();
}
ret
}
use ty::{AdtDef, VariantDef, FieldDef, Ty, TyS};
use ty::{DefId, Substs};
use ty::{AdtKind, Visibility};
-use ty::TypeVariants::*;
+use ty::TyKind::*;
pub use self::def_id_forest::DefIdForest;
tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest
{
match self.sty {
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
{
let substs_set = visited.entry(def.did).or_default();
if !substs_set.insert(substs) {
ret
},
- TyNever => DefIdForest::full(tcx),
- TyTuple(ref tys) => {
+ Never => DefIdForest::full(tcx),
+ Tuple(ref tys) => {
DefIdForest::union(tcx, tys.iter().map(|ty| {
ty.uninhabited_from(visited, tcx)
}))
},
- TyArray(ty, len) => {
+ Array(ty, len) => {
match len.assert_usize(tcx) {
// If the array is definitely non-empty, it's uninhabited if
// the type of its elements is uninhabited.
_ => DefIdForest::empty()
}
}
- TyRef(_, ty, _) => {
+ Ref(_, ty, _) => {
ty.uninhabited_from(visited, tcx)
}
);
let def = match item_type.sty {
- ty::TyFnDef(..) if {
+ ty::FnDef(..) if {
let f = item_type.fn_sig(tcx);
f.abi() == Abi::RustIntrinsic ||
f.abi() == Abi::PlatformIntrinsic
// impl on `Foo`, but fallback to `<Foo>::bar` if self-type is
// anything other than a simple path.
match self_ty.sty {
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
if substs.types().next().is_none() { // ignore regions
self.push_item_path(buffer, adt_def.did);
} else {
}
}
- ty::TyForeign(did) => self.push_item_path(buffer, did),
+ ty::Foreign(did) => self.push_item_path(buffer, did),
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyStr => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Str => {
buffer.push(&self_ty.to_string());
}
/// decisions and we may want to adjust it later.
pub fn characteristic_def_id_of_type(ty: Ty) -> Option<DefId> {
match ty.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
+ ty::Adt(adt_def, _) => Some(adt_def.did),
- ty::TyDynamic(data, ..) => data.principal().map(|p| p.def_id()),
+ ty::Dynamic(data, ..) => data.principal().map(|p| p.def_id()),
- ty::TyArray(subty, _) |
- ty::TySlice(subty) => characteristic_def_id_of_type(subty),
+ ty::Array(subty, _) |
+ ty::Slice(subty) => characteristic_def_id_of_type(subty),
- ty::TyRawPtr(mt) => characteristic_def_id_of_type(mt.ty),
+ ty::RawPtr(mt) => characteristic_def_id_of_type(mt.ty),
- ty::TyRef(_, ty, _) => characteristic_def_id_of_type(ty),
+ ty::Ref(_, ty, _) => characteristic_def_id_of_type(ty),
- ty::TyTuple(ref tys) => tys.iter()
+ ty::Tuple(ref tys) => tys.iter()
.filter_map(|ty| characteristic_def_id_of_type(ty))
.next(),
- ty::TyFnDef(def_id, _) |
- ty::TyClosure(def_id, _) |
- ty::TyGenerator(def_id, _, _) |
- ty::TyForeign(def_id) => Some(def_id),
-
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyStr |
- ty::TyFnPtr(_) |
- ty::TyProjection(_) |
- ty::TyParam(_) |
- ty::TyAnon(..) |
- ty::TyInfer(_) |
- ty::TyError |
- ty::TyGeneratorWitness(..) |
- ty::TyNever |
- ty::TyFloat(_) => None,
+ ty::FnDef(def_id, _) |
+ ty::Closure(def_id, _) |
+ ty::Generator(def_id, _, _) |
+ ty::Foreign(def_id) => Some(def_id),
+
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Str |
+ ty::FnPtr(_) |
+ ty::Projection(_) |
+ ty::Param(_) |
+ ty::Anon(..) |
+ ty::Infer(_) |
+ ty::Error |
+ ty::GeneratorWitness(..) |
+ ty::Never |
+ ty::Float(_) => None,
}
}
Ok(match ty.sty {
// Basic scalars.
- ty::TyBool => {
+ ty::Bool => {
tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I8, false),
valid_range: 0..=1
}))
}
- ty::TyChar => {
+ ty::Char => {
tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I32, false),
valid_range: 0..=0x10FFFF
}))
}
- ty::TyInt(ity) => {
+ ty::Int(ity) => {
scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
}
- ty::TyUint(ity) => {
+ ty::Uint(ity) => {
scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
}
- ty::TyFloat(fty) => scalar(Float(fty)),
- ty::TyFnPtr(_) => {
+ ty::Float(fty) => scalar(Float(fty)),
+ ty::FnPtr(_) => {
let mut ptr = scalar_unit(Pointer);
ptr.valid_range = 1..=*ptr.valid_range.end();
tcx.intern_layout(LayoutDetails::scalar(self, ptr))
}
// The never type.
- ty::TyNever => {
+ ty::Never => {
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Union(0),
}
// Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let mut data_ptr = scalar_unit(Pointer);
if !ty.is_unsafe_ptr() {
data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
let unsized_part = tcx.struct_tail(pointee);
let metadata = match unsized_part.sty {
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
scalar_unit(Int(dl.ptr_sized_integer(), false))
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let mut vtable = scalar_unit(Pointer);
vtable.valid_range = 1..=*vtable.valid_range.end();
vtable
}
// Arrays and slices.
- ty::TyArray(element, mut count) => {
+ ty::Array(element, mut count) => {
if count.has_projections() {
count = tcx.normalize_erasing_regions(param_env, count);
if count.has_projections() {
size
})
}
- ty::TySlice(element) => {
+ ty::Slice(element) => {
let element = self.layout_of(element)?;
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
size: Size::ZERO
})
}
- ty::TyStr => {
+ ty::Str => {
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Array {
}
// Odd unit types.
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
}
- ty::TyDynamic(..) | ty::TyForeign(..) => {
+ ty::Dynamic(..) | ty::Foreign(..) => {
let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
StructKind::AlwaysSized)?;
match unit.abi {
}
// Tuples, generators and closures.
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
let tys = substs.field_tys(def_id, tcx);
univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
let tys = substs.upvar_tys(def_id, tcx);
univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
let kind = if tys.len() == 0 {
StructKind::AlwaysSized
} else {
}
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd() => {
+ ty::Adt(def, ..) if def.repr.simd() => {
let element = self.layout_of(ty.simd_type(tcx))?;
let count = ty.simd_size(tcx) as u64;
assert!(count > 0);
}
// ADTs.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// Cache the field layouts.
let variants = def.variants.iter().map(|v| {
v.fields.iter().map(|field| {
}
// Types with no meaningful known layout.
- ty::TyProjection(_) | ty::TyAnon(..) => {
+ ty::Projection(_) | ty::Anon(..) => {
let normalized = tcx.normalize_erasing_regions(param_env, ty);
if ty == normalized {
return Err(LayoutError::Unknown(ty));
}
tcx.layout_raw(param_env.and(normalized))?
}
- ty::TyGeneratorWitness(..) | ty::TyInfer(_) => {
+ ty::GeneratorWitness(..) | ty::Infer(_) => {
bug!("LayoutDetails::compute: unexpected type `{}`", ty)
}
- ty::TyParam(_) | ty::TyError => {
+ ty::Param(_) | ty::Error => {
return Err(LayoutError::Unknown(ty));
}
})
};
let adt_def = match layout.ty.sty {
- ty::TyAdt(ref adt_def, _) => {
+ ty::Adt(ref adt_def, _) => {
debug!("print-type-size t: `{:?}` process adt", layout.ty);
adt_def
}
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
debug!("print-type-size t: `{:?}` record closure", layout.ty);
record(DataTypeKind::Closure, false, None, vec![]);
return;
};
match ty.sty {
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
let non_zero = !ty.is_unsafe_ptr();
let tail = tcx.struct_tail(pointee);
match tail.sty {
- ty::TyParam(_) | ty::TyProjection(_) => {
+ ty::Param(_) | ty::Projection(_) => {
debug_assert!(tail.has_param_types() || tail.has_self_ty());
Ok(SizeSkeleton::Pointer {
non_zero,
}
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// Only newtypes and enums w/ nullable pointer optimization.
if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
return Err(err);
}
}
- ty::TyProjection(_) | ty::TyAnon(..) => {
+ ty::Projection(_) | ty::Anon(..) => {
let normalized = tcx.normalize_erasing_regions(param_env, ty);
if ty == normalized {
Err(err)
});
let fields = match this.ty.sty {
- ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
+ ty::Adt(def, _) => def.variants[variant_index].fields.len(),
_ => bug!()
};
let tcx = cx.tcx();
fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
let tcx = cx.tcx();
cx.layout_of(match this.ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyFnPtr(_) |
- ty::TyNever |
- ty::TyFnDef(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyForeign(..) |
- ty::TyDynamic(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::FnPtr(_) |
+ ty::Never |
+ ty::FnDef(..) |
+ ty::GeneratorWitness(..) |
+ ty::Foreign(..) |
+ ty::Dynamic(..) => {
bug!("TyLayout::field_type({:?}): not applicable", this)
}
// Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ ty::Ref(_, pointee, _) |
+ ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
assert!(i < this.fields.count());
// Reuse the fat *T type as its own thin pointer data field.
}
match tcx.struct_tail(pointee).sty {
- ty::TySlice(_) |
- ty::TyStr => tcx.types.usize,
- ty::TyDynamic(data, _) => {
+ ty::Slice(_) |
+ ty::Str => tcx.types.usize,
+ ty::Dynamic(data, _) => {
let trait_def_id = data.principal().unwrap().def_id();
let num_fns: u64 = crate::traits::supertrait_def_ids(tcx, trait_def_id)
.map(|trait_def_id| {
}
// Arrays and slices.
- ty::TyArray(element, _) |
- ty::TySlice(element) => element,
- ty::TyStr => tcx.types.u8,
+ ty::Array(element, _) |
+ ty::Slice(element) => element,
+ ty::Str => tcx.types.u8,
// Tuples, generators and closures.
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
substs.upvar_tys(def_id, tcx).nth(i).unwrap()
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
substs.field_tys(def_id, tcx).nth(i).unwrap()
}
- ty::TyTuple(tys) => tys[i],
+ ty::Tuple(tys) => tys[i],
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd() => {
+ ty::Adt(def, ..) if def.repr.simd() => {
this.ty.simd_type(tcx)
}
// ADTs.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
match this.variants {
Variants::Single { index } => {
def.variants[index].fields[i].ty(tcx, substs)
}
}
- ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
- ty::TyInfer(_) | ty::TyError => {
+ ty::Projection(_) | ty::Anon(..) | ty::Param(_) |
+ ty::Infer(_) | ty::Error => {
bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
}
})
// Locals variables which live across yields are stored
// in the generator type as fields. These may be uninitialized
// so we don't look for niches there.
- if let ty::TyGenerator(..) = layout.ty.sty {
+ if let ty::Generator(..) = layout.ty.sty {
return Ok(None);
}
use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString};
use syntax_pos::{DUMMY_SP, Span};
-use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter;
+use smallvec;
use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult,
HashStable};
pub use self::sty::{FnSig, GenSig, PolyFnSig, PolyGenSig};
pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate};
pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut};
-pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef};
+pub use self::sty::{TraitRef, TyKind, PolyTraitRef};
pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef};
pub use self::sty::{ExistentialProjection, PolyExistentialProjection, Const};
pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
pub use self::sty::BoundRegion::*;
pub use self::sty::InferTy::*;
pub use self::sty::RegionKind::*;
-pub use self::sty::TypeVariants::*;
+pub use self::sty::TyKind::*;
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
const HAS_FREE_LOCAL_NAMES = 1 << 10;
// Present if the type belongs in a local type context.
- // Only set for TyInfer other than Fresh.
+ // Only set for Infer other than Fresh.
const KEEP_IN_LOCAL_TCX = 1 << 11;
// Is there a projection that does not involve a bound region?
}
pub struct TyS<'tcx> {
- pub sty: TypeVariants<'tcx>,
+ pub sty: TyKind<'tcx>,
pub flags: TypeFlags,
/// This is a kind of confusing thing: it stores the smallest
impl<'tcx> TyS<'tcx> {
pub fn is_primitive_ty(&self) -> bool {
match self.sty {
- TypeVariants::TyBool |
- TypeVariants::TyChar |
- TypeVariants::TyInt(_) |
- TypeVariants::TyUint(_) |
- TypeVariants::TyFloat(_) |
- TypeVariants::TyInfer(InferTy::IntVar(_)) |
- TypeVariants::TyInfer(InferTy::FloatVar(_)) |
- TypeVariants::TyInfer(InferTy::FreshIntTy(_)) |
- TypeVariants::TyInfer(InferTy::FreshFloatTy(_)) => true,
- TypeVariants::TyRef(_, x, _) => x.is_primitive_ty(),
+ TyKind::Bool |
+ TyKind::Char |
+ TyKind::Int(_) |
+ TyKind::Uint(_) |
+ TyKind::Float(_) |
+ TyKind::Infer(InferTy::IntVar(_)) |
+ TyKind::Infer(InferTy::FloatVar(_)) |
+ TyKind::Infer(InferTy::FreshIntTy(_)) |
+ TyKind::Infer(InferTy::FreshFloatTy(_)) => true,
+ TyKind::Ref(_, x, _) => x.is_primitive_ty(),
_ => false,
}
}
pub fn is_suggestable(&self) -> bool {
match self.sty {
- TypeVariants::TyAnon(..) |
- TypeVariants::TyFnDef(..) |
- TypeVariants::TyFnPtr(..) |
- TypeVariants::TyDynamic(..) |
- TypeVariants::TyClosure(..) |
- TypeVariants::TyInfer(..) |
- TypeVariants::TyProjection(..) => false,
+ TyKind::Anon(..) |
+ TyKind::FnDef(..) |
+ TyKind::FnPtr(..) |
+ TyKind::Dynamic(..) |
+ TyKind::Closure(..) |
+ TyKind::Infer(..) |
+ TyKind::Projection(..) => false,
_ => true,
}
}
pub type CanonicalTy<'gcx> = Canonical<'gcx, Ty<'gcx>>;
extern {
- /// A dummy type used to force Slice to by unsized without requiring fat pointers
- type OpaqueSliceContents;
+ /// A dummy type used to force List to be unsized without requiring fat pointers
+ type OpaqueListContents;
}
/// A wrapper for slices with the additional invariant
/// the same contents can exist in the same context.
/// This means we can use pointer for both
/// equality comparisons and hashing.
+/// Note: the name `Slice` was already taken by the `Slice` variant of `Ty`.
#[repr(C)]
-pub struct Slice<T> {
+pub struct List<T> {
len: usize,
data: [T; 0],
- opaque: OpaqueSliceContents,
+ opaque: OpaqueListContents,
}
-unsafe impl<T: Sync> Sync for Slice<T> {}
+unsafe impl<T: Sync> Sync for List<T> {}
-impl<T: Copy> Slice<T> {
+impl<T: Copy> List<T> {
#[inline]
- fn from_arena<'tcx>(arena: &'tcx SyncDroplessArena, slice: &[T]) -> &'tcx Slice<T> {
+ fn from_arena<'tcx>(arena: &'tcx SyncDroplessArena, slice: &[T]) -> &'tcx List<T> {
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
assert!(slice.len() != 0);
size,
cmp::max(mem::align_of::<T>(), mem::align_of::<usize>()));
unsafe {
- let result = &mut *(mem.as_mut_ptr() as *mut Slice<T>);
+ let result = &mut *(mem.as_mut_ptr() as *mut List<T>);
// Write the length
result.len = slice.len();
}
}
-impl<T: fmt::Debug> fmt::Debug for Slice<T> {
+impl<T: fmt::Debug> fmt::Debug for List<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
-impl<T: Encodable> Encodable for Slice<T> {
+impl<T: Encodable> Encodable for List<T> {
#[inline]
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
(**self).encode(s)
}
}
-impl<T> Ord for Slice<T> where T: Ord {
- fn cmp(&self, other: &Slice<T>) -> Ordering {
+impl<T> Ord for List<T> where T: Ord {
+ fn cmp(&self, other: &List<T>) -> Ordering {
if self == other { Ordering::Equal } else {
<[T] as Ord>::cmp(&**self, &**other)
}
}
}
-impl<T> PartialOrd for Slice<T> where T: PartialOrd {
- fn partial_cmp(&self, other: &Slice<T>) -> Option<Ordering> {
+impl<T> PartialOrd for List<T> where T: PartialOrd {
+ fn partial_cmp(&self, other: &List<T>) -> Option<Ordering> {
if self == other { Some(Ordering::Equal) } else {
<[T] as PartialOrd>::partial_cmp(&**self, &**other)
}
}
}
-impl<T: PartialEq> PartialEq for Slice<T> {
+impl<T: PartialEq> PartialEq for List<T> {
#[inline]
- fn eq(&self, other: &Slice<T>) -> bool {
+ fn eq(&self, other: &List<T>) -> bool {
ptr::eq(self, other)
}
}
-impl<T: Eq> Eq for Slice<T> {}
+impl<T: Eq> Eq for List<T> {}
-impl<T> Hash for Slice<T> {
+impl<T> Hash for List<T> {
#[inline]
fn hash<H: Hasher>(&self, s: &mut H) {
- (self as *const Slice<T>).hash(s)
+ (self as *const List<T>).hash(s)
}
}
-impl<T> Deref for Slice<T> {
+impl<T> Deref for List<T> {
type Target = [T];
#[inline(always)]
fn deref(&self) -> &[T] {
}
}
-impl<'a, T> IntoIterator for &'a Slice<T> {
+impl<'a, T> IntoIterator for &'a List<T> {
type Item = &'a T;
type IntoIter = <&'a [T] as IntoIterator>::IntoIter;
#[inline(always)]
}
}
-impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice<Ty<'tcx>> {}
+impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<Ty<'tcx>> {}
-impl<T> Slice<T> {
+impl<T> List<T> {
#[inline(always)]
- pub fn empty<'a>() -> &'a Slice<T> {
+ pub fn empty<'a>() -> &'a List<T> {
#[repr(align(64), C)]
struct EmptySlice([u8; 64]);
static EMPTY_SLICE: EmptySlice = EmptySlice([0; 64]);
assert!(mem::align_of::<T>() <= 64);
unsafe {
- &*(&EMPTY_SLICE as *const _ as *const Slice<T>)
+ &*(&EMPTY_SLICE as *const _ as *const List<T>)
}
}
}
/// Obligations that the caller must satisfy. This is basically
/// the set of bounds on the in-scope type parameters, translated
/// into Obligations, and elaborated and normalized.
- pub caller_bounds: &'tcx Slice<ty::Predicate<'tcx>>,
+ pub caller_bounds: &'tcx List<ty::Predicate<'tcx>>,
/// Typically, this is `Reveal::UserFacing`, but during codegen we
/// want `Reveal::All` -- note that this is always paired with an
/// Trait`) are left hidden, so this is suitable for ordinary
/// type-checking.
pub fn empty() -> Self {
- Self::new(ty::Slice::empty(), Reveal::UserFacing)
+ Self::new(List::empty(), Reveal::UserFacing)
}
/// Construct a trait environment with no where clauses in scope
/// NB. If you want to have predicates in scope, use `ParamEnv::new`,
/// or invoke `param_env.with_reveal_all()`.
pub fn reveal_all() -> Self {
- Self::new(ty::Slice::empty(), Reveal::All)
+ Self::new(List::empty(), Reveal::All)
}
/// Construct a trait environment with the given set of predicates.
- pub fn new(caller_bounds: &'tcx ty::Slice<ty::Predicate<'tcx>>,
+ pub fn new(caller_bounds: &'tcx List<ty::Predicate<'tcx>>,
reveal: Reveal)
-> Self {
ty::ParamEnv { caller_bounds, reveal }
/// Returns this same environment but with no caller bounds.
pub fn without_caller_bounds(self) -> Self {
- ty::ParamEnv { caller_bounds: ty::Slice::empty(), ..self }
+ ty::ParamEnv { caller_bounds: List::empty(), ..self }
}
/// Creates a suitable environment in which to perform trait
ty: Ty<'tcx>)
-> Vec<Ty<'tcx>> {
let result = match ty.sty {
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) |
- TyArray(..) | TyClosure(..) | TyGenerator(..) | TyNever => {
+ Bool | Char | Int(..) | Uint(..) | Float(..) |
+ RawPtr(..) | Ref(..) | FnDef(..) | FnPtr(_) |
+ Array(..) | Closure(..) | Generator(..) | Never => {
vec![]
}
- TyStr |
- TyDynamic(..) |
- TySlice(_) |
- TyForeign(..) |
- TyError |
- TyGeneratorWitness(..) => {
+ Str |
+ Dynamic(..) |
+ Slice(_) |
+ Foreign(..) |
+ Error |
+ GeneratorWitness(..) => {
// these are never sized - return the target type
vec![ty]
}
- TyTuple(ref tys) => {
+ Tuple(ref tys) => {
match tys.last() {
None => vec![],
Some(ty) => self.sized_constraint_for_ty(tcx, ty)
}
}
- TyAdt(adt, substs) => {
+ Adt(adt, substs) => {
// recursive case
let adt_tys = adt.sized_constraint(tcx);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}",
.collect()
}
- TyProjection(..) | TyAnon(..) => {
+ Projection(..) | Anon(..) => {
// must calculate explicitly.
// FIXME: consider special-casing always-Sized projections
vec![ty]
}
- TyParam(..) => {
+ Param(..) => {
// perf hack: if there is a `T: Sized` bound, then
// we know that `T` is Sized and do not need to check
// it on the impl.
}
}
- TyInfer(..) => {
+ Infer(..) => {
bug!("unexpected type `{:?}` in sized_constraint_for_ty",
ty)
}
/// Iterator that walks the immediate children of `self`. Hence
/// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
/// (but not `i32`, like `walk`).
- pub fn walk_shallow(&'tcx self) -> AccIntoIter<walk::TypeWalkerArray<'tcx>> {
+ pub fn walk_shallow(&'tcx self) -> smallvec::IntoIter<walk::TypeWalkerArray<'tcx>> {
walk::walk_shallow(self)
}
/// - a type parameter or projection whose Sizedness can't be known
/// - a tuple of type parameters or projections, if there are multiple
/// such.
-/// - a TyError, if a type contained itself. The representability
+/// - an `Error`, if a type contained itself. The representability
/// check should catch this case.
fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
// in the `subtys` iterator (e.g., when encountering a
// projection).
match ty.sty {
- ty::TyClosure(def_id, ref substs) => {
+ ty::Closure(def_id, ref substs) => {
for upvar_ty in substs.upvar_tys(def_id, *self) {
self.compute_components(upvar_ty, out);
}
}
- ty::TyGenerator(def_id, ref substs, _) => {
+ ty::Generator(def_id, ref substs, _) => {
// Same as the closure case
for upvar_ty in substs.upvar_tys(def_id, *self) {
self.compute_components(upvar_ty, out);
}
// All regions are bound inside a witness
- ty::TyGeneratorWitness(..) => (),
+ ty::GeneratorWitness(..) => (),
// OutlivesTypeParameterEnv -- the actual checking that `X:'a`
// is implied by the environment is done in regionck.
- ty::TyParam(p) => {
+ ty::Param(p) => {
out.push(Component::Param(p));
}
// trait-ref. Therefore, if we see any higher-ranke regions,
// we simply fallback to the most restrictive rule, which
// requires that `Pi: 'a` for all `i`.
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
if !data.has_escaping_regions() {
// best case: no escaping regions, so push the
// projection and skip the subtree (thus generating no
// We assume that inference variables are fully resolved.
// So, if we encounter an inference variable, just record
// the unresolved variable as a component.
- ty::TyInfer(infer_ty) => {
+ ty::Infer(infer_ty) => {
out.push(Component::UnresolvedInferenceVariable(infer_ty));
}
// the type and then visits the types that are lexically
// contained within. (The comments refer to relevant rules
// from RFC1214.)
- ty::TyBool | // OutlivesScalar
- ty::TyChar | // OutlivesScalar
- ty::TyInt(..) | // OutlivesScalar
- ty::TyUint(..) | // OutlivesScalar
- ty::TyFloat(..) | // OutlivesScalar
- ty::TyNever | // ...
- ty::TyAdt(..) | // OutlivesNominalType
- ty::TyAnon(..) | // OutlivesNominalType (ish)
- ty::TyForeign(..) | // OutlivesNominalType
- ty::TyStr | // OutlivesScalar (ish)
- ty::TyArray(..) | // ...
- ty::TySlice(..) | // ...
- ty::TyRawPtr(..) | // ...
- ty::TyRef(..) | // OutlivesReference
- ty::TyTuple(..) | // ...
- ty::TyFnDef(..) | // OutlivesFunction (*)
- ty::TyFnPtr(_) | // OutlivesFunction (*)
- ty::TyDynamic(..) | // OutlivesObject, OutlivesFragment (*)
- ty::TyError => {
+ ty::Bool | // OutlivesScalar
+ ty::Char | // OutlivesScalar
+ ty::Int(..) | // OutlivesScalar
+ ty::Uint(..) | // OutlivesScalar
+ ty::Float(..) | // OutlivesScalar
+ ty::Never | // ...
+ ty::Adt(..) | // OutlivesNominalType
+ ty::Anon(..) | // OutlivesNominalType (ish)
+ ty::Foreign(..) | // OutlivesNominalType
+ ty::Str | // OutlivesScalar (ish)
+ ty::Array(..) | // ...
+ ty::Slice(..) | // ...
+ ty::RawPtr(..) | // ...
+ ty::Ref(..) | // OutlivesReference
+ ty::Tuple(..) | // ...
+ ty::FnDef(..) | // OutlivesFunction (*)
+ ty::FnPtr(_) | // OutlivesFunction (*)
+ ty::Dynamic(..) | // OutlivesObject, OutlivesFragment (*)
+ ty::Error => {
// (*) Bare functions and traits are both binders. In the
// RFC, this means we would add the bound regions to the
// "bound regions list". In our representation, no such
}
}
-impl<'tcx> QueryDescription<'tcx> for queries::const_value_to_allocation<'tcx> {
+impl<'tcx> QueryDescription<'tcx> for queries::const_to_allocation<'tcx> {
fn describe(_tcx: TyCtxt, val: &'tcx ty::Const<'tcx>) -> String {
- format!("converting value `{:?}` to an allocation", val)
+ format!("converting constant `{:?}` to an allocation", val)
}
}
use util::common::{ErrorReported};
use util::profiling::ProfileCategory::*;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_target::spec::PanicStrategy;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
/// Maps DefId's that have an associated Mir to the result
/// of the MIR qualify_consts pass. The actual meaning of
/// the value isn't known except to the pass itself.
- [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc<IdxSetBuf<mir::Local>>),
+ [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc<IdxSet<mir::Local>>),
/// Fetch the MIR for a given def-id right after it's built - this includes
/// unreachable code.
[] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
-> ConstEvalResult<'tcx>,
- /// Converts a constant value to an constant allocation
- [] fn const_value_to_allocation: const_value_to_allocation(
+ /// Converts a constant value to a constant allocation
+ [] fn const_to_allocation: const_to_allocation(
&'tcx ty::Const<'tcx>
) -> &'tcx Allocation,
},
DepConstructor::EraseRegionsTy { ty }
}
-fn const_value_to_allocation<'tcx>(
+fn const_to_allocation<'tcx>(
val: &'tcx ty::Const<'tcx>,
) -> DepConstructor<'tcx> {
- DepConstructor::ConstValueToAllocation { val }
+ DepConstructor::ConstToAllocation { val }
}
fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> {
DepKind::FulfillObligation |
DepKind::VtableMethods |
DepKind::EraseRegionsTy |
- DepKind::ConstValueToAllocation |
+ DepKind::ConstToAllocation |
DepKind::NormalizeProjectionTy |
DepKind::NormalizeTyAfterErasingRegions |
DepKind::ImpliedOutlivesBounds |
}
#[derive(Debug, Clone)]
-struct GeneratorWitness<'tcx>(&'tcx ty::Slice<Ty<'tcx>>);
+struct GeneratorWitness<'tcx>(&'tcx ty::List<Ty<'tcx>>);
TupleStructTypeFoldableImpl! {
impl<'tcx> TypeFoldable<'tcx> for GeneratorWitness<'tcx> {
let b_sty = &b.sty;
debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty);
match (a_sty, b_sty) {
- (&ty::TyInfer(_), _) |
- (_, &ty::TyInfer(_)) =>
+ (&ty::Infer(_), _) |
+ (_, &ty::Infer(_)) =>
{
// The caller should handle these cases!
bug!("var types encountered in super_relate_tys")
}
- (&ty::TyError, _) | (_, &ty::TyError) =>
+ (&ty::Error, _) | (_, &ty::Error) =>
{
Ok(tcx.types.err)
}
- (&ty::TyNever, _) |
- (&ty::TyChar, _) |
- (&ty::TyBool, _) |
- (&ty::TyInt(_), _) |
- (&ty::TyUint(_), _) |
- (&ty::TyFloat(_), _) |
- (&ty::TyStr, _)
+ (&ty::Never, _) |
+ (&ty::Char, _) |
+ (&ty::Bool, _) |
+ (&ty::Int(_), _) |
+ (&ty::Uint(_), _) |
+ (&ty::Float(_), _) |
+ (&ty::Str, _)
if a == b =>
{
Ok(a)
}
- (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p))
+ (&ty::Param(ref a_p), &ty::Param(ref b_p))
if a_p.idx == b_p.idx =>
{
Ok(a)
}
- (&ty::TyAdt(a_def, a_substs), &ty::TyAdt(b_def, b_substs))
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
if a_def == b_def =>
{
let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?;
Ok(tcx.mk_adt(a_def, substs))
}
- (&ty::TyForeign(a_id), &ty::TyForeign(b_id))
+ (&ty::Foreign(a_id), &ty::Foreign(b_id))
if a_id == b_id =>
{
Ok(tcx.mk_foreign(a_id))
}
- (&ty::TyDynamic(ref a_obj, ref a_region), &ty::TyDynamic(ref b_obj, ref b_region)) => {
+ (&ty::Dynamic(ref a_obj, ref a_region), &ty::Dynamic(ref b_obj, ref b_region)) => {
let region_bound = relation.with_cause(Cause::ExistentialRegionBound,
|relation| {
relation.relate_with_variance(
Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
}
- (&ty::TyGenerator(a_id, a_substs, movability),
- &ty::TyGenerator(b_id, b_substs, _))
+ (&ty::Generator(a_id, a_substs, movability),
+ &ty::Generator(b_id, b_substs, _))
if a_id == b_id =>
{
- // All TyGenerator types with the same id represent
+ // All Generator types with the same id represent
// the (anonymous) type of the same generator expression. So
// all of their regions should be equated.
let substs = relation.relate(&a_substs, &b_substs)?;
Ok(tcx.mk_generator(a_id, substs, movability))
}
- (&ty::TyGeneratorWitness(a_types), &ty::TyGeneratorWitness(b_types)) =>
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) =>
{
// Wrap our types with a temporary GeneratorWitness struct
// inside the binder so we can related them
Ok(tcx.mk_generator_witness(types))
}
- (&ty::TyClosure(a_id, a_substs),
- &ty::TyClosure(b_id, b_substs))
+ (&ty::Closure(a_id, a_substs),
+ &ty::Closure(b_id, b_substs))
if a_id == b_id =>
{
- // All TyClosure types with the same id represent
+ // All Closure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
let substs = relation.relate(&a_substs, &b_substs)?;
Ok(tcx.mk_closure(a_id, substs))
}
- (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) =>
+ (&ty::RawPtr(ref a_mt), &ty::RawPtr(ref b_mt)) =>
{
let mt = relation.relate(a_mt, b_mt)?;
Ok(tcx.mk_ptr(mt))
}
- (&ty::TyRef(a_r, a_ty, a_mutbl), &ty::TyRef(b_r, b_ty, b_mutbl)) =>
+ (&ty::Ref(a_r, a_ty, a_mutbl), &ty::Ref(b_r, b_ty, b_mutbl)) =>
{
let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?;
let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl };
Ok(tcx.mk_ref(r, mt))
}
- (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
+ (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) =>
{
let t = relation.relate(&a_t, &b_t)?;
assert_eq!(sz_a.ty, tcx.types.usize);
match (to_u64(sz_a), to_u64(sz_b)) {
(Ok(sz_a_u64), Ok(sz_b_u64)) => {
if sz_a_u64 == sz_b_u64 {
- Ok(tcx.mk_ty(ty::TyArray(t, sz_a)))
+ Ok(tcx.mk_ty(ty::Array(t, sz_a)))
} else {
Err(TypeError::FixedArraySize(
expected_found(relation, &sz_a_u64, &sz_b_u64)))
}
}
- // We reported an error or will ICE, so we can return TyError.
+ // We reported an error or will ICE, so we can return Error.
(Err(ErrorReported), _) | (_, Err(ErrorReported)) => {
Ok(tcx.types.err)
}
}
}
- (&ty::TySlice(a_t), &ty::TySlice(b_t)) =>
+ (&ty::Slice(a_t), &ty::Slice(b_t)) =>
{
let t = relation.relate(&a_t, &b_t)?;
Ok(tcx.mk_slice(t))
}
- (&ty::TyTuple(as_), &ty::TyTuple(bs)) =>
+ (&ty::Tuple(as_), &ty::Tuple(bs)) =>
{
if as_.len() == bs.len() {
Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)))?)
}
}
- (&ty::TyFnDef(a_def_id, a_substs), &ty::TyFnDef(b_def_id, b_substs))
+ (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
if a_def_id == b_def_id =>
{
let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
Ok(tcx.mk_fn_def(a_def_id, substs))
}
- (&ty::TyFnPtr(a_fty), &ty::TyFnPtr(b_fty)) =>
+ (&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) =>
{
let fty = relation.relate(&a_fty, &b_fty)?;
Ok(tcx.mk_fn_ptr(fty))
}
- (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) =>
+ (&ty::Projection(ref a_data), &ty::Projection(ref b_data)) =>
{
let projection_ty = relation.relate(a_data, b_data)?;
Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs))
}
- (&ty::TyAnon(a_def_id, a_substs), &ty::TyAnon(b_def_id, b_substs))
+ (&ty::Anon(a_def_id, a_substs), &ty::Anon(b_def_id, b_substs))
if a_def_id == b_def_id =>
{
let substs = relate_substs(relation, None, a_substs, b_substs)?;
}
}
-impl<'tcx> Relate<'tcx> for &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>> {
+impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
fn relate<'a, 'gcx, R>(relation: &mut R,
a: &Self,
b: &Self)
HeapAllocZeroBytes => HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(n) => HeapAllocNonPowerOfTwoAlignment(n),
Unreachable => Unreachable,
- Panic => Panic,
+ Panic { ref msg, ref file, line, col } => Panic {
+ msg: msg.clone(),
+ file: file.clone(),
+ line, col,
+ },
ReadFromReturnPointer => ReadFromReturnPointer,
PathNotFound(ref v) => PathNotFound(v.clone()),
UnimplementedTraitSelection => UnimplementedTraitSelection,
impl<'tcx> TypeFoldable<'tcx> for ty::ParamEnv<'tcx> { reveal, caller_bounds }
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|p| p.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_existential_predicates(&v)
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<Ty<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|t| t.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_type_list(&v)
impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let sty = match self.sty {
- ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)),
- ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz.fold_with(folder)),
- ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)),
- ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)),
- ty::TyDynamic(ref trait_ty, ref region) =>
- ty::TyDynamic(trait_ty.fold_with(folder), region.fold_with(folder)),
- ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)),
- ty::TyFnDef(def_id, substs) => {
- ty::TyFnDef(def_id, substs.fold_with(folder))
+ ty::RawPtr(tm) => ty::RawPtr(tm.fold_with(folder)),
+ ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)),
+ ty::Slice(typ) => ty::Slice(typ.fold_with(folder)),
+ ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)),
+ ty::Dynamic(ref trait_ty, ref region) =>
+ ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)),
+ ty::Tuple(ts) => ty::Tuple(ts.fold_with(folder)),
+ ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs.fold_with(folder))
}
- ty::TyFnPtr(f) => ty::TyFnPtr(f.fold_with(folder)),
- ty::TyRef(ref r, ty, mutbl) => {
- ty::TyRef(r.fold_with(folder), ty.fold_with(folder), mutbl)
+ ty::FnPtr(f) => ty::FnPtr(f.fold_with(folder)),
+ ty::Ref(ref r, ty, mutbl) => {
+ ty::Ref(r.fold_with(folder), ty.fold_with(folder), mutbl)
}
- ty::TyGenerator(did, substs, movability) => {
- ty::TyGenerator(
+ ty::Generator(did, substs, movability) => {
+ ty::Generator(
did,
substs.fold_with(folder),
movability)
}
- ty::TyGeneratorWitness(types) => ty::TyGeneratorWitness(types.fold_with(folder)),
- ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)),
- ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)),
- ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)),
- ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
- ty::TyParam(..) | ty::TyNever | ty::TyForeign(..) => return self
+ ty::GeneratorWitness(types) => ty::GeneratorWitness(types.fold_with(folder)),
+ ty::Closure(did, substs) => ty::Closure(did, substs.fold_with(folder)),
+ ty::Projection(ref data) => ty::Projection(data.fold_with(folder)),
+ ty::Anon(did, substs) => ty::Anon(did, substs.fold_with(folder)),
+ ty::Bool | ty::Char | ty::Str | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
+ ty::Param(..) | ty::Never | ty::Foreign(..) => return self
};
if self.sty == sty {
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match self.sty {
- ty::TyRawPtr(ref tm) => tm.visit_with(visitor),
- ty::TyArray(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
- ty::TySlice(typ) => typ.visit_with(visitor),
- ty::TyAdt(_, substs) => substs.visit_with(visitor),
- ty::TyDynamic(ref trait_ty, ref reg) =>
+ ty::RawPtr(ref tm) => tm.visit_with(visitor),
+ ty::Array(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor),
+ ty::Slice(typ) => typ.visit_with(visitor),
+ ty::Adt(_, substs) => substs.visit_with(visitor),
+ ty::Dynamic(ref trait_ty, ref reg) =>
trait_ty.visit_with(visitor) || reg.visit_with(visitor),
- ty::TyTuple(ts) => ts.visit_with(visitor),
- ty::TyFnDef(_, substs) => substs.visit_with(visitor),
- ty::TyFnPtr(ref f) => f.visit_with(visitor),
- ty::TyRef(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
- ty::TyGenerator(_did, ref substs, _) => {
+ ty::Tuple(ts) => ts.visit_with(visitor),
+ ty::FnDef(_, substs) => substs.visit_with(visitor),
+ ty::FnPtr(ref f) => f.visit_with(visitor),
+ ty::Ref(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor),
+ ty::Generator(_did, ref substs, _) => {
substs.visit_with(visitor)
}
- ty::TyGeneratorWitness(ref types) => types.visit_with(visitor),
- ty::TyClosure(_did, ref substs) => substs.visit_with(visitor),
- ty::TyProjection(ref data) => data.visit_with(visitor),
- ty::TyAnon(_, ref substs) => substs.visit_with(visitor),
- ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) |
- ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) |
- ty::TyParam(..) | ty::TyNever | ty::TyForeign(..) => false,
+ ty::GeneratorWitness(ref types) => types.visit_with(visitor),
+ ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+ ty::Projection(ref data) => data.visit_with(visitor),
+ ty::Anon(_, ref substs) => substs.visit_with(visitor),
+ ty::Bool | ty::Char | ty::Str | ty::Int(_) |
+ ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) |
+ ty::Param(..) | ty::Never | ty::Foreign(..) => false,
}
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<ty::Predicate<'tcx>> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let v = self.iter().map(|p| p.fold_with(folder)).collect::<AccumulateVec<[_; 8]>>();
folder.tcx().intern_predicates(&v)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! This module contains TypeVariants and its major components
+//! This module contains TyKind and its major components
use hir::def_id::DefId;
use rustc_data_structures::indexed_vec::Idx;
use ty::subst::{Substs, Subst, Kind, UnpackedKind};
use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable};
-use ty::{Slice, TyS, ParamEnvAnd, ParamEnv};
+use ty::{List, TyS, ParamEnvAnd, ParamEnv};
use util::captures::Captures;
-use mir::interpret::{Scalar, Pointer, Value};
+use mir::interpret::{Scalar, Pointer};
use std::iter;
use std::cmp::Ordering;
use hir;
use self::InferTy::*;
-use self::TypeVariants::*;
+use self::TyKind::*;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct TypeAndMut<'tcx> {
/// NB: If you change this, you'll probably want to change the corresponding
/// AST structure in libsyntax/ast.rs as well.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub enum TypeVariants<'tcx> {
+pub enum TyKind<'tcx> {
/// The primitive boolean type. Written as `bool`.
- TyBool,
+ Bool,
/// The primitive character type; holds a Unicode scalar value
/// (a non-surrogate code point). Written as `char`.
- TyChar,
+ Char,
/// A primitive signed integer type. For example, `i32`.
- TyInt(ast::IntTy),
+ Int(ast::IntTy),
/// A primitive unsigned integer type. For example, `u32`.
- TyUint(ast::UintTy),
+ Uint(ast::UintTy),
/// A primitive floating-point type. For example, `f64`.
- TyFloat(ast::FloatTy),
+ Float(ast::FloatTy),
/// Structures, enumerations and unions.
///
- /// Substs here, possibly against intuition, *may* contain `TyParam`s.
+ /// Substs here, possibly against intuition, *may* contain `Param`s.
/// That is, even after substitution it is possible that there are type
- /// variables. This happens when the `TyAdt` corresponds to an ADT
+ /// variables. This happens when the `Adt` corresponds to an ADT
/// definition and not a concrete use of it.
- TyAdt(&'tcx AdtDef, &'tcx Substs<'tcx>),
+ Adt(&'tcx AdtDef, &'tcx Substs<'tcx>),
- TyForeign(DefId),
+ Foreign(DefId),
/// The pointee of a string slice. Written as `str`.
- TyStr,
+ Str,
/// An array with the given length. Written as `[T; n]`.
- TyArray(Ty<'tcx>, &'tcx ty::Const<'tcx>),
+ Array(Ty<'tcx>, &'tcx ty::Const<'tcx>),
/// The pointee of an array slice. Written as `[T]`.
- TySlice(Ty<'tcx>),
+ Slice(Ty<'tcx>),
/// A raw pointer. Written as `*mut T` or `*const T`
- TyRawPtr(TypeAndMut<'tcx>),
+ RawPtr(TypeAndMut<'tcx>),
/// A reference; a pointer with an associated lifetime. Written as
/// `&'a mut T` or `&'a T`.
- TyRef(Region<'tcx>, Ty<'tcx>, hir::Mutability),
+ Ref(Region<'tcx>, Ty<'tcx>, hir::Mutability),
/// The anonymous type of a function declaration/definition. Each
/// function has a unique type.
- TyFnDef(DefId, &'tcx Substs<'tcx>),
+ FnDef(DefId, &'tcx Substs<'tcx>),
/// A pointer to a function. Written as `fn() -> i32`.
- TyFnPtr(PolyFnSig<'tcx>),
+ FnPtr(PolyFnSig<'tcx>),
/// A trait, defined with `trait`.
- TyDynamic(Binder<&'tcx Slice<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),
+ Dynamic(Binder<&'tcx List<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),
/// The anonymous type of a closure. Used to represent the type of
/// `|a| a`.
- TyClosure(DefId, ClosureSubsts<'tcx>),
+ Closure(DefId, ClosureSubsts<'tcx>),
/// The anonymous type of a generator. Used to represent the type of
/// `|a| yield a`.
- TyGenerator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability),
+ Generator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability),
/// A type representing the types stored inside a generator.
/// This should only appear in GeneratorInteriors.
- TyGeneratorWitness(Binder<&'tcx Slice<Ty<'tcx>>>),
+ GeneratorWitness(Binder<&'tcx List<Ty<'tcx>>>),
/// The never type `!`
- TyNever,
+ Never,
/// A tuple type. For example, `(i32, bool)`.
- TyTuple(&'tcx Slice<Ty<'tcx>>),
+ Tuple(&'tcx List<Ty<'tcx>>),
/// The projection of an associated type. For example,
/// `<T as Trait<..>>::N`.
- TyProjection(ProjectionTy<'tcx>),
+ Projection(ProjectionTy<'tcx>),
/// Anonymized (`impl Trait`) type found in a return type.
/// The DefId comes either from
/// * or the `existential type` declaration
/// The substitutions are for the generics of the function in question.
/// After typeck, the concrete type can be found in the `types` map.
- TyAnon(DefId, &'tcx Substs<'tcx>),
+ Anon(DefId, &'tcx Substs<'tcx>),
/// A type parameter; for example, `T` in `fn f<T>(x: T) {}`
- TyParam(ParamTy),
+ Param(ParamTy),
/// A type variable used during type-checking.
- TyInfer(InferTy),
+ Infer(InferTy),
/// A placeholder for a type which could not be computed; this is
/// propagated to avoid useless error messages.
- TyError,
+ Error,
}
/// A closure can be modeled as a struct that looks like:
/// If you have an inference context, use `infcx.closure_sig()`.
pub fn closure_sig(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> {
match self.closure_sig_ty(def_id, tcx).sty {
- ty::TyFnPtr(sig) => sig,
+ ty::FnPtr(sig) => sig,
ref t => bug!("closure_sig_ty is not a fn-ptr: {:?}", t),
}
}
}
}
-impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice<ExistentialPredicate<'tcx>> {}
+impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<ExistentialPredicate<'tcx>> {}
-impl<'tcx> Slice<ExistentialPredicate<'tcx>> {
+impl<'tcx> List<ExistentialPredicate<'tcx>> {
pub fn principal(&self) -> Option<ExistentialTraitRef<'tcx>> {
match self.get(0) {
Some(&ExistentialPredicate::Trait(tr)) => Some(tr),
}
}
-impl<'tcx> Binder<&'tcx Slice<ExistentialPredicate<'tcx>>> {
+impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
pub fn principal(&self) -> Option<PolyExistentialTraitRef<'tcx>> {
self.skip_binder().principal().map(Binder::bind)
}
/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct FnSig<'tcx> {
- pub inputs_and_output: &'tcx Slice<Ty<'tcx>>,
+ pub inputs_and_output: &'tcx List<Ty<'tcx>>,
pub variadic: bool,
pub unsafety: hir::Unsafety,
pub abi: abi::Abi,
pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
}
- pub fn inputs_and_output(&self) -> ty::Binder<&'tcx Slice<Ty<'tcx>>> {
+ pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List<Ty<'tcx>>> {
self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
}
pub fn output(&self) -> ty::Binder<Ty<'tcx>> {
/// the likes of `liberate_late_bound_regions`. The distinction exists
/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
///
-/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild"
+/// Unlike Param-s, bound regions are not supposed to exist "in the wild"
/// outside their binder, e.g. in types passed to type inference, and
/// should first be substituted (by skolemized regions, free regions,
/// or region variables).
impl<'a, 'gcx, 'tcx> TyS<'tcx> {
pub fn is_nil(&self) -> bool {
match self.sty {
- TyTuple(ref tys) => tys.is_empty(),
+ Tuple(ref tys) => tys.is_empty(),
_ => false,
}
}
pub fn is_never(&self) -> bool {
match self.sty {
- TyNever => true,
+ Never => true,
_ => false,
}
}
pub fn is_primitive(&self) -> bool {
match self.sty {
- TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true,
+ Bool | Char | Int(_) | Uint(_) | Float(_) => true,
_ => false,
}
}
pub fn is_ty_var(&self) -> bool {
match self.sty {
- TyInfer(TyVar(_)) => true,
+ Infer(TyVar(_)) => true,
_ => false,
}
}
pub fn is_ty_infer(&self) -> bool {
match self.sty {
- TyInfer(_) => true,
+ Infer(_) => true,
_ => false,
}
}
pub fn is_phantom_data(&self) -> bool {
- if let TyAdt(def, _) = self.sty {
+ if let Adt(def, _) = self.sty {
def.is_phantom_data()
} else {
false
}
}
- pub fn is_bool(&self) -> bool { self.sty == TyBool }
+ pub fn is_bool(&self) -> bool { self.sty == Bool }
pub fn is_param(&self, index: u32) -> bool {
match self.sty {
- ty::TyParam(ref data) => data.idx == index,
+ ty::Param(ref data) => data.idx == index,
_ => false,
}
}
pub fn is_self(&self) -> bool {
match self.sty {
- TyParam(ref p) => p.is_self(),
+ Param(ref p) => p.is_self(),
_ => false,
}
}
pub fn is_slice(&self) -> bool {
match self.sty {
- TyRawPtr(TypeAndMut { ty, .. }) | TyRef(_, ty, _) => match ty.sty {
- TySlice(_) | TyStr => true,
+ RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.sty {
+ Slice(_) | Str => true,
_ => false,
},
_ => false
#[inline]
pub fn is_simd(&self) -> bool {
match self.sty {
- TyAdt(def, _) => def.repr.simd(),
+ Adt(def, _) => def.repr.simd(),
_ => false,
}
}
pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
- TyArray(ty, _) | TySlice(ty) => ty,
- TyStr => tcx.mk_mach_uint(ast::UintTy::U8),
+ Array(ty, _) | Slice(ty) => ty,
+ Str => tcx.mk_mach_uint(ast::UintTy::U8),
_ => bug!("sequence_element_type called on non-sequence value: {}", self),
}
}
pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
match self.sty {
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
def.non_enum_variant().fields[0].ty(tcx, substs)
}
_ => bug!("simd_type called on invalid type")
pub fn simd_size(&self, _cx: TyCtxt) -> usize {
match self.sty {
- TyAdt(def, _) => def.non_enum_variant().fields.len(),
+ Adt(def, _) => def.non_enum_variant().fields.len(),
_ => bug!("simd_size called on invalid type")
}
}
pub fn is_region_ptr(&self) -> bool {
match self.sty {
- TyRef(..) => true,
+ Ref(..) => true,
_ => false,
}
}
pub fn is_mutable_pointer(&self) -> bool {
match self.sty {
- TyRawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) |
- TyRef(_, _, hir::Mutability::MutMutable) => true,
+ RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) |
+ Ref(_, _, hir::Mutability::MutMutable) => true,
_ => false
}
}
pub fn is_unsafe_ptr(&self) -> bool {
match self.sty {
- TyRawPtr(_) => return true,
+ RawPtr(_) => return true,
_ => return false,
}
}
pub fn is_box(&self) -> bool {
match self.sty {
- TyAdt(def, _) => def.is_box(),
+ Adt(def, _) => def.is_box(),
_ => false,
}
}
/// panics if called on any type other than `Box<T>`
pub fn boxed_ty(&self) -> Ty<'tcx> {
match self.sty {
- TyAdt(def, substs) if def.is_box() => substs.type_at(0),
+ Adt(def, substs) if def.is_box() => substs.type_at(0),
_ => bug!("`boxed_ty` is called on non-box type {:?}", self),
}
}
/// A scalar type is one that denotes an atomic datum, with no sub-components.
- /// (A TyRawPtr is scalar because it represents a non-managed pointer, so its
+ /// (A RawPtr is scalar because it represents a non-managed pointer, so its
/// contents are abstract to rustc.)
pub fn is_scalar(&self) -> bool {
match self.sty {
- TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) |
- TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) |
- TyFnDef(..) | TyFnPtr(_) | TyRawPtr(_) => true,
+ Bool | Char | Int(_) | Float(_) | Uint(_) |
+ Infer(IntVar(_)) | Infer(FloatVar(_)) |
+ FnDef(..) | FnPtr(_) | RawPtr(_) => true,
_ => false
}
}
/// Returns true if this type is a floating point type and false otherwise.
pub fn is_floating_point(&self) -> bool {
match self.sty {
- TyFloat(_) |
- TyInfer(FloatVar(_)) => true,
+ Float(_) |
+ Infer(FloatVar(_)) => true,
_ => false,
}
}
pub fn is_trait(&self) -> bool {
match self.sty {
- TyDynamic(..) => true,
+ Dynamic(..) => true,
_ => false,
}
}
pub fn is_enum(&self) -> bool {
match self.sty {
- TyAdt(adt_def, _) => {
+ Adt(adt_def, _) => {
adt_def.is_enum()
}
_ => false,
pub fn is_closure(&self) -> bool {
match self.sty {
- TyClosure(..) => true,
+ Closure(..) => true,
_ => false,
}
}
pub fn is_generator(&self) -> bool {
match self.sty {
- TyGenerator(..) => true,
+ Generator(..) => true,
_ => false,
}
}
pub fn is_integral(&self) -> bool {
match self.sty {
- TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true,
+ Infer(IntVar(_)) | Int(_) | Uint(_) => true,
_ => false
}
}
pub fn is_fresh_ty(&self) -> bool {
match self.sty {
- TyInfer(FreshTy(_)) => true,
+ Infer(FreshTy(_)) => true,
_ => false,
}
}
pub fn is_fresh(&self) -> bool {
match self.sty {
- TyInfer(FreshTy(_)) => true,
- TyInfer(FreshIntTy(_)) => true,
- TyInfer(FreshFloatTy(_)) => true,
+ Infer(FreshTy(_)) => true,
+ Infer(FreshIntTy(_)) => true,
+ Infer(FreshFloatTy(_)) => true,
_ => false,
}
}
pub fn is_char(&self) -> bool {
match self.sty {
- TyChar => true,
+ Char => true,
_ => false,
}
}
pub fn is_fp(&self) -> bool {
match self.sty {
- TyInfer(FloatVar(_)) | TyFloat(_) => true,
+ Infer(FloatVar(_)) | Float(_) => true,
_ => false
}
}
pub fn is_signed(&self) -> bool {
match self.sty {
- TyInt(_) => true,
+ Int(_) => true,
_ => false,
}
}
pub fn is_machine(&self) -> bool {
match self.sty {
- TyInt(ast::IntTy::Isize) | TyUint(ast::UintTy::Usize) => false,
- TyInt(..) | TyUint(..) | TyFloat(..) => true,
+ Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => false,
+ Int(..) | Uint(..) | Float(..) => true,
_ => false,
}
}
pub fn has_concrete_skeleton(&self) -> bool {
match self.sty {
- TyParam(_) | TyInfer(_) | TyError => false,
+ Param(_) | Infer(_) | Error => false,
_ => true,
}
}
/// Some types---notably unsafe ptrs---can only be dereferenced explicitly.
pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
match self.sty {
- TyAdt(def, _) if def.is_box() => {
+ Adt(def, _) if def.is_box() => {
Some(TypeAndMut {
ty: self.boxed_ty(),
mutbl: hir::MutImmutable,
})
},
- TyRef(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }),
- TyRawPtr(mt) if explicit => Some(mt),
+ Ref(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }),
+ RawPtr(mt) if explicit => Some(mt),
_ => None,
}
}
/// Returns the type of `ty[i]`.
pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
match self.sty {
- TyArray(ty, _) | TySlice(ty) => Some(ty),
+ Array(ty, _) | Slice(ty) => Some(ty),
_ => None,
}
}
pub fn fn_sig(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PolyFnSig<'tcx> {
match self.sty {
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
tcx.fn_sig(def_id).subst(tcx, substs)
}
- TyFnPtr(f) => f,
+ FnPtr(f) => f,
_ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self)
}
}
pub fn is_fn(&self) -> bool {
match self.sty {
- TyFnDef(..) | TyFnPtr(_) => true,
+ FnDef(..) | FnPtr(_) => true,
_ => false,
}
}
pub fn is_impl_trait(&self) -> bool {
match self.sty {
- TyAnon(..) => true,
+ Anon(..) => true,
_ => false,
}
}
pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> {
match self.sty {
- TyAdt(adt, _) => Some(adt),
+ Adt(adt, _) => Some(adt),
_ => None,
}
}
/// ignores late-bound regions binders.
pub fn regions(&self) -> Vec<ty::Region<'tcx>> {
match self.sty {
- TyRef(region, _, _) => {
+ Ref(region, _, _) => {
vec![region]
}
- TyDynamic(ref obj, region) => {
+ Dynamic(ref obj, region) => {
let mut v = vec![region];
if let Some(p) = obj.principal() {
v.extend(p.skip_binder().substs.regions());
}
v
}
- TyAdt(_, substs) | TyAnon(_, substs) => {
+ Adt(_, substs) | Anon(_, substs) => {
substs.regions().collect()
}
- TyClosure(_, ClosureSubsts { ref substs }) |
- TyGenerator(_, GeneratorSubsts { ref substs }, _) => {
+ Closure(_, ClosureSubsts { ref substs }) |
+ Generator(_, GeneratorSubsts { ref substs }, _) => {
substs.regions().collect()
}
- TyProjection(ref data) => {
+ Projection(ref data) => {
data.substs.regions().collect()
}
- TyFnDef(..) |
- TyFnPtr(_) |
- TyGeneratorWitness(..) |
- TyBool |
- TyChar |
- TyInt(_) |
- TyUint(_) |
- TyFloat(_) |
- TyStr |
- TyArray(..) |
- TySlice(_) |
- TyRawPtr(_) |
- TyNever |
- TyTuple(..) |
- TyForeign(..) |
- TyParam(_) |
- TyInfer(_) |
- TyError => {
+ FnDef(..) |
+ FnPtr(_) |
+ GeneratorWitness(..) |
+ Bool |
+ Char |
+ Int(_) |
+ Uint(_) |
+ Float(_) |
+ Str |
+ Array(..) |
+ Slice(_) |
+ RawPtr(_) |
+ Never |
+ Tuple(..) |
+ Foreign(..) |
+ Param(_) |
+ Infer(_) |
+ Error => {
vec![]
}
}
/// is complete, that type variable will be unified.
pub fn to_opt_closure_kind(&self) -> Option<ty::ClosureKind> {
match self.sty {
- TyInt(int_ty) => match int_ty {
+ Int(int_ty) => match int_ty {
ast::IntTy::I8 => Some(ty::ClosureKind::Fn),
ast::IntTy::I16 => Some(ty::ClosureKind::FnMut),
ast::IntTy::I32 => Some(ty::ClosureKind::FnOnce),
_ => bug!("cannot convert type `{:?}` to a closure kind", self),
},
- TyInfer(_) => None,
+ Infer(_) => None,
- TyError => Some(ty::ClosureKind::Fn),
+ Error => Some(ty::ClosureKind::Fn),
_ => bug!("cannot convert type `{:?}` to a closure kind", self),
}
/// `false` means nothing -- could be sized, might not be.
pub fn is_trivially_sized(&self, tcx: TyCtxt<'_, '_, 'tcx>) -> bool {
match self.sty {
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
- ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) |
- ty::TyNever | ty::TyError =>
+ ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) |
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) |
+ ty::Char | ty::Ref(..) | ty::Generator(..) |
+ ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) |
+ ty::Never | ty::Error =>
true,
- ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) =>
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) =>
false,
- ty::TyTuple(tys) =>
+ ty::Tuple(tys) =>
tys.iter().all(|ty| ty.is_trivially_sized(tcx)),
- ty::TyAdt(def, _substs) =>
+ ty::Adt(def, _substs) =>
def.sized_constraint(tcx).is_empty(),
- ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => false,
+ ty::Projection(_) | ty::Param(_) | ty::Anon(..) => false,
- ty::TyInfer(ty::TyVar(_)) => false,
+ ty::Infer(ty::TyVar(_)) => false,
- ty::TyInfer(ty::CanonicalTy(_)) |
- ty::TyInfer(ty::FreshTy(_)) |
- ty::TyInfer(ty::FreshIntTy(_)) |
- ty::TyInfer(ty::FreshFloatTy(_)) =>
+ ty::Infer(ty::CanonicalTy(_)) |
+ ty::Infer(ty::FreshTy(_)) |
+ ty::Infer(ty::FreshIntTy(_)) |
+ ty::Infer(ty::FreshFloatTy(_)) =>
bug!("is_trivially_sized applied to unexpected type: {:?}", self),
}
}
}
let ty = tcx.lift_to_global(&ty).unwrap();
let size = tcx.layout_of(ty).ok()?.size;
- self.val.to_bits(size)
+ self.val.try_to_bits(size)
}
#[inline]
pub fn to_ptr(&self) -> Option<Pointer> {
- self.val.to_ptr()
- }
-
- #[inline]
- pub fn to_byval_value(&self) -> Option<Value> {
- self.val.to_byval_value()
+ self.val.try_to_ptr()
}
#[inline]
assert_eq!(self.ty, ty.value);
let ty = tcx.lift_to_global(&ty).unwrap();
let size = tcx.layout_of(ty).ok()?.size;
- self.val.to_bits(size)
+ self.val.try_to_bits(size)
}
#[inline]
// Type substitutions.
use hir::def_id::DefId;
-use ty::{self, Lift, Slice, Ty, TyCtxt};
+use ty::{self, Lift, List, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use serialize::{self, Encodable, Encoder, Decodable, Decoder};
}
/// A substitution mapping generic parameters to new values.
-pub type Substs<'tcx> = Slice<Kind<'tcx>>;
+pub type Substs<'tcx> = List<Kind<'tcx>>;
impl<'a, 'gcx, 'tcx> Substs<'tcx> {
/// Creates a Substs that maps each generic parameter to itself.
self.ty_stack_depth += 1;
let t1 = match t.sty {
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.ty_for_param(p, t)
}
_ => {
use ty::{self, Ty, TyCtxt, GenericParamDefKind, TypeFoldable};
use ty::subst::{Substs, UnpackedKind};
use ty::query::TyCtxtAt;
-use ty::TypeVariants::*;
+use ty::TyKind::*;
use ty::layout::{Integer, IntegerExt};
use util::common::ErrorReported;
use middle::lang_items;
impl<'tcx> fmt::Display for Discr<'tcx> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.ty.sty {
- ty::TyInt(ity) => {
+ ty::Int(ity) => {
let bits = ty::tls::with(|tcx| {
Integer::from_attr(tcx, SignedInt(ity)).size().bits()
});
}
pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) {
let (int, signed) = match self.ty.sty {
- TyInt(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
- TyUint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
+ Int(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true),
+ Uint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false),
_ => bug!("non integer discriminant"),
};
let (adt, substs) = match self_type.sty {
// These types used to have a builtin impl.
// Now libcore provides that impl.
- ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
- ty::TyChar | ty::TyRawPtr(..) | ty::TyNever |
- ty::TyRef(_, _, hir::MutImmutable) => return Ok(()),
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) |
+ ty::Char | ty::RawPtr(..) | ty::Never |
+ ty::Ref(_, _, hir::MutImmutable) => return Ok(()),
- ty::TyAdt(adt, substs) => (adt, substs),
+ ty::Adt(adt, substs) => (adt, substs),
_ => return Err(CopyImplementationError::NotAnAdt),
};
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
for field in def.all_fields() {
let field_ty = field.ty(self, substs);
- if let TyError = field_ty.sty {
+ if let Error = field_ty.sty {
return true;
}
}
pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
loop {
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if !def.is_struct() {
break;
}
}
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
if let Some((&last_ty, _)) = tys.split_last() {
ty = last_ty;
} else {
let (mut a, mut b) = (source, target);
loop {
match (&a.sty, &b.sty) {
- (&TyAdt(a_def, a_substs), &TyAdt(b_def, b_substs))
+ (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
if a_def == b_def && a_def.is_struct() => {
if let Some(f) = a_def.non_enum_variant().fields.last() {
a = f.ty(self, a_substs);
break;
}
},
- (&TyTuple(a_tys), &TyTuple(b_tys))
+ (&Tuple(a_tys), &Tuple(b_tys))
if a_tys.len() == b_tys.len() => {
if let Some(a_last) = a_tys.last() {
a = a_last;
// parameters marked as pure.
let impl_substs = match self.type_of(impl_def_id).sty {
- ty::TyAdt(def_, substs) if def_ == def => substs,
+ ty::Adt(def_, substs) if def_ == def => substs,
_ => bug!()
};
let item_substs = match self.type_of(def.did).sty {
- ty::TyAdt(def_, substs) if def_ == def => substs,
+ ty::Adt(def_, substs) if def_ == def => substs,
_ => bug!()
};
!impl_generics.region_param(ebr, self).pure_wrt_drop
}
UnpackedKind::Type(&ty::TyS {
- sty: ty::TypeVariants::TyParam(ref pt), ..
+ sty: ty::Param(ref pt), ..
}) => {
!impl_generics.type_param(pt, self).pure_wrt_drop
}
-> Representability
{
match ty.sty {
- TyTuple(ref ts) => {
+ Tuple(ref ts) => {
// Find non representable
fold_repr(ts.iter().map(|ty| {
is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
}
// Fixed-length vectors.
// FIXME(#11924) Behavior undecided for zero-length vectors.
- TyArray(ty, _) => {
+ Array(ty, _) => {
is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
}
- TyAdt(def, substs) => {
+ Adt(def, substs) => {
// Find non representable fields with their spans
fold_repr(def.all_fields().map(|field| {
let ty = field.ty(tcx, substs);
}
}))
}
- TyClosure(..) => {
+ Closure(..) => {
// this check is run on type definitions, so we don't expect
// to see closure types
bug!("requires check invoked on inapplicable type: {:?}", ty)
fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
match ty.sty {
- TyAdt(ty_def, _) => {
+ Adt(ty_def, _) => {
ty_def == def
}
_ => false
fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
match (&a.sty, &b.sty) {
- (&TyAdt(did_a, substs_a), &TyAdt(did_b, substs_b)) => {
+ (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
if did_a != did_b {
return false;
}
ty: Ty<'tcx>) -> Representability
{
match ty.sty {
- TyAdt(def, _) => {
+ Adt(def, _) => {
{
// Iterate through stack of previously seen types.
let mut iter = seen.iter();
match ty.sty {
// Fast-path for primitive types
- ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
- ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | ty::TyGeneratorWitness(..) |
- ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyStr => false,
+ ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) |
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never |
+ ty::FnDef(..) | ty::FnPtr(_) | ty::Char | ty::GeneratorWitness(..) |
+ ty::RawPtr(_) | ty::Ref(..) | ty::Str => false,
// Foreign types can never have destructors
- ty::TyForeign(..) => false,
+ ty::Foreign(..) => false,
// `ManuallyDrop` doesn't have a destructor regardless of field types.
- ty::TyAdt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false,
+ ty::Adt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false,
// Issue #22536: We first query type_moves_by_default. It sees a
// normalized version of the type, and therefore will definitely
// (see above), it is sound to treat it as having a destructor.
// User destructors are the only way to have concrete drop types.
- ty::TyAdt(def, _) if def.has_dtor(tcx) => true,
+ ty::Adt(def, _) if def.has_dtor(tcx) => true,
// Can refer to a type which may drop.
// FIXME(eddyb) check this against a ParamEnv.
- ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyParam(_) |
- ty::TyAnon(..) | ty::TyInfer(_) | ty::TyError => true,
+ ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) |
+ ty::Anon(..) | ty::Infer(_) | ty::Error => true,
// Structural recursion.
- ty::TyArray(ty, _) | ty::TySlice(ty) => needs_drop(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => needs_drop(ty),
- ty::TyClosure(def_id, ref substs) => substs.upvar_tys(def_id, tcx).any(needs_drop),
+ ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, tcx).any(needs_drop),
// Pessimistically assume that all generators will require destructors
// as we don't know if a destructor is a noop or not until after the MIR
// state transformation pass
- ty::TyGenerator(..) => true,
+ ty::Generator(..) => true,
- ty::TyTuple(ref tys) => tys.iter().cloned().any(needs_drop),
+ ty::Tuple(ref tys) => tys.iter().cloned().any(needs_drop),
// unions don't have destructors because of the child types,
// only if they manually implement `Drop` (handled above).
- ty::TyAdt(def, _) if def.is_union() => false,
+ ty::Adt(def, _) if def.is_union() => false,
- ty::TyAdt(def, substs) =>
+ ty::Adt(def, substs) =>
def.variants.iter().any(
|variant| variant.fields.iter().any(
|field| needs_drop(field.ty(tcx, substs)))),
match self_arg_ty.sty {
_ if is_self_ty(self_arg_ty) => ByValue,
- ty::TyRef(region, ty, mutbl) if is_self_ty(ty) => {
+ ty::Ref(region, ty, mutbl) if is_self_ty(ty) => {
ByReference(region, mutbl)
}
- ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => {
ByRawPointer(mutbl)
}
- ty::TyAdt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => {
+ ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => {
ByBox
}
_ => Other
use mir::interpret::ConstValue;
use ty::{self, Ty};
-use rustc_data_structures::small_vec::SmallVec;
-use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter;
+use smallvec::{self, SmallVec};
// The TypeWalker's stack is hot enough that it's worth going to some effort to
// avoid heap allocations.
impl<'tcx> TypeWalker<'tcx> {
pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> {
- TypeWalker { stack: SmallVec::one(ty), last_subtree: 1, }
+ TypeWalker { stack: smallvec![ty], last_subtree: 1, }
}
/// Skips the subtree of types corresponding to the last type
}
}
-pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> AccIntoIter<TypeWalkerArray<'tcx>> {
+pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> smallvec::IntoIter<TypeWalkerArray<'tcx>> {
let mut stack = SmallVec::new();
push_subtypes(&mut stack, ty);
stack.into_iter()
// types as they are written).
fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) {
match parent_ty.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
- ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyNever | ty::TyError |
- ty::TyForeign(..) => {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) |
+ ty::Str | ty::Infer(_) | ty::Param(_) | ty::Never | ty::Error |
+ ty::Foreign(..) => {
}
- ty::TyArray(ty, len) => {
+ ty::Array(ty, len) => {
push_const(stack, len);
stack.push(ty);
}
- ty::TySlice(ty) => {
+ ty::Slice(ty) => {
stack.push(ty);
}
- ty::TyRawPtr(ref mt) => {
+ ty::RawPtr(ref mt) => {
stack.push(mt.ty);
}
- ty::TyRef(_, ty, _) => {
+ ty::Ref(_, ty, _) => {
stack.push(ty);
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
stack.extend(data.substs.types().rev());
}
- ty::TyDynamic(ref obj, ..) => {
+ ty::Dynamic(ref obj, ..) => {
stack.extend(obj.iter().rev().flat_map(|predicate| {
let (substs, opt_ty) = match *predicate.skip_binder() {
ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
substs.types().rev().chain(opt_ty)
}));
}
- ty::TyAdt(_, substs) | ty::TyAnon(_, substs) => {
+ ty::Adt(_, substs) | ty::Anon(_, substs) => {
stack.extend(substs.types().rev());
}
- ty::TyClosure(_, ref substs) => {
+ ty::Closure(_, ref substs) => {
stack.extend(substs.substs.types().rev());
}
- ty::TyGenerator(_, ref substs, _) => {
+ ty::Generator(_, ref substs, _) => {
stack.extend(substs.substs.types().rev());
}
- ty::TyGeneratorWitness(ts) => {
+ ty::GeneratorWitness(ts) => {
stack.extend(ts.skip_binder().iter().cloned().rev());
}
- ty::TyTuple(ts) => {
+ ty::Tuple(ts) => {
stack.extend(ts.iter().cloned().rev());
}
- ty::TyFnDef(_, substs) => {
+ ty::FnDef(_, substs) => {
stack.extend(substs.types().rev());
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
stack.push(sig.skip_binder().output());
stack.extend(sig.skip_binder().inputs().iter().cloned().rev());
}
let param_env = self.param_env;
while let Some(ty) = subtys.next() {
match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(..) |
- ty::TyUint(..) |
- ty::TyFloat(..) |
- ty::TyError |
- ty::TyStr |
- ty::TyGeneratorWitness(..) |
- ty::TyNever |
- ty::TyParam(_) |
- ty::TyForeign(..) => {
+ ty::Bool |
+ ty::Char |
+ ty::Int(..) |
+ ty::Uint(..) |
+ ty::Float(..) |
+ ty::Error |
+ ty::Str |
+ ty::GeneratorWitness(..) |
+ ty::Never |
+ ty::Param(_) |
+ ty::Foreign(..) => {
// WfScalar, WfParameter, etc
}
- ty::TySlice(subty) => {
+ ty::Slice(subty) => {
self.require_sized(subty, traits::SliceOrArrayElem);
}
- ty::TyArray(subty, len) => {
+ ty::Array(subty, len) => {
self.require_sized(subty, traits::SliceOrArrayElem);
assert_eq!(len.ty, self.infcx.tcx.types.usize);
self.compute_const(len);
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
if let Some((_last, rest)) = tys.split_last() {
for elem in rest {
self.require_sized(elem, traits::TupleElem);
}
}
- ty::TyRawPtr(_) => {
+ ty::RawPtr(_) => {
// simple cases that are WF if their type args are WF
}
- ty::TyProjection(data) => {
+ ty::Projection(data) => {
subtys.skip_current_subtree(); // subtree handled by compute_projection
self.compute_projection(data);
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// WfNominalType
let obligations = self.nominal_obligations(def.did, substs);
self.out.extend(obligations);
}
- ty::TyRef(r, rty, _) => {
+ ty::Ref(r, rty, _) => {
// WfReference
if !r.has_escaping_regions() && !rty.has_escaping_regions() {
let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
}
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
// Walk ALL the types in the generator: this will
// include the upvar types as well as the yield
// type. Note that this is mildly distinct from
// generators don't take arguments.
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
// Only check the upvar types for WF, not the rest
// of the types within. This is needed because we
// capture the signature and it may not be WF
}
}
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
// let the loop iterate into the argument/return
// types appearing in the fn signature
}
- ty::TyAnon(did, substs) => {
+ ty::Anon(did, substs) => {
// all of the requirements on type parameters
// should've been checked by the instantiation
// of whatever returned this exact `impl Trait`.
}
}
- ty::TyDynamic(data, r) => {
+ ty::Dynamic(data, r) => {
// WfObject
//
// Here, we defer WF checking due to higher-ranked
// register a pending obligation and keep
// moving. (Goal is that an "inductive hypothesis"
// is satisfied to ensure termination.)
- ty::TyInfer(_) => {
+ ty::Infer(_) => {
let ty = self.infcx.shallow_resolve(ty);
- if let ty::TyInfer(_) = ty.sty { // not yet resolved...
+ if let ty::Infer(_) = ty.sty { // not yet resolved...
if ty == ty0 { // ...this is the type we started from! no progress.
return false;
}
} else {
// Yes, resolved, proceed with the
// result. Should never return false because
- // `ty` is not a TyInfer.
+ // `ty` is not a Infer.
assert!(self.compute(ty));
}
}
}
fn from_object_ty(&mut self, ty: Ty<'tcx>,
- data: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>,
+ data: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
region: ty::Region<'tcx>) {
// Imagine a type like this:
//
/// `ty::required_region_bounds`, see that for more information.
pub fn object_region_bounds<'a, 'gcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- existential_predicates: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>)
+ existential_predicates: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>)
-> Vec<ty::Region<'tcx>>
{
// Since we don't actually *know* the self type for an object,
let mb = n as f64 / 1_000_000.0;
format!("; rss: {}MB", mb.round() as usize)
}
- None => "".to_owned(),
+ None => String::new(),
};
println!("{}time: {}{}\t{}",
" ".repeat(indentation),
use middle::region::{self, BlockRemainder};
use ty::subst::{self, Subst};
use ty::{BrAnon, BrEnv, BrFresh, BrNamed};
-use ty::{TyBool, TyChar, TyAdt};
-use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr};
-use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple};
-use ty::{TyClosure, TyGenerator, TyGeneratorWitness, TyForeign, TyProjection, TyAnon};
-use ty::{TyDynamic, TyInt, TyUint, TyInfer};
+use ty::{Bool, Char, Adt};
+use ty::{Error, Str, Array, Slice, Float, FnDef, FnPtr};
+use ty::{Param, RawPtr, Ref, Never, Tuple};
+use ty::{Closure, Generator, GeneratorWitness, Foreign, Projection, Anon};
+use ty::{Dynamic, Int, Uint, Infer};
use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind};
use util::nodemap::FxHashSet;
if !verbose && fn_trait_kind.is_some() && projections.len() == 1 {
let projection_ty = projections[0].ty;
- if let TyTuple(ref args) = substs.type_at(1).sty {
+ if let Tuple(ref args) = substs.type_at(1).sty {
return self.fn_sig(f, args, false, projection_ty);
}
}
}
define_print! {
- ('tcx) &'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>, (self, f, cx) {
+ ('tcx) &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, (self, f, cx) {
display {
// Generate the main trait ref, including associated types.
ty::tls::with(|tcx| {
}
define_print! {
- ('tcx) &'tcx ty::Slice<Ty<'tcx>>, (self, f, cx) {
+ ('tcx) &'tcx ty::List<Ty<'tcx>>, (self, f, cx) {
display {
write!(f, "{{")?;
let mut tys = self.iter();
define_print_multi! {
[
- ('tcx) ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>,
+ ('tcx) ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>,
('tcx) ty::Binder<ty::TraitRef<'tcx>>,
('tcx) ty::Binder<ty::FnSig<'tcx>>,
('tcx) ty::Binder<ty::TraitPredicate<'tcx>>,
}
define_print! {
- ('tcx) ty::TypeVariants<'tcx>, (self, f, cx) {
+ ('tcx) ty::TyKind<'tcx>, (self, f, cx) {
display {
match *self {
- TyBool => write!(f, "bool"),
- TyChar => write!(f, "char"),
- TyInt(t) => write!(f, "{}", t.ty_to_string()),
- TyUint(t) => write!(f, "{}", t.ty_to_string()),
- TyFloat(t) => write!(f, "{}", t.ty_to_string()),
- TyRawPtr(ref tm) => {
+ Bool => write!(f, "bool"),
+ Char => write!(f, "char"),
+ Int(t) => write!(f, "{}", t.ty_to_string()),
+ Uint(t) => write!(f, "{}", t.ty_to_string()),
+ Float(t) => write!(f, "{}", t.ty_to_string()),
+ RawPtr(ref tm) => {
write!(f, "*{} ", match tm.mutbl {
hir::MutMutable => "mut",
hir::MutImmutable => "const",
})?;
tm.ty.print(f, cx)
}
- TyRef(r, ty, mutbl) => {
+ Ref(r, ty, mutbl) => {
write!(f, "&")?;
let s = r.print_to_string(cx);
if s != "'_" {
}
ty::TypeAndMut { ty, mutbl }.print(f, cx)
}
- TyNever => write!(f, "!"),
- TyTuple(ref tys) => {
+ Never => write!(f, "!"),
+ Tuple(ref tys) => {
write!(f, "(")?;
let mut tys = tys.iter();
if let Some(&ty) = tys.next() {
}
write!(f, ")")
}
- TyFnDef(def_id, substs) => {
+ FnDef(def_id, substs) => {
ty::tls::with(|tcx| {
let mut sig = tcx.fn_sig(def_id);
if let Some(substs) = tcx.lift(&substs) {
cx.parameterized(f, substs, def_id, &[])?;
write!(f, "}}")
}
- TyFnPtr(ref bare_fn) => {
+ FnPtr(ref bare_fn) => {
bare_fn.print(f, cx)
}
- TyInfer(infer_ty) => write!(f, "{}", infer_ty),
- TyError => write!(f, "[type error]"),
- TyParam(ref param_ty) => write!(f, "{}", param_ty),
- TyAdt(def, substs) => cx.parameterized(f, substs, def.did, &[]),
- TyDynamic(data, r) => {
+ Infer(infer_ty) => write!(f, "{}", infer_ty),
+ Error => write!(f, "[type error]"),
+ Param(ref param_ty) => write!(f, "{}", param_ty),
+ Adt(def, substs) => cx.parameterized(f, substs, def.did, &[]),
+ Dynamic(data, r) => {
let r = r.print_to_string(cx);
if !r.is_empty() {
write!(f, "(")?;
Ok(())
}
}
- TyForeign(def_id) => parameterized(f, subst::Substs::empty(), def_id, &[]),
- TyProjection(ref data) => data.print(f, cx),
- TyAnon(def_id, substs) => {
+ Foreign(def_id) => parameterized(f, subst::Substs::empty(), def_id, &[]),
+ Projection(ref data) => data.print(f, cx),
+ Anon(def_id, substs) => {
if cx.is_verbose {
- return write!(f, "TyAnon({:?}, {:?})", def_id, substs);
+ return write!(f, "Anon({:?}, {:?})", def_id, substs);
}
ty::tls::with(|tcx| {
Ok(())
})
}
- TyStr => write!(f, "str"),
- TyGenerator(did, substs, movability) => ty::tls::with(|tcx| {
+ Str => write!(f, "str"),
+ Generator(did, substs, movability) => ty::tls::with(|tcx| {
let upvar_tys = substs.upvar_tys(did, tcx);
let witness = substs.witness(did, tcx);
if movability == hir::GeneratorMovability::Movable {
print!(f, cx, write(" "), print(witness), write("]"))
}),
- TyGeneratorWitness(types) => {
+ GeneratorWitness(types) => {
ty::tls::with(|tcx| cx.in_binder(f, tcx, &types, tcx.lift(&types)))
}
- TyClosure(did, substs) => ty::tls::with(|tcx| {
+ Closure(did, substs) => ty::tls::with(|tcx| {
let upvar_tys = substs.upvar_tys(did, tcx);
write!(f, "[closure")?;
write!(f, "]")
}),
- TyArray(ty, sz) => {
+ Array(ty, sz) => {
print!(f, cx, write("["), print(ty), write("; "))?;
match sz.val {
ConstValue::Unevaluated(_def_id, _substs) => {
}
write!(f, "]")
}
- TySlice(ty) => {
+ Slice(ty) => {
print!(f, cx, write("["), print(ty), write("]"))
}
}
(format!("{:.2}",
(((hits as f32) / (total as f32)) * 100.0)), total.to_string())
} else {
- ("".into(), "".into())
+ (String::new(), String::new())
};
writeln!(
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
log = "0.4"
+smallvec = { version = "0.6.5", features = ["union"] }
_ => {
self.handler
.span_err(item.span, "allocators must be statics");
- return OneVector::one(item);
+ return smallvec![item];
}
}
if self.in_submod > 0 {
self.handler
.span_err(item.span, "`global_allocator` cannot be used in submodules");
- return OneVector::one(item);
+ return smallvec![item];
}
if self.found {
self.handler
.span_err(item.span, "cannot define more than one #[global_allocator]");
- return OneVector::one(item);
+ return smallvec![item];
}
self.found = true;
extern crate rustc_target;
extern crate syntax;
extern crate syntax_pos;
+#[macro_use]
+extern crate smallvec;
pub mod expand;
}
LpExtend(ref lp_base, _, LpInterior(_, InteriorField(_))) => {
match lp_base.to_type().sty {
- ty::TyAdt(def, _) if def.has_dtor(self.tcx()) => {
+ ty::Adt(def, _) if def.has_dtor(self.tcx()) => {
// In the case where the owner implements drop, then
// the path must be initialized to prevent a case of
// partial reinitialization
Categorization::Interior(ref b, mc::InteriorField(_)) |
Categorization::Interior(ref b, mc::InteriorElement(Kind::Pattern)) => {
match b.ty.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if def.has_dtor(bccx.tcx) {
Some(cmt.clone())
} else {
check_and_get_illegal_move_origin(bccx, b)
}
}
- ty::TySlice(..) => Some(cmt.clone()),
+ ty::Slice(..) => Some(cmt.clone()),
_ => {
check_and_get_illegal_move_origin(bccx, b)
}
use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin};
use syntax::ast;
use syntax_pos;
-use errors::DiagnosticBuilder;
+use errors::{DiagnosticBuilder, Applicability};
use borrowck::gather_loans::gather_moves::PatternSource;
pub struct MoveErrorCollector<'tcx> {
let initializer =
e.init.as_ref().expect("should have an initializer to get an error");
if let Ok(snippet) = bccx.tcx.sess.source_map().span_to_snippet(initializer.span) {
- err.span_suggestion(initializer.span,
- "consider using a reference instead",
- format!("&{}", snippet));
+ err.span_suggestion_with_applicability(
+ initializer.span,
+ "consider using a reference instead",
+ format!("&{}", snippet),
+ Applicability::MaybeIncorrect // using a reference may not be the right fix
+ );
}
}
_ => {
Categorization::Downcast(ref b, _) |
Categorization::Interior(ref b, mc::InteriorField(_)) => {
match b.ty.sty {
- ty::TyAdt(def, _) if def.has_dtor(bccx.tcx) => {
+ ty::Adt(def, _) if def.has_dtor(bccx.tcx) => {
bccx.cannot_move_out_of_interior_of_drop(
move_from.span, b.ty, Origin::Ast)
}
let result = self.restrict(&cmt_base);
// Borrowing one union field automatically borrows all its fields.
match base_ty.sty {
- ty::TyAdt(adt_def, _) if adt_def.is_union() => match result {
+ ty::Adt(adt_def, _) if adt_def.is_union() => match result {
RestrictionResult::Safe => RestrictionResult::Safe,
RestrictionResult::SafeIf(base_lp, mut base_vec) => {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
Some(nl.to_string()),
Origin::Ast);
let need_note = match lp.ty.sty {
- ty::TypeVariants::TyClosure(id, _) => {
+ ty::Closure(id, _) => {
let node_id = self.tcx.hir.as_local_node_id(id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
if let Some((span, name)) = self.tables.closure_kind_origins().get(hir_id) {
// all parent union fields, moves do not propagate upwards automatically.
let mut lp = orig_lp.clone();
while let LpExtend(ref base_lp, mutbl, lp_elem) = lp.clone().kind {
- if let (&ty::TyAdt(adt_def, _), LpInterior(opt_variant_id, interior))
+ if let (&ty::Adt(adt_def, _), LpInterior(opt_variant_id, interior))
= (&base_lp.ty.sty, lp_elem) {
if adt_def.is_union() {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
span: Span) {
// Assigning to one union field automatically assigns to all its fields.
if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind {
- if let ty::TyAdt(adt_def, _) = base_lp.ty.sty {
+ if let ty::Adt(adt_def, _) = base_lp.ty.sty {
if adt_def.is_union() {
for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() {
let field =
use rustc::lint::builtin::UNUSED_MUT;
use rustc::ty;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
+use errors::Applicability;
use std::slice;
use syntax::ptr::P;
hir_id,
span,
"variable does not need to be mutable")
- .span_suggestion_short(mut_span, "remove this `mut`", "".to_owned())
+ .span_suggestion_short_with_applicability(
+ mut_span,
+ "remove this `mut`",
+ String::new(),
+ Applicability::MachineApplicable)
.emit();
}
}
let gens_str = if gens.iter().any(|&u| u != 0) {
format!(" gen: {}", bits_to_string(gens))
} else {
- "".to_string()
+ String::new()
};
let action_kills = &self.action_kills[start .. end];
let action_kills_str = if action_kills.iter().any(|&u| u != 0) {
format!(" action_kill: {}", bits_to_string(action_kills))
} else {
- "".to_string()
+ String::new()
};
let scope_kills = &self.scope_kills[start .. end];
let scope_kills_str = if scope_kills.iter().any(|&u| u != 0) {
format!(" scope_kill: {}", bits_to_string(scope_kills))
} else {
- "".to_string()
+ String::new()
};
ps.synth_comment(
fn dataflow_for(&self, e: EntryOrExit, n: &Node<'a>) -> String {
let id = n.1.data.id();
debug!("dataflow_for({:?}, id={:?}) {:?}", e, id, self.variants);
- let mut sets = "".to_string();
+ let mut sets = String::new();
let mut seen_one = false;
for &variant in &self.variants {
if seen_one { sets.push_str(" "); } else { seen_one = true; }
assert!(!sig.variadic && extra_args.is_empty());
match sig.inputs().last().unwrap().sty {
- ty::TyTuple(ref tupled_arguments) => {
+ ty::Tuple(ref tupled_arguments) => {
inputs = &sig.inputs()[0..sig.inputs().len() - 1];
tupled_arguments
}
op: hir::BinOpKind
) -> &'ll Value {
let signed = match t.sty {
- ty::TyFloat(_) => {
+ ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
},
- ty::TyUint(_) => false,
- ty::TyInt(_) => true,
+ ty::Uint(_) => false,
+ ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
};
) -> &'ll Value {
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
- (&ty::TyArray(_, len), &ty::TySlice(_)) => {
+ (&ty::Array(_, len), &ty::Slice(_)) => {
C_usize(cx, len.unwrap_usize(cx.tcx))
}
- (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
+ (&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
- (_, &ty::TyDynamic(ref data, ..)) => {
+ (_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target))
.field(cx, abi::FAT_PTR_EXTRA);
consts::ptrcast(meth::get_vtable(cx, source, data.principal()),
) -> (&'ll Value, &'ll Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
- (&ty::TyRef(_, a, _),
- &ty::TyRef(_, b, _)) |
- (&ty::TyRef(_, a, _),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (&ty::Ref(_, a, _),
+ &ty::Ref(_, b, _)) |
+ (&ty::Ref(_, a, _),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
+ (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx.type_is_sized(a));
let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx.type_is_sized(a));
let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = bx.cx.layout_of(src_ty);
OperandValue::Pair(base, info).store(bx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
- (&ty::TyRef(..), &ty::TyRef(..)) |
- (&ty::TyRef(..), &ty::TyRawPtr(..)) |
- (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
+ (&ty::Ref(..), &ty::Ref(..)) |
+ (&ty::Ref(..), &ty::RawPtr(..)) |
+ (&ty::RawPtr(..), &ty::RawPtr(..)) => {
coerce_ptr()
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
coerce_ptr()
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variants[0].fields.len() {
-> ty::PolyFnSig<'tcx>
{
match ty.sty {
- ty::TyFnDef(..) |
- // Shims currently have type TyFnPtr. Not sure this should remain.
- ty::TyFnPtr(_) => ty.fn_sig(cx.tcx),
- ty::TyClosure(def_id, substs) => {
+ ty::FnDef(..) |
+ // Shims currently have type FnPtr. Not sure this should remain.
+ ty::FnPtr(_) => ty.fn_sig(cx.tcx),
+ ty::Closure(def_id, substs) => {
let tcx = cx.tcx;
let sig = substs.closure_sig(def_id, tcx);
sig.abi
))
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let tcx = cx.tcx;
let sig = substs.poly_sig(def_id, cx.tcx);
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
let llty2 = match ty.sty {
- ty::TyRawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx),
+ ty::RawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx),
_ => {
if span.is_some() {
cx.sess().span_fatal(span.unwrap(), "must have type `*const T` or `*mut T`")
let tail = self.tcx.struct_tail(ty);
match tail.sty {
- ty::TyForeign(..) => false,
- ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
}
let (size, align) = cx.size_and_align_of(array_or_slice_type);
let upper_bound = match array_or_slice_type.sty {
- ty::TyArray(_, len) => {
+ ty::Array(_, len) => {
len.unwrap_usize(cx.tcx) as c_longlong
}
_ => -1
let signature_metadata: Vec<_> = iter::once(
// return type
match signature.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => None,
+ ty::Tuple(ref tys) if tys.is_empty() => None,
_ => Some(type_metadata(cx, signature.output(), span))
}
).chain(
// But it does not describe the trait's methods.
let containing_scope = match trait_type.sty {
- ty::TyDynamic(ref data, ..) => if let Some(principal) = data.principal() {
+ ty::Dynamic(ref data, ..) => if let Some(principal) = data.principal() {
let def_id = principal.def_id();
Some(get_namespace_for_item(cx, def_id))
} else {
let ptr_metadata = |ty: Ty<'tcx>| {
match ty.sty {
- ty::TySlice(typ) => {
+ ty::Slice(typ) => {
Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span))
}
- ty::TyStr => {
+ ty::Str => {
Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span))
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
Ok(MetadataCreationResult::new(
trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
false))
};
let MetadataCreationResult { metadata, already_stored_in_typemap } = match t.sty {
- ty::TyNever |
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) => {
+ ty::Never |
+ ty::Bool |
+ ty::Char |
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
- ty::TyTuple(ref elements) if elements.is_empty() => {
+ ty::Tuple(ref elements) if elements.is_empty() => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
- ty::TyArray(typ, _) |
- ty::TySlice(typ) => {
+ ty::Array(typ, _) |
+ ty::Slice(typ) => {
fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
}
- ty::TyStr => {
+ ty::Str => {
fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span)
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
MetadataCreationResult::new(
trait_pointer_metadata(cx, t, None, unique_type_id),
false)
}
- ty::TyForeign(..) => {
+ ty::Foreign(..) => {
MetadataCreationResult::new(
foreign_type_metadata(cx, t, unique_type_id),
false)
}
- ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
- ty::TyRef(_, ty, _) => {
+ ty::RawPtr(ty::TypeAndMut{ty, ..}) |
+ ty::Ref(_, ty, _) => {
match ptr_metadata(ty) {
Ok(res) => res,
Err(metadata) => return metadata,
}
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
match ptr_metadata(t.boxed_ty()) {
Ok(res) => res,
Err(metadata) => return metadata,
}
}
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let fn_metadata = subroutine_type_metadata(cx,
unique_type_id,
t.fn_sig(cx.tcx),
MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx).collect();
prepare_tuple_metadata(cx,
t,
unique_type_id,
usage_site_span).finalize(cx)
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx).map(|t| {
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)
}).collect();
unique_type_id,
usage_site_span).finalize(cx)
}
- ty::TyAdt(def, ..) => match def.adt_kind() {
+ ty::Adt(def, ..) => match def.adt_kind() {
AdtKind::Struct => {
prepare_struct_metadata(cx,
t,
usage_site_span).finalize(cx)
}
},
- ty::TyTuple(ref elements) => {
+ ty::Tuple(ref elements) => {
prepare_tuple_metadata(cx,
t,
&elements[..],
debug!("basic_type_metadata: {:?}", t);
let (name, encoding) = match t.sty {
- ty::TyNever => ("!", DW_ATE_unsigned),
- ty::TyTuple(ref elements) if elements.is_empty() =>
+ ty::Never => ("!", DW_ATE_unsigned),
+ ty::Tuple(ref elements) if elements.is_empty() =>
("()", DW_ATE_unsigned),
- ty::TyBool => ("bool", DW_ATE_boolean),
- ty::TyChar => ("char", DW_ATE_unsigned_char),
- ty::TyInt(int_ty) => {
+ ty::Bool => ("bool", DW_ATE_boolean),
+ ty::Char => ("char", DW_ATE_unsigned_char),
+ ty::Int(int_ty) => {
(int_ty.ty_to_string(), DW_ATE_signed)
},
- ty::TyUint(uint_ty) => {
+ ty::Uint(uint_ty) => {
(uint_ty.ty_to_string(), DW_ATE_unsigned)
},
- ty::TyFloat(float_ty) => {
+ ty::Float(float_ty) => {
(float_ty.ty_to_string(), DW_ATE_float)
},
_ => bug!("debuginfo::basic_type_metadata - t is invalid type")
let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
let (struct_def_id, variant) = match struct_type.sty {
- ty::TyAdt(def, _) => (def.did, def.non_enum_variant()),
+ ty::Adt(def, _) => (def.did, def.non_enum_variant()),
_ => bug!("prepare_struct_metadata on a non-ADT")
};
let union_name = compute_debuginfo_type_name(cx, union_type, false);
let (union_def_id, variant) = match union_type.sty {
- ty::TyAdt(def, _) => (def.did, def.non_enum_variant()),
+ ty::Adt(def, _) => (def.did, def.non_enum_variant()),
_ => bug!("prepare_union_metadata on a non-ADT")
};
member_descriptions);
vec![
MemberDescription {
- name: "".to_string(),
+ name: String::new(),
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: self.layout.size,
variant_type_metadata,
member_descriptions);
MemberDescription {
- name: "".to_string(),
+ name: String::new(),
type_metadata: variant_type_metadata,
offset: Size::ZERO,
size: variant.size,
// If this is not a univariant enum, there is also the discriminant field.
let (discr_offset, discr_arg) = match discriminant_info {
RegularDiscriminant(_) => {
+ // We have the layout of an enum variant, we need the layout of the outer enum
let enum_layout = cx.layout_of(layout.ty);
(Some(enum_layout.fields.offset(0)),
Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty)))
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(match sig.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => None,
+ ty::Tuple(ref tys) if tys.is_empty() => None,
_ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP))
});
// already inaccurate due to ABI adjustments (see #42800).
signature.extend(inputs.iter().map(|&t| {
let t = match t.sty {
- ty::TyArray(ct, _)
+ ty::Array(ct, _)
if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => {
cx.tcx.mk_imm_ptr(ct)
}
}
if sig.abi == Abi::RustCall && !sig.inputs().is_empty() {
- if let ty::TyTuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
+ if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
signature.extend(
args.iter().map(|argument_type| {
Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP))
// Only "class" methods are generally understood by LLVM,
// so avoid methods on other types (e.g. `<*mut T>::null`).
match impl_self_ty.sty {
- ty::TyAdt(def, ..) if !def.is_box() => {
+ ty::Adt(def, ..) if !def.is_box() => {
Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP))
}
_ => None
let cpp_like_names = cx.sess().target.target.options.is_like_msvc;
match t.sty {
- ty::TyBool => output.push_str("bool"),
- ty::TyChar => output.push_str("char"),
- ty::TyStr => output.push_str("str"),
- ty::TyNever => output.push_str("!"),
- ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()),
- ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
- ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()),
- ty::TyForeign(def_id) => push_item_name(cx, def_id, qualified, output),
- ty::TyAdt(def, substs) => {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => output.push_str("!"),
+ ty::Int(int_ty) => output.push_str(int_ty.ty_to_string()),
+ ty::Uint(uint_ty) => output.push_str(uint_ty.ty_to_string()),
+ ty::Float(float_ty) => output.push_str(float_ty.ty_to_string()),
+ ty::Foreign(def_id) => push_item_name(cx, def_id, qualified, output),
+ ty::Adt(def, substs) => {
push_item_name(cx, def.did, qualified, output);
push_type_params(cx, substs, output);
},
- ty::TyTuple(component_types) => {
+ ty::Tuple(component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
}
output.push(')');
},
- ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
if !cpp_like_names {
output.push('*');
}
output.push('*');
}
},
- ty::TyRef(_, inner_type, mutbl) => {
+ ty::Ref(_, inner_type, mutbl) => {
if !cpp_like_names {
output.push('&');
}
output.push('*');
}
},
- ty::TyArray(inner_type, len) => {
+ ty::Array(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len.unwrap_usize(cx.tcx)));
output.push(']');
},
- ty::TySlice(inner_type) => {
+ ty::Slice(inner_type) => {
if cpp_like_names {
output.push_str("slice<");
} else {
output.push(']');
}
},
- ty::TyDynamic(ref trait_data, ..) => {
+ ty::Dynamic(ref trait_data, ..) => {
if let Some(principal) = trait_data.principal() {
let principal = cx.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
push_type_params(cx, principal.substs, output);
}
},
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let sig = t.fn_sig(cx.tcx);
if sig.unsafety() == hir::Unsafety::Unsafe {
output.push_str("unsafe ");
push_debuginfo_type_name(cx, sig.output(), true, output);
}
},
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
output.push_str("closure");
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
output.push_str("generator");
}
- ty::TyError |
- ty::TyInfer(_) |
- ty::TyProjection(..) |
- ty::TyAnon(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyParam(_) => {
+ ty::Error |
+ ty::Infer(_) |
+ ty::Projection(..) |
+ ty::Anon(..) |
+ ty::GeneratorWitness(..) |
+ ty::Param(_) => {
bug!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t);
}
return (size, align);
}
match t.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
// load size/align from vtable
let vtable = info.unwrap();
(meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
let unit = t.sequence_element_type(bx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let size = bx.add(sized_size, unsized_size);
// Packed types ignore the alignment of their fields.
- if let ty::TyAdt(def, _) = t.sty {
+ if let ty::Adt(def, _) = t.sty {
if def.repr.packed() {
unsized_align = sized_align;
}
let tcx = cx.tcx;
let (def_id, substs) = match callee_ty.sty {
- ty::TyFnDef(def_id, substs) => (def_id, substs),
+ ty::FnDef(def_id, substs) => (def_id, substs),
_ => bug!("expected fn item type, found {}", callee_ty)
};
m_len, v_len
);
match m_elem_ty.sty {
- ty::TyInt(_) => {},
+ ty::Int(_) => {},
_ => {
return_error!("mask element type is `{}`, expected `i_`", m_elem_ty);
}
}
}
let ety = match in_elem.sty {
- ty::TyFloat(f) if f.bit_width() == 32 => {
+ ty::Float(f) if f.bit_width() == 32 => {
if in_len < 2 || in_len > 16 {
return_error!(
"unsupported floating-point vector `{}` with length `{}` \
}
"f32"
},
- ty::TyFloat(f) if f.bit_width() == 64 => {
+ ty::Float(f) if f.bit_width() == 64 => {
if in_len < 2 || in_len > 8 {
return_error!("unsupported floating-point vector `{}` with length `{}` \
out-of-range [2, 8]",
}
"f64"
},
- ty::TyFloat(f) => {
+ ty::Float(f) => {
return_error!("unsupported element type `{}` of floating-point vector `{}`",
f, in_ty);
},
fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String {
let p0s: String = "p0".repeat(no_pointers);
match elem_ty.sty {
- ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+ ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+ ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
+ ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
_ => unreachable!(),
}
}
mut no_pointers: usize) -> &'ll Type {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match elem_ty.sty {
- ty::TyInt(v) => Type::int_from_ty(cx, v),
- ty::TyUint(v) => Type::uint_from_ty(cx, v),
- ty::TyFloat(v) => Type::float_from_ty(cx, v),
+ ty::Int(v) => Type::int_from_ty(cx, v),
+ ty::Uint(v) => Type::uint_from_ty(cx, v),
+ ty::Float(v) => Type::float_from_ty(cx, v),
_ => unreachable!(),
};
while no_pointers > 0 {
// This counts how many pointers
fn ptr_count(t: ty::Ty) -> usize {
match t.sty {
- ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: ty::Ty) -> ty::Ty {
match t.sty {
- ty::TyRawPtr(p) => non_ptr(p.ty),
+ ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
- ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
+ ty::RawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)),
non_ptr(arg_tys[1].simd_type(tcx))),
_ => {
require!(false, "expected element type `{}` of second argument `{}` \
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).sty {
- ty::TyInt(_) => (),
+ ty::Int(_) => (),
_ => {
require!(false, "expected element type `{}` of third argument `{}` \
to be a signed integer type",
// This counts how many pointers
fn ptr_count(t: ty::Ty) -> usize {
match t.sty {
- ty::TyRawPtr(p) => 1 + ptr_count(p.ty),
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: ty::Ty) -> ty::Ty {
match t.sty {
- ty::TyRawPtr(p) => non_ptr(p.ty),
+ ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty {
- ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
+ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable
=> (ptr_count(arg_tys[1].simd_type(tcx)),
non_ptr(arg_tys[1].simd_type(tcx))),
_ => {
// The element type of the third argument must be a signed integer type of any width:
match arg_tys[2].simd_type(tcx).sty {
- ty::TyInt(_) => (),
+ ty::Int(_) => (),
_ => {
require!(false, "expected element type `{}` of third argument `{}` \
to be a signed integer type",
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {
+ ty::Int(_) | ty::Uint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
Ok(bx.$integer_reduce(args[0].immediate()))
}
},
- ty::TyFloat(f) => {
+ ty::Float(f) => {
// ordered arithmetic reductions take an accumulator
let acc = if $ordered {
let acc = args[1].immediate();
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return match in_elem.sty {
- ty::TyInt(_i) => {
+ ty::Int(_i) => {
Ok(bx.$int_red(args[0].immediate(), true))
},
- ty::TyUint(_u) => {
+ ty::Uint(_u) => {
Ok(bx.$int_red(args[0].immediate(), false))
},
- ty::TyFloat(_f) => {
+ ty::Float(_f) => {
Ok(bx.$float_red(args[0].immediate()))
}
_ => {
args[0].immediate()
} else {
match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {},
+ ty::Int(_) | ty::Uint(_) => {},
_ => {
return_error!("unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty)
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.sty {
- ty::TyInt(_) | ty::TyUint(_) => {
+ ty::Int(_) | ty::Uint(_) => {
let r = bx.$red(input);
Ok(
if !$boolean {
let (in_style, in_width) = match in_elem.sty {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
- ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::TyFloat(f) => (Style::Float, f.bit_width()),
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
let (out_style, out_width) = match out_elem.sty {
- ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::TyFloat(f) => (Style::Float, f.bit_width()),
+ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
+ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
+ ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
}
}
arith! {
- simd_add: TyUint, TyInt => add, TyFloat => fadd;
- simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
- simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
- simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
- simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
- simd_shl: TyUint, TyInt => shl;
- simd_shr: TyUint => lshr, TyInt => ashr;
- simd_and: TyUint, TyInt => and;
- simd_or: TyUint, TyInt => or;
- simd_xor: TyUint, TyInt => xor;
- simd_fmax: TyFloat => maxnum;
- simd_fmin: TyFloat => minnum;
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or;
+ simd_xor: Uint, Int => xor;
+ simd_fmax: Float => maxnum;
+ simd_fmin: Float => minnum;
}
span_bug!(span, "unknown SIMD intrinsic");
}
// stuffs.
fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
match ty.sty {
- ty::TyInt(t) => Some((match t {
+ ty::Int(t) => Some((match t {
ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64,
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I64 => 64,
ast::IntTy::I128 => 128,
}, true)),
- ty::TyUint(t) => Some((match t {
+ ty::Uint(t) => Some((match t {
ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64,
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
// Returns the width of a float TypeVariant
// Returns None if the type is not a float
-fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>) -> Option<u64> {
+fn float_type_width<'tcx>(sty: &ty::TyKind<'tcx>) -> Option<u64> {
match *sty {
- ty::TyFloat(t) => Some(t.bit_width() as u64),
+ ty::Float(t) => Some(t.bit_width() as u64),
_ => None,
}
}
func: mir::Operand::Constant(ref c),
ref args, ..
} => match c.ty.sty {
- ty::TyFnDef(did, _) => Some((did, args)),
+ ty::FnDef(did, _) => Some((did, args)),
_ => None,
},
_ => None,
&args1[..]
};
let (drop_fn, fn_ty) = match ty.sty {
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let fn_ty = drop_fn.ty(bx.cx.tcx);
let sig = common::ty_fn_sig(bx.cx, fn_ty);
let sig = bx.tcx().normalize_erasing_late_bound_regions(
let callee = self.codegen_operand(&bx, func);
let (instance, mut llfn) = match callee.layout.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs) => {
(Some(ty::Instance::resolve(bx.cx.tcx,
ty::ParamEnv::reveal_all(),
def_id,
substs).unwrap()),
None)
}
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
(None, Some(callee.immediate()))
}
_ => bug!("{} is not callable", callee.layout.ty)
// except according to those terms.
use llvm;
-use rustc::mir::interpret::ConstEvalErr;
-use rustc_mir::interpret::{read_target_uint, const_val_field};
+use rustc::mir::interpret::{ConstEvalErr, read_target_uint};
+use rustc_mir::interpret::{const_field};
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
.and_then(|c| {
let field_ty = c.ty.builtin_index().unwrap();
let fields = match c.ty.sty {
- ty::TyArray(_, n) => n.unwrap_usize(bx.tcx()),
+ ty::Array(_, n) => n.unwrap_usize(bx.tcx()),
ref other => bug!("invalid simd shuffle type: {}", other),
};
let values: Result<Vec<_>, Lrc<_>> = (0..fields).map(|field| {
- let field = const_val_field(
+ let field = const_field(
bx.tcx(),
ty::ParamEnv::reveal_all(),
self.instance,
let arg_ty = fx.monomorphize(&arg_decl.ty);
let tupled_arg_tys = match arg_ty.sty {
- ty::TyTuple(ref tys) => tys,
+ ty::Tuple(ref tys) => tys,
_ => bug!("spread argument isn't a tuple?!")
};
// Or is it the closure environment?
let (closure_layout, env_ref) = match arg.layout.ty.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => (bx.cx.layout_of(ty), true),
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true),
_ => (arg.layout, false)
};
let (def_id, upvar_substs) = match closure_layout.ty.sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
- ty::TyGenerator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
_ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
};
let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);
// a pointer in an alloca for debuginfo atm.
let mut ops = if env_ref || env_alloca { &ops[..] } else { &ops[1..] };
- let ty = if let (true, &ty::TyRef(_, ty, _)) = (decl.by_ref, &ty.sty) {
+ let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) {
ty
} else {
ops = &ops[..ops.len() - 1];
return simple();
}
_ if !field.is_unsized() => return simple(),
- ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
- ty::TyAdt(def, _) => {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+ ty::Adt(def, _) => {
if def.repr.packed() {
// FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments.
let val = match *kind {
mir::CastKind::ReifyFnPointer => {
match operand.layout.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ ty::FnDef(def_id, substs) => {
if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
bug!("reifying a fn ptr that requires \
const arguments");
}
mir::CastKind::ClosureFnPointer => {
match operand.layout.ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(callee::get_fn(bx.cx, instance))
// because codegen_place() panics if Local is operand.
if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
- if let ty::TyArray(_, n) = op.layout.ty.sty {
+ if let ty::Array(_, n) = op.layout.ty.sty {
let n = n.unwrap_usize(bx.cx.tcx);
return common::C_usize(bx.cx, n);
}
fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
- use rustc::ty::{TyInt, TyUint};
+ use rustc::ty::{Int, Uint};
let tcx = bx.tcx();
let new_sty = match ty.sty {
- TyInt(Isize) => TyInt(tcx.sess.target.isize_ty),
- TyUint(Usize) => TyUint(tcx.sess.target.usize_ty),
- ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
+ Int(Isize) => Int(tcx.sess.target.isize_ty),
+ Uint(Usize) => Uint(tcx.sess.target.usize_ty),
+ ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for op applied to non-int type")
};
let name = match oop {
OverflowOp::Add => match new_sty {
- TyInt(I8) => "llvm.sadd.with.overflow.i8",
- TyInt(I16) => "llvm.sadd.with.overflow.i16",
- TyInt(I32) => "llvm.sadd.with.overflow.i32",
- TyInt(I64) => "llvm.sadd.with.overflow.i64",
- TyInt(I128) => "llvm.sadd.with.overflow.i128",
-
- TyUint(U8) => "llvm.uadd.with.overflow.i8",
- TyUint(U16) => "llvm.uadd.with.overflow.i16",
- TyUint(U32) => "llvm.uadd.with.overflow.i32",
- TyUint(U64) => "llvm.uadd.with.overflow.i64",
- TyUint(U128) => "llvm.uadd.with.overflow.i128",
+ Int(I8) => "llvm.sadd.with.overflow.i8",
+ Int(I16) => "llvm.sadd.with.overflow.i16",
+ Int(I32) => "llvm.sadd.with.overflow.i32",
+ Int(I64) => "llvm.sadd.with.overflow.i64",
+ Int(I128) => "llvm.sadd.with.overflow.i128",
+
+ Uint(U8) => "llvm.uadd.with.overflow.i8",
+ Uint(U16) => "llvm.uadd.with.overflow.i16",
+ Uint(U32) => "llvm.uadd.with.overflow.i32",
+ Uint(U64) => "llvm.uadd.with.overflow.i64",
+ Uint(U128) => "llvm.uadd.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Sub => match new_sty {
- TyInt(I8) => "llvm.ssub.with.overflow.i8",
- TyInt(I16) => "llvm.ssub.with.overflow.i16",
- TyInt(I32) => "llvm.ssub.with.overflow.i32",
- TyInt(I64) => "llvm.ssub.with.overflow.i64",
- TyInt(I128) => "llvm.ssub.with.overflow.i128",
-
- TyUint(U8) => "llvm.usub.with.overflow.i8",
- TyUint(U16) => "llvm.usub.with.overflow.i16",
- TyUint(U32) => "llvm.usub.with.overflow.i32",
- TyUint(U64) => "llvm.usub.with.overflow.i64",
- TyUint(U128) => "llvm.usub.with.overflow.i128",
+ Int(I8) => "llvm.ssub.with.overflow.i8",
+ Int(I16) => "llvm.ssub.with.overflow.i16",
+ Int(I32) => "llvm.ssub.with.overflow.i32",
+ Int(I64) => "llvm.ssub.with.overflow.i64",
+ Int(I128) => "llvm.ssub.with.overflow.i128",
+
+ Uint(U8) => "llvm.usub.with.overflow.i8",
+ Uint(U16) => "llvm.usub.with.overflow.i16",
+ Uint(U32) => "llvm.usub.with.overflow.i32",
+ Uint(U64) => "llvm.usub.with.overflow.i64",
+ Uint(U128) => "llvm.usub.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Mul => match new_sty {
- TyInt(I8) => "llvm.smul.with.overflow.i8",
- TyInt(I16) => "llvm.smul.with.overflow.i16",
- TyInt(I32) => "llvm.smul.with.overflow.i32",
- TyInt(I64) => "llvm.smul.with.overflow.i64",
- TyInt(I128) => "llvm.smul.with.overflow.i128",
-
- TyUint(U8) => "llvm.umul.with.overflow.i8",
- TyUint(U16) => "llvm.umul.with.overflow.i16",
- TyUint(U32) => "llvm.umul.with.overflow.i32",
- TyUint(U64) => "llvm.umul.with.overflow.i64",
- TyUint(U128) => "llvm.umul.with.overflow.i128",
+ Int(I8) => "llvm.smul.with.overflow.i8",
+ Int(I16) => "llvm.smul.with.overflow.i16",
+ Int(I32) => "llvm.smul.with.overflow.i32",
+ Int(I64) => "llvm.smul.with.overflow.i64",
+ Int(I128) => "llvm.smul.with.overflow.i128",
+
+ Uint(U8) => "llvm.umul.with.overflow.i8",
+ Uint(U16) => "llvm.umul.with.overflow.i16",
+ Uint(U32) => "llvm.umul.with.overflow.i32",
+ Uint(U64) => "llvm.umul.with.overflow.i64",
+ Uint(U128) => "llvm.umul.with.overflow.i128",
_ => unreachable!(),
},
}
let name = match layout.ty.sty {
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyAdt(..) |
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::Adt(..) |
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
- // ty::TyDynamic(..) |
- ty::TyForeign(..) |
- ty::TyStr => {
+ // ty::Dynamic(..) |
+ ty::Foreign(..) |
+ ty::Str => {
let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx, true, true);
printer.push_type_name(layout.ty, &mut name);
match (&layout.ty.sty, &layout.variants) {
- (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => {
+ (&ty::Adt(def, _), &layout::Variants::Single { index }) => {
if def.is_enum() && !def.variants.is_empty() {
write!(&mut name, "::{}", def.variants[index].name).unwrap();
}
return llty;
}
let llty = match self.ty.sty {
- ty::TyRef(_, ty, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Ref(_, ty, _) |
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
cx.layout_of(ty).llvm_type(cx).ptr_to()
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to()
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.sty {
- ty::TyRef(..) |
- ty::TyRawPtr(_) => {
+ ty::Ref(..) |
+ ty::RawPtr(_) => {
return self.field(cx, index).llvm_type(cx);
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
let mut result = None;
match self.ty.sty {
- ty::TyRawPtr(mt) if offset.bytes() == 0 => {
+ ty::RawPtr(mt) if offset.bytes() == 0 => {
let (size, align) = cx.size_and_align_of(mt.ty);
result = Some(PointeeInfo {
size,
});
}
- ty::TyRef(_, ty, mt) if offset.bytes() == 0 => {
+ ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
let (size, align) = cx.size_and_align_of(ty);
let kind = match mt {
// FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
if let Some(ref mut pointee) = result {
- if let ty::TyAdt(def, _) = self.ty.sty {
+ if let ty::Adt(def, _) = self.ty.sty {
if def.is_box() && offset.bytes() == 0 {
pointee.safe = Some(PointerKind::UniqueOwned);
}
// If this is a function, we hash the signature as well.
// This is not *strictly* needed, but it may help in some
// situations, see the `run-make/a-b-a-linker-guard` test.
- if let ty::TyFnDef(..) = item_type.sty {
+ if let ty::FnDef(..) = item_type.sty {
item_type.fn_sig(tcx).hash_stable(&mut hcx, &mut hasher);
}
rustc-rayon = "0.1.1"
rustc-rayon-core = "0.1.1"
rustc-hash = "1.0.1"
+smallvec = { version = "0.6.5", features = ["union"] }
[dependencies.parking_lot]
version = "0.5"
// except according to those terms.
use array_vec::ArrayVec;
-use std::borrow::{Borrow, BorrowMut, ToOwned};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
-use std::ops::{Deref, DerefMut, Range};
use std::slice;
use bitslice::{BitSlice, Word};
use bitslice::{bitwise, Union, Subtract, Intersect};
use indexed_vec::Idx;
use rustc_serialize;
-/// Represents a set (or packed family of sets), of some element type
-/// E, where each E is identified by some unique index type `T`.
+/// Represents a set of some element type E, where each E is identified by some
+/// unique index type `T`.
///
/// In other words, `T` is the type used to index into the bitvector
/// this type uses to represent the set of object it holds.
///
/// The representation is dense, using one bit per possible element.
#[derive(Eq, PartialEq)]
-pub struct IdxSetBuf<T: Idx> {
+pub struct IdxSet<T: Idx> {
_pd: PhantomData<fn(&T)>,
bits: Vec<Word>,
}
-impl<T: Idx> Clone for IdxSetBuf<T> {
+impl<T: Idx> Clone for IdxSet<T> {
fn clone(&self) -> Self {
- IdxSetBuf { _pd: PhantomData, bits: self.bits.clone() }
+ IdxSet { _pd: PhantomData, bits: self.bits.clone() }
}
}
-impl<T: Idx> rustc_serialize::Encodable for IdxSetBuf<T> {
+impl<T: Idx> rustc_serialize::Encodable for IdxSet<T> {
fn encode<E: rustc_serialize::Encoder>(&self,
encoder: &mut E)
-> Result<(), E::Error> {
}
}
-impl<T: Idx> rustc_serialize::Decodable for IdxSetBuf<T> {
- fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSetBuf<T>, D::Error> {
+impl<T: Idx> rustc_serialize::Decodable for IdxSet<T> {
+ fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSet<T>, D::Error> {
let words: Vec<Word> = rustc_serialize::Decodable::decode(d)?;
- Ok(IdxSetBuf {
+ Ok(IdxSet {
_pd: PhantomData,
bits: words,
})
}
}
-
-// pnkfelix wants to have this be `IdxSet<T>([Word]) and then pass
-// around `&mut IdxSet<T>` or `&IdxSet<T>`.
-
-/// Represents a set (or packed family of sets), of some element type
-/// E, where each E is identified by some unique index type `T`.
-///
-/// In other words, `T` is the type used to index into the bitslice
-/// this type uses to represent the set of object it holds.
-#[repr(transparent)]
-pub struct IdxSet<T: Idx> {
- _pd: PhantomData<fn(&T)>,
- bits: [Word],
-}
-
-impl<T: Idx> Borrow<IdxSet<T>> for IdxSetBuf<T> {
- fn borrow(&self) -> &IdxSet<T> {
- &*self
- }
-}
-
-impl<T: Idx> BorrowMut<IdxSet<T>> for IdxSetBuf<T> {
- fn borrow_mut(&mut self) -> &mut IdxSet<T> {
- &mut *self
- }
-}
-
-impl<T: Idx> ToOwned for IdxSet<T> {
- type Owned = IdxSetBuf<T>;
- fn to_owned(&self) -> Self::Owned {
- IdxSet::to_owned(self)
- }
-}
-
const BITS_PER_WORD: usize = mem::size_of::<Word>() * 8;
-impl<T: Idx> fmt::Debug for IdxSetBuf<T> {
- fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
- w.debug_list()
- .entries(self.iter())
- .finish()
- }
-}
-
impl<T: Idx> fmt::Debug for IdxSet<T> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
w.debug_list()
}
}
-impl<T: Idx> IdxSetBuf<T> {
+impl<T: Idx> IdxSet<T> {
fn new(init: Word, universe_size: usize) -> Self {
let num_words = (universe_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
- IdxSetBuf {
+ IdxSet {
_pd: Default::default(),
bits: vec![init; num_words],
}
pub fn new_empty(universe_size: usize) -> Self {
Self::new(0, universe_size)
}
-}
-
-impl<T: Idx> IdxSet<T> {
- unsafe fn from_slice(s: &[Word]) -> &Self {
- &*(s as *const [Word] as *const Self)
- }
-
- unsafe fn from_slice_mut(s: &mut [Word]) -> &mut Self {
- &mut *(s as *mut [Word] as *mut Self)
- }
-}
-
-impl<T: Idx> Deref for IdxSetBuf<T> {
- type Target = IdxSet<T>;
- fn deref(&self) -> &IdxSet<T> {
- unsafe { IdxSet::from_slice(&self.bits) }
- }
-}
-
-impl<T: Idx> DerefMut for IdxSetBuf<T> {
- fn deref_mut(&mut self) -> &mut IdxSet<T> {
- unsafe { IdxSet::from_slice_mut(&mut self.bits) }
- }
-}
-
-impl<T: Idx> IdxSet<T> {
- pub fn to_owned(&self) -> IdxSetBuf<T> {
- IdxSetBuf {
- _pd: Default::default(),
- bits: self.bits.to_owned(),
- }
- }
/// Duplicates as a hybrid set.
- pub fn to_hybrid(&self) -> HybridIdxSetBuf<T> {
+ pub fn to_hybrid(&self) -> HybridIdxSet<T> {
// This universe_size may be slightly larger than the one specified
// upon creation, due to rounding up to a whole word. That's ok.
let universe_size = self.bits.len() * BITS_PER_WORD;
// Note: we currently don't bother trying to make a Sparse set.
- HybridIdxSetBuf::Dense(self.to_owned(), universe_size)
+ HybridIdxSet::Dense(self.to_owned(), universe_size)
}
/// Removes all elements
self.bits.set_bit(elem.index())
}
- pub fn range(&self, elems: &Range<T>) -> &Self {
- let elems = elems.start.index()..elems.end.index();
- unsafe { Self::from_slice(&self.bits[elems]) }
- }
-
- pub fn range_mut(&mut self, elems: &Range<T>) -> &mut Self {
- let elems = elems.start.index()..elems.end.index();
- unsafe { Self::from_slice_mut(&mut self.bits[elems]) }
- }
-
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
self.bits.get_bit(elem.index())
bitwise(self.words_mut(), other.words(), &Union)
}
- /// Like `union()`, but takes a `SparseIdxSetBuf` argument.
- fn union_sparse(&mut self, other: &SparseIdxSetBuf<T>) -> bool {
+ /// Like `union()`, but takes a `SparseIdxSet` argument.
+ fn union_sparse(&mut self, other: &SparseIdxSet<T>) -> bool {
let mut changed = false;
for elem in other.iter() {
changed |= self.add(&elem);
changed
}
- /// Like `union()`, but takes a `HybridIdxSetBuf` argument.
- pub fn union_hybrid(&mut self, other: &HybridIdxSetBuf<T>) -> bool {
+ /// Like `union()`, but takes a `HybridIdxSet` argument.
+ pub fn union_hybrid(&mut self, other: &HybridIdxSet<T>) -> bool {
match other {
- HybridIdxSetBuf::Sparse(sparse, _) => self.union_sparse(sparse),
- HybridIdxSetBuf::Dense(dense, _) => self.union(dense),
+ HybridIdxSet::Sparse(sparse, _) => self.union_sparse(sparse),
+ HybridIdxSet::Dense(dense, _) => self.union(dense),
}
}
bitwise(self.words_mut(), other.words(), &Subtract)
}
- /// Like `subtract()`, but takes a `SparseIdxSetBuf` argument.
- fn subtract_sparse(&mut self, other: &SparseIdxSetBuf<T>) -> bool {
+ /// Like `subtract()`, but takes a `SparseIdxSet` argument.
+ fn subtract_sparse(&mut self, other: &SparseIdxSet<T>) -> bool {
let mut changed = false;
for elem in other.iter() {
changed |= self.remove(&elem);
changed
}
- /// Like `subtract()`, but takes a `HybridIdxSetBuf` argument.
- pub fn subtract_hybrid(&mut self, other: &HybridIdxSetBuf<T>) -> bool {
+ /// Like `subtract()`, but takes a `HybridIdxSet` argument.
+ pub fn subtract_hybrid(&mut self, other: &HybridIdxSet<T>) -> bool {
match other {
- HybridIdxSetBuf::Sparse(sparse, _) => self.subtract_sparse(sparse),
- HybridIdxSetBuf::Dense(dense, _) => self.subtract(dense),
+ HybridIdxSet::Sparse(sparse, _) => self.subtract_sparse(sparse),
+ HybridIdxSet::Dense(dense, _) => self.subtract(dense),
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
-/// HybridIdxSetBuf; do not use directly.
+/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
-pub struct SparseIdxSetBuf<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
+pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
-impl<T: Idx> SparseIdxSetBuf<T> {
+impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
- SparseIdxSetBuf(ArrayVec::new())
+ SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
}
}
- fn to_dense(&self, universe_size: usize) -> IdxSetBuf<T> {
- let mut dense = IdxSetBuf::new_empty(universe_size);
+ fn to_dense(&self, universe_size: usize) -> IdxSet<T> {
+ let mut dense = IdxSet::new_empty(universe_size);
for elem in self.0.iter() {
dense.add(elem);
}
}
}
-/// Like IdxSetBuf, but with a hybrid representation: sparse when there are few
+/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `universe_size`, and are cleared frequently.
#[derive(Clone, Debug)]
-pub enum HybridIdxSetBuf<T: Idx> {
- Sparse(SparseIdxSetBuf<T>, usize),
- Dense(IdxSetBuf<T>, usize),
+pub enum HybridIdxSet<T: Idx> {
+ Sparse(SparseIdxSet<T>, usize),
+ Dense(IdxSet<T>, usize),
}
-impl<T: Idx> HybridIdxSetBuf<T> {
+impl<T: Idx> HybridIdxSet<T> {
pub fn new_empty(universe_size: usize) -> Self {
- HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), universe_size)
+ HybridIdxSet::Sparse(SparseIdxSet::new(), universe_size)
}
fn universe_size(&mut self) -> usize {
match *self {
- HybridIdxSetBuf::Sparse(_, size) => size,
- HybridIdxSetBuf::Dense(_, size) => size,
+ HybridIdxSet::Sparse(_, size) => size,
+ HybridIdxSet::Dense(_, size) => size,
}
}
pub fn clear(&mut self) {
let universe_size = self.universe_size();
- *self = HybridIdxSetBuf::new_empty(universe_size);
+ *self = HybridIdxSet::new_empty(universe_size);
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => sparse.contains(elem),
- HybridIdxSetBuf::Dense(dense, _) => dense.contains(elem),
+ HybridIdxSet::Sparse(sparse, _) => sparse.contains(elem),
+ HybridIdxSet::Dense(dense, _) => dense.contains(elem),
}
}
/// Adds `elem` to the set `self`.
pub fn add(&mut self, elem: &T) -> bool {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
+ HybridIdxSet::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
// The set is sparse and has space for `elem`.
sparse.add(elem)
}
- HybridIdxSetBuf::Sparse(sparse, _) if sparse.contains(elem) => {
+ HybridIdxSet::Sparse(sparse, _) if sparse.contains(elem) => {
// The set is sparse and does not have space for `elem`, but
// that doesn't matter because `elem` is already present.
false
}
- HybridIdxSetBuf::Sparse(_, _) => {
+ HybridIdxSet::Sparse(_, _) => {
// The set is sparse and full. Convert to a dense set.
//
// FIXME: This code is awful, but I can't work out how else to
// appease the borrow checker.
- let dummy = HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), 0);
+ let dummy = HybridIdxSet::Sparse(SparseIdxSet::new(), 0);
match mem::replace(self, dummy) {
- HybridIdxSetBuf::Sparse(sparse, universe_size) => {
+ HybridIdxSet::Sparse(sparse, universe_size) => {
let mut dense = sparse.to_dense(universe_size);
let changed = dense.add(elem);
assert!(changed);
- mem::replace(self, HybridIdxSetBuf::Dense(dense, universe_size));
+ mem::replace(self, HybridIdxSet::Dense(dense, universe_size));
changed
}
_ => panic!("impossible"),
}
}
- HybridIdxSetBuf::Dense(dense, _) => dense.add(elem),
+ HybridIdxSet::Dense(dense, _) => dense.add(elem),
}
}
pub fn remove(&mut self, elem: &T) -> bool {
// Note: we currently don't bother going from Dense back to Sparse.
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => sparse.remove(elem),
- HybridIdxSetBuf::Dense(dense, _) => dense.remove(elem),
+ HybridIdxSet::Sparse(sparse, _) => sparse.remove(elem),
+ HybridIdxSet::Dense(dense, _) => dense.remove(elem),
}
}
/// Converts to a dense set, consuming itself in the process.
- pub fn to_dense(self) -> IdxSetBuf<T> {
+ pub fn to_dense(self) -> IdxSet<T> {
match self {
- HybridIdxSetBuf::Sparse(sparse, universe_size) => sparse.to_dense(universe_size),
- HybridIdxSetBuf::Dense(dense, _) => dense,
+ HybridIdxSet::Sparse(sparse, universe_size) => sparse.to_dense(universe_size),
+ HybridIdxSet::Dense(dense, _) => dense,
}
}
/// Iteration order is unspecified.
pub fn iter(&self) -> HybridIter<T> {
match self {
- HybridIdxSetBuf::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
- HybridIdxSetBuf::Dense(dense, _) => HybridIter::Dense(dense.iter()),
+ HybridIdxSet::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
+ HybridIdxSet::Dense(dense, _) => HybridIter::Dense(dense.iter()),
}
}
}
use std::cmp;
for i in 0..256 {
- let mut idx_buf: IdxSetBuf<usize> = IdxSetBuf::new_filled(128);
+ let mut idx_buf: IdxSet<usize> = IdxSet::new_filled(128);
idx_buf.trim_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
fn test_set_up_to() {
for i in 0..128 {
for mut idx_buf in
- vec![IdxSetBuf::new_empty(128), IdxSetBuf::new_filled(128)]
+ vec![IdxSet::new_empty(128), IdxSet::new_filled(128)]
.into_iter()
{
idx_buf.set_up_to(i);
#[test]
fn test_new_filled() {
for i in 0..128 {
- let idx_buf = IdxSetBuf::new_filled(i);
+ let idx_buf = IdxSet::new_filled(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect();
assert_eq!(elems, expected);
extern crate rustc_rayon_core as rayon_core;
extern crate rustc_hash;
extern crate serialize;
+#[cfg_attr(test, macro_use)]
+extern crate smallvec;
// See librustc_cratesio_shim/Cargo.toml for a comment explaining this.
#[allow(unused_extern_crates)]
//!
//! The N above is determined by Array's implementor, by way of an associated constant.
-use std::ops::{Deref, DerefMut};
-use std::iter::{IntoIterator, FromIterator};
-use std::fmt::{self, Debug};
-use std::mem;
-use std::ptr;
-
-use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
-
-use accumulate_vec::{IntoIter, AccumulateVec};
-use array_vec::Array;
-
-pub struct SmallVec<A: Array>(AccumulateVec<A>);
+use smallvec::{Array, SmallVec};
pub type OneVector<T> = SmallVec<[T; 1]>;
-impl<A> Clone for SmallVec<A>
- where A: Array,
- A::Element: Clone {
- fn clone(&self) -> Self {
- SmallVec(self.0.clone())
- }
-}
-
-impl<A> Debug for SmallVec<A>
- where A: Array + Debug,
- A::Element: Debug {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple("SmallVec").field(&self.0).finish()
- }
+pub trait ExpectOne<A: Array> {
+ fn expect_one(self, err: &'static str) -> A::Item;
}
-impl<A: Array> SmallVec<A> {
- pub fn new() -> Self {
- SmallVec(AccumulateVec::new())
- }
-
- pub fn is_array(&self) -> bool {
- self.0.is_array()
- }
-
- pub fn with_capacity(cap: usize) -> Self {
- let mut vec = SmallVec::new();
- vec.reserve(cap);
- vec
- }
-
- pub fn one(el: A::Element) -> Self {
- SmallVec(AccumulateVec::one(el))
- }
-
- pub fn many<I: IntoIterator<Item=A::Element>>(els: I) -> Self {
- SmallVec(AccumulateVec::many(els))
- }
-
- pub fn expect_one(self, err: &'static str) -> A::Element {
+impl<A: Array> ExpectOne<A> for SmallVec<A> {
+ fn expect_one(self, err: &'static str) -> A::Item {
assert!(self.len() == 1, err);
- match self.0 {
- AccumulateVec::Array(arr) => arr.into_iter().next().unwrap(),
- AccumulateVec::Heap(vec) => vec.into_iter().next().unwrap(),
- }
- }
-
- /// Will reallocate onto the heap if needed.
- pub fn push(&mut self, el: A::Element) {
- self.reserve(1);
- match self.0 {
- AccumulateVec::Array(ref mut array) => array.push(el),
- AccumulateVec::Heap(ref mut vec) => vec.push(el),
- }
- }
-
- pub fn reserve(&mut self, n: usize) {
- match self.0 {
- AccumulateVec::Array(_) => {
- if self.len() + n > A::LEN {
- let len = self.len();
- let array = mem::replace(&mut self.0,
- AccumulateVec::Heap(Vec::with_capacity(len + n)));
- if let AccumulateVec::Array(array) = array {
- match self.0 {
- AccumulateVec::Heap(ref mut vec) => vec.extend(array),
- _ => unreachable!()
- }
- }
- }
- }
- AccumulateVec::Heap(ref mut vec) => vec.reserve(n)
- }
- }
-
- pub unsafe fn set_len(&mut self, len: usize) {
- match self.0 {
- AccumulateVec::Array(ref mut arr) => arr.set_len(len),
- AccumulateVec::Heap(ref mut vec) => vec.set_len(len),
- }
- }
-
- pub fn insert(&mut self, index: usize, element: A::Element) {
- let len = self.len();
-
- // Reserve space for shifting elements to the right
- self.reserve(1);
-
- assert!(index <= len);
-
- unsafe {
- // infallible
- // The spot to put the new value
- {
- let p = self.as_mut_ptr().add(index);
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
- // Write it in, overwriting the first copy of the `index`th
- // element.
- ptr::write(p, element);
- }
- self.set_len(len + 1);
- }
- }
-
- pub fn truncate(&mut self, len: usize) {
- unsafe {
- while len < self.len() {
- // Decrement len before the drop_in_place(), so a panic on Drop
- // doesn't re-drop the just-failed value.
- let newlen = self.len() - 1;
- self.set_len(newlen);
- ::std::ptr::drop_in_place(self.get_unchecked_mut(newlen));
- }
- }
- }
-}
-
-impl<A: Array> Deref for SmallVec<A> {
- type Target = AccumulateVec<A>;
- fn deref(&self) -> &Self::Target {
- &self.0
- }
-}
-
-impl<A: Array> DerefMut for SmallVec<A> {
- fn deref_mut(&mut self) -> &mut AccumulateVec<A> {
- &mut self.0
- }
-}
-
-impl<A: Array> FromIterator<A::Element> for SmallVec<A> {
- fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item=A::Element> {
- SmallVec(iter.into_iter().collect())
- }
-}
-
-impl<A: Array> Extend<A::Element> for SmallVec<A> {
- fn extend<I: IntoIterator<Item=A::Element>>(&mut self, iter: I) {
- let iter = iter.into_iter();
- self.reserve(iter.size_hint().0);
- match self.0 {
- AccumulateVec::Heap(ref mut vec) => vec.extend(iter),
- _ => iter.for_each(|el| self.push(el))
- }
- }
-}
-
-impl<A: Array> IntoIterator for SmallVec<A> {
- type Item = A::Element;
- type IntoIter = IntoIter<A>;
- fn into_iter(self) -> Self::IntoIter {
- self.0.into_iter()
- }
-}
-
-impl<A: Array> Default for SmallVec<A> {
- fn default() -> SmallVec<A> {
- SmallVec::new()
- }
-}
-
-impl<A> Encodable for SmallVec<A>
- where A: Array,
- A::Element: Encodable {
- fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
- s.emit_seq(self.len(), |s| {
- for (i, e) in self.iter().enumerate() {
- s.emit_seq_elt(i, |s| e.encode(s))?;
- }
- Ok(())
- })
- }
-}
-
-impl<A> Decodable for SmallVec<A>
- where A: Array,
- A::Element: Decodable {
- fn decode<D: Decoder>(d: &mut D) -> Result<SmallVec<A>, D::Error> {
- d.read_seq(|d, len| {
- let mut vec = SmallVec::with_capacity(len);
- // FIXME(#48994) - could just be collected into a Result<SmallVec, D::Error>
- for i in 0..len {
- vec.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
- }
- Ok(vec)
- })
+ self.into_iter().next().unwrap()
}
}
#[cfg(test)]
mod tests {
extern crate test;
- use self::test::Bencher;
-
use super::*;
- #[test]
- fn test_len() {
- let v: OneVector<isize> = OneVector::new();
- assert_eq!(0, v.len());
-
- assert_eq!(1, OneVector::one(1).len());
- assert_eq!(5, OneVector::many(vec![1, 2, 3, 4, 5]).len());
- }
-
- #[test]
- fn test_push_get() {
- let mut v = OneVector::new();
- v.push(1);
- assert_eq!(1, v.len());
- assert_eq!(1, v[0]);
- v.push(2);
- assert_eq!(2, v.len());
- assert_eq!(2, v[1]);
- v.push(3);
- assert_eq!(3, v.len());
- assert_eq!(3, v[2]);
- }
-
- #[test]
- fn test_from_iter() {
- let v: OneVector<isize> = (vec![1, 2, 3]).into_iter().collect();
- assert_eq!(3, v.len());
- assert_eq!(1, v[0]);
- assert_eq!(2, v[1]);
- assert_eq!(3, v[2]);
- }
-
- #[test]
- fn test_move_iter() {
- let v = OneVector::new();
- let v: Vec<isize> = v.into_iter().collect();
- assert_eq!(v, Vec::new());
-
- let v = OneVector::one(1);
- assert_eq!(v.into_iter().collect::<Vec<_>>(), [1]);
-
- let v = OneVector::many(vec![1, 2, 3]);
- assert_eq!(v.into_iter().collect::<Vec<_>>(), [1, 2, 3]);
- }
-
#[test]
#[should_panic]
fn test_expect_one_zero() {
#[test]
#[should_panic]
fn test_expect_one_many() {
- OneVector::many(vec![1, 2]).expect_one("");
+ OneVector::from_vec(vec![1, 2]).expect_one("");
}
#[test]
fn test_expect_one_one() {
- assert_eq!(1, OneVector::one(1).expect_one(""));
- assert_eq!(1, OneVector::many(vec![1]).expect_one(""));
- }
-
- #[bench]
- fn fill_small_vec_1_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_10_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(10);
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_10_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::new();
-
- sv.extend(0..10);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_1_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 1]> = SmallVec::new();
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_8_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 8]> = SmallVec::new();
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_50_with_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(50);
-
- sv.extend(0..50);
- })
- }
-
- #[bench]
- fn fill_small_vec_32_50_wo_cap(b: &mut Bencher) {
- b.iter(|| {
- let mut sv: SmallVec<[usize; 32]> = SmallVec::new();
-
- sv.extend(0..50);
- })
+ assert_eq!(1, (smallvec![1] as OneVector<_>).expect_one(""));
+ assert_eq!(1, OneVector::from_vec(vec![1]).expect_one(""));
}
}
}
-impl<I: ::indexed_vec::Idx, CTX> HashStable<CTX> for ::indexed_set::IdxSetBuf<I>
+impl<I: ::indexed_vec::Idx, CTX> HashStable<CTX> for ::indexed_set::IdxSet<I>
{
fn hash_stable<W: StableHasherResult>(&self,
ctx: &mut CTX,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use indexed_set::IdxSetBuf;
+use indexed_set::IdxSet;
use indexed_vec::Idx;
use std::collections::VecDeque;
/// and also use a bit set to track occupancy.
pub struct WorkQueue<T: Idx> {
deque: VecDeque<T>,
- set: IdxSetBuf<T>,
+ set: IdxSet<T>,
}
impl<T: Idx> WorkQueue<T> {
pub fn with_all(len: usize) -> Self {
WorkQueue {
deque: (0..len).map(T::new).collect(),
- set: IdxSetBuf::new_filled(len),
+ set: IdxSet::new_filled(len),
}
}
pub fn with_none(len: usize) -> Self {
WorkQueue {
deque: VecDeque::with_capacity(len),
- set: IdxSetBuf::new_empty(len),
+ set: IdxSet::new_empty(len),
}
}
fn html_of_duration(_start: &Instant, dur: &Duration) -> (String, String) {
use rustc::util::common::duration_to_secs_str;
(duration_to_secs_str(dur.clone()),
- "".to_string()
+ String::new()
)
}
// at by "in this macro invocation"
format!(" (#{})", i + 1)
} else {
- "".to_string()
+ String::new()
})));
}
// Check to make sure we're not in any <*macros>
// backtrace is multiple levels deep
format!(" (#{})", i + 1)
} else {
- "".to_string()
+ String::new()
})));
if !always_backtrace {
break;
let col = if let Some(first_annotation) = first_line.annotations.first() {
format!(":{}", first_annotation.start_col + 1)
} else {
- "".to_string()
+ String::new()
};
format!("{}:{}{}",
annotated_file.file.name,
declare_lint! {
pub MISSING_DOCS,
Allow,
- "detects missing documentation for public members"
+ "detects missing documentation for public members",
+ report_in_external_macro: true
}
pub struct MissingDoc {
err.span_suggestion_short_with_applicability(
attr.span,
"remove this attribute",
- "".to_owned(),
+ String::new(),
Applicability::MachineApplicable
);
err.emit();
err.span_suggestion_short_with_applicability(
no_mangle_attr.span,
"remove this attribute",
- "".to_owned(),
+ String::new(),
// Use of `#[no_mangle]` suggests FFI intent; correct
// fix may be to monomorphize source by hand
Applicability::MaybeIncorrect
let msg = "mutating transmuted &mut T from &T may cause undefined behavior, \
consider instead using an UnsafeCell";
match get_transmute_from_to(cx, expr) {
- Some((&ty::TyRef(_, _, from_mt), &ty::TyRef(_, _, to_mt))) => {
+ Some((&ty::Ref(_, _, from_mt), &ty::Ref(_, _, to_mt))) => {
if to_mt == hir::Mutability::MutMutable &&
from_mt == hir::Mutability::MutImmutable {
cx.span_lint(MUTABLE_TRANSMUTES, expr.span, msg);
fn get_transmute_from_to<'a, 'tcx>
(cx: &LateContext<'a, 'tcx>,
expr: &hir::Expr)
- -> Option<(&'tcx ty::TypeVariants<'tcx>, &'tcx ty::TypeVariants<'tcx>)> {
+ -> Option<(&'tcx ty::TyKind<'tcx>, &'tcx ty::TyKind<'tcx>)> {
let def = if let hir::ExprKind::Path(ref qpath) = expr.node {
cx.tables.qpath_def(qpath, expr.hir_id)
} else {
) {
let mut ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap();
let result = (|| {
- let val = ecx.const_to_value(constant.val)?;
use rustc_target::abi::LayoutOf;
+ use rustc_mir::interpret::OpTy;
+
+ let op = ecx.const_value_to_op(constant.val)?;
let layout = ecx.layout_of(constant.ty)?;
- let place = ecx.allocate_place_for_value(val, layout, None)?;
- let ptr = place.to_ptr()?;
- let mut todo = vec![(ptr, layout.ty, String::new())];
+ let place = ecx.allocate_op(OpTy { op, layout })?.into();
+
+ let mut todo = vec![(place, Vec::new())];
let mut seen = FxHashSet();
- seen.insert((ptr, layout.ty));
- while let Some((ptr, ty, path)) = todo.pop() {
- let layout = ecx.layout_of(ty)?;
- ecx.validate_ptr_target(
- ptr,
- layout.align,
- layout,
- path,
+ seen.insert(place);
+ while let Some((place, mut path)) = todo.pop() {
+ ecx.validate_mplace(
+ place,
+ &mut path,
&mut seen,
&mut todo,
)?;
}
hir::ExprKind::Lit(ref lit) => {
match cx.tables.node_id_to_type(e.hir_id).sty {
- ty::TyInt(t) => {
+ ty::Int(t) => {
match lit.node {
ast::LitKind::Int(v, ast::LitIntType::Signed(_)) |
ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => {
report_bin_hex_error(
cx,
e,
- ty::TyInt(t),
+ ty::Int(t),
repr_str,
v,
negative,
_ => bug!(),
};
}
- ty::TyUint(t) => {
+ ty::Uint(t) => {
let uint_type = if let ast::UintTy::Usize = t {
cx.sess().target.usize_ty
} else {
let parent_id = cx.tcx.hir.get_parent_node(e.id);
if let hir_map::NodeExpr(parent_expr) = cx.tcx.hir.get(parent_id) {
if let hir::ExprKind::Cast(..) = parent_expr.node {
- if let ty::TyChar = cx.tables.expr_ty(parent_expr).sty {
+ if let ty::Char = cx.tables.expr_ty(parent_expr).sty {
let mut err = cx.struct_span_lint(
OVERFLOWING_LITERALS,
parent_expr.span,
report_bin_hex_error(
cx,
e,
- ty::TyUint(t),
+ ty::Uint(t),
repr_str,
lit_val,
false,
);
}
}
- ty::TyFloat(t) => {
+ ty::Float(t) => {
let is_infinite = match lit.node {
ast::LitKind::Float(v, _) |
ast::LitKind::FloatUnsuffixed(v) => {
// the comparison
let norm_binop = if swap { rev_binop(binop) } else { binop };
match cx.tables.node_id_to_type(expr.hir_id).sty {
- ty::TyInt(int_ty) => {
+ ty::Int(int_ty) => {
let (min, max) = int_ty_range(int_ty);
let lit_val: i128 = match lit.node {
hir::ExprKind::Lit(ref li) => {
};
is_valid(norm_binop, lit_val, min, max)
}
- ty::TyUint(uint_ty) => {
+ ty::Uint(uint_ty) => {
let (min, max) :(u128, u128) = uint_ty_range(uint_ty);
let lit_val: u128 = match lit.node {
hir::ExprKind::Lit(ref li) => {
//
// No suggestion for: `isize`, `usize`.
fn get_type_suggestion<'a>(
- t: &ty::TypeVariants,
+ t: &ty::TyKind,
val: u128,
negative: bool,
) -> Option<String> {
}
}
match t {
- &ty::TyInt(i) => find_fit!(i, val, negative,
+ &ty::Int(i) => find_fit!(i, val, negative,
I8 => [U8] => [I16, I32, I64, I128],
I16 => [U16] => [I32, I64, I128],
I32 => [U32] => [I64, I128],
I64 => [U64] => [I128],
I128 => [U128] => []),
- &ty::TyUint(u) => find_fit!(u, val, negative,
+ &ty::Uint(u) => find_fit!(u, val, negative,
U8 => [U8, U16, U32, U64, U128] => [],
U16 => [U16, U32, U64, U128] => [],
U32 => [U32, U64, U128] => [],
fn report_bin_hex_error(
cx: &LateContext,
expr: &hir::Expr,
- ty: ty::TypeVariants,
+ ty: ty::TyKind,
repr_str: String,
val: u128,
negative: bool,
) {
let (t, actually) = match ty {
- ty::TyInt(t) => {
+ ty::Int(t) => {
let ity = attr::IntType::SignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) as i128 >> (128 - bits);
(format!("{:?}", t), actually.to_string())
}
- ty::TyUint(t) => {
+ ty::Uint(t) => {
let ity = attr::IntType::UnsignedInt(t);
let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits();
let actually = (val << (128 - bits)) >> (128 - bits);
if def.variants[data_idx].fields.len() == 1 {
match def.variants[data_idx].fields[0].ty(tcx, substs).sty {
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
return true;
}
- ty::TyRef(..) => {
+ ty::Ref(..) => {
return true;
}
_ => {}
}
match ty.sty {
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if def.is_phantom_data() {
return FfiPhantom(ty);
}
}
}
- ty::TyChar => FfiUnsafe {
+ ty::Char => FfiUnsafe {
ty: ty,
reason: "the `char` type has no C equivalent",
help: Some("consider using `u32` or `libc::wchar_t` instead"),
},
- ty::TyInt(ast::IntTy::I128) | ty::TyUint(ast::UintTy::U128) => FfiUnsafe {
+ ty::Int(ast::IntTy::I128) | ty::Uint(ast::UintTy::U128) => FfiUnsafe {
ty: ty,
reason: "128-bit integers don't currently have a known stable ABI",
help: None,
},
// Primitive types with a stable representation.
- ty::TyBool | ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | ty::TyNever => FfiSafe,
+ ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
- ty::TySlice(_) => FfiUnsafe {
+ ty::Slice(_) => FfiUnsafe {
ty: ty,
reason: "slices have no C equivalent",
help: Some("consider using a raw pointer instead"),
},
- ty::TyDynamic(..) => FfiUnsafe {
+ ty::Dynamic(..) => FfiUnsafe {
ty: ty,
reason: "trait objects have no C equivalent",
help: None,
},
- ty::TyStr => FfiUnsafe {
+ ty::Str => FfiUnsafe {
ty: ty,
reason: "string slices have no C equivalent",
help: Some("consider using `*const u8` and a length instead"),
},
- ty::TyTuple(..) => FfiUnsafe {
+ ty::Tuple(..) => FfiUnsafe {
ty: ty,
reason: "tuples have unspecified layout",
help: Some("consider using a struct instead"),
},
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => self.check_type_for_ffi(cache, ty),
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => self.check_type_for_ffi(cache, ty),
- ty::TyArray(ty, _) => self.check_type_for_ffi(cache, ty),
+ ty::Array(ty, _) => self.check_type_for_ffi(cache, ty),
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
match sig.abi() {
Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic | Abi::RustCall => {
return FfiUnsafe {
FfiSafe
}
- ty::TyForeign(..) => FfiSafe,
-
- ty::TyParam(..) |
- ty::TyInfer(..) |
- ty::TyError |
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyProjection(..) |
- ty::TyAnon(..) |
- ty::TyFnDef(..) => bug!("Unexpected type in foreign function"),
+ ty::Foreign(..) => FfiSafe,
+
+ ty::Param(..) |
+ ty::Infer(..) |
+ ty::Error |
+ ty::Closure(..) |
+ ty::Generator(..) |
+ ty::GeneratorWitness(..) |
+ ty::Projection(..) |
+ ty::Anon(..) |
+ ty::FnDef(..) => bug!("Unexpected type in foreign function"),
}
}
if let Some(s) = help {
diag.help(s);
}
- if let ty::TyAdt(def, _) = unsafe_ty.sty {
+ if let ty::Adt(def, _) = unsafe_ty.sty {
if let Some(sp) = self.cx.tcx.hir.span_if_local(def.did) {
diag.span_note(sp, "type defined here");
}
let t = cx.tables.expr_ty(&expr);
let ty_warned = match t.sty {
- ty::TyTuple(ref tys) if tys.is_empty() => return,
- ty::TyNever => return,
- ty::TyAdt(def, _) => {
+ ty::Tuple(ref tys) if tys.is_empty() => return,
+ ty::Never => return,
+ ty::Adt(def, _) => {
if def.variants.is_empty() {
return;
} else {
use syntax::parse::source_file_to_stream;
use syntax::symbol::Symbol;
use syntax_pos::{Span, NO_EXPANSION, FileName};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc::hir;
macro_rules! provide {
mir
}
mir_const_qualif => {
- (cdata.mir_const_qualif(def_id.index), Lrc::new(IdxSetBuf::new_empty(0)))
+ (cdata.mir_const_qualif(def_id.index), Lrc::new(IdxSet::new_empty(0)))
}
fn_sig => { cdata.fn_sig(def_id.index, tcx) }
inherent_impls => { Lrc::new(cdata.get_inherent_implementations_for_type(def_id.index)) }
EntryKind::Trait(_) => Def::Trait(did),
EntryKind::Enum(..) => Def::Enum(did),
EntryKind::MacroDef(_) => Def::Macro(did, MacroKind::Bang),
- EntryKind::ForeignType => Def::TyForeign(did),
+ EntryKind::ForeignType => Def::ForeignTy(did),
EntryKind::ForeignMod |
EntryKind::GlobalAsm |
let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
let kind = match tables.node_id_to_type(hir_id).sty {
- ty::TyGenerator(def_id, ..) => {
+ ty::Generator(def_id, ..) => {
let layout = self.tcx.generator_layout(def_id);
let data = GeneratorData {
layout: layout.clone(),
EntryKind::Generator(self.lazy(&data))
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let sig = substs.closure_sig(def_id, self.tcx);
let data = ClosureData { sig: self.lazy(&sig) };
EntryKind::Closure(self.lazy(&data))
pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions {
let ty = tcx.type_of(did);
match ty.sty {
- ty::TyAdt(ref def, _) => return def.repr,
+ ty::Adt(ref def, _) => return def.repr,
_ => bug!("{} is not an ADT", ty),
}
}
syntax_pos = { path = "../libsyntax_pos" }
byteorder = { version = "1.1", features = ["i128"] }
rustc_apfloat = { path = "../librustc_apfloat" }
+smallvec = { version = "0.6.5", features = ["union"] }
if let Some(ty) = self.retrieve_type_for_place(place) {
let needs_note = match ty.sty {
- ty::TypeVariants::TyClosure(id, _) => {
+ ty::Closure(id, _) => {
let tables = self.tcx.typeck_tables_of(id);
let node_id = self.tcx.hir.as_local_node_id(id).unwrap();
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.describe_field_from_ty(&ty.boxed_ty(), field)
} else {
match ty.sty {
- ty::TyAdt(def, _) => if def.is_enum() {
+ ty::Adt(def, _) => if def.is_enum() {
field.index().to_string()
} else {
def.non_enum_variant().fields[field.index()]
.ident
.to_string()
},
- ty::TyTuple(_) => field.index().to_string(),
- ty::TyRef(_, ty, _) | ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Tuple(_) => field.index().to_string(),
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
self.describe_field_from_ty(&ty, field)
}
- ty::TyArray(ty, _) | ty::TySlice(ty) => self.describe_field_from_ty(&ty, field),
- ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
+ ty::Array(ty, _) | ty::Slice(ty) => self.describe_field_from_ty(&ty, field),
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
// Convert the def-id into a node-id. node-ids are only valid for
// the local code in the current crate, so this returns an `Option` in case
// the closure comes from another crate. But in that case we wouldn't
use rustc::ty::query::Providers;
use rustc::ty::{self, ParamEnv, TyCtxt, Ty};
-use rustc_errors::{Diagnostic, DiagnosticBuilder, Level};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, Level};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_data_structures::fx::FxHashSet;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::rc::Rc;
_ => Some(tcx.hir.body_owned_by(id)),
};
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
mir,
span,
"variable does not need to be mutable",
);
- err.span_suggestion_short(mut_span, "remove this `mut`", "".to_owned());
+ err.span_suggestion_short_with_applicability(
+ mut_span,
+ "remove this `mut`",
+ String::new(),
+ Applicability::MachineApplicable);
err.buffer(&mut mbcx.errors_buffer);
}
// individual fields instead. This way if `foo` has a
// destructor but `bar` does not, we will only check for
// borrows of `x.foo` and not `x.bar`. See #47703.
- ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.tcx) => {
+ ty::Adt(def, substs) if def.is_struct() && !def.has_dtor(self.tcx) => {
def.all_fields()
.map(|field| field.ty(gcx, substs))
.enumerate()
.for_each(|field| drop_field(self, field));
}
// Same as above, but for tuples.
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
tys.iter()
.cloned()
.enumerate()
}
// Closures also have disjoint fields, but they are only
// directly accessed in the body of the closure.
- ty::TyClosure(def, substs)
+ ty::Closure(def, substs)
if *drop_place == Place::Local(Local::new(1))
&& !self.mir.upvar_decls.is_empty() =>
{
}
// Generators also have disjoint fields, but they are only
// directly accessed in the body of the generator.
- ty::TyGenerator(def, substs, _)
+ ty::Generator(def, substs, _)
if *drop_place == Place::Local(Local::new(1))
&& !self.mir.upvar_decls.is_empty() =>
{
// the base case below, we would have a Deep Write due to
// the box being `needs_drop`, and that Deep Write would
// touch `&mut` data in the box.
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
// When/if we add a `&own T` type, this action would
// be like running the destructor of the `&own T`.
// (And the owner of backing storage referenced by the
// be already initialized
let tcx = self.tcx;
match base.ty(self.mir, tcx).to_ty(tcx).sty {
- ty::TyAdt(def, _) if def.has_dtor(tcx) => {
+ ty::Adt(def, _) if def.has_dtor(tcx) => {
// FIXME: analogous code in
// check_loans.rs first maps
// Check the kind of deref to decide
match base_ty.sty {
- ty::TyRef(_, _, mutbl) => {
+ ty::Ref(_, _, mutbl) => {
match mutbl {
// Shared borrowed data is never mutable
hir::MutImmutable => Err(place),
}
}
}
- ty::TyRawPtr(tnm) => {
+ ty::RawPtr(tnm) => {
match tnm.mutbl {
// `*const` raw pointers are not mutable
hir::MutImmutable => return Err(place),
.any(|p| p.is_upvar_field_projection(self.mir, &self.tcx)
.is_some());
match ty.sty {
- ty::TyArray(..) | ty::TySlice(..) => self
+ ty::Array(..) | ty::Slice(..) => self
.tcx
.cannot_move_out_of_interior_noncopy(span, ty, None, origin),
- ty::TyClosure(def_id, closure_substs)
+ ty::Closure(def_id, closure_substs)
if !self.mir.upvar_decls.is_empty() && is_upvar_field_projection
=> {
let closure_kind_ty =
}
} else {
item_msg = format!("data in a {}", pointer_type);
- reason = "".to_string();
+ reason = String::new();
}
}
}
Place::Static(box Static { def_id, ty: _ }) => {
if let Place::Static(_) = access_place {
item_msg = format!("immutable static item `{}`", access_place_desc.unwrap());
- reason = "".to_string();
+ reason = String::new();
} else {
item_msg = format!("`{}`", access_place_desc.unwrap());
let static_name = &self.tcx.item_name(*def_id);
// individual fields instead. This way if `foo` has a
// destructor but `bar` does not, we will only check for
// borrows of `x.foo` and not `x.bar`. See #47703.
- ty::TyAdt(def, substs) if def.is_struct() && !def.has_dtor(self.infcx.tcx) => {
+ ty::Adt(def, substs) if def.is_struct() && !def.has_dtor(self.infcx.tcx) => {
def.all_fields()
.map(|field| field.ty(gcx, substs))
.enumerate()
.for_each(|field| drop_field(self, field));
}
// Same as above, but for tuples.
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
tys.iter().cloned().enumerate()
.for_each(|field| drop_field(self, field));
}
// Closures and generators also have disjoint fields, but they are only
// directly accessed in the body of the closure/generator.
- ty::TyGenerator(def, substs, ..)
+ ty::Generator(def, substs, ..)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
.for_each(|field| drop_field(self, field));
}
- ty::TyClosure(def, substs)
+ ty::Closure(def, substs)
if *drop_place == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty()
=> {
substs.upvar_tys(def, self.infcx.tcx).enumerate()
);
// Also dump the inference graph constraints as a graphviz file.
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, source)?;
regioncx.dump_graphviz_raw_constraints(&mut file)?;
};
// Also dump the inference graph constraints as a graphviz file.
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, source)?;
regioncx.dump_graphviz_scc_constraints(&mut file)?;
// &
// - let's call the lifetime of this reference `'1`
(
- ty::TyRef(region, referent_ty, _),
+ ty::Ref(region, referent_ty, _),
hir::TyKind::Rptr(_lifetime, referent_hir_ty),
) => {
if region.to_region_vid() == needle_fr {
// Match up something like `Foo<'1>`
(
- ty::TyAdt(_adt_def, substs),
+ ty::Adt(_adt_def, substs),
hir::TyKind::Path(hir::QPath::Resolved(None, path)),
) => {
if let Some(last_segment) = path.segments.last() {
// The following cases don't have lifetimes, so we
// just worry about trying to match up the rustc type
// with the HIR types:
- (ty::TyTuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
+ (ty::Tuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
search_stack.extend(elem_tys.iter().cloned().zip(elem_hir_tys));
}
- (ty::TySlice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
- | (ty::TyArray(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
+ (ty::Slice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
+ | (ty::Array(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
search_stack.push((elem_ty, elem_hir_ty));
}
- (ty::TyRawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
+ (ty::RawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
search_stack.push((mut_ty.ty, &mut_hir_ty.ty));
}
use rustc::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable};
use rustc::util::common;
use rustc_data_structures::graph::scc::Sccs;
-use rustc_data_structures::indexed_set::{IdxSet, IdxSetBuf};
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_errors::Diagnostic;
// SCC. For each SCC, we visit its successors and compute
// their values, then we union all those values to get our
// own.
- let visited = &mut IdxSetBuf::new_empty(self.constraint_sccs.num_sccs());
+ let visited = &mut IdxSet::new_empty(self.constraint_sccs.num_sccs());
for scc_index in self.constraint_sccs.all_sccs() {
self.propagate_constraint_sccs_if_new(scc_index, visited);
}
use rustc::traits::query::type_op;
use rustc::traits::query::{Fallible, NoSolution};
use rustc::ty::fold::TypeFoldable;
-use rustc::ty::{self, CanonicalTy, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TypeVariants};
+use rustc::ty::{self, CanonicalTy, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TyKind};
use rustc_errors::Diagnostic;
use std::fmt;
use std::rc::Rc;
// constraints on `'a` and `'b`. These constraints
// would be lost if we just look at the normalized
// value.
- if let ty::TyFnDef(def_id, substs) = constant.literal.ty.sty {
+ if let ty::FnDef(def_id, substs) = constant.literal.ty.sty {
let tcx = self.tcx();
let type_checker = &mut self.cx;
}
ProjectionElem::Subslice { from, to } => PlaceTy::Ty {
ty: match base_ty.sty {
- ty::TyArray(inner, size) => {
+ ty::Array(inner, size) => {
let size = size.unwrap_usize(tcx);
let min_size = (from as u64) + (to as u64);
if let Some(rest_size) = size.checked_sub(min_size) {
)
}
}
- ty::TySlice(..) => base_ty,
+ ty::Slice(..) => base_ty,
_ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
},
},
ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty {
- ty::TyAdt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => {
+ ty::Adt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => {
if index >= adt_def.variants.len() {
PlaceTy::Ty {
ty: span_mirbug_and_err!(
variant_index,
} => (&adt_def.variants[variant_index], substs),
PlaceTy::Ty { ty } => match ty.sty {
- ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[0], substs),
- ty::TyClosure(def_id, substs) => {
+ ty::Adt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[0], substs),
+ ty::Closure(def_id, substs) => {
return match substs.upvar_tys(def_id, tcx).nth(field.index()) {
Some(ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
}),
}
}
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
// Try pre-transform fields first (upvars and current state)
if let Some(ty) = substs.pre_transforms_tys(def_id, tcx).nth(field.index()) {
return Ok(ty);
}),
};
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
return match tys.get(field.index()) {
Some(&ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
} => {
let place_type = place.ty(mir, tcx).to_ty(tcx);
let adt = match place_type.sty {
- TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt,
+ TyKind::Adt(adt, _) if adt.is_enum() => adt,
_ => {
span_bug!(
stmt.source_info.span,
let func_ty = func.ty(mir, tcx);
debug!("check_terminator: call, func_ty={:?}", func_ty);
let sig = match func_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => func_ty.fn_sig(tcx),
+ ty::FnDef(..) | ty::FnPtr(_) => func_ty.fn_sig(tcx),
_ => {
span_mirbug!(self, term, "call to non-function {:?}", func_ty);
return;
CastKind::ClosureFnPointer => {
let sig = match op.ty(mir, tcx).sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
substs.closure_sig_ty(def_id, tcx).fn_sig(tcx)
}
_ => bug!(),
debug!("add_reborrow_constraint - base_ty = {:?}", base_ty);
match base_ty.sty {
- ty::TyRef(ref_region, _, mutbl) => {
+ ty::Ref(ref_region, _, mutbl) => {
constraints.outlives_constraints.push(OutlivesConstraint {
sup: ref_region.to_region_vid(),
sub: borrow_region.to_region_vid(),
}
}
}
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
// deref of raw pointer, guaranteed to be valid
break;
}
- ty::TyAdt(def, _) if def.is_box() => {
+ ty::Adt(def, _) if def.is_box() => {
// deref of `Box`, need the base to be valid - propagate
}
_ => bug!("unexpected deref ty {:?} in {:?}", base_ty, borrowed_place),
ty::Variance::Covariant,
locations,
borrowck_context,
- ty::Slice::empty(),
+ ty::List::empty(),
).relate(&a, &b)?;
Ok(())
}
ty::Variance::Invariant,
locations,
borrowck_context,
- ty::Slice::empty(),
+ ty::List::empty(),
).relate(&a, &b)?;
Ok(())
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
// Watch out for the case that we are matching a `?T` against the
// right-hand side.
- if let ty::TyInfer(ty::CanonicalTy(var)) = a.sty {
+ if let ty::Infer(ty::CanonicalTy(var)) = a.sty {
self.equate_var(var, b.into())?;
Ok(a)
} else {
/// The "defining" type for this function, with all universal
/// regions instantiated. For a closure or generator, this is the
- /// closure type, but for a top-level function it's the `TyFnDef`.
+ /// closure type, but for a top-level function it's the `FnDef`.
pub defining_ty: DefiningTy<'tcx>,
/// The return type of this function, with all regions replaced by
.replace_free_regions_with_nll_infer_vars(FR, &defining_ty);
match defining_ty.sty {
- ty::TyClosure(def_id, substs) => DefiningTy::Closure(def_id, substs),
- ty::TyGenerator(def_id, substs, movability) => {
+ ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs),
+ ty::Generator(def_id, substs, movability) => {
DefiningTy::Generator(def_id, substs, movability)
}
- ty::TyFnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
+ ty::FnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
_ => span_bug!(
tcx.def_span(self.mir_def_id),
"expected defining type for `{:?}`: `{:?}`",
&self,
indices: &UniversalRegionIndices<'tcx>,
defining_ty: DefiningTy<'tcx>,
- ) -> ty::Binder<&'tcx ty::Slice<Ty<'tcx>>> {
+ ) -> ty::Binder<&'tcx ty::List<Ty<'tcx>>> {
let tcx = self.infcx.tcx;
match defining_ty {
DefiningTy::Closure(def_id, substs) => {
let (&output, tuplized_inputs) = inputs_and_output.split_last().unwrap();
assert_eq!(tuplized_inputs.len(), 1, "multiple closure inputs");
let inputs = match tuplized_inputs[0].sty {
- ty::TyTuple(inputs) => inputs,
+ ty::Tuple(inputs) => inputs,
_ => bug!("closure inputs not a tuple: {:?}", tuplized_inputs[0]),
};
// original path into a new variable and
// borrowed *that* one, leaving the original
// path unborrowed.
- ty::TyRawPtr(..) | ty::TyRef(_, _, hir::MutImmutable) => true,
+ ty::RawPtr(..) | ty::Ref(_, _, hir::MutImmutable) => true,
_ => proj.base.ignore_borrow(tcx, mir),
}
}
debug!("places_conflict: shallow access behind ptr");
return false;
}
- (ProjectionElem::Deref, ty::TyRef(_, _, hir::MutImmutable), _) => {
+ (ProjectionElem::Deref, ty::Ref(_, _, hir::MutImmutable), _) => {
// the borrow goes through a dereference of a shared reference.
//
// I'm not sure why we are tracking these borrows - shared
}
(Place::Promoted(p1), Place::Promoted(p2)) => {
if p1.0 == p2.0 {
- if let ty::TyArray(_, size) = p1.1.sty {
+ if let ty::Array(_, size) = p1.1.sty {
if size.unwrap_usize(tcx) == 0 {
// Ignore conflicts with promoted [T; 0].
debug!("place_element_conflict: IGNORE-LEN-0-PROMOTED");
} else {
let ty = pi1.base.ty(mir, tcx).to_ty(tcx);
match ty.sty {
- ty::TyAdt(def, _) if def.is_union() => {
+ ty::Adt(def, _) if def.is_union() => {
// Different fields of a union, we are basically stuck.
debug!("place_element_conflict: STUCK-UNION");
Overlap::Arbitrary
let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
match ty.sty {
- ty::TyRawPtr(_) |
- ty::TyRef(
+ ty::RawPtr(_) |
+ ty::Ref(
_, /*rgn*/
_, /*ty*/
hir::MutImmutable
return Some(cursor);
}
- ty::TyRef(
+ ty::Ref(
_, /*rgn*/
_, /*ty*/
hir::MutMutable,
return Some(cursor);
}
- ty::TyAdt(..) if ty.is_box() => {
+ ty::Adt(..) if ty.is_box() => {
self.next = Some(&proj.base);
return Some(cursor);
}
// FIXME(canndrew): This is_never should probably be an is_uninhabited
let diverges = expr.ty.is_never();
let intrinsic = match ty.sty {
- ty::TyFnDef(def_id, _) => {
+ ty::FnDef(def_id, _) => {
let f = ty.fn_sig(this.hir.tcx());
if f.abi() == Abi::RustIntrinsic ||
f.abi() == Abi::PlatformIntrinsic {
}
TestKind::SwitchInt { switch_ty, ref options, indices: _ } => {
- let (ret, terminator) = if switch_ty.sty == ty::TyBool {
+ let (ret, terminator) = if switch_ty.sty == ty::Bool {
assert!(options.len() > 0 && options.len() <= 2);
let (true_bb, false_bb) = (self.cfg.start_new_block(),
self.cfg.start_new_block());
// array, so we can call `<[u8]>::eq` rather than having to find an
// `<[u8; N]>::eq`.
let unsize = |ty: Ty<'tcx>| match ty.sty {
- ty::TyRef(region, rty, _) => match rty.sty {
- ty::TyArray(inner_ty, n) => Some((region, inner_ty, n)),
+ ty::Ref(region, rty, _) => match rty.sty {
+ ty::Array(inner_ty, n) => Some((region, inner_ty, n)),
_ => None,
},
_ => None,
let ty = tcx.type_of(tcx.hir.local_def_id(id));
let mut abi = fn_sig.abi;
let implicit_argument = match ty.sty {
- ty::TyClosure(..) => {
+ ty::Closure(..) => {
// HACK(eddyb) Avoid having RustCall on closures,
// as it adds unnecessary (and wrong) auto-tupling.
abi = Abi::Rust;
Some(ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None))
}
- ty::TyGenerator(..) => {
+ ty::Generator(..) => {
let gen_ty = tcx.body_tables(body_id).node_id_to_type(fn_hir_id);
Some(ArgInfo(gen_ty, None, None, None))
}
let (yield_ty, return_ty) = if body.is_generator {
let gen_sig = match ty.sty {
- ty::TyGenerator(gen_def_id, gen_substs, ..) =>
+ ty::Generator(gen_def_id, gen_substs, ..) =>
gen_substs.sig(gen_def_id, tcx),
_ =>
span_bug!(tcx.hir.span(id), "generator w/o generator type: {:?}", ty),
let closure_ty = tcx.body_tables(body_id).node_id_to_type(closure_expr_hir_id);
let (closure_def_id, closure_substs) = match closure_ty.sty {
- ty::TyClosure(closure_def_id, closure_substs) => (closure_def_id, closure_substs),
+ ty::Closure(closure_def_id, closure_substs) => (closure_def_id, closure_substs),
_ => bug!("closure expr does not have closure type: {:?}", closure_ty)
};
//! locations.
use rustc::mir::{BasicBlock, Location};
-use rustc_data_structures::indexed_set::{HybridIdxSetBuf, IdxSetBuf, Iter};
+use rustc_data_structures::indexed_set::{HybridIdxSet, IdxSet, Iter};
use rustc_data_structures::indexed_vec::Idx;
use dataflow::{BitDenotation, BlockSets, DataflowResults};
BD: BitDenotation,
{
base_results: DataflowResults<BD>,
- curr_state: IdxSetBuf<BD::Idx>,
- stmt_gen: HybridIdxSetBuf<BD::Idx>,
- stmt_kill: HybridIdxSetBuf<BD::Idx>,
+ curr_state: IdxSet<BD::Idx>,
+ stmt_gen: HybridIdxSet<BD::Idx>,
+ stmt_kill: HybridIdxSet<BD::Idx>,
}
impl<BD> FlowAtLocation<BD>
pub fn new(results: DataflowResults<BD>) -> Self {
let bits_per_block = results.sets().bits_per_block();
- let curr_state = IdxSetBuf::new_empty(bits_per_block);
- let stmt_gen = HybridIdxSetBuf::new_empty(bits_per_block);
- let stmt_kill = HybridIdxSetBuf::new_empty(bits_per_block);
+ let curr_state = IdxSet::new_empty(bits_per_block);
+ let stmt_gen = HybridIdxSet::new_empty(bits_per_block);
+ let stmt_kill = HybridIdxSet::new_empty(bits_per_block);
FlowAtLocation {
base_results: results,
curr_state: curr_state,
place: &mir::Place<'tcx>) -> bool {
let ty = place.ty(mir, tcx).to_ty(tcx);
match ty.sty {
- ty::TyArray(..) => {
+ ty::Array(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
place, ty);
false
}
- ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
+ ty::Slice(..) | ty::Ref(..) | ty::RawPtr(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
place, ty);
true
}
- ty::TyAdt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
+ ty::Adt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
place, ty);
true
use syntax::ast::{self, MetaItem};
-use rustc_data_structures::bitslice::{bitwise, BitwiseOperator, Word};
-use rustc_data_structures::indexed_set::{HybridIdxSetBuf, IdxSet, IdxSetBuf};
+use rustc_data_structures::bitslice::{bitwise, BitwiseOperator};
+use rustc_data_structures::indexed_set::{HybridIdxSet, IdxSet};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::work_queue::WorkQueue;
use std::borrow::Borrow;
use std::fmt;
use std::io;
-use std::mem;
use std::path::PathBuf;
use std::usize;
impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation
{
fn propagate(&mut self) {
- let mut temp = IdxSetBuf::new_empty(self.flow_state.sets.bits_per_block);
+ let mut temp = IdxSet::new_empty(self.flow_state.sets.bits_per_block);
let mut propcx = PropagationContext {
builder: self,
};
}
}
-/// Maps each block to a set of bits
-#[derive(Clone, Debug)]
-pub(crate) struct Bits<E:Idx> {
- bits: IdxSetBuf<E>,
-}
-
-impl<E:Idx> Bits<E> {
- fn new(bits: IdxSetBuf<E>) -> Self {
- Bits { bits: bits }
- }
-}
-
/// DataflowResultsConsumer abstracts over walking the MIR with some
/// already constructed dataflow results.
///
analysis: &T,
result: &DataflowResults<T>,
mir: &Mir<'tcx>)
- -> IdxSetBuf<T::Idx> {
+ -> IdxSet<T::Idx> {
let mut on_entry = result.sets().on_entry_set_for(loc.block.index()).to_owned();
let mut kill_set = on_entry.to_hybrid();
let mut gen_set = kill_set.clone();
pub(crate) fn interpret_hybrid_set<'c, P>(&self,
o: &'c O,
- set: &HybridIdxSetBuf<O::Idx>,
+ set: &HybridIdxSet<O::Idx>,
render_idx: &P)
-> Vec<DebugFormatted>
where P: Fn(&O, O::Idx) -> DebugFormatted
/// Analysis bitwidth for each block.
bits_per_block: usize,
- /// Number of words associated with each block entry
- /// equal to bits_per_block / (mem::size_of::<Word> * 8), rounded up.
- words_per_block: usize,
-
/// For each block, bits valid on entry to the block.
- on_entry_sets: Bits<E>,
+ on_entry_sets: Vec<IdxSet<E>>,
/// For each block, bits generated by executing the statements in
/// the block. (For comparison, the Terminator for each block is
/// handled in a flow-specific manner during propagation.)
- gen_sets: Vec<HybridIdxSetBuf<E>>,
+ gen_sets: Vec<HybridIdxSet<E>>,
/// For each block, bits killed by executing the statements in the
/// block. (For comparison, the Terminator for each block is
/// handled in a flow-specific manner during propagation.)
- kill_sets: Vec<HybridIdxSetBuf<E>>,
+ kill_sets: Vec<HybridIdxSet<E>>,
}
/// Triple of sets associated with a given block.
/// Bits that are set to 1 by the time we exit the given block. Hybrid
/// because it usually contains only 0 or 1 elements.
- pub(crate) gen_set: &'a mut HybridIdxSetBuf<E>,
+ pub(crate) gen_set: &'a mut HybridIdxSet<E>,
/// Bits that are set to 0 by the time we exit the given block. Hybrid
/// because it usually contains only 0 or 1 elements.
- pub(crate) kill_set: &'a mut HybridIdxSetBuf<E>,
+ pub(crate) kill_set: &'a mut HybridIdxSet<E>,
}
impl<'a, E:Idx> BlockSets<'a, E> {
impl<E:Idx> AllSets<E> {
pub fn bits_per_block(&self) -> usize { self.bits_per_block }
pub fn for_block(&mut self, block_idx: usize) -> BlockSets<E> {
- let offset = self.words_per_block * block_idx;
- let range = E::new(offset)..E::new(offset + self.words_per_block);
BlockSets {
- on_entry: self.on_entry_sets.bits.range_mut(&range),
+ on_entry: &mut self.on_entry_sets[block_idx],
gen_set: &mut self.gen_sets[block_idx],
kill_set: &mut self.kill_sets[block_idx],
}
}
pub fn on_entry_set_for(&self, block_idx: usize) -> &IdxSet<E> {
- let offset = self.words_per_block * block_idx;
- let range = E::new(offset)..E::new(offset + self.words_per_block);
- self.on_entry_sets.bits.range(&range)
+ &self.on_entry_sets[block_idx]
}
- pub fn gen_set_for(&self, block_idx: usize) -> &HybridIdxSetBuf<E> {
+ pub fn gen_set_for(&self, block_idx: usize) -> &HybridIdxSet<E> {
&self.gen_sets[block_idx]
}
- pub fn kill_set_for(&self, block_idx: usize) -> &HybridIdxSetBuf<E> {
+ pub fn kill_set_for(&self, block_idx: usize) -> &HybridIdxSet<E> {
&self.kill_sets[block_idx]
}
}
dead_unwinds: &'a IdxSet<mir::BasicBlock>,
denotation: D) -> Self where D: InitialFlow {
let bits_per_block = denotation.bits_per_block();
- let bits_per_word = mem::size_of::<Word>() * 8;
- let words_per_block = (bits_per_block + bits_per_word - 1) / bits_per_word;
- let bits_per_block_rounded_up = words_per_block * bits_per_word; // a multiple of word size
let num_blocks = mir.basic_blocks().len();
- let num_overall = num_blocks * bits_per_block_rounded_up;
- let on_entry = Bits::new(if D::bottom_value() {
- IdxSetBuf::new_filled(num_overall)
+ let on_entry_sets = if D::bottom_value() {
+ vec![IdxSet::new_filled(bits_per_block); num_blocks]
} else {
- IdxSetBuf::new_empty(num_overall)
- });
- let empties = vec![HybridIdxSetBuf::new_empty(bits_per_block); num_blocks];
+ vec![IdxSet::new_empty(bits_per_block); num_blocks]
+ };
+ let gen_sets = vec![HybridIdxSet::new_empty(bits_per_block); num_blocks];
+ let kill_sets = gen_sets.clone();
DataflowAnalysis {
mir,
flow_state: DataflowState {
sets: AllSets {
bits_per_block,
- words_per_block,
- on_entry_sets: on_entry,
- gen_sets: empties.clone(),
- kill_sets: empties,
+ on_entry_sets,
+ gen_sets,
+ kill_sets,
},
operator: denotation,
}
dirty_queue.insert(bb);
}
}
-
}
let tcx = self.builder.tcx;
let place_ty = proj.base.ty(mir, tcx).to_ty(tcx);
match place_ty.sty {
- ty::TyRef(..) | ty::TyRawPtr(..) =>
+ ty::Ref(..) | ty::RawPtr(..) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
BorrowedContent { target_place: place.clone() })),
- ty::TyAdt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
+ ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() =>
return Err(MoveError::cannot_move_out_of(self.loc,
InteriorOfTypeWithDestructor {
container_ty: place_ty
})),
// move out of union - always move the entire union
- ty::TyAdt(adt, _) if adt.is_union() =>
+ ty::Adt(adt, _) if adt.is_union() =>
return Err(MoveError::UnionMove { path: base }),
- ty::TySlice(_) =>
+ ty::Slice(_) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfSliceOrArray {
_ => false
},
})),
- ty::TyArray(..) => match proj.elem {
+ ty::Array(..) => match proj.elem {
ProjectionElem::Index(..) =>
return Err(MoveError::cannot_move_out_of(
self.loc,
hir::ExprKind::AddrOf(mutbl, ref expr) => {
let region = match expr_ty.sty {
- ty::TyRef(r, _, _) => r,
+ ty::Ref(r, _, _) => r,
_ => span_bug!(expr.span, "type of & not region"),
};
ExprKind::Borrow {
hir::ExprKind::Struct(ref qpath, ref fields, ref base) => {
match expr_ty.sty {
- ty::TyAdt(adt, substs) => {
+ ty::Adt(adt, substs) => {
match adt.adt_kind() {
AdtKind::Struct | AdtKind::Union => {
ExprKind::Adt {
hir::ExprKind::Closure(..) => {
let closure_ty = cx.tables().expr_ty(expr);
let (def_id, substs, movability) = match closure_ty.sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
- ty::TyGenerator(def_id, substs, movability) => {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
+ ty::Generator(def_id, substs, movability) => {
(def_id, UpvarSubsts::Generator(substs), Some(movability))
}
_ => {
match cx.tables().node_id_to_type(expr.hir_id).sty {
// A unit struct/variant which is used as a value.
// We return a completely different ExprKind here to account for this special case.
- ty::TyAdt(adt_def, substs) => {
+ ty::Adt(adt_def, substs) => {
ExprKind::Adt {
adt_def,
variant_index: adt_def.variant_index_with_id(def_id),
});
let region = cx.tcx.mk_region(region);
- let self_expr = if let ty::TyClosure(_, closure_substs) = closure_ty.sty {
+ let self_expr = if let ty::Closure(_, closure_substs) = closure_ty.sty {
match cx.infcx.closure_kind(closure_def_id, closure_substs).unwrap() {
ty::ClosureKind::Fn => {
let ref_closure_ty = cx.tcx.mk_ref(region,
// same region and mutability as the receiver. This holds for
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
let (region, mutbl) = match recv_ty.sty {
- ty::TyRef(region, _, mutbl) => (region, mutbl),
+ ty::Ref(region, _, mutbl) => (region, mutbl),
_ => span_bug!(expr.span, "overloaded_place: receiver is not a reference"),
};
let ref_ty = cx.tcx.mk_ref(region, ty::TypeAndMut {
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
- let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx);
- ConstValue::from_byval_value(value).unwrap()
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx)
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
}
LitKind::FloatUnsuffixed(n) => {
let fty = match ty.sty {
- ty::TyFloat(fty) => fty,
+ ty::Float(fty) => fty,
_ => bug!()
};
parse_float(n, fty)
impl<'tcx> PatternFolder<'tcx> for LiteralExpander {
fn fold_pattern(&mut self, pat: &Pattern<'tcx>) -> Pattern<'tcx> {
match (&pat.ty.sty, &*pat.kind) {
- (&ty::TyRef(_, rty, _), &PatternKind::Constant { ref value }) => {
+ (&ty::Ref(_, rty, _), &PatternKind::Constant { ref value }) => {
Pattern {
ty: pat.ty,
span: pat.span,
fn is_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(adt_def, ..) => adt_def.is_enum() && adt_def.is_non_exhaustive(),
+ ty::Adt(adt_def, ..) => adt_def.is_enum() && adt_def.is_non_exhaustive(),
_ => false,
}
}
fn is_local(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(adt_def, ..) => adt_def.did.is_local(),
+ ty::Adt(adt_def, ..) => adt_def.did.is_local(),
_ => false,
}
}
let mut pats = self.0.drain((len - arity) as usize..).rev();
match ty.sty {
- ty::TyAdt(..) |
- ty::TyTuple(..) => {
+ ty::Adt(..) |
+ ty::Tuple(..) => {
let pats = pats.enumerate().map(|(i, p)| {
FieldPattern {
field: Field::new(i),
}
}).collect();
- if let ty::TyAdt(adt, substs) = ty.sty {
+ if let ty::Adt(adt, substs) = ty.sty {
if adt.is_enum() {
PatternKind::Variant {
adt_def: adt,
}
}
- ty::TyRef(..) => {
+ ty::Ref(..) => {
PatternKind::Deref { subpattern: pats.nth(0).unwrap() }
}
- ty::TySlice(_) | ty::TyArray(..) => {
+ ty::Slice(_) | ty::Array(..) => {
PatternKind::Slice {
prefix: pats.collect(),
slice: None,
debug!("all_constructors({:?})", pcx.ty);
let exhaustive_integer_patterns = cx.tcx.features().exhaustive_integer_patterns;
let ctors = match pcx.ty.sty {
- ty::TyBool => {
+ ty::Bool => {
[true, false].iter().map(|&b| {
ConstantValue(ty::Const::from_bool(cx.tcx, b))
}).collect()
}
- ty::TyArray(ref sub_ty, len) if len.assert_usize(cx.tcx).is_some() => {
+ ty::Array(ref sub_ty, len) if len.assert_usize(cx.tcx).is_some() => {
let len = len.unwrap_usize(cx.tcx);
if len != 0 && cx.is_uninhabited(sub_ty) {
vec![]
}
}
// Treat arrays of a constant but unknown length like slices.
- ty::TyArray(ref sub_ty, _) |
- ty::TySlice(ref sub_ty) => {
+ ty::Array(ref sub_ty, _) |
+ ty::Slice(ref sub_ty) => {
if cx.is_uninhabited(sub_ty) {
vec![Slice(0)]
} else {
(0..pcx.max_slice_length+1).map(|length| Slice(length)).collect()
}
}
- ty::TyAdt(def, substs) if def.is_enum() => {
+ ty::Adt(def, substs) if def.is_enum() => {
def.variants.iter()
.filter(|v| !cx.is_variant_uninhabited(v, substs))
.map(|v| Variant(v.did))
.collect()
}
- ty::TyChar if exhaustive_integer_patterns => {
+ ty::Char if exhaustive_integer_patterns => {
let endpoint = |c: char| {
let ty = ty::ParamEnv::empty().and(cx.tcx.types.char);
ty::Const::from_bits(cx.tcx, c as u128, ty)
ConstantRange(endpoint('\u{E000}'), endpoint('\u{10FFFF}'), RangeEnd::Included),
]
}
- ty::TyInt(ity) if exhaustive_integer_patterns => {
+ ty::Int(ity) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
let bits = Integer::from_attr(cx.tcx, SignedInt(ity)).size().bits() as u128;
let min = 1u128 << (bits - 1);
ty::Const::from_bits(cx.tcx, max as u128, ty),
RangeEnd::Included)]
}
- ty::TyUint(uty) if exhaustive_integer_patterns => {
+ ty::Uint(uty) if exhaustive_integer_patterns => {
// FIXME(49937): refactor these bit manipulations into interpret.
let bits = Integer::from_attr(cx.tcx, UnsignedInt(uty)).size().bits() as u128;
let max = !0u128 >> (128 - bits);
// The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
fn signed_bias(tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>) -> u128 {
match ty.sty {
- ty::TyInt(ity) => {
+ ty::Int(ity) => {
let bits = Integer::from_attr(tcx, SignedInt(ity)).size().bits() as u128;
1u128 << (bits - 1)
}
PatternKind::Constant { value } => Some(vec![ConstantValue(value)]),
PatternKind::Range { lo, hi, end } => Some(vec![ConstantRange(lo, hi, end)]),
PatternKind::Array { .. } => match pcx.ty.sty {
- ty::TyArray(_, length) => Some(vec![
+ ty::Array(_, length) => Some(vec![
Slice(length.unwrap_usize(cx.tcx))
]),
_ => span_bug!(pat.span, "bad ty {:?} for array pattern", pcx.ty)
fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 {
debug!("constructor_arity({:#?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.len() as u64,
- ty::TySlice(..) | ty::TyArray(..) => match *ctor {
+ ty::Tuple(ref fs) => fs.len() as u64,
+ ty::Slice(..) | ty::Array(..) => match *ctor {
Slice(length) => length,
ConstantValue(_) => 0,
_ => bug!("bad slice pattern {:?} {:?}", ctor, ty)
},
- ty::TyRef(..) => 1,
- ty::TyAdt(adt, _) => {
+ ty::Ref(..) => 1,
+ ty::Adt(adt, _) => {
adt.variants[ctor.variant_index_for_adt(adt)].fields.len() as u64
}
_ => 0
{
debug!("constructor_sub_pattern_tys({:#?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.into_iter().map(|t| *t).collect(),
- ty::TySlice(ty) | ty::TyArray(ty, _) => match *ctor {
+ ty::Tuple(ref fs) => fs.into_iter().map(|t| *t).collect(),
+ ty::Slice(ty) | ty::Array(ty, _) => match *ctor {
Slice(length) => (0..length).map(|_| ty).collect(),
ConstantValue(_) => vec![],
_ => bug!("bad slice pattern {:?} {:?}", ctor, ty)
},
- ty::TyRef(_, rty, _) => vec![rty],
- ty::TyAdt(adt, substs) => {
+ ty::Ref(_, rty, _) => vec![rty],
+ ty::Adt(adt, substs) => {
if adt.is_box() {
// Use T as the sub pattern type of Box<T>.
vec![substs.type_at(0)]
fn should_treat_range_exhaustively(tcx: TyCtxt<'_, 'tcx, 'tcx>, ctor: &Constructor<'tcx>) -> bool {
if tcx.features().exhaustive_integer_patterns {
if let ConstantValue(value) | ConstantRange(value, _, _) = ctor {
- if let ty::TyChar | ty::TyInt(_) | ty::TyUint(_) = value.ty.sty {
+ if let ty::Char | ty::Int(_) | ty::Uint(_) = value.ty.sty {
return true;
}
}
fn conservative_is_uninhabited(&self, scrutinee_ty: Ty<'tcx>) -> bool {
// "rustc-1.0-style" uncontentious uninhabitableness check
match scrutinee_ty.sty {
- ty::TyNever => true,
- ty::TyAdt(def, _) => def.variants.is_empty(),
+ ty::Never => true,
+ ty::Adt(def, _) => def.variants.is_empty(),
_ => false
}
}
return true;
}
let pat_ty = cx.tables.pat_ty(p);
- if let ty::TyAdt(edef, _) = pat_ty.sty {
+ if let ty::Adt(edef, _) = pat_ty.sty {
if edef.is_enum() && edef.variants.iter().any(|variant| {
variant.name == ident.name && variant.ctor_kind == CtorKind::Const
}) {
pub use self::check_match::check_crate;
pub(crate) use self::check_match::check_match;
-use interpret::{const_val_field, const_variant_index, self};
+use interpret::{const_field, const_variant_index};
use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability};
-use rustc::mir::interpret::{Scalar, GlobalId, ConstValue};
+use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, sign_extend};
use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region};
use rustc::ty::subst::{Substs, Kind};
use rustc::hir::{self, PatKind, RangeEnd};
PatternKind::Variant { adt_def, variant_index, .. } => {
Some(&adt_def.variants[variant_index])
}
- _ => if let ty::TyAdt(adt, _) = self.ty.sty {
+ _ => if let ty::Adt(adt, _) = self.ty.sty {
if !adt.is_enum() {
Some(&adt.variants[0])
} else {
if let Some(variant) = variant {
write!(f, "{}", variant.name)?;
- // Only for TyAdt we can have `S {...}`,
+ // Only for Adt we can have `S {...}`,
// which we handle separately here.
if variant.ctor_kind == CtorKind::Fictive {
write!(f, " {{ ")?;
}
PatternKind::Deref { ref subpattern } => {
match self.ty.sty {
- ty::TyAdt(def, _) if def.is_box() => write!(f, "box ")?,
- ty::TyRef(_, _, mutbl) => {
+ ty::Adt(def, _) if def.is_box() => write!(f, "box ")?,
+ ty::Ref(_, _, mutbl) => {
write!(f, "&")?;
if mutbl == hir::MutMutable {
write!(f, "mut ")?;
PatKind::Slice(ref prefix, ref slice, ref suffix) => {
match ty.sty {
- ty::TyRef(_, ty, _) =>
+ ty::Ref(_, ty, _) =>
PatternKind::Deref {
subpattern: Pattern {
ty,
pat.span, ty, prefix, slice, suffix))
},
},
- ty::TySlice(..) |
- ty::TyArray(..) =>
+ ty::Slice(..) |
+ ty::Array(..) =>
self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix),
- ty::TyError => { // Avoid ICE
+ ty::Error => { // Avoid ICE
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
ref sty =>
PatKind::Tuple(ref subpatterns, ddpos) => {
match ty.sty {
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
let subpatterns =
subpatterns.iter()
.enumerate_and_adjust(tys.len(), ddpos)
PatternKind::Leaf { subpatterns: subpatterns }
}
- ty::TyError => { // Avoid ICE (#50577)
+ ty::Error => { // Avoid ICE (#50577)
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty),
PatKind::Binding(_, id, ident, ref sub) => {
let var_ty = self.tables.node_id_to_type(pat.hir_id);
let region = match var_ty.sty {
- ty::TyRef(r, _, _) => Some(r),
- ty::TyError => { // Avoid ICE
+ ty::Ref(r, _, _) => Some(r),
+ ty::Error => { // Avoid ICE
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
_ => None,
// A ref x pattern is the same node used for x, and as such it has
// x's type, which is &T, where we want T (the type being matched).
if let ty::BindByReference(_) = bm {
- if let ty::TyRef(_, rty, _) = ty.sty {
+ if let ty::Ref(_, rty, _) = ty.sty {
ty = rty;
} else {
bug!("`ref {}` has wrong type {}", ident, ty);
PatKind::TupleStruct(ref qpath, ref subpatterns, ddpos) => {
let def = self.tables.qpath_def(qpath, pat.hir_id);
let adt_def = match ty.sty {
- ty::TyAdt(adt_def, _) => adt_def,
- ty::TyError => { // Avoid ICE (#50585)
+ ty::Adt(adt_def, _) => adt_def,
+ ty::Error => { // Avoid ICE (#50585)
return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) };
}
_ => span_bug!(pat.span,
self.flatten_nested_slice_patterns(prefix, slice, suffix);
match ty.sty {
- ty::TySlice(..) => {
+ ty::Slice(..) => {
// matching a slice or fixed-length array
PatternKind::Slice { prefix: prefix, slice: slice, suffix: suffix }
}
- ty::TyArray(_, len) => {
+ ty::Array(_, len) => {
// fixed-length array
let len = len.unwrap_usize(self.tcx);
assert!(len >= prefix.len() as u64 + suffix.len() as u64);
let adt_def = self.tcx.adt_def(enum_id);
if adt_def.is_enum() {
let substs = match ty.sty {
- ty::TyAdt(_, substs) |
- ty::TyFnDef(_, substs) => substs,
- ty::TyError => { // Avoid ICE (#50585)
+ ty::Adt(_, substs) |
+ ty::FnDef(_, substs) => substs,
+ ty::Error => { // Avoid ICE (#50585)
return PatternKind::Wild;
}
_ => bug!("inappropriate type for def: {:?}", ty.sty),
debug!("const_to_pat: cv={:#?}", cv);
let adt_subpattern = |i, variant_opt| {
let field = Field::new(i);
- let val = const_val_field(
+ let val = const_field(
self.tcx, self.param_env, instance,
variant_opt, field, cv,
).expect("field access failed");
}).collect::<Vec<_>>()
};
let kind = match cv.ty.sty {
- ty::TyFloat(_) => {
+ ty::Float(_) => {
let id = self.tcx.hir.hir_to_node_id(id);
self.tcx.lint_node(
::rustc::lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
value: cv,
}
},
- ty::TyAdt(adt_def, _) if adt_def.is_union() => {
+ ty::Adt(adt_def, _) if adt_def.is_union() => {
// Matching on union fields is unsafe, we can't hide it in constants
self.tcx.sess.span_err(span, "cannot use unions in constant patterns");
PatternKind::Wild
}
- ty::TyAdt(adt_def, _) if !self.tcx.has_attr(adt_def.did, "structural_match") => {
+ ty::Adt(adt_def, _) if !self.tcx.has_attr(adt_def.did, "structural_match") => {
let msg = format!("to use a constant of type `{}` in a pattern, \
`{}` must be annotated with `#[derive(PartialEq, Eq)]`",
self.tcx.item_path_str(adt_def.did),
self.tcx.sess.span_err(span, &msg);
PatternKind::Wild
},
- ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
+ ty::Adt(adt_def, substs) if adt_def.is_enum() => {
let variant_index = const_variant_index(
self.tcx, self.param_env, instance, cv
).expect("const_variant_index failed");
subpatterns,
}
},
- ty::TyAdt(adt_def, _) => {
+ ty::Adt(adt_def, _) => {
let struct_var = adt_def.non_enum_variant();
PatternKind::Leaf {
subpatterns: adt_subpatterns(struct_var.fields.len(), None),
}
}
- ty::TyTuple(fields) => {
+ ty::Tuple(fields) => {
PatternKind::Leaf {
subpatterns: adt_subpatterns(fields.len(), None),
}
}
- ty::TyArray(_, n) => {
+ ty::Array(_, n) => {
PatternKind::Array {
prefix: (0..n.unwrap_usize(self.tcx))
.map(|i| adt_subpattern(i as usize, None))
if let (Some(a), Some(b)) = (a.to_bits(tcx, ty), b.to_bits(tcx, ty)) {
use ::rustc_apfloat::Float;
return match ty.value.sty {
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
let l = ::rustc_apfloat::ieee::Single::from_bits(a);
let r = ::rustc_apfloat::ieee::Single::from_bits(b);
l.partial_cmp(&r)
},
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
let l = ::rustc_apfloat::ieee::Double::from_bits(a);
let r = ::rustc_apfloat::ieee::Double::from_bits(b);
l.partial_cmp(&r)
},
- ty::TyInt(_) => {
+ ty::Int(_) => {
let layout = tcx.layout_of(ty).ok()?;
- let a = interpret::sign_extend(a, layout);
- let b = interpret::sign_extend(b, layout);
+ assert!(layout.abi.is_signed());
+ let a = sign_extend(a, layout.size);
+ let b = sign_extend(b, layout.size);
Some((a as i128).cmp(&(b as i128)))
},
_ => Some(a.cmp(&b)),
}
}
- if let ty::TyRef(_, rty, _) = ty.value.sty {
- if let ty::TyStr = rty.sty {
+ if let ty::Ref(_, rty, _) = ty.value.sty {
+ if let ty::Str = rty.sty {
match (a.val, b.val) {
(
ConstValue::ScalarPair(
len_b,
),
) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => {
- let len_a = len_a.unwrap_or_err().ok();
- let len_b = len_b.unwrap_or_err().ok();
+ let len_a = len_a.not_undef().ok();
+ let len_b = len_b.not_undef().ok();
if len_a.is_none() || len_b.is_none() {
tcx.sess.struct_err("str slice len is undef").delay_as_bug();
}
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
- let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx);
- ConstValue::from_byval_value(value).unwrap()
+ ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx)
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
Unsigned(UintTy),
}
let ity = match ty.sty {
- ty::TyInt(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty),
- ty::TyInt(other) => Int::Signed(other),
- ty::TyUint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty),
- ty::TyUint(other) => Int::Unsigned(other),
- ty::TyError => { // Avoid ICE (#51963)
+ ty::Int(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty),
+ ty::Int(other) => Int::Signed(other),
+ ty::Uint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty),
+ ty::Uint(other) => Int::Unsigned(other),
+ ty::Error => { // Avoid ICE (#51963)
return Err(LitToConstError::Propagated);
}
_ => bug!("literal integer type with bad type ({:?})", ty.sty),
}
LitKind::FloatUnsuffixed(n) => {
let fty = match ty.sty {
- ty::TyFloat(fty) => fty,
+ ty::Float(fty) => fty,
_ => bug!()
};
parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)?
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf, TyLayout};
+use rustc::ty::{self, Ty, TypeAndMut};
+use rustc::ty::layout::{self, TyLayout, Size};
use syntax::ast::{FloatTy, IntTy, UintTy};
use rustc_apfloat::ieee::{Single, Double};
-use super::{EvalContext, Machine};
-use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic, Value, EvalErrorKind};
+use rustc::mir::interpret::{
+ Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind,
+ truncate, sign_extend
+};
use rustc::mir::CastKind;
use rustc_apfloat::Float;
-use interpret::eval_context::ValTy;
-use interpret::Place;
+
+use super::{EvalContext, Machine, PlaceTy, OpTy, Value};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
+ match ty.sty {
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) |
+ ty::Ref(_, ty, _) => !self.type_is_sized(ty),
+ ty::Adt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
+ _ => false,
+ }
+ }
+
crate fn cast(
&mut self,
- src: ValTy<'tcx>,
+ src: OpTy<'tcx>,
kind: CastKind,
- dest_ty: Ty<'tcx>,
- dest: Place,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
- let src_layout = self.layout_of(src.ty)?;
- let dst_layout = self.layout_of(dest_ty)?;
+ let src_layout = src.layout;
+ let dst_layout = dest.layout;
use rustc::mir::CastKind::*;
match kind {
Unsize => {
- self.unsize_into(src.value, src_layout, dest, dst_layout)?;
+ self.unsize_into(src, dest)?;
}
Misc => {
- if self.type_is_fat_ptr(src.ty) {
- match (src.value, self.type_is_fat_ptr(dest_ty)) {
- (Value::ByRef { .. }, _) |
+ let src = self.read_value(src)?;
+ if self.type_is_fat_ptr(src_layout.ty) {
+ match (src.value, self.type_is_fat_ptr(dest.layout.ty)) {
// pointers to extern types
(Value::Scalar(_),_) |
// slices and trait objects to other slices/trait objects
(Value::ScalarPair(..), true) => {
- let valty = ValTy {
- value: src.value,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ // No change to value
+ self.write_value(src.value, dest)?;
}
// slices and trait objects to thin pointers (dropping the metadata)
(Value::ScalarPair(data, _), false) => {
- let valty = ValTy {
- value: Value::Scalar(data),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ self.write_scalar(data, dest)?;
}
}
} else {
- let src_layout = self.layout_of(src.ty)?;
match src_layout.variants {
layout::Variants::Single { index } => {
- if let Some(def) = src.ty.ty_adt_def() {
+ if let Some(def) = src_layout.ty.ty_adt_def() {
let discr_val = def
.discriminant_for_variant(*self.tcx, index)
.val;
return self.write_scalar(
- dest,
Scalar::Bits {
bits: discr_val,
size: dst_layout.size.bytes() as u8,
},
- dest_ty);
+ dest);
}
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {},
}
- let src_val = self.value_to_scalar(src)?;
- let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?;
- let valty = ValTy {
- value: Value::Scalar(dest_val.into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ let src = src.to_scalar()?;
+ let dest_val = self.cast_scalar(src, src_layout, dest.layout)?;
+ self.write_scalar(dest_val, dest)?;
}
}
ReifyFnPointer => {
- match src.ty.sty {
- ty::TyFnDef(def_id, substs) => {
+ // The src operand does not matter, just its type
+ match src_layout.ty.sty {
+ ty::FnDef(def_id, substs) => {
if self.tcx.has_attr(def_id, "rustc_args_required_const") {
bug!("reifying a fn ptr that requires \
const arguments");
substs,
).ok_or_else(|| EvalErrorKind::TooGeneric.into());
let fn_ptr = self.memory.create_fn_alloc(instance?);
- let valty = ValTy {
- value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?;
}
ref other => bug!("reify fn pointer on {:?}", other),
}
}
UnsafeFnPointer => {
- match dest_ty.sty {
- ty::TyFnPtr(_) => {
- let mut src = src;
- src.ty = dest_ty;
- self.write_value(src, dest)?;
+ let src = self.read_value(src)?;
+ match dest.layout.ty.sty {
+ ty::FnPtr(_) => {
+ // No change to value
+ self.write_value(*src, dest)?;
}
ref other => bug!("fn to unsafe fn cast on {:?}", other),
}
}
ClosureFnPointer => {
- match src.ty.sty {
- ty::TyClosure(def_id, substs) => {
+ // The src operand does not matter, just its type
+ match src_layout.ty.sty {
+ ty::Closure(def_id, substs) => {
let substs = self.tcx.subst_and_normalize_erasing_regions(
self.substs(),
ty::ParamEnv::reveal_all(),
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(instance);
- let valty = ValTy {
- value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
+ let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into());
+ self.write_value(val, dest)?;
}
ref other => bug!("closure fn pointer on {:?}", other),
}
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
match val {
Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
Scalar::Bits { bits, size } => {
- assert_eq!(size as u64, src_layout.size.bytes());
- match src_layout.ty.sty {
- TyFloat(fty) => self.cast_from_float(bits, fty, dest_layout.ty),
- _ => self.cast_from_int(bits, src_layout, dest_layout),
+ debug_assert_eq!(size as u64, src_layout.size.bytes());
+ debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} before casting", size);
+
+ let res = match src_layout.ty.sty {
+ Float(fty) => self.cast_from_float(bits, fty, dest_layout.ty)?,
+ _ => self.cast_from_int(bits, src_layout, dest_layout)?,
+ };
+
+ // Sanity check
+ match res {
+ Scalar::Ptr(_) => bug!("Fabricated a ptr value from an int...?"),
+ Scalar::Bits { bits, size } => {
+ debug_assert_eq!(size as u64, dest_layout.size.bytes());
+ debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} after casting", size);
+ }
}
+ // Done
+ Ok(res)
}
}
}
v
};
trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty);
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
match dest_layout.ty.sty {
- TyInt(_) | TyUint(_) => {
+ Int(_) | Uint(_) => {
let v = self.truncate(v, dest_layout);
Ok(Scalar::Bits {
bits: v,
})
}
- TyFloat(FloatTy::F32) if signed => Ok(Scalar::Bits {
+ Float(FloatTy::F32) if signed => Ok(Scalar::Bits {
bits: Single::from_i128(v as i128).value.to_bits(),
size: 4,
}),
- TyFloat(FloatTy::F64) if signed => Ok(Scalar::Bits {
+ Float(FloatTy::F64) if signed => Ok(Scalar::Bits {
bits: Double::from_i128(v as i128).value.to_bits(),
size: 8,
}),
- TyFloat(FloatTy::F32) => Ok(Scalar::Bits {
+ Float(FloatTy::F32) => Ok(Scalar::Bits {
bits: Single::from_u128(v).value.to_bits(),
size: 4,
}),
- TyFloat(FloatTy::F64) => Ok(Scalar::Bits {
+ Float(FloatTy::F64) => Ok(Scalar::Bits {
bits: Double::from_u128(v).value.to_bits(),
size: 8,
}),
- TyChar => {
+ Char => {
assert_eq!(v as u8 as u128, v);
Ok(Scalar::Bits { bits: v, size: 4 })
},
// No alignment check needed for raw pointers. But we have to truncate to target ptr size.
- TyRawPtr(_) => {
+ RawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory.truncate_to_ptr(v).0 as u128,
size: self.memory.pointer_size().bytes() as u8,
}
fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
// float -> uint
- TyUint(t) => {
+ Uint(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
- match fty {
- FloatTy::F32 => Ok(Scalar::Bits {
- bits: Single::from_bits(bits).to_u128(width).value,
- size: (width / 8) as u8,
- }),
- FloatTy::F64 => Ok(Scalar::Bits {
- bits: Double::from_bits(bits).to_u128(width).value,
- size: (width / 8) as u8,
- }),
- }
+ let v = match fty {
+ FloatTy::F32 => Single::from_bits(bits).to_u128(width).value,
+ FloatTy::F64 => Double::from_bits(bits).to_u128(width).value,
+ };
+ // This should already fit the bit width
+ Ok(Scalar::Bits {
+ bits: v,
+ size: (width / 8) as u8,
+ })
},
// float -> int
- TyInt(t) => {
+ Int(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
- match fty {
- FloatTy::F32 => Ok(Scalar::Bits {
- bits: Single::from_bits(bits).to_i128(width).value as u128,
- size: (width / 8) as u8,
- }),
- FloatTy::F64 => Ok(Scalar::Bits {
- bits: Double::from_bits(bits).to_i128(width).value as u128,
- size: (width / 8) as u8,
- }),
- }
+ let v = match fty {
+ FloatTy::F32 => Single::from_bits(bits).to_i128(width).value,
+ FloatTy::F64 => Double::from_bits(bits).to_i128(width).value,
+ };
+ // We got an i128, but we may need something smaller. We have to truncate ourselves.
+ let truncated = truncate(v as u128, Size::from_bits(width as u64));
+ assert_eq!(sign_extend(truncated, Size::from_bits(width as u64)) as i128, v,
+ "truncating and extending changed the value?!?");
+ Ok(Scalar::Bits {
+ bits: truncated,
+ size: (width / 8) as u8,
+ })
},
// f64 -> f32
- TyFloat(FloatTy::F32) if fty == FloatTy::F64 => {
+ Float(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::Bits {
bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
size: 4,
})
},
// f32 -> f64
- TyFloat(FloatTy::F64) if fty == FloatTy::F32 => {
+ Float(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::Bits {
bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
size: 8,
})
},
// identity cast
- TyFloat(FloatTy:: F64) => Ok(Scalar::Bits {
+ Float(FloatTy:: F64) => Ok(Scalar::Bits {
bits,
size: 8,
}),
- TyFloat(FloatTy:: F32) => Ok(Scalar::Bits {
+ Float(FloatTy:: F32) => Ok(Scalar::Bits {
bits,
size: 4,
}),
}
fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
- use rustc::ty::TypeVariants::*;
+ use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc, no need to support it here.
- TyRawPtr(_) |
- TyInt(IntTy::Isize) |
- TyUint(UintTy::Usize) => Ok(ptr.into()),
- TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes),
+ RawPtr(_) |
+ Int(IntTy::Isize) |
+ Uint(UintTy::Usize) => Ok(ptr.into()),
+ Int(_) | Uint(_) => err!(ReadPointerAsBytes),
_ => err!(Unimplemented(format!("ptr to {:?} cast", ty))),
}
}
+
+ fn unsize_into_ptr(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ // The pointee types
+ sty: Ty<'tcx>,
+ dty: Ty<'tcx>,
+ ) -> EvalResult<'tcx> {
+ // A<Struct> -> A<Trait> conversion
+ let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
+
+ match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
+ (&ty::Array(_, length), &ty::Slice(_)) => {
+ let ptr = self.read_value(src)?.to_scalar_ptr()?;
+ // u64 cast is from usize to u64, which is always good
+ let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx);
+ self.write_value(val, dest)
+ }
+ (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+ // For now, upcasts are limited to changes in marker
+ // traits, and hence never actually require an actual
+ // change to the vtable.
+ self.copy_op(src, dest)
+ }
+ (_, &ty::Dynamic(ref data, _)) => {
+ // Initial cast from sized to dyn trait
+ let trait_ref = data.principal().unwrap().with_self_ty(
+ *self.tcx,
+ src_pointee_ty,
+ );
+ let trait_ref = self.tcx.erase_regions(&trait_ref);
+ let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
+ let ptr = self.read_value(src)?.to_scalar_ptr()?;
+ let val = Value::new_dyn_trait(ptr, vtable);
+ self.write_value(val, dest)
+ }
+
+ _ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty),
+ }
+ }
+
+ fn unsize_into(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ match (&src.layout.ty.sty, &dest.layout.ty.sty) {
+ (&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |
+ (&ty::Ref(_, s, _), &ty::RawPtr(TypeAndMut { ty: d, .. })) |
+ (&ty::RawPtr(TypeAndMut { ty: s, .. }),
+ &ty::RawPtr(TypeAndMut { ty: d, .. })) => {
+ self.unsize_into_ptr(src, dest, s, d)
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+ if def_a.is_box() || def_b.is_box() {
+ if !def_a.is_box() || !def_b.is_box() {
+ bug!("invalid unsizing between {:?} -> {:?}", src.layout, dest.layout);
+ }
+ return self.unsize_into_ptr(
+ src,
+ dest,
+ src.layout.ty.boxed_ty(),
+ dest.layout.ty.boxed_ty(),
+ );
+ }
+
+ // unsizing of generic struct with pointer fields
+ // Example: `Arc<T>` -> `Arc<Trait>`
+ // here we need to increase the size of every &T thin ptr field to a fat ptr
+ for i in 0..src.layout.fields.count() {
+ let dst_field = self.place_field(dest, i as u64)?;
+ if dst_field.layout.is_zst() {
+ continue;
+ }
+ let src_field = match src.try_as_mplace() {
+ Ok(mplace) => {
+ let src_field = self.mplace_field(mplace, i as u64)?;
+ src_field.into()
+ }
+ Err(..) => {
+ let src_field_layout = src.layout.field(&self, i)?;
+ // this must be a field covering the entire thing
+ assert_eq!(src.layout.fields.offset(i).bytes(), 0);
+ assert_eq!(src_field_layout.size, src.layout.size);
+ // just sawp out the layout
+ OpTy { op: src.op, layout: src_field_layout }
+ }
+ };
+ if src_field.layout.ty == dst_field.layout.ty {
+ self.copy_op(src_field, dst_field)?;
+ } else {
+ self.unsize_into(src_field, dst_field)?;
+ }
+ }
+ Ok(())
+ }
+ _ => {
+ bug!(
+ "unsize_into: invalid conversion: {:?} -> {:?}",
+ src.layout,
+ dest.layout
+ )
+ }
+ }
+ }
}
use std::error::Error;
use rustc::hir;
-use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef};
+use rustc::mir::interpret::ConstEvalErr;
use rustc::mir;
-use rustc::ty::{self, TyCtxt, Ty, Instance};
-use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout};
+use rustc::ty::{self, TyCtxt, Instance};
+use rustc::ty::layout::{LayoutOf, Primitive, TyLayout, Size};
use rustc::ty::subst::Subst;
-use rustc_data_structures::indexed_vec::IndexVec;
+use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use syntax::ast::Mutability;
use syntax::source_map::Span;
use syntax::source_map::DUMMY_SP;
+use syntax::symbol::Symbol;
use rustc::mir::interpret::{
EvalResult, EvalError, EvalErrorKind, GlobalId,
- Value, Scalar, AllocId, Allocation, ConstValue,
+ Scalar, AllocId, Allocation, ConstValue,
+};
+use super::{
+ Place, PlaceExtra, PlaceTy, MemPlace, OpTy, Operand, Value,
+ EvalContext, StackPopCleanup, Memory, MemoryKind, MPlaceTy,
};
-use super::{Place, EvalContext, StackPopCleanup, ValTy, Memory, MemoryKind};
pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
instance,
span,
mir,
- return_place: Place::undef(),
+ return_place: Place::null(tcx),
return_to_block: StackPopCleanup::None,
stmt: 0,
});
instance,
mir.span,
mir,
- Place::undef(),
+ Place::null(tcx),
StackPopCleanup::None,
)?;
Ok(ecx)
cid: GlobalId<'tcx>,
mir: &'mir mir::Mir<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> {
+) -> EvalResult<'tcx, OpTy<'tcx>> {
ecx.with_fresh_body(|ecx| {
eval_body_using_ecx(ecx, cid, Some(mir), param_env)
})
}
-pub fn value_to_const_value<'tcx>(
+pub fn op_to_const<'tcx>(
ecx: &EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>,
- val: Value,
- layout: TyLayout<'tcx>,
+ op: OpTy<'tcx>,
+ normalize: bool,
) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> {
- match (val, &layout.abi) {
- (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {},
- (Value::ByRef(..), _) |
- (Value::Scalar(_), &layout::Abi::Scalar(_)) |
- (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {},
- _ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout),
- }
- let val = match val {
- Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?),
- Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b),
- Value::ByRef(ptr, align) => {
- let ptr = ptr.to_ptr().unwrap();
+ let normalized_op = if normalize {
+ ecx.try_read_value(op)?
+ } else {
+ match op.op {
+ Operand::Indirect(mplace) => Err(mplace),
+ Operand::Immediate(val) => Ok(val)
+ }
+ };
+ let val = match normalized_op {
+ Err(MemPlace { ptr, align, extra }) => {
+ // extract alloc-offset pair
+ assert_eq!(extra, PlaceExtra::None);
+ let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
- assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes());
+ assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
let alloc = ecx.tcx.intern_const_alloc(alloc);
ConstValue::ByRef(alloc, ptr.offset)
- }
+ },
+ Ok(Value::Scalar(x)) =>
+ ConstValue::Scalar(x.not_undef()?),
+ Ok(Value::ScalarPair(a, b)) =>
+ ConstValue::ScalarPair(a.not_undef()?, b),
};
- Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, layout.ty))
+ Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty))
+}
+pub fn const_to_op<'tcx>(
+ ecx: &mut EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>,
+ cnst: &'tcx ty::Const<'tcx>,
+) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let op = ecx.const_value_to_op(cnst.val)?;
+ Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? })
}
fn eval_body_and_ecx<'a, 'mir, 'tcx>(
cid: GlobalId<'tcx>,
mir: Option<&'mir mir::Mir<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
-) -> (EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) {
+) -> (EvalResult<'tcx, OpTy<'tcx>>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) {
debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env);
// we start out with the best span we have
// and try improving it down the road when more information is available
(r, ecx)
}
+// Returns a pointer to where the result lives
fn eval_body_using_ecx<'a, 'mir, 'tcx>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
cid: GlobalId<'tcx>,
mir: Option<&'mir mir::Mir<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> {
+) -> EvalResult<'tcx, OpTy<'tcx>> {
debug!("eval_body: {:?}, {:?}", cid, param_env);
let tcx = ecx.tcx.tcx;
let mut mir = match mir {
}
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
- let ptr = ecx.memory.allocate(
- layout.size,
- layout.align,
- MemoryKind::Stack,
- )?;
+ let ret = ecx.allocate(layout, MemoryKind::Stack)?;
let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span);
let is_static = tcx.is_static(cid.instance.def_id());
let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable {
cid.instance,
mir.span,
mir,
- Place::from_ptr(ptr, layout.align),
+ Place::Ptr(*ret),
cleanup,
)?;
+ // The main interpreter loop.
while ecx.step()? {}
- let ptr = ptr.into();
- // always try to read the value and report errors
- let value = match ecx.try_read_value(ptr, layout.align, layout.ty)? {
- Some(val) if is_static.is_none() && cid.promoted.is_none() => val,
- // point at the allocation
- _ => Value::ByRef(ptr, layout.align),
- };
- Ok((value, ptr, layout))
+
+ Ok(ret.into())
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
- sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
debug!("eval_fn_call: {:?}", instance);
if !ecx.tcx.is_const_fn(instance.def_id()) {
let def_id = instance.def_id();
- let (op, oflo) = if let Some(op) = ecx.tcx.is_binop_lang_item(def_id) {
- op
+ // Some fn calls are actually BinOp intrinsics
+ let _: ! = if let Some((op, oflo)) = ecx.tcx.is_binop_lang_item(def_id) {
+ let (dest, bb) = destination.expect("128 lowerings can't diverge");
+ let l = ecx.read_value(args[0])?;
+ let r = ecx.read_value(args[1])?;
+ if oflo {
+ ecx.binop_with_overflow(op, l, r, dest)?;
+ } else {
+ ecx.binop_ignore_overflow(op, l, r, dest)?;
+ }
+ ecx.goto_block(bb);
+ return Ok(true);
+ } else if Some(def_id) == ecx.tcx.lang_items().panic_fn() {
+ assert!(args.len() == 1);
+ // &(&'static str, &'static str, u32, u32)
+ let ptr = ecx.read_value(args[0])?;
+ let place = ecx.ref_to_mplace(ptr)?;
+ let (msg, file, line, col) = (
+ place_field(ecx, 0, place)?,
+ place_field(ecx, 1, place)?,
+ place_field(ecx, 2, place)?,
+ place_field(ecx, 3, place)?,
+ );
+
+ let msg = to_str(ecx, msg)?;
+ let file = to_str(ecx, file)?;
+ let line = to_u32(line)?;
+ let col = to_u32(col)?;
+ return Err(EvalErrorKind::Panic { msg, file, line, col }.into());
+ } else if Some(def_id) == ecx.tcx.lang_items().begin_panic_fn() {
+ assert!(args.len() == 2);
+ // &'static str, &(&'static str, u32, u32)
+ let msg = ecx.read_value(args[0])?;
+ let ptr = ecx.read_value(args[1])?;
+ let place = ecx.ref_to_mplace(ptr)?;
+ let (file, line, col) = (
+ place_field(ecx, 0, place)?,
+ place_field(ecx, 1, place)?,
+ place_field(ecx, 2, place)?,
+ );
+
+ let msg = to_str(ecx, msg.value)?;
+ let file = to_str(ecx, file)?;
+ let line = to_u32(line)?;
+ let col = to_u32(col)?;
+ return Err(EvalErrorKind::Panic { msg, file, line, col }.into());
} else {
return Err(
ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(),
);
};
- let (dest, bb) = destination.expect("128 lowerings can't diverge");
- let dest_ty = sig.output();
- if oflo {
- ecx.intrinsic_with_overflow(op, args[0], args[1], dest, dest_ty)?;
- } else {
- ecx.intrinsic_overflowing(op, args[0], args[1], dest, dest_ty)?;
- }
- ecx.goto_block(bb);
- return Ok(true);
}
let mir = match ecx.load_mir(instance.def) {
Ok(mir) => mir,
}
};
let (return_place, return_to_block) = match destination {
- Some((place, block)) => (place, StackPopCleanup::Goto(block)),
- None => (Place::undef(), StackPopCleanup::None),
+ Some((place, block)) => (*place, StackPopCleanup::Goto(block)),
+ None => (Place::null(&ecx), StackPopCleanup::None),
};
ecx.push_stack_frame(
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[ValTy<'tcx>],
- dest: Place,
- dest_layout: layout::TyLayout<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: PlaceTy<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx> {
let substs = instance.substs;
let elem_align = ecx.layout_of(elem_ty)?.align.abi();
let align_val = Scalar::Bits {
bits: elem_align as u128,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, align_val, dest_layout.ty)?;
+ ecx.write_scalar(align_val, dest)?;
}
"size_of" => {
let size = ecx.layout_of(ty)?.size.bytes() as u128;
let size_val = Scalar::Bits {
bits: size,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, size_val, dest_layout.ty)?;
+ ecx.write_scalar(size_val, dest)?;
}
"type_id" => {
let type_id = ecx.tcx.type_id_hash(ty) as u128;
let id_val = Scalar::Bits {
bits: type_id,
- size: dest_layout.size.bytes() as u8,
+ size: dest.layout.size.bytes() as u8,
};
- ecx.write_scalar(dest, id_val, dest_layout.ty)?;
+ ecx.write_scalar(id_val, dest)?;
}
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
let ty = substs.type_at(0);
let layout_of = ecx.layout_of(ty)?;
- let bits = ecx.value_to_scalar(args[0])?.to_bits(layout_of.size)?;
+ let bits = ecx.read_scalar(args[0])?.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
ty::layout::Abi::Scalar(ref scalar) => scalar.value,
_ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
} else {
numeric_intrinsic(intrinsic_name, bits, kind)?
};
- ecx.write_scalar(dest, out_val, ty)?;
+ ecx.write_scalar(out_val, dest)?;
}
name => return Err(
_ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
_bin_op: mir::BinOp,
left: Scalar,
- _left_ty: Ty<'tcx>,
+ _left_layout: TyLayout<'tcx>,
right: Scalar,
- _right_ty: Ty<'tcx>,
+ _right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>> {
if left.is_bits() && right.is_bits() {
Ok(None)
fn box_alloc<'a>(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
- _ty: Ty<'tcx>,
- _dest: Place,
+ _dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(),
}
}
-pub fn const_val_field<'a, 'tcx>(
+fn place_field<'a, 'tcx, 'mir>(
+ ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
+ i: u64,
+ place: MPlaceTy<'tcx>,
+) -> EvalResult<'tcx, Value> {
+ let place = ecx.mplace_field(place, i)?;
+ Ok(ecx.try_read_value_from_mplace(place)?.expect("bad panic arg layout"))
+}
+
+fn to_str<'a, 'tcx, 'mir>(
+ ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>,
+ val: Value,
+) -> EvalResult<'tcx, Symbol> {
+ if let Value::ScalarPair(ptr, len) = val {
+ let len = len.not_undef()?.to_bits(ecx.memory.pointer_size())?;
+ let bytes = ecx.memory.read_bytes(ptr.not_undef()?, Size::from_bytes(len as u64))?;
+ let str = ::std::str::from_utf8(bytes).map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?;
+ Ok(Symbol::intern(str))
+ } else {
+ bug!("panic arg is not a str")
+ }
+}
+
+fn to_u32<'a, 'tcx, 'mir>(
+ val: Value,
+) -> EvalResult<'tcx, u32> {
+ if let Value::Scalar(n) = val {
+ Ok(n.not_undef()?.to_bits(Size::from_bits(32))? as u32)
+ } else {
+ bug!("panic arg is not a u32")
+ }
+}
+
+/// Project to a field of a (variant of a) const
+pub fn const_field<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
instance: ty::Instance<'tcx>,
field: mir::Field,
value: &'tcx ty::Const<'tcx>,
) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> {
- trace!("const_val_field: {:?}, {:?}, {:?}", instance, field, value);
+ trace!("const_field: {:?}, {:?}, {:?}", instance, field, value);
let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap();
let result = (|| {
- let ty = value.ty;
- let value = ecx.const_to_value(value.val)?;
- let layout = ecx.layout_of(ty)?;
- let place = ecx.allocate_place_for_value(value, layout, variant)?;
- let (place, layout) = ecx.place_field(place, field, layout)?;
- let (ptr, align) = place.to_ptr_align();
- let mut new_value = Value::ByRef(ptr.unwrap_or_err()?, align);
- new_value = ecx.try_read_by_ref(new_value, layout.ty)?;
- use rustc_data_structures::indexed_vec::Idx;
- match (value, new_value) {
- (Value::Scalar(_), Value::ByRef(..)) |
- (Value::ScalarPair(..), Value::ByRef(..)) |
- (Value::Scalar(_), Value::ScalarPair(..)) => bug!(
- "field {} of {:?} yielded {:?}",
- field.index(),
- value,
- new_value,
- ),
- _ => {},
- }
- value_to_const_value(&ecx, new_value, layout)
+ // get the operand again
+ let op = const_to_op(&mut ecx, value)?;
+ // downcast
+ let down = match variant {
+ None => op,
+ Some(variant) => ecx.operand_downcast(op, variant)?
+ };
+ // then project
+ let field = ecx.operand_field(down, field.index() as u64)?;
+ // and finally move back to the const world, always normalizing because
+ // this is not called for statics.
+ op_to_const(&ecx, field, true)
})();
result.map_err(|err| {
let (trace, span) = ecx.generate_stacktrace(None);
) -> EvalResult<'tcx, usize> {
trace!("const_variant_index: {:?}, {:?}", instance, val);
let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap();
- let value = ecx.const_to_value(val.val)?;
- let layout = ecx.layout_of(val.ty)?;
- let (ptr, align) = match value {
- Value::ScalarPair(..) | Value::Scalar(_) => {
- let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into();
- ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?;
- (ptr, layout.align)
- },
- Value::ByRef(ptr, align) => (ptr, align),
- };
- let place = Place::from_scalar_ptr(ptr.into(), align);
- ecx.read_discriminant_as_variant_index(place, layout)
+ let op = const_to_op(&mut ecx, val)?;
+ ecx.read_discriminant_as_variant_index(op)
}
-pub fn const_value_to_allocation_provider<'a, 'tcx>(
+pub fn const_to_allocation_provider<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
val: &'tcx ty::Const<'tcx>,
) -> &'tcx Allocation {
ty::ParamEnv::reveal_all(),
CompileTimeEvaluator,
());
- let value = ecx.const_to_value(val.val)?;
- let layout = ecx.layout_of(val.ty)?;
- let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?;
- ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?;
- let alloc = ecx.memory.get(ptr.alloc_id)?;
+ let op = const_to_op(&mut ecx, val)?;
+ // Make a new allocation, copy things there
+ let ptr = ecx.allocate(op.layout, MemoryKind::Stack)?;
+ ecx.copy_op(op, ptr.into())?;
+ let alloc = ecx.memory.get(ptr.to_ptr()?.alloc_id)?;
Ok(tcx.intern_const_alloc(alloc.clone()))
};
result().expect("unable to convert ConstValue to Allocation")
};
let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env);
- res.and_then(|(mut val, _, layout)| {
- if tcx.is_static(def_id).is_none() && cid.promoted.is_none() {
- val = ecx.try_read_by_ref(val, layout.ty)?;
+ res.and_then(|op| {
+ let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none();
+ if !normalize {
+ // Sanity check: These must always be a MemPlace
+ match op.op {
+ Operand::Indirect(_) => { /* all is good */ },
+ Operand::Immediate(_) => bug!("const eval gave us an Immediate"),
+ }
}
- value_to_const_value(&ecx, val, layout)
+ op_to_const(&ecx, op, normalize)
}).map_err(|err| {
let (trace, span) = ecx.generate_stacktrace(None);
let err = ConstEvalErr {
use rustc::hir::def::Def;
use rustc::hir::map::definitions::DefPathData;
use rustc::mir;
-use rustc::ty::layout::{self, Size, Align, HasDataLayout, IntegerExt, LayoutOf, TyLayout, Primitive};
+use rustc::ty::layout::{
+ self, Size, Align, HasDataLayout, LayoutOf, TyLayout
+};
use rustc::ty::subst::{Subst, Substs};
-use rustc::ty::{self, Ty, TyCtxt, TypeAndMut};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::query::TyCtxtAt;
use rustc_data_structures::fx::{FxHashSet, FxHasher};
-use rustc_data_structures::indexed_vec::{IndexVec, Idx};
+use rustc_data_structures::indexed_vec::IndexVec;
use rustc::mir::interpret::{
- GlobalId, Value, Scalar, FrameInfo, AllocType,
- EvalResult, EvalErrorKind, Pointer, ConstValue,
+ GlobalId, Scalar, FrameInfo,
+ EvalResult, EvalErrorKind,
ScalarMaybeUndef,
+ truncate, sign_extend,
};
use syntax::source_map::{self, Span};
use syntax::ast::Mutability;
-use super::{Place, PlaceExtra, Memory,
- HasMemory, MemoryKind,
- Machine};
-
-macro_rules! validation_failure{
- ($what:expr, $where:expr, $details:expr) => {{
- let where_ = if $where.is_empty() {
- String::new()
- } else {
- format!(" at {}", $where)
- };
- err!(ValidationFailure(format!(
- "encountered {}{}, but expected {}",
- $what, where_, $details,
- )))
- }};
- ($what:expr, $where:expr) => {{
- let where_ = if $where.is_empty() {
- String::new()
- } else {
- format!(" at {}", $where)
- };
- err!(ValidationFailure(format!(
- "encountered {}{}",
- $what, where_,
- )))
- }};
-}
+use super::{
+ Value, Operand, MemPlace, MPlaceTy, Place, PlaceExtra,
+ Memory, Machine
+};
pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
pub stmt: usize,
}
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub enum LocalValue {
- Dead,
- Live(Value),
-}
-
-impl LocalValue {
- pub fn access(self) -> EvalResult<'static, Value> {
- match self {
- LocalValue::Dead => err!(DeadLocal),
- LocalValue::Live(val) => Ok(val),
- }
- }
-}
-
impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {}
impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> {
}
}
+// State of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub enum LocalValue {
+ Dead,
+ // Mostly for convenience, we re-use the `Operand` type here.
+ // This is an optimization over just always having a pointer here;
+ // we can thus avoid doing an allocation when the local just stores
+ // immediate values *and* never has its address taken.
+ Live(Operand),
+}
+
+impl<'tcx> LocalValue {
+ pub fn access(&self) -> EvalResult<'tcx, &Operand> {
+ match self {
+ LocalValue::Dead => err!(DeadLocal),
+ LocalValue::Live(ref val) => Ok(val),
+ }
+ }
+
+ pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> {
+ match self {
+ LocalValue::Dead => err!(DeadLocal),
+ LocalValue::Live(ref mut val) => Ok(val),
+ }
+ }
+}
+
/// The virtual machine state during const-evaluation at a given point in time.
type EvalSnapshot<'a, 'mir, 'tcx, M>
= (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>);
None,
}
-#[derive(Copy, Clone, Debug)]
-pub struct TyAndPacked<'tcx> {
- pub ty: Ty<'tcx>,
- pub packed: bool,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct ValTy<'tcx> {
- pub value: Value,
- pub ty: Ty<'tcx>,
-}
-
-impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
- type Target = Value;
- fn deref(&self) -> &Value {
- &self.value
- }
-}
-
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
type Ty = Ty<'tcx>;
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
+ #[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.tcx.layout_of(self.param_env.and(ty))
.map_err(|layout| EvalErrorKind::Layout(layout).into())
r
}
- pub fn alloc_ptr(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Pointer> {
- assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
-
- self.memory.allocate(layout.size, layout.align, MemoryKind::Stack)
- }
-
pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
&self.memory
}
self.stack.len() - 1
}
- pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
- let ptr = self.memory.allocate_bytes(s.as_bytes());
- Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx))
+ /// Mark a storage as live, killing the previous content and returning it.
+ /// Remember to deallocate that!
+ pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
+ trace!("{:?} is now live", local);
+
+ let layout = self.layout_of_local(self.cur_frame(), local)?;
+ let init = LocalValue::Live(self.uninit_operand(layout)?);
+ // StorageLive *always* kills the value that's currently stored
+ Ok(mem::replace(&mut self.frame_mut().locals[local], init))
}
- pub fn const_to_value(
- &mut self,
- val: ConstValue<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- match val {
- ConstValue::Unevaluated(def_id, substs) => {
- let instance = self.resolve(def_id, substs)?;
- self.read_global_as_value(GlobalId {
- instance,
- promoted: None,
- })
- }
- ConstValue::ByRef(alloc, offset) => {
- // FIXME: Allocate new AllocId for all constants inside
- let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
- Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
- },
- ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())),
- ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())),
- }
+ /// Returns the old value of the local.
+ /// Remember to deallocate that!
+ pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
+ trace!("{:?} is now dead", local);
+
+ mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead)
+ }
+
+ pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
+ let ptr = self.memory.allocate_bytes(s.as_bytes());
+ Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
}
pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> {
}
}
- pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
+ pub fn monomorphize<T: TypeFoldable<'tcx> + Subst<'tcx>>(
+ &self,
+ t: T,
+ substs: &'tcx Substs<'tcx>
+ ) -> T {
// miri doesn't care about lifetimes, and will choke on some crazy ones
// let's simply get rid of them
- let substituted = ty.subst(*self.tcx, substs);
+ let substituted = t.subst(*self.tcx, substs);
self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted)
}
- /// Return the size and alignment of the value at the given type.
+ pub fn layout_of_local(
+ &self,
+ frame: usize,
+ local: mir::Local
+ ) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ let local_ty = self.stack[frame].mir.local_decls[local].ty;
+ let local_ty = self.monomorphize(
+ local_ty,
+ self.stack[frame].instance.substs
+ );
+ self.layout_of(local_ty)
+ }
+
+ /// Return the actual dynamic size and alignment of the place at the given type.
/// Note that the value does not matter if the type is sized. For unsized types,
/// the value has to be a fat pointer, and we only care about the "extra" data in it.
- pub fn size_and_align_of_dst(
+ pub fn size_and_align_of_mplace(
&self,
- ty: Ty<'tcx>,
- value: Value,
+ mplace: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, (Size, Align)> {
- let layout = self.layout_of(ty)?;
- if !layout.is_unsized() {
- Ok(layout.size_and_align())
+ if let PlaceExtra::None = mplace.extra {
+ assert!(!mplace.layout.is_unsized());
+ Ok(mplace.layout.size_and_align())
} else {
- match ty.sty {
- ty::TyAdt(..) | ty::TyTuple(..) => {
+ let layout = mplace.layout;
+ assert!(layout.is_unsized());
+ match layout.ty.sty {
+ ty::Adt(..) | ty::Tuple(..) => {
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized,
// and it also rounds up to alignment, which we want to avoid,
// as the unsized field's alignment could be smaller.
- assert!(!ty.is_simd());
- debug!("DST {} layout: {:?}", ty, layout);
+ assert!(!layout.ty.is_simd());
+ debug!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align;
debug!(
"DST {} statically sized prefix size: {:?} align: {:?}",
- ty,
+ layout.ty,
sized_size,
sized_align
);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
- let field_ty = layout.field(self, layout.fields.count() - 1)?.ty;
- let (unsized_size, unsized_align) =
- self.size_and_align_of_dst(field_ty, value)?;
+ let field = self.mplace_field(mplace, layout.fields.count() as u64 - 1)?;
+ let (unsized_size, unsized_align) = self.size_and_align_of_mplace(field)?;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
Ok((size.abi_align(align), align))
}
- ty::TyDynamic(..) => {
- let (_, vtable) = self.into_ptr_vtable_pair(value)?;
+ ty::Dynamic(..) => {
+ let vtable = match mplace.extra {
+ PlaceExtra::Vtable(vtable) => vtable,
+ _ => bug!("Expected vtable"),
+ };
// the second entry in the vtable is the dynamic size of the object.
self.read_size_and_align_from_vtable(vtable)
}
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
+ let len = match mplace.extra {
+ PlaceExtra::Length(len) => len,
+ _ => bug!("Expected length"),
+ };
let (elem_size, align) = layout.field(self, 0)?.size_and_align();
- let (_, len) = self.into_slice(value)?;
Ok((elem_size * len, align))
}
- _ => bug!("size_of_val::<{:?}>", ty),
+ _ => bug!("size_of_val::<{:?}> not supported", layout.ty),
}
}
}
// don't allocate at all for trivial constants
if mir.local_decls.len() > 1 {
- let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls);
- for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) {
- *local = LocalValue::Live(self.init_value(decl.ty)?);
- }
+ // We put some marker value into the locals that we later want to initialize.
+ // This can be anything except for LocalValue::Dead -- because *that* is the
+ // value we use for things that we know are initially dead.
+ let dummy =
+ LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)));
+ let mut locals = IndexVec::from_elem(dummy, &mir.local_decls);
+ // Now mark those locals as dead that we do not want to initialize
match self.tcx.describe_def(instance.def_id()) {
// statics and constants don't have `Storage*` statements, no need to look for them
Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {},
use rustc::mir::StatementKind::{StorageDead, StorageLive};
match stmt.kind {
StorageLive(local) |
- StorageDead(local) => locals[local] = LocalValue::Dead,
+ StorageDead(local) => {
+ locals[local] = LocalValue::Dead;
+ }
_ => {}
}
}
}
},
}
+ // Finally, properly initialize all those that still have the dummy value
+ for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) {
+ match *local {
+ LocalValue::Live(_) => {
+ // This needs to be properly initialized.
+ let layout = self.layout_of(self.monomorphize(decl.ty, instance.substs))?;
+ *local = LocalValue::Live(self.uninit_operand(layout)?);
+ }
+ LocalValue::Dead => {
+ // Nothing to do
+ }
+ }
+ }
+ // done
self.frame_mut().locals = locals;
}
- self.memory.cur_frame = self.cur_frame();
-
if self.stack.len() > self.stack_limit {
err!(StackFrameLimitReached)
} else {
let frame = self.stack.pop().expect(
"tried to pop a stack frame, but there were none",
);
- if !self.stack.is_empty() {
- // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
- self.memory.cur_frame = self.cur_frame();
- }
match frame.return_to_block {
StackPopCleanup::MarkStatic(mutable) => {
- if let Place::Ptr { ptr, .. } = frame.return_place {
+ if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place {
// FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
self.memory.mark_static_initialized(
- ptr.unwrap_or_err()?.to_ptr()?.alloc_id,
+ ptr.to_ptr()?.alloc_id,
mutable,
)?
} else {
Ok(())
}
- pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
+ crate fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
- if let LocalValue::Live(Value::ByRef(ptr, _align)) = local {
+ if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
Ok(())
}
- /// Evaluate an assignment statement.
- ///
- /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
- /// type writes its results directly into the memory specified by the place.
- pub(super) fn eval_rvalue_into_place(
- &mut self,
- rvalue: &mir::Rvalue<'tcx>,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx> {
- let dest = self.eval_place(place)?;
- let dest_ty = self.place_ty(place);
- let dest_layout = self.layout_of(dest_ty)?;
-
- use rustc::mir::Rvalue::*;
- match *rvalue {
- Use(ref operand) => {
- let value = self.eval_operand(operand)?.value;
- let valty = ValTy {
- value,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
- }
-
- BinaryOp(bin_op, ref left, ref right) => {
- let left = self.eval_operand(left)?;
- let right = self.eval_operand(right)?;
- self.intrinsic_overflowing(
- bin_op,
- left,
- right,
- dest,
- dest_ty,
- )?;
- }
-
- CheckedBinaryOp(bin_op, ref left, ref right) => {
- let left = self.eval_operand(left)?;
- let right = self.eval_operand(right)?;
- self.intrinsic_with_overflow(
- bin_op,
- left,
- right,
- dest,
- dest_ty,
- )?;
- }
-
- UnaryOp(un_op, ref operand) => {
- let val = self.eval_operand_to_scalar(operand)?;
- let val = self.unary_op(un_op, val, dest_layout)?;
- self.write_scalar(
- dest,
- val,
- dest_ty,
- )?;
- }
-
- Aggregate(ref kind, ref operands) => {
- let (dest, active_field_index) = match **kind {
- mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
- self.write_discriminant_value(dest_ty, dest, variant_index)?;
- if adt_def.is_enum() {
- (self.place_downcast(dest, variant_index)?, active_field_index)
- } else {
- (dest, active_field_index)
- }
- }
- _ => (dest, None)
- };
-
- let layout = self.layout_of(dest_ty)?;
- for (i, operand) in operands.iter().enumerate() {
- let value = self.eval_operand(operand)?;
- // Ignore zero-sized fields.
- if !self.layout_of(value.ty)?.is_zst() {
- let field_index = active_field_index.unwrap_or(i);
- let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
- self.write_value(value, field_dest)?;
- }
- }
- }
-
- Repeat(ref operand, _) => {
- let (elem_ty, length) = match dest_ty.sty {
- ty::TyArray(elem_ty, n) => (elem_ty, n.unwrap_usize(self.tcx.tcx)),
- _ => {
- bug!(
- "tried to assign array-repeat to non-array type {:?}",
- dest_ty
- )
- }
- };
- let elem_size = self.layout_of(elem_ty)?.size;
- let value = self.eval_operand(operand)?.value;
-
- let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
-
- if length > 0 {
- let dest = dest.unwrap_or_err()?;
- //write the first value
- self.write_value_to_ptr(value, dest, dest_align, elem_ty)?;
-
- if length > 1 {
- let rest = dest.ptr_offset(elem_size * 1 as u64, &self)?;
- self.memory.copy_repeatedly(dest, dest_align, rest, dest_align, elem_size, length - 1, false)?;
- }
- }
- }
-
- Len(ref place) => {
- // FIXME(CTFE): don't allow computing the length of arrays in const eval
- let src = self.eval_place(place)?;
- let ty = self.place_ty(place);
- let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
- let size = self.memory.pointer_size().bytes() as u8;
- self.write_scalar(
- dest,
- Scalar::Bits {
- bits: len as u128,
- size,
- },
- dest_ty,
- )?;
- }
-
- Ref(_, _, ref place) => {
- let src = self.eval_place(place)?;
- // We ignore the alignment of the place here -- special handling for packed structs ends
- // at the `&` operator.
- let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();
-
- let val = match extra {
- PlaceExtra::None => Value::Scalar(ptr),
- PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
- PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
- PlaceExtra::DowncastVariant(..) => {
- bug!("attempted to take a reference to an enum downcast place")
- }
- };
- let valty = ValTy {
- value: val,
- ty: dest_ty,
- };
- self.write_value(valty, dest)?;
- }
-
- NullaryOp(mir::NullOp::Box, ty) => {
- let ty = self.monomorphize(ty, self.substs());
- M::box_alloc(self, ty, dest)?;
- }
-
- NullaryOp(mir::NullOp::SizeOf, ty) => {
- let ty = self.monomorphize(ty, self.substs());
- let layout = self.layout_of(ty)?;
- assert!(!layout.is_unsized(),
- "SizeOf nullary MIR operator called for unsized type");
- let size = self.memory.pointer_size().bytes() as u8;
- self.write_scalar(
- dest,
- Scalar::Bits {
- bits: layout.size.bytes() as u128,
- size,
- },
- dest_ty,
- )?;
- }
-
- Cast(kind, ref operand, cast_ty) => {
- debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
- let src = self.eval_operand(operand)?;
- self.cast(src, kind, dest_ty, dest)?;
- }
-
- Discriminant(ref place) => {
- let ty = self.place_ty(place);
- let layout = self.layout_of(ty)?;
- let place = self.eval_place(place)?;
- let discr_val = self.read_discriminant_value(place, layout)?;
- let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8;
- self.write_scalar(dest, Scalar::Bits {
- bits: discr_val,
- size,
- }, dest_ty)?;
- }
- }
-
- self.dump_local(dest);
-
- Ok(())
- }
-
- pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
- match ty.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
- ty::TyRef(_, ty, _) => !self.type_is_sized(ty),
- ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
- _ => false,
- }
- }
-
- pub(super) fn eval_operand_to_scalar(
- &mut self,
- op: &mir::Operand<'tcx>,
- ) -> EvalResult<'tcx, Scalar> {
- let valty = self.eval_operand(op)?;
- self.value_to_scalar(valty)
- }
-
- pub(crate) fn operands_to_args(
- &mut self,
- ops: &[mir::Operand<'tcx>],
- ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
- ops.into_iter()
- .map(|op| self.eval_operand(op))
- .collect()
- }
-
- pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
- use rustc::mir::Operand::*;
- let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs());
- match *op {
- // FIXME: do some more logic on `move` to invalidate the old location
- Copy(ref place) |
- Move(ref place) => {
- Ok(ValTy {
- value: self.eval_and_read_place(place)?,
- ty
- })
- },
-
- Constant(ref constant) => {
- let value = self.const_to_value(constant.literal.val)?;
-
- Ok(ValTy {
- value,
- ty,
- })
- }
- }
- }
-
- /// reads a tag and produces the corresponding variant index
- pub fn read_discriminant_as_variant_index(
- &self,
- place: Place,
- layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, usize> {
- match layout.variants {
- ty::layout::Variants::Single { index } => Ok(index),
- ty::layout::Variants::Tagged { .. } => {
- let discr_val = self.read_discriminant_value(place, layout)?;
- layout
- .ty
- .ty_adt_def()
- .expect("tagged layout for non adt")
- .discriminants(self.tcx.tcx)
- .position(|var| var.val == discr_val)
- .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
- }
- ty::layout::Variants::NicheFilling { .. } => {
- let discr_val = self.read_discriminant_value(place, layout)?;
- assert_eq!(discr_val as usize as u128, discr_val);
- Ok(discr_val as usize)
- },
- }
- }
-
- pub fn read_discriminant_value(
- &self,
- place: Place,
- layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, u128> {
- trace!("read_discriminant_value {:#?}", layout);
- if layout.abi == layout::Abi::Uninhabited {
- return Ok(0);
- }
-
- match layout.variants {
- layout::Variants::Single { index } => {
- let discr_val = layout.ty.ty_adt_def().map_or(
- index as u128,
- |def| def.discriminant_for_variant(*self.tcx, index).val);
- return Ok(discr_val);
- }
- layout::Variants::Tagged { .. } |
- layout::Variants::NicheFilling { .. } => {},
- }
- let discr_place_val = self.read_place(place)?;
- let (discr_val, discr) = self.read_field(discr_place_val, None, mir::Field::new(0), layout)?;
- trace!("discr value: {:?}, {:?}", discr_val, discr);
- let raw_discr = self.value_to_scalar(ValTy {
- value: discr_val,
- ty: discr.ty
- })?;
- let discr_val = match layout.variants {
- layout::Variants::Single { .. } => bug!(),
- // FIXME: should we catch invalid discriminants here?
- layout::Variants::Tagged { .. } => {
- if discr.ty.is_signed() {
- let i = raw_discr.to_bits(discr.size)? as i128;
- // going from layout tag type to typeck discriminant type
- // requires first sign extending with the layout discriminant
- let shift = 128 - discr.size.bits();
- let sexted = (i << shift) >> shift;
- // and then zeroing with the typeck discriminant type
- let discr_ty = layout
- .ty
- .ty_adt_def().expect("tagged layout corresponds to adt")
- .repr
- .discr_type();
- let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
- let shift = 128 - discr_ty.size().bits();
- let truncatee = sexted as u128;
- (truncatee << shift) >> shift
- } else {
- raw_discr.to_bits(discr.size)?
- }
- },
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- let variants_start = *niche_variants.start() as u128;
- let variants_end = *niche_variants.end() as u128;
- match raw_discr {
- Scalar::Ptr(_) => {
- assert!(niche_start == 0);
- assert!(variants_start == variants_end);
- dataful_variant as u128
- },
- Scalar::Bits { bits: raw_discr, size } => {
- assert_eq!(size as u64, discr.size.bytes());
- let discr = raw_discr.wrapping_sub(niche_start)
- .wrapping_add(variants_start);
- if variants_start <= discr && discr <= variants_end {
- discr
- } else {
- dataful_variant as u128
- }
- },
- }
- }
- };
-
- Ok(discr_val)
- }
-
-
- pub fn write_discriminant_value(
- &mut self,
- dest_ty: Ty<'tcx>,
- dest: Place,
- variant_index: usize,
- ) -> EvalResult<'tcx> {
- let layout = self.layout_of(dest_ty)?;
-
- match layout.variants {
- layout::Variants::Single { index } => {
- if index != variant_index {
- // If the layout of an enum is `Single`, all
- // other variants are necessarily uninhabited.
- assert_eq!(layout.for_variant(&self, variant_index).abi,
- layout::Abi::Uninhabited);
- }
- }
- layout::Variants::Tagged { ref tag, .. } => {
- let discr_val = dest_ty.ty_adt_def().unwrap()
- .discriminant_for_variant(*self.tcx, variant_index)
- .val;
-
- // raw discriminants for enums are isize or bigger during
- // their computation, but the in-memory tag is the smallest possible
- // representation
- let size = tag.value.size(self.tcx.tcx);
- let shift = 128 - size.bits();
- let discr_val = (discr_val << shift) >> shift;
-
- let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?;
- self.write_scalar(discr_dest, Scalar::Bits {
- bits: discr_val,
- size: size.bytes() as u8,
- }, tag.ty)?;
- }
- layout::Variants::NicheFilling {
- dataful_variant,
- ref niche_variants,
- niche_start,
- ..
- } => {
- if variant_index != dataful_variant {
- let (niche_dest, niche) =
- self.place_field(dest, mir::Field::new(0), layout)?;
- let niche_value = ((variant_index - niche_variants.start()) as u128)
- .wrapping_add(niche_start);
- self.write_scalar(niche_dest, Scalar::Bits {
- bits: niche_value,
- size: niche.size.bytes() as u8,
- }, niche.ty)?;
- }
- }
- }
-
- Ok(())
- }
-
- pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Value> {
- let cv = self.const_eval(gid)?;
- self.const_to_value(cv.val)
- }
-
pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> {
let param_env = if self.tcx.is_static(gid.instance.def_id()).is_some() {
ty::ParamEnv::reveal_all()
self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
}
- pub fn allocate_place_for_value(
- &mut self,
- value: Value,
- layout: TyLayout<'tcx>,
- variant: Option<usize>,
- ) -> EvalResult<'tcx, Place> {
- let (ptr, align) = match value {
- Value::ByRef(ptr, align) => (ptr, align),
- Value::ScalarPair(..) | Value::Scalar(_) => {
- let ptr = self.alloc_ptr(layout)?.into();
- self.write_value_to_ptr(value, ptr, layout.align, layout.ty)?;
- (ptr, layout.align)
- },
- };
- Ok(Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant),
- })
- }
-
- pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
- let new_place = match place {
- Place::Local { frame, local } => {
- match self.stack[frame].locals[local].access()? {
- Value::ByRef(ptr, align) => {
- Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: PlaceExtra::None,
- }
- }
- val => {
- let ty = self.stack[frame].mir.local_decls[local].ty;
- let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
- let layout = self.layout_of(ty)?;
- let ptr = self.alloc_ptr(layout)?;
- self.stack[frame].locals[local] =
- LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live
-
- let place = Place::from_ptr(ptr, layout.align);
- self.write_value(ValTy { value: val, ty }, place)?;
- place
- }
- }
- }
- Place::Ptr { .. } => place,
- };
- Ok(new_place)
- }
-
- /// ensures this Value is not a ByRef
- pub fn follow_by_ref_value(
- &self,
- value: Value,
- ty: Ty<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- match value {
- Value::ByRef(ptr, align) => {
- self.read_value(ptr, align, ty)
- }
- other => Ok(other),
- }
- }
-
- pub fn value_to_scalar(
- &self,
- ValTy { value, ty } : ValTy<'tcx>,
- ) -> EvalResult<'tcx, Scalar> {
- match self.follow_by_ref_value(value, ty)? {
- Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
-
- Value::Scalar(scalar) => scalar.unwrap_or_err(),
-
- Value::ScalarPair(..) => bug!("value_to_scalar can't work with fat pointers"),
- }
- }
-
- pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
- let valty = ValTy {
- value: val.to_value(),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- pub fn write_scalar(
- &mut self,
- dest: Place,
- val: impl Into<ScalarMaybeUndef>,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- let valty = ValTy {
- value: Value::Scalar(val.into()),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- pub fn write_value(
- &mut self,
- ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
- dest: Place,
- ) -> EvalResult<'tcx> {
- //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
- // Note that it is really important that the type here is the right one, and matches the type things are read at.
- // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
- // correct if we never look at this data with the wrong type.
-
- match dest {
- Place::Ptr { ptr, align, extra } => {
- assert_eq!(extra, PlaceExtra::None);
- self.write_value_to_ptr(src_val, ptr.unwrap_or_err()?, align, dest_ty)
- }
-
- Place::Local { frame, local } => {
- let old_val = self.stack[frame].locals[local].access()?;
- self.write_value_possibly_by_val(
- src_val,
- |this, val| this.stack[frame].set_local(local, val),
- old_val,
- dest_ty,
- )
- }
- }
- }
-
- // The cases here can be a bit subtle. Read carefully!
- fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
- &mut self,
- src_val: Value,
- write_dest: F,
- old_dest_val: Value,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- // FIXME: this should be a layout check, not underlying value
- if let Value::ByRef(dest_ptr, align) = old_dest_val {
- // If the value is already `ByRef` (that is, backed by an `Allocation`),
- // then we must write the new value into this allocation, because there may be
- // other pointers into the allocation. These other pointers are logically
- // pointers into the local variable, and must be able to observe the change.
- //
- // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
- // knew for certain that there were no outstanding pointers to this allocation.
- self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
- } else if let Value::ByRef(src_ptr, align) = src_val {
- // If the value is not `ByRef`, then we know there are no pointers to it
- // and we can simply overwrite the `Value` in the locals array directly.
- //
- // In this specific case, where the source value is `ByRef`, we must duplicate
- // the allocation, because this is a by-value operation. It would be incorrect
- // if they referred to the same allocation, since then a change to one would
- // implicitly change the other.
- //
- // It is a valid optimization to attempt reading a primitive value out of the
- // source and write that into the destination without making an allocation, so
- // we do so here.
- if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
- write_dest(self, src_val)?;
- } else {
- let layout = self.layout_of(dest_ty)?;
- let dest_ptr = self.alloc_ptr(layout)?.into();
- self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
- write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
- }
- } else {
- // Finally, we have the simple case where neither source nor destination are
- // `ByRef`. We may simply copy the source value over the the destintion.
- write_dest(self, src_val)?;
- }
- Ok(())
- }
-
- pub fn write_value_to_ptr(
- &mut self,
- value: Value,
- dest: Scalar,
- dest_align: Align,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- let layout = self.layout_of(dest_ty)?;
- trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
- match value {
- Value::ByRef(ptr, align) => {
- self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
- }
- Value::Scalar(scalar) => {
- let signed = match layout.abi {
- layout::Abi::Scalar(ref scal) => match scal.value {
- layout::Primitive::Int(_, signed) => signed,
- _ => false,
- },
- _ => false,
- };
- self.memory.write_scalar(dest, dest_align, scalar, layout.size, layout.align, signed)
- }
- Value::ScalarPair(a_val, b_val) => {
- trace!("write_value_to_ptr valpair: {:#?}", layout);
- let (a, b) = match layout.abi {
- layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
- _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout)
- };
- let (a_size, b_size) = (a.size(&self), b.size(&self));
- let (a_align, b_align) = (a.align(&self), b.align(&self));
- let a_ptr = dest;
- let b_offset = a_size.abi_align(b_align);
- let b_ptr = dest.ptr_offset(b_offset, &self)?.into();
- // TODO: What about signedess?
- self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, a_align, false)?;
- self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, b_align, false)
- }
- }
- }
-
- pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- if let Some(val) = self.try_read_value(ptr, align, ty)? {
- Ok(val)
- } else {
- bug!("primitive read failed for type: {:?}", ty);
- }
- }
-
- fn validate_scalar(
- &self,
- value: ScalarMaybeUndef,
- size: Size,
- scalar: &layout::Scalar,
- path: &str,
- ty: Ty,
- ) -> EvalResult<'tcx> {
- trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty);
- let (lo, hi) = scalar.valid_range.clone().into_inner();
-
- let value = match value {
- ScalarMaybeUndef::Scalar(scalar) => scalar,
- ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path),
- };
-
- let bits = match value {
- Scalar::Bits { bits, size: value_size } => {
- assert_eq!(value_size as u64, size.bytes());
- bits
- },
- Scalar::Ptr(_) => {
- let ptr_size = self.memory.pointer_size();
- let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
- return if lo > hi {
- if lo - hi == 1 {
- // no gap, all values are ok
- Ok(())
- } else if hi < ptr_max || lo > 1 {
- let max = u128::max_value() >> (128 - size.bits());
- validation_failure!(
- "pointer",
- path,
- format!("something in the range {:?} or {:?}", 0..=lo, hi..=max)
- )
- } else {
- Ok(())
- }
- } else if hi < ptr_max || lo > 1 {
- validation_failure!(
- "pointer",
- path,
- format!("something in the range {:?}", scalar.valid_range)
- )
- } else {
- Ok(())
- };
- },
- };
-
- // char gets a special treatment, because its number space is not contiguous so `TyLayout`
- // has no special checks for chars
- match ty.sty {
- ty::TyChar => {
- debug_assert_eq!(size.bytes(), 4);
- if ::std::char::from_u32(bits as u32).is_none() {
- return err!(InvalidChar(bits));
- }
- }
- _ => {},
- }
-
- use std::ops::RangeInclusive;
- let in_range = |bound: RangeInclusive<u128>| bound.contains(&bits);
- if lo > hi {
- if in_range(0..=hi) || in_range(lo..=u128::max_value()) {
- Ok(())
- } else {
- validation_failure!(
- bits,
- path,
- format!("something in the range {:?} or {:?}", ..=hi, lo..)
- )
- }
- } else {
- if in_range(scalar.valid_range.clone()) {
- Ok(())
- } else {
- validation_failure!(
- bits,
- path,
- format!("something in the range {:?}", scalar.valid_range)
- )
- }
- }
- }
-
- /// This function checks the memory where `ptr` points to.
- /// It will error if the bits at the destination do not match the ones described by the layout.
- pub fn validate_ptr_target(
- &self,
- ptr: Pointer,
- ptr_align: Align,
- mut layout: TyLayout<'tcx>,
- path: String,
- seen: &mut FxHashSet<(Pointer, Ty<'tcx>)>,
- todo: &mut Vec<(Pointer, Ty<'tcx>, String)>,
- ) -> EvalResult<'tcx> {
- self.memory.dump_alloc(ptr.alloc_id);
- trace!("validate_ptr_target: {:?}, {:#?}", ptr, layout);
-
- let variant;
- match layout.variants {
- layout::Variants::NicheFilling { niche: ref tag, .. } |
- layout::Variants::Tagged { ref tag, .. } => {
- let size = tag.value.size(self);
- let (tag_value, tag_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(0),
- layout,
- )?;
- let tag_value = match self.follow_by_ref_value(tag_value, tag_layout.ty)? {
- Value::Scalar(val) => val,
- _ => bug!("tag must be scalar"),
- };
- let path = format!("{}.TAG", path);
- self.validate_scalar(tag_value, size, tag, &path, tag_layout.ty)?;
- let variant_index = self.read_discriminant_as_variant_index(
- Place::from_ptr(ptr, ptr_align),
- layout,
- )?;
- variant = variant_index;
- layout = layout.for_variant(self, variant_index);
- trace!("variant layout: {:#?}", layout);
- },
- layout::Variants::Single { index } => variant = index,
- }
- match layout.fields {
- // primitives are unions with zero fields
- layout::FieldPlacement::Union(0) => {
- match layout.abi {
- // nothing to do, whatever the pointer points to, it is never going to be read
- layout::Abi::Uninhabited => validation_failure!("a value of an uninhabited type", path),
- // check that the scalar is a valid pointer or that its bit range matches the
- // expectation.
- layout::Abi::Scalar(ref scalar) => {
- let size = scalar.value.size(self);
- let value = self.memory.read_scalar(ptr, ptr_align, size)?;
- self.validate_scalar(value, size, scalar, &path, layout.ty)?;
- if scalar.value == Primitive::Pointer {
- // ignore integer pointers, we can't reason about the final hardware
- if let Scalar::Ptr(ptr) = value.unwrap_or_err()? {
- let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
- if let Some(AllocType::Static(did)) = alloc_kind {
- // statics from other crates are already checked
- // extern statics should not be validated as they have no body
- if !did.is_local() || self.tcx.is_foreign_item(did) {
- return Ok(());
- }
- }
- if let Some(tam) = layout.ty.builtin_deref(false) {
- // we have not encountered this pointer+layout combination before
- if seen.insert((ptr, tam.ty)) {
- todo.push((ptr, tam.ty, format!("(*{})", path)))
- }
- }
- }
- }
- Ok(())
- },
- _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", layout.abi),
- }
- }
- layout::FieldPlacement::Union(_) => {
- // We can't check unions, their bits are allowed to be anything.
- // The fields don't need to correspond to any bit pattern of the union's fields.
- // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389
- Ok(())
- },
- layout::FieldPlacement::Array { stride, count } => {
- let elem_layout = layout.field(self, 0)?;
- for i in 0..count {
- let mut path = path.clone();
- self.write_field_name(&mut path, layout.ty, i as usize, variant).unwrap();
- self.validate_ptr_target(ptr.offset(stride * i, self)?, ptr_align, elem_layout, path, seen, todo)?;
- }
- Ok(())
- },
- layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
-
- // check length field and vtable field
- match layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) {
- | Some(ty::TyStr)
- | Some(ty::TySlice(_)) => {
- let (len, len_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(1),
- layout,
- )?;
- let len = self.value_to_scalar(ValTy { value: len, ty: len_layout.ty })?;
- if len.to_bits(len_layout.size).is_err() {
- return validation_failure!("length is not a valid integer", path);
- }
- },
- Some(ty::TyDynamic(..)) => {
- let (vtable, vtable_layout) = self.read_field(
- Value::ByRef(ptr.into(), ptr_align),
- None,
- mir::Field::new(1),
- layout,
- )?;
- let vtable = self.value_to_scalar(ValTy { value: vtable, ty: vtable_layout.ty })?;
- if vtable.to_ptr().is_err() {
- return validation_failure!("vtable address is not a pointer", path);
- }
- }
- _ => {},
- }
- for (i, &offset) in offsets.iter().enumerate() {
- let field_layout = layout.field(self, i)?;
- let mut path = path.clone();
- self.write_field_name(&mut path, layout.ty, i, variant).unwrap();
- self.validate_ptr_target(ptr.offset(offset, self)?, ptr_align, field_layout, path, seen, todo)?;
- }
- Ok(())
- }
- }
- }
-
- pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- // Convert to ByVal or ScalarPair if possible
- if let Value::ByRef(ptr, align) = val {
- if let Some(read_val) = self.try_read_value(ptr, align, ty)? {
- val = read_val;
- }
- }
- Ok(val)
- }
-
- pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
- let layout = self.layout_of(ty)?;
- self.memory.check_align(ptr, ptr_align)?;
-
- if layout.size.bytes() == 0 {
- return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 }))));
- }
-
- let ptr = ptr.to_ptr()?;
-
- match layout.abi {
- layout::Abi::Scalar(..) => {
- let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?;
- Ok(Some(Value::Scalar(scalar)))
- }
- layout::Abi::ScalarPair(ref a, ref b) => {
- let (a, b) = (&a.value, &b.value);
- let (a_size, b_size) = (a.size(self), b.size(self));
- let a_ptr = ptr;
- let b_offset = a_size.abi_align(b.align(self));
- let b_ptr = ptr.offset(b_offset, self)?.into();
- let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
- let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
- Ok(Some(Value::ScalarPair(a_val, b_val)))
- }
- _ => Ok(None),
- }
- }
-
+ #[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx> {
self.stack.last().expect("no call frames exist")
}
+ #[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
self.stack.last_mut().expect("no call frames exist")
}
}
}
- fn unsize_into_ptr(
- &mut self,
- src: Value,
- src_ty: Ty<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
- sty: Ty<'tcx>,
- dty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
- // A<Struct> -> A<Trait> conversion
- let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
-
- match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
- (&ty::TyArray(_, length), &ty::TySlice(_)) => {
- let ptr = self.into_ptr(src)?;
- // u64 cast is from usize to u64, which is always good
- let valty = ValTy {
- value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
- (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
- // For now, upcasts are limited to changes in marker
- // traits, and hence never actually require an actual
- // change to the vtable.
- let valty = ValTy {
- value: src,
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
- (_, &ty::TyDynamic(ref data, _)) => {
- let trait_ref = data.principal().unwrap().with_self_ty(
- *self.tcx,
- src_pointee_ty,
- );
- let trait_ref = self.tcx.erase_regions(&trait_ref);
- let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
- let ptr = self.into_ptr(src)?;
- let valty = ValTy {
- value: ptr.to_value_with_vtable(vtable),
- ty: dest_ty,
- };
- self.write_value(valty, dest)
- }
-
- _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
- }
- }
-
- crate fn unsize_into(
- &mut self,
- src: Value,
- src_layout: TyLayout<'tcx>,
- dst: Place,
- dst_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx> {
- match (&src_layout.ty.sty, &dst_layout.ty.sty) {
- (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) |
- (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) |
- (&ty::TyRawPtr(TypeAndMut { ty: s, .. }),
- &ty::TyRawPtr(TypeAndMut { ty: d, .. })) => {
- self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s, d)
- }
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
- assert_eq!(def_a, def_b);
- if def_a.is_box() || def_b.is_box() {
- if !def_a.is_box() || !def_b.is_box() {
- bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout);
- }
- return self.unsize_into_ptr(
- src,
- src_layout.ty,
- dst,
- dst_layout.ty,
- src_layout.ty.boxed_ty(),
- dst_layout.ty.boxed_ty(),
- );
- }
-
- // unsizing of generic struct with pointer fields
- // Example: `Arc<T>` -> `Arc<Trait>`
- // here we need to increase the size of every &T thin ptr field to a fat ptr
- for i in 0..src_layout.fields.count() {
- let (dst_f_place, dst_field) =
- self.place_field(dst, mir::Field::new(i), dst_layout)?;
- if dst_field.is_zst() {
- continue;
- }
- let (src_f_value, src_field) = match src {
- Value::ByRef(ptr, align) => {
- let src_place = Place::from_scalar_ptr(ptr.into(), align);
- let (src_f_place, src_field) =
- self.place_field(src_place, mir::Field::new(i), src_layout)?;
- (self.read_place(src_f_place)?, src_field)
- }
- Value::Scalar(_) | Value::ScalarPair(..) => {
- let src_field = src_layout.field(&self, i)?;
- assert_eq!(src_layout.fields.offset(i).bytes(), 0);
- assert_eq!(src_field.size, src_layout.size);
- (src, src_field)
- }
- };
- if src_field.ty == dst_field.ty {
- self.write_value(ValTy {
- value: src_f_value,
- ty: src_field.ty,
- }, dst_f_place)?;
- } else {
- self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?;
- }
- }
- Ok(())
- }
- _ => {
- bug!(
- "unsize_into: invalid conversion: {:?} -> {:?}",
- src_layout,
- dst_layout
- )
- }
- }
- }
-
- pub fn dump_local(&self, place: Place) {
+ pub fn dump_place(&self, place: Place) {
// Debug output
if !log_enabled!(::log::Level::Trace) {
return;
panic!("Failed to access local: {:?}", err);
}
}
- Ok(Value::ByRef(ptr, align)) => {
+ Ok(Operand::Indirect(mplace)) => {
+ let (ptr, align) = mplace.to_scalar_ptr_align();
match ptr {
Scalar::Ptr(ptr) => {
write!(msg, " by align({}) ref:", align.abi()).unwrap();
allocs.push(ptr.alloc_id);
}
- ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
+ ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
}
}
- Ok(Value::Scalar(val)) => {
+ Ok(Operand::Immediate(Value::Scalar(val))) => {
write!(msg, " {:?}", val).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id);
}
}
- Ok(Value::ScalarPair(val1, val2)) => {
+ Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id);
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
- Place::Ptr { ptr, align, .. } => {
- match ptr {
- ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => {
- trace!("by align({}) ref:", align.abi());
+ Place::Ptr(mplace) => {
+ match mplace.ptr {
+ Scalar::Ptr(ptr) => {
+ trace!("by align({}) ref:", mplace.align.abi());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),
(frames, self.tcx.span)
}
+ #[inline(always)]
pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
- super::sign_extend(value, ty)
+ assert!(ty.abi.is_signed());
+ sign_extend(value, ty.size)
}
+ #[inline(always)]
pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
- super::truncate(value, ty)
- }
-
- fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result {
- match ty.sty {
- ty::TyBool |
- ty::TyChar |
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyFnPtr(_) |
- ty::TyNever |
- ty::TyFnDef(..) |
- ty::TyGeneratorWitness(..) |
- ty::TyForeign(..) |
- ty::TyDynamic(..) => {
- bug!("field_name({:?}): not applicable", ty)
- }
-
- // Potentially-fat pointers.
- ty::TyRef(_, pointee, _) |
- ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
- assert!(i < 2);
-
- // Reuse the fat *T type as its own thin pointer data field.
- // This provides information about e.g. DST struct pointees
- // (which may have no non-DST form), and will work as long
- // as the `Abi` or `FieldPlacement` is checked by users.
- if i == 0 {
- return write!(s, ".data_ptr");
- }
-
- match self.tcx.struct_tail(pointee).sty {
- ty::TySlice(_) |
- ty::TyStr => write!(s, ".len"),
- ty::TyDynamic(..) => write!(s, ".vtable_ptr"),
- _ => bug!("field_name({:?}): not applicable", ty)
- }
- }
-
- // Arrays and slices.
- ty::TyArray(_, _) |
- ty::TySlice(_) |
- ty::TyStr => write!(s, "[{}]", i),
-
- // generators and closures.
- ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
- let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
- let freevar = self.tcx.with_freevars(node_id, |fv| fv[i]);
- write!(s, ".upvar({})", self.tcx.hir.name(freevar.var_id()))
- }
-
- ty::TyTuple(_) => write!(s, ".{}", i),
-
- // enums
- ty::TyAdt(def, ..) if def.is_enum() => {
- let variant = &def.variants[variant];
- write!(s, ".{}::{}", variant.name, variant.fields[i].ident)
- }
-
- // other ADTs.
- ty::TyAdt(def, _) => write!(s, ".{}", def.non_enum_variant().fields[i].ident),
-
- ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
- ty::TyInfer(_) | ty::TyError => {
- bug!("write_field_name: unexpected type `{}`", ty)
- }
- }
- }
-
- pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
- trace!("{:?} is now live", local);
-
- let ty = self.frame().mir.local_decls[local].ty;
- let init = self.init_value(ty)?;
- // StorageLive *always* kills the value that's currently stored
- Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init)))
- }
-
- fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
- let ty = self.monomorphize(ty, self.substs());
- let layout = self.layout_of(ty)?;
- Ok(match layout.abi {
- layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef),
- layout::Abi::ScalarPair(..) => Value::ScalarPair(
- ScalarMaybeUndef::Undef,
- ScalarMaybeUndef::Undef,
- ),
- _ => Value::ByRef(self.alloc_ptr(layout)?.into(), layout.align),
- })
+ truncate(value, ty.size)
}
}
-impl<'mir, 'tcx> Frame<'mir, 'tcx> {
- fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
- match self.locals[local] {
- LocalValue::Dead => err!(DeadLocal),
- LocalValue::Live(ref mut local) => {
- *local = value;
- Ok(())
- }
- }
- }
-
- /// Returns the old value of the local
- pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
- trace!("{:?} is now dead", local);
-
- mem::replace(&mut self.locals[local], LocalValue::Dead)
- }
-}
use std::hash::Hash;
use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId};
-use super::{EvalContext, Place, ValTy, Memory};
+use super::{EvalContext, PlaceTy, OpTy, Memory};
use rustc::mir;
-use rustc::ty::{self, Ty};
+use rustc::ty::{self, layout::TyLayout};
use rustc::ty::layout::Size;
use syntax::source_map::Span;
use syntax::ast::Mutability;
fn eval_fn_call<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
- sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
/// directly process an intrinsic without pushing a stack frame.
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[ValTy<'tcx>],
- dest: Place,
- dest_layout: ty::layout::TyLayout<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: PlaceTy<'tcx>,
target: mir::BasicBlock,
) -> EvalResult<'tcx>;
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
- left_ty: Ty<'tcx>,
+ left_layout: TyLayout<'tcx>,
right: Scalar,
- right_ty: Ty<'tcx>,
+ right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Scalar, bool)>>;
/// Called when trying to mark machine defined `MemoryKinds` as static
/// Returns a pointer to the allocated memory
fn box_alloc<'a>(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
- ty: Ty<'tcx>,
- dest: Place,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx>;
/// Called when trying to access a global declared with a `linkage` attribute
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
+
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};
use std::ptr;
use rustc::ty::ParamEnv;
use rustc::ty::query::TyCtxtAt;
use rustc::ty::layout::{self, Align, TargetDataLayout, Size};
-use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef,
- EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType};
-pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint};
+use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, ScalarMaybeUndef,
+ EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType, truncate};
+pub use rustc::mir::interpret::{write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
use syntax::ast::Mutability;
use super::{EvalContext, Machine};
+
////////////////////////////////////////////////////////////////////////////////
// Allocations and pointers
////////////////////////////////////////////////////////////////////////////////
/// Actual memory allocations (arbitrary bytes, may contain pointers into other allocations).
alloc_map: FxHashMap<AllocId, Allocation>,
- /// The current stack frame. Used to check accesses against locks.
- pub cur_frame: usize,
-
pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
}
data,
alloc_kind,
alloc_map,
- cur_frame,
tcx: _,
} = self;
*data == other.data
&& *alloc_kind == other.alloc_kind
&& *alloc_map == other.alloc_map
- && *cur_frame == other.cur_frame
}
}
data,
alloc_kind: _,
alloc_map: _,
- cur_frame,
tcx: _,
} = self;
data.hash(state);
- cur_frame.hash(state);
// We ignore some fields which don't change between evaluation steps.
alloc_kind: FxHashMap::default(),
alloc_map: FxHashMap::default(),
tcx,
- cur_frame: usize::max_value(),
}
}
self.tcx.data_layout.endian
}
- /// Check that the pointer is aligned AND non-NULL.
+ /// Check that the pointer is aligned AND non-NULL. This supports scalars
+ /// for the benefit of other parts of miri that need to check alignment even for ZST.
pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr {
}
}
+ /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end
+ /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
+ /// in-bounds! This follows C's/LLVM's rules.
pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
let allocation_size = alloc.bytes.len() as u64;
assert!(self.tcx.is_static(def_id).is_some());
EvalErrorKind::ReferencedConstant(err).into()
}).map(|val| {
- self.tcx.const_value_to_allocation(val)
+ self.tcx.const_to_allocation(val)
})
}
/// Byte accessors
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
+ /// This checks alignment!
fn get_bytes_unchecked(
&self,
ptr: Pointer,
Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
}
+ /// This checks alignment!
fn get_bytes_unchecked_mut(
&mut self,
ptr: Pointer,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0);
self.clear_relocations(ptr, size)?;
- self.mark_definedness(ptr.into(), size, true)?;
+ self.mark_definedness(ptr, size, true)?;
self.get_bytes_unchecked_mut(ptr, size, align)
}
}
length: u64,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
- // Empty accesses don't need to be valid pointers, but they should still be aligned
- self.check_align(src, src_align)?;
- self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
+ // Nothing to do for ZST, other than checking alignment and non-NULLness.
+ self.check_align(src, src_align)?;
+ self.check_align(dest, dest_align)?;
return Ok(());
}
let src = src.to_ptr()?;
new_relocations
};
+ // This also checks alignment.
let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();
pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if size.bytes() == 0 {
+ self.check_align(ptr, align)?;
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, align)
pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if src.is_empty() {
+ self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
- self.check_align(ptr, align)?;
if count.bytes() == 0 {
+ self.check_align(ptr, align)?;
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
Ok(())
}
+ /// Read a *non-ZST* scalar
pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianness = self.endianness();
+ // get_bytes_unchecked tests alignment
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct.
// We must not return Ok() for unaligned pointers!
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
+ /// Write a *non-ZST* scalar
pub fn write_scalar(
&mut self,
- ptr: Scalar,
+ ptr: Pointer,
ptr_align: Align,
val: ScalarMaybeUndef,
type_size: Size,
- type_align: Align,
- signed: bool,
) -> EvalResult<'tcx> {
let endianness = self.endianness();
- self.check_align(ptr, ptr_align)?;
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
val.offset.bytes() as u128
}
- Scalar::Bits { size: 0, .. } => {
- // nothing to do for ZSTs
- assert_eq!(type_size.bytes(), 0);
- return Ok(());
- }
-
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes());
+ assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
+ "Unexpected value of size {} when writing to memory", size);
bits
},
};
- let ptr = ptr.to_ptr()?;
-
{
- let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(type_align))?;
- if signed {
- write_target_int(endianness, dst, bytes as i128).unwrap();
- } else {
- write_target_uint(endianness, dst, bytes).unwrap();
- }
+ // get_bytes_mut checks alignment
+ let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
+ write_target_uint(endianness, dst, bytes).unwrap();
}
// See if we have to also write a relocation
Ok(())
}
- pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
+ pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
- self.write_scalar(ptr.into(), ptr_align, val, ptr_size, ptr_align, false)
+ self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
fn int_align(&self, size: Size) -> Align {
pub fn mark_definedness(
&mut self,
- ptr: Scalar,
+ ptr: Pointer,
size: Size,
new_state: bool,
) -> EvalResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
- let ptr = ptr.to_ptr()?;
let alloc = self.get_mut(ptr.alloc_id)?;
alloc.undef_mask.set_range(
ptr.offset,
pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>;
fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;
-
- /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
- /// this may have to perform a load.
- fn into_ptr(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, ScalarMaybeUndef> {
- Ok(match value {
- Value::ByRef(ptr, align) => {
- self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
- }
- Value::Scalar(ptr) |
- Value::ScalarPair(ptr, _) => ptr,
- }.into())
- }
-
- fn into_ptr_vtable_pair(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> {
- match value {
- Value::ByRef(ref_ptr, align) => {
- let mem = self.memory();
- let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
- let vtable = mem.read_ptr_sized(
- ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
- align
- )?.unwrap_or_err()?.to_ptr()?;
- Ok((ptr, vtable))
- }
-
- Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.unwrap_or_err()?.to_ptr()?)),
- _ => bug!("expected ptr and vtable, got {:?}", value),
- }
- }
-
- fn into_slice(
- &self,
- value: Value,
- ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> {
- match value {
- Value::ByRef(ref_ptr, align) => {
- let mem = self.memory();
- let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into();
- let len = mem.read_ptr_sized(
- ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
- align
- )?.unwrap_or_err()?.to_bits(mem.pointer_size())? as u64;
- Ok((ptr, len))
- }
- Value::ScalarPair(ptr, val) => {
- let len = val.unwrap_or_err()?.to_bits(self.memory().pointer_size())?;
- Ok((ptr, len as u64))
- }
- Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value),
- }
- }
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
//! An interpreter for MIR used in CTFE and by miri
mod cast;
-mod const_eval;
mod eval_context;
mod place;
+mod operand;
mod machine;
mod memory;
mod operator;
mod step;
mod terminator;
mod traits;
+mod const_eval;
+mod validity;
pub use self::eval_context::{
- EvalContext, Frame, StackPopCleanup,
- TyAndPacked, ValTy,
+ EvalContext, Frame, StackPopCleanup, LocalValue,
};
-pub use self::place::{Place, PlaceExtra};
+pub use self::place::{Place, PlaceExtra, PlaceTy, MemPlace, MPlaceTy};
pub use self::memory::{Memory, MemoryKind, HasMemory};
mk_borrowck_eval_cx,
mk_eval_cx,
CompileTimeEvaluator,
- const_value_to_allocation_provider,
+ const_to_allocation_provider,
const_eval_provider,
- const_val_field,
+ const_field,
const_variant_index,
- value_to_const_value,
+ op_to_const,
};
pub use self::machine::Machine;
-pub use self::memory::{write_target_uint, write_target_int, read_target_uint};
-
-use rustc::ty::layout::TyLayout;
-
-pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 {
- let size = layout.size.bits();
- assert!(layout.abi.is_signed());
- // sign extend
- let shift = 128 - size;
- // shift the unsigned value to the left
- // and back to the right as signed (essentially fills with FF on the left)
- (((value << shift) as i128) >> shift) as u128
-}
-
-pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 {
- let size = layout.size.bits();
- let shift = 128 - size;
- // truncate (shift left to drop out leftover values, shift right to fill with zeroes)
- (value << shift) >> shift
-}
+pub use self::operand::{Value, ValTy, Operand, OpTy};
--- /dev/null
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::convert::TryInto;
+
+use rustc::mir;
+use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt};
+use rustc_data_structures::indexed_vec::Idx;
+
+use rustc::mir::interpret::{
+ GlobalId, ConstValue, Scalar, EvalResult, Pointer, ScalarMaybeUndef, EvalErrorKind
+};
+use super::{EvalContext, Machine, MemPlace, MPlaceTy, PlaceExtra, MemoryKind};
+
+/// A `Value` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and fat pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Value`, and do not have to work with a `Place`.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Value {
+ Scalar(ScalarMaybeUndef),
+ ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef),
+}
+
+impl<'tcx> Value {
+ pub fn new_slice(
+ val: Scalar,
+ len: u64,
+ cx: impl HasDataLayout
+ ) -> Self {
+ Value::ScalarPair(val.into(), Scalar::Bits {
+ bits: len as u128,
+ size: cx.data_layout().pointer_size.bytes() as u8,
+ }.into())
+ }
+
+ pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
+ Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
+ }
+
+ #[inline]
+ pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef {
+ match self {
+ Value::Scalar(val) => val,
+ Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
+ }
+ }
+
+ #[inline]
+ pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> {
+ self.to_scalar_or_undef().not_undef()
+ }
+
+ /// Convert the value into a pointer (or a pointer-sized integer).
+ /// Throws away the second half of a ScalarPair!
+ #[inline]
+ pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> {
+ match self {
+ Value::Scalar(ptr) |
+ Value::ScalarPair(ptr, _) => ptr.not_undef(),
+ }
+ }
+
+ pub fn to_scalar_dyn_trait(self) -> EvalResult<'tcx, (Scalar, Pointer)> {
+ match self {
+ Value::ScalarPair(ptr, vtable) =>
+ Ok((ptr.not_undef()?, vtable.to_ptr()?)),
+ _ => bug!("expected ptr and vtable, got {:?}", self),
+ }
+ }
+
+ pub fn to_scalar_slice(self, cx: impl HasDataLayout) -> EvalResult<'tcx, (Scalar, u64)> {
+ match self {
+ Value::ScalarPair(ptr, val) => {
+ let len = val.to_bits(cx.data_layout().pointer_size)?;
+ Ok((ptr.not_undef()?, len as u64))
+ }
+ _ => bug!("expected ptr and length, got {:?}", self),
+ }
+ }
+}
+
+// ScalarPair needs a type to interpret, so we often have a value and a type together
+// as input for binary and cast operations.
+#[derive(Copy, Clone, Debug)]
+pub struct ValTy<'tcx> {
+ pub value: Value,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
+ type Target = Value;
+ #[inline(always)]
+ fn deref(&self) -> &Value {
+ &self.value
+ }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Operand {
+ Immediate(Value),
+ Indirect(MemPlace),
+}
+
+impl Operand {
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
+ Operand::Indirect(MemPlace::from_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn from_scalar_value(val: Scalar) -> Self {
+ Operand::Immediate(Value::Scalar(val.into()))
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MemPlace {
+ match self {
+ Operand::Indirect(mplace) => mplace,
+ _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
+
+ }
+ }
+
+ #[inline]
+ pub fn to_immediate(self) -> Value {
+ match self {
+ Operand::Immediate(val) => val,
+ _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
+
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct OpTy<'tcx> {
+ pub op: Operand,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for OpTy<'tcx> {
+ type Target = Operand;
+ #[inline(always)]
+ fn deref(&self) -> &Operand {
+ &self.op
+ }
+}
+
+impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx>) -> Self {
+ OpTy {
+ op: Operand::Indirect(*mplace),
+ layout: mplace.layout
+ }
+ }
+}
+
+impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> {
+ #[inline(always)]
+ fn from(val: ValTy<'tcx>) -> Self {
+ OpTy {
+ op: Operand::Immediate(val.value),
+ layout: val.layout
+ }
+ }
+}
+
+impl<'tcx> OpTy<'tcx> {
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::from_ptr(ptr, align), layout }
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::from_ptr(ptr, layout.align), layout }
+ }
+
+ #[inline]
+ pub fn from_scalar_value(val: Scalar, layout: TyLayout<'tcx>) -> Self {
+ OpTy { op: Operand::Immediate(Value::Scalar(val.into())), layout }
+ }
+}
+
+// Use the existing layout if given (but sanity check in debug mode),
+// or compute the layout.
+#[inline(always)]
+fn from_known_layout<'tcx>(
+ layout: Option<TyLayout<'tcx>>,
+ compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>>
+) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ match layout {
+ None => compute(),
+ Some(layout) => {
+ if cfg!(debug_assertions) {
+ let layout2 = compute()?;
+ assert_eq!(layout.details, layout2.details,
+ "Mismatch in layout of supposedly equal-layout types {:?} and {:?}",
+ layout.ty, layout2.ty);
+ }
+ Ok(layout)
+ }
+ }
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ /// Try reading a value in memory; this is interesting particularly for ScalarPair.
+ /// Return None if the layout does not permit loading this as a value.
+ pub(super) fn try_read_value_from_mplace(
+ &self,
+ mplace: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, Option<Value>> {
+ if mplace.extra != PlaceExtra::None {
+ return Ok(None);
+ }
+ let (ptr, ptr_align) = mplace.to_scalar_ptr_align();
+
+ if mplace.layout.size.bytes() == 0 {
+ // Not all ZSTs have a layout we would handle below, so just short-circuit them
+ // all here.
+ self.memory.check_align(ptr, ptr_align)?;
+ return Ok(Some(Value::Scalar(Scalar::zst().into())));
+ }
+
+ let ptr = ptr.to_ptr()?;
+ match mplace.layout.abi {
+ layout::Abi::Scalar(..) => {
+ let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?;
+ Ok(Some(Value::Scalar(scalar)))
+ }
+ layout::Abi::ScalarPair(ref a, ref b) => {
+ let (a, b) = (&a.value, &b.value);
+ let (a_size, b_size) = (a.size(self), b.size(self));
+ let a_ptr = ptr;
+ let b_offset = a_size.abi_align(b.align(self));
+ assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
+ let b_ptr = ptr.offset(b_offset, self)?.into();
+ let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
+ let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
+ Ok(Some(Value::ScalarPair(a_val, b_val)))
+ }
+ _ => Ok(None),
+ }
+ }
+
+ /// Try returning an immediate value for the operand.
+ /// If the layout does not permit loading this as a value, return where in memory
+ /// we can find the data.
+ /// Note that for a given layout, this operation will either always fail or always
+ /// succeed! Whether it succeeds depends on whether the layout can be represented
+ /// in a `Value`, not on which data is stored there currently.
+ pub(super) fn try_read_value(
+ &self,
+ src: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, Result<Value, MemPlace>> {
+ Ok(match src.try_as_mplace() {
+ Ok(mplace) => {
+ if let Some(val) = self.try_read_value_from_mplace(mplace)? {
+ Ok(val)
+ } else {
+ Err(*mplace)
+ }
+ },
+ Err(val) => Ok(val),
+ })
+ }
+
+ /// Read a value from a place, asserting that that is possible with the given layout.
+ #[inline(always)]
+ pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
+ if let Ok(value) = self.try_read_value(op)? {
+ Ok(ValTy { value, layout: op.layout })
+ } else {
+ bug!("primitive read failed for type: {:?}", op.layout.ty);
+ }
+ }
+
+ /// Read a scalar from a place
+ pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> {
+ match *self.read_value(op)? {
+ Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty),
+ Value::Scalar(val) => Ok(val),
+ }
+ }
+
+ pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> {
+ // This decides which types we will use the Immediate optimization for, and hence should
+ // match what `try_read_value` and `eval_place_to_op` support.
+ if layout.is_zst() {
+ return Ok(Operand::Immediate(Value::Scalar(Scalar::zst().into())));
+ }
+
+ Ok(match layout.abi {
+ layout::Abi::Scalar(..) =>
+ Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)),
+ layout::Abi::ScalarPair(..) =>
+ Operand::Immediate(Value::ScalarPair(
+ ScalarMaybeUndef::Undef,
+ ScalarMaybeUndef::Undef,
+ )),
+ _ => {
+ trace!("Forcing allocation for local of type {:?}", layout.ty);
+ Operand::Indirect(
+ *self.allocate(layout, MemoryKind::Stack)?
+ )
+ }
+ })
+ }
+
+ /// Projection functions
+ pub fn operand_field(
+ &self,
+ op: OpTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let base = match op.try_as_mplace() {
+ Ok(mplace) => {
+ // The easy case
+ let field = self.mplace_field(mplace, field)?;
+ return Ok(field.into());
+ },
+ Err(value) => value
+ };
+
+ let field = field.try_into().unwrap();
+ let field_layout = op.layout.field(self, field)?;
+ if field_layout.size.bytes() == 0 {
+ let val = Value::Scalar(Scalar::zst().into());
+ return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout });
+ }
+ let offset = op.layout.fields.offset(field);
+ let value = match base {
+ // the field covers the entire type
+ _ if offset.bytes() == 0 && field_layout.size == op.layout.size => base,
+ // extract fields from types with `ScalarPair` ABI
+ Value::ScalarPair(a, b) => {
+ let val = if offset.bytes() == 0 { a } else { b };
+ Value::Scalar(val)
+ },
+ Value::Scalar(val) =>
+ bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout),
+ };
+ Ok(OpTy { op: Operand::Immediate(value), layout: field_layout })
+ }
+
+ pub(super) fn operand_downcast(
+ &self,
+ op: OpTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ // Downcasts only change the layout
+ Ok(match op.try_as_mplace() {
+ Ok(mplace) => {
+ self.mplace_downcast(mplace, variant)?.into()
+ },
+ Err(..) => {
+ let layout = op.layout.for_variant(self, variant);
+ OpTy { layout, ..op }
+ }
+ })
+ }
+
+ // Take an operand, representing a pointer, and dereference it -- that
+ // will always be a MemPlace.
+ pub(super) fn deref_operand(
+ &self,
+ src: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let val = self.read_value(src)?;
+ trace!("deref to {} on {:?}", val.layout.ty, val);
+ Ok(self.ref_to_mplace(val)?)
+ }
+
+ pub fn operand_projection(
+ &self,
+ base: OpTy<'tcx>,
+ proj_elem: &mir::PlaceElem<'tcx>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.operand_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.operand_downcast(base, variant)?,
+ Deref => self.deref_operand(base)?.into(),
+ // The rest should only occur as mplace, we do not use Immediates for types
+ // allowing such operations. This matches place_projection forcing an allocation.
+ Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+ let mplace = base.to_mem_place();
+ self.mplace_projection(mplace, proj_elem)?.into()
+ }
+ })
+ }
+
+ // Evaluate a place with the goal of reading from it. This lets us sometimes
+ // avoid allocations. If you already know the layout, you can pass it in
+ // to avoid looking it up again.
+ fn eval_place_to_op(
+ &mut self,
+ mir_place: &mir::Place<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::Place::*;
+ Ok(match *mir_place {
+ Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer),
+ Local(local) => {
+ let op = *self.frame().locals[local].access()?;
+ let layout = from_known_layout(layout,
+ || self.layout_of_local(self.cur_frame(), local))?;
+ OpTy { op, layout }
+ },
+
+ Projection(ref proj) => {
+ let op = self.eval_place_to_op(&proj.base, None)?;
+ self.operand_projection(op, &proj.elem)?
+ }
+
+ // Everything else is an mplace, so we just call `eval_place`.
+ // Note that getting an mplace for a static always requires `&mut`,
+ // so this does not "cost" us anything in terms of mutability.
+ Promoted(_) | Static(_) => {
+ let place = self.eval_place(mir_place)?;
+ place.to_mem_place().into()
+ }
+ })
+ }
+
+ /// Evaluate the operand, returning a place where you can then find the data.
+ /// If you already know the layout, you can save some table lookups
+ /// by passing it in here.
+ pub fn eval_operand(
+ &mut self,
+ mir_op: &mir::Operand<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, OpTy<'tcx>> {
+ use rustc::mir::Operand::*;
+ let op = match *mir_op {
+ // FIXME: do some more logic on `move` to invalidate the old location
+ Copy(ref place) |
+ Move(ref place) =>
+ self.eval_place_to_op(place, layout)?,
+
+ Constant(ref constant) => {
+ let layout = from_known_layout(layout, || {
+ let ty = self.monomorphize(mir_op.ty(self.mir(), *self.tcx), self.substs());
+ self.layout_of(ty)
+ })?;
+ let op = self.const_value_to_op(constant.literal.val)?;
+ OpTy { op, layout }
+ }
+ };
+ trace!("{:?}: {:?}", mir_op, *op);
+ Ok(op)
+ }
+
+ /// Evaluate a bunch of operands at once
+ pub(crate) fn eval_operands(
+ &mut self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> {
+ ops.into_iter()
+ .map(|op| self.eval_operand(op, None))
+ .collect()
+ }
+
+ // Also used e.g. when miri runs into a constant.
+ // Unfortunately, this needs an `&mut` to be able to allocate a copy of a `ByRef`
+ // constant. This bleeds up to `eval_operand` needing `&mut`.
+ pub fn const_value_to_op(
+ &mut self,
+ val: ConstValue<'tcx>,
+ ) -> EvalResult<'tcx, Operand> {
+ match val {
+ ConstValue::Unevaluated(def_id, substs) => {
+ let instance = self.resolve(def_id, substs)?;
+ self.global_to_op(GlobalId {
+ instance,
+ promoted: None,
+ })
+ }
+ ConstValue::ByRef(alloc, offset) => {
+ // FIXME: Allocate new AllocId for all constants inside
+ let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
+ Ok(Operand::from_ptr(Pointer::new(id, offset), alloc.align))
+ },
+ ConstValue::ScalarPair(a, b) =>
+ Ok(Operand::Immediate(Value::ScalarPair(a.into(), b))),
+ ConstValue::Scalar(x) =>
+ Ok(Operand::Immediate(Value::Scalar(x.into()))),
+ }
+ }
+
+ pub(super) fn global_to_op(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> {
+ let cv = self.const_eval(gid)?;
+ self.const_value_to_op(cv.val)
+ }
+
+ /// We cannot do self.read_value(self.eval_operand) due to eval_operand taking &mut self,
+ /// so this helps avoid an unnecessary let.
+ #[inline]
+ pub fn eval_operand_and_read_value(
+ &mut self,
+ op: &mir::Operand<'tcx>,
+ layout: Option<TyLayout<'tcx>>,
+ ) -> EvalResult<'tcx, ValTy<'tcx>> {
+ let op = self.eval_operand(op, layout)?;
+ self.read_value(op)
+ }
+
+ /// Reads a tag and produces the corresponding variant index
+ pub fn read_discriminant_as_variant_index(
+ &self,
+ rval: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, usize> {
+ match rval.layout.variants {
+ layout::Variants::Single { index } => Ok(index),
+ layout::Variants::Tagged { .. } => {
+ let discr_val = self.read_discriminant_value(rval)?;
+ rval.layout.ty
+ .ty_adt_def()
+ .expect("tagged layout for non adt")
+ .discriminants(self.tcx.tcx)
+ .position(|var| var.val == discr_val)
+ .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
+ }
+ layout::Variants::NicheFilling { .. } => {
+ let discr_val = self.read_discriminant_value(rval)?;
+ assert_eq!(discr_val as usize as u128, discr_val);
+ Ok(discr_val as usize)
+ },
+ }
+ }
+
+ pub fn read_discriminant_value(
+ &self,
+ rval: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, u128> {
+ trace!("read_discriminant_value {:#?}", rval.layout);
+ if rval.layout.abi == layout::Abi::Uninhabited {
+ return err!(Unreachable);
+ }
+
+ match rval.layout.variants {
+ layout::Variants::Single { index } => {
+ let discr_val = rval.layout.ty.ty_adt_def().map_or(
+ index as u128,
+ |def| def.discriminant_for_variant(*self.tcx, index).val);
+ return Ok(discr_val);
+ }
+ layout::Variants::Tagged { .. } |
+ layout::Variants::NicheFilling { .. } => {},
+ }
+ let discr_op = self.operand_field(rval, 0)?;
+ let discr_val = self.read_value(discr_op)?;
+ trace!("discr value: {:?}", discr_val);
+ let raw_discr = discr_val.to_scalar()?;
+ Ok(match rval.layout.variants {
+ layout::Variants::Single { .. } => bug!(),
+ // FIXME: We should catch invalid discriminants here!
+ layout::Variants::Tagged { .. } => {
+ if discr_val.layout.ty.is_signed() {
+ let i = raw_discr.to_bits(discr_val.layout.size)? as i128;
+ // going from layout tag type to typeck discriminant type
+ // requires first sign extending with the layout discriminant
+ let shift = 128 - discr_val.layout.size.bits();
+ let sexted = (i << shift) >> shift;
+ // and then zeroing with the typeck discriminant type
+ let discr_ty = rval.layout.ty
+ .ty_adt_def().expect("tagged layout corresponds to adt")
+ .repr
+ .discr_type();
+ let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
+ let shift = 128 - discr_ty.size().bits();
+ let truncatee = sexted as u128;
+ (truncatee << shift) >> shift
+ } else {
+ raw_discr.to_bits(discr_val.layout.size)?
+ }
+ },
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
+ } => {
+ let variants_start = *niche_variants.start() as u128;
+ let variants_end = *niche_variants.end() as u128;
+ match raw_discr {
+ Scalar::Ptr(_) => {
+ assert!(niche_start == 0);
+ assert!(variants_start == variants_end);
+ dataful_variant as u128
+ },
+ Scalar::Bits { bits: raw_discr, size } => {
+ assert_eq!(size as u64, discr_val.layout.size.bytes());
+ let discr = raw_discr.wrapping_sub(niche_start)
+ .wrapping_add(variants_start);
+ if variants_start <= discr && discr <= variants_end {
+ discr
+ } else {
+ dataful_variant as u128
+ }
+ },
+ }
+ }
+ })
+ }
+
+}
use rustc::mir;
-use rustc::ty::{self, Ty, layout};
+use rustc::ty::{self, layout::{self, TyLayout}};
use syntax::ast::FloatTy;
-use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
+use rustc::mir::interpret::{EvalResult, Scalar};
-use super::{EvalContext, Place, Machine, ValTy};
+use super::{EvalContext, PlaceTy, Value, Machine, ValTy};
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- fn binop_with_overflow(
- &self,
- op: mir::BinOp,
- left: ValTy<'tcx>,
- right: ValTy<'tcx>,
- ) -> EvalResult<'tcx, (Scalar, bool)> {
- let left_val = self.value_to_scalar(left)?;
- let right_val = self.value_to_scalar(right)?;
- self.binary_op(op, left_val, left.ty, right_val, right.ty)
- }
-
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
/// and a boolean signifying the potential overflow to the destination.
- pub fn intrinsic_with_overflow(
+ pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
+ dest: PlaceTy<'tcx>,
) -> EvalResult<'tcx> {
- let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
+ let (val, overflowed) = self.binary_op(op, left, right)?;
let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
- let valty = ValTy {
- value: val,
- ty: dest_ty,
- };
- self.write_value(valty, dest)
+ self.write_value(val, dest)
}
/// Applies the binary operation `op` to the arguments and writes the result to the
- /// destination. Returns `true` if the operation overflowed.
- pub fn intrinsic_overflowing(
+ /// destination.
+ pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
- dest: Place,
- dest_ty: Ty<'tcx>,
- ) -> EvalResult<'tcx, bool> {
- let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
- self.write_scalar(dest, val, dest_ty)?;
- Ok(overflowed)
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let (val, _overflowed) = self.binary_op(op, left, right)?;
+ self.write_scalar(val, dest)
}
}
pub fn binary_op(
&self,
bin_op: mir::BinOp,
- left: Scalar,
- left_ty: Ty<'tcx>,
- right: Scalar,
- right_ty: Ty<'tcx>,
+ ValTy { value: left, layout: left_layout }: ValTy<'tcx>,
+ ValTy { value: right, layout: right_layout }: ValTy<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
use rustc::mir::BinOp::*;
- let left_layout = self.layout_of(left_ty)?;
- let right_layout = self.layout_of(right_ty)?;
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
let left_kind = match left_layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.value,
- _ => return err!(TypeNotPrimitive(left_ty)),
+ _ => return err!(TypeNotPrimitive(left_layout.ty)),
};
let right_kind = match right_layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.value,
- _ => return err!(TypeNotPrimitive(right_ty)),
+ _ => return err!(TypeNotPrimitive(right_layout.ty)),
};
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
// I: Handle operations that support pointers
if !left_kind.is_float() && !right_kind.is_float() {
- if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? {
+ if let Some(handled) =
+ M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)?
+ {
return Ok(handled);
}
}
}
}
- if let ty::TyFloat(fty) = left_ty.sty {
+ if let ty::Float(fty) = left_layout.ty.sty {
macro_rules! float_math {
($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l);
}
}
- let size = self.layout_of(left_ty).unwrap().size.bytes() as u8;
+ let size = left_layout.size.bytes() as u8;
// only ints left
let val = match bin_op {
"unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
left,
- left_ty,
+ left_layout.ty,
right,
- right_ty,
+ right_layout.ty,
);
return err!(Unimplemented(msg));
}
let result_bytes = match (un_op, &layout.ty.sty) {
- (Not, ty::TyBool) => !val.to_bool()? as u128,
+ (Not, ty::Bool) => !val.to_bool()? as u128,
(Not, _) => !bytes,
- (Neg, ty::TyFloat(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)),
- (Neg, ty::TyFloat(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)),
+ (Neg, ty::Float(FloatTy::F32)) => Single::to_bits(-Single::from_bits(bytes)),
+ (Neg, ty::Float(FloatTy::F64)) => Double::to_bits(-Double::from_bits(bytes)),
(Neg, _) if bytes == (1 << (size.bits() - 1)) => return err!(OverflowNeg),
(Neg, _) => (-(bytes as i128)) as u128,
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::hash::{Hash, Hasher};
+use std::convert::TryFrom;
+
use rustc::mir;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout};
use rustc_data_structures::indexed_vec::Idx;
-use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef};
-use super::{EvalContext, Machine, ValTy};
-use interpret::memory::HasMemory;
+use rustc::mir::interpret::{
+ GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef
+};
+use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind};
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct MemPlace {
+ /// A place may have an integral pointer for ZSTs, since it might
+ /// be turned back into a reference before ever being dereferenced.
+ /// However, it may never be undef.
+ pub ptr: Scalar,
+ pub align: Align,
+ pub extra: PlaceExtra,
+}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Place {
/// A place referring to a value allocated in the `Memory` system.
- Ptr {
- /// A place may have an invalid (integral or undef) pointer,
- /// since it might be turned back into a reference
- /// before ever being dereferenced.
- ptr: ScalarMaybeUndef,
- align: Align,
- extra: PlaceExtra,
- },
+ Ptr(MemPlace),
- /// A place referring to a value on the stack. Represented by a stack frame index paired with
- /// a Mir local index.
- Local { frame: usize, local: mir::Local },
+ /// To support alloc-free locals, we are able to write directly to a local.
+ /// (Without that optimization, we'd just always be a `MemPlace`.)
+ Local {
+ frame: usize,
+ local: mir::Local,
+ },
}
+// Extra information for fat pointers / places
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum PlaceExtra {
None,
Length(u64),
Vtable(Pointer),
- DowncastVariant(usize),
}
-impl<'tcx> Place {
- /// Produces a Place that will error if attempted to be read from
- pub fn undef() -> Self {
- Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap())
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceTy<'tcx> {
+ place: Place,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> {
+ type Target = Place;
+ #[inline(always)]
+ fn deref(&self) -> &Place {
+ &self.place
+ }
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Debug)]
+pub struct MPlaceTy<'tcx> {
+ mplace: MemPlace,
+ pub layout: TyLayout<'tcx>,
+}
+
+impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> {
+ type Target = MemPlace;
+ #[inline(always)]
+ fn deref(&self) -> &MemPlace {
+ &self.mplace
}
+}
+
+impl<'tcx> From<MPlaceTy<'tcx>> for PlaceTy<'tcx> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx>) -> Self {
+ PlaceTy {
+ place: Place::Ptr(mplace.mplace),
+ layout: mplace.layout
+ }
+ }
+}
- pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self {
- Place::Ptr {
+impl MemPlace {
+ #[inline(always)]
+ pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+ MemPlace {
ptr,
align,
extra: PlaceExtra::None,
}
}
+ #[inline(always)]
pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
- Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align)
+ Self::from_scalar_ptr(ptr.into(), align)
+ }
+
+ #[inline(always)]
+ pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
+ assert_eq!(self.extra, PlaceExtra::None);
+ (self.ptr, self.align)
}
- pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) {
+ /// Extract the ptr part of the mplace
+ #[inline(always)]
+ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
+ // At this point, we forget about the alignment information -- the place has been turned into a reference,
+ // and no matter where it came from, it now must be aligned.
+ self.to_scalar_ptr_align().0.to_ptr()
+ }
+
+ /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space.
+ /// This is the inverse of `ref_to_mplace`.
+ pub fn to_ref(self, cx: impl HasDataLayout) -> Value {
+ // We ignore the alignment of the place here -- special handling for packed structs ends
+ // at the `&` operator.
+ match self.extra {
+ PlaceExtra::None => Value::Scalar(self.ptr.into()),
+ PlaceExtra::Length(len) => Value::new_slice(self.ptr.into(), len, cx),
+ PlaceExtra::Vtable(vtable) => Value::new_dyn_trait(self.ptr.into(), vtable),
+ }
+ }
+}
+
+impl<'tcx> MPlaceTy<'tcx> {
+ #[inline]
+ fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self {
+ MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout }
+ }
+
+ #[inline]
+ pub(super) fn len(self) -> u64 {
+ // Sanity check
+ let ty_len = match self.layout.fields {
+ layout::FieldPlacement::Array { count, .. } => count,
+ _ => bug!("Length for non-array layout {:?} requested", self.layout),
+ };
+ if let PlaceExtra::Length(len) = self.extra {
+ len
+ } else {
+ ty_len
+ }
+ }
+}
+
+// Validation needs to hash MPlaceTy, but we cannot hash Layout -- so we just hash the type
+impl<'tcx> Hash for MPlaceTy<'tcx> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.mplace.hash(state);
+ self.layout.ty.hash(state);
+ }
+}
+impl<'tcx> PartialEq for MPlaceTy<'tcx> {
+ fn eq(&self, other: &Self) -> bool {
+ self.mplace == other.mplace && self.layout.ty == other.layout.ty
+ }
+}
+impl<'tcx> Eq for MPlaceTy<'tcx> {}
+
+impl<'tcx> OpTy<'tcx> {
+ #[inline(always)]
+ pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx>, Value> {
+ match *self {
+ Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+ Operand::Immediate(value) => Err(value),
+ }
+ }
+
+ #[inline(always)]
+ pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+ self.try_as_mplace().unwrap()
+ }
+}
+
+impl<'tcx> Place {
+ /// Produces a Place that will error if attempted to be read from or written to
+ #[inline]
+ pub fn null(cx: impl HasDataLayout) -> Self {
+ Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
+ }
+
+ #[inline]
+ pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+ Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
+ Place::Ptr(MemPlace::from_ptr(ptr, align))
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MemPlace {
match self {
- Place::Ptr { ptr, align, extra } => (ptr, align, extra),
- _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
+ Place::Ptr(mplace) => mplace,
+ _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
}
}
- pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) {
- let (ptr, align, _extra) = self.to_ptr_align_extra();
- (ptr, align)
+ #[inline]
+ pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
+ self.to_mem_place().to_scalar_ptr_align()
}
+ #[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
- // At this point, we forget about the alignment information -- the place has been turned into a reference,
- // and no matter where it came from, it now must be aligned.
- self.to_ptr_align().0.unwrap_or_err()?.to_ptr()
- }
-
- pub(super) fn elem_ty_and_len(
- self,
- ty: Ty<'tcx>,
- tcx: TyCtxt<'_, 'tcx, '_>
- ) -> (Ty<'tcx>, u64) {
- match ty.sty {
- ty::TyArray(elem, n) => (elem, n.unwrap_usize(tcx)),
-
- ty::TySlice(elem) => {
- match self {
- Place::Ptr { extra: PlaceExtra::Length(len), .. } => (elem, len),
- _ => {
- bug!(
- "elem_ty_and_len of a TySlice given non-slice place: {:?}",
- self
- )
- }
- }
- }
+ self.to_mem_place().to_ptr()
+ }
+}
- _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
- }
+impl<'tcx> PlaceTy<'tcx> {
+ /// Produces a Place that will error if attempted to be read from or written to
+ #[inline]
+ pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self {
+ PlaceTy { place: Place::from_scalar_ptr(Scalar::ptr_null(cx), layout.align), layout }
+ }
+
+ #[inline]
+ pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+ MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
}
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- /// Reads a value from the place without going through the intermediate step of obtaining
- /// a `miri::Place`
- pub fn try_read_place(
+ /// Take a value, which represents a (thin or fat) reference, and make it a place.
+ /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`.
+ pub fn ref_to_mplace(
+ &self, val: ValTy<'tcx>
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
+ let layout = self.layout_of(pointee_type)?;
+ let mplace = match self.tcx.struct_tail(pointee_type).sty {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = val.to_scalar_dyn_trait()?;
+ MemPlace {
+ ptr,
+ align: layout.align,
+ extra: PlaceExtra::Vtable(vtable),
+ }
+ }
+ ty::Str | ty::Slice(_) => {
+ let (ptr, len) = val.to_scalar_slice(self)?;
+ MemPlace {
+ ptr,
+ align: layout.align,
+ extra: PlaceExtra::Length(len),
+ }
+ }
+ _ => MemPlace {
+ ptr: val.to_scalar()?,
+ align: layout.align,
+ extra: PlaceExtra::None,
+ },
+ };
+ Ok(MPlaceTy { mplace, layout })
+ }
+
+ /// Offset a pointer to project to a field. Unlike place_field, this is always
+ /// possible without allocating, so it can take &self. Also return the field's layout.
+ /// This supports both struct and array fields.
+ #[inline(always)]
+ pub fn mplace_field(
&self,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, Option<Value>> {
- use rustc::mir::Place::*;
- match *place {
- // Might allow this in the future, right now there's no way to do this from Rust code anyway
- Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer),
- // Directly reading a local will always succeed
- Local(local) => self.frame().locals[local].access().map(Some),
- // No fast path for statics. Reading from statics is rare and would require another
- // Machine function to handle differently in miri.
- Promoted(_) |
- Static(_) => Ok(None),
- Projection(ref proj) => self.try_read_place_projection(proj),
- }
+ base: MPlaceTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ // Not using the layout method because we want to compute on u64
+ let offset = match base.layout.fields {
+ layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
+ offsets[usize::try_from(field).unwrap()],
+ layout::FieldPlacement::Array { stride, .. } => {
+ let len = base.len();
+ assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len);
+ stride * field
+ }
+ layout::FieldPlacement::Union(count) => {
+ assert!(field < count as u64, "Tried to access field {} of union with {} fields", field, count);
+ // Offset is always 0
+ Size::from_bytes(0)
+ }
+ };
+ // The only way the conversion can fail is if this is an array (otherwise we already panicked
+ // above). In that case, all fields are equal.
+ let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
+
+ // Adjust offset
+ let offset = match base.extra {
+ PlaceExtra::Vtable(vtable) => {
+ let (_, align) = self.read_size_and_align_from_vtable(vtable)?;
+ // FIXME: Is this right? Should we always do this, or only when actually
+ // accessing the field to which the vtable applies?
+ offset.abi_align(align)
+ }
+ _ => {
+ // No adjustment needed
+ offset
+ }
+ };
+
+ let ptr = base.ptr.ptr_offset(offset, self)?;
+ let align = base.align.min(field_layout.align);
+ let extra = if !field_layout.is_unsized() {
+ PlaceExtra::None
+ } else {
+ assert!(base.extra != PlaceExtra::None, "Expected fat ptr");
+ base.extra
+ };
+
+ Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout })
}
- pub fn read_field(
+ // Iterates over all fields of an array. Much more efficient than doing the
+ // same by repeatedly calling `mplace_array`.
+ pub fn mplace_array_fields(
&self,
- base: Value,
- variant: Option<usize>,
- field: mir::Field,
- mut base_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Value, TyLayout<'tcx>)> {
- if let Some(variant_index) = variant {
- base_layout = base_layout.for_variant(self, variant_index);
- }
- let field_index = field.index();
- let field = base_layout.field(self, field_index)?;
- if field.size.bytes() == 0 {
- return Ok((
- Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })),
- field,
- ));
- }
- let offset = base_layout.fields.offset(field_index);
- let value = match base {
- // the field covers the entire type
- Value::ScalarPair(..) |
- Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => base,
- // extract fields from types with `ScalarPair` ABI
- Value::ScalarPair(a, b) => {
- let val = if offset.bytes() == 0 { a } else { b };
- Value::Scalar(val)
- },
- Value::ByRef(base_ptr, align) => {
- let offset = base_layout.fields.offset(field_index);
- let ptr = base_ptr.ptr_offset(offset, self)?;
- let align = align.min(base_layout.align).min(field.align);
- assert!(!field.is_unsized());
- Value::ByRef(ptr, align)
- },
- Value::Scalar(val) => bug!("field access on non aggregate {:#?}, {:#?}", val, base_layout),
+ base: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx>>> + 'a> {
+ let len = base.len();
+ let stride = match base.layout.fields {
+ layout::FieldPlacement::Array { stride, .. } => stride,
+ _ => bug!("mplace_array_fields: expected an array layout"),
};
- Ok((value, field))
+ let layout = base.layout.field(self, 0)?;
+ let dl = &self.tcx.data_layout;
+ Ok((0..len).map(move |i| {
+ let ptr = base.ptr.ptr_offset(i * stride, dl)?;
+ Ok(MPlaceTy {
+ mplace: MemPlace { ptr, align: base.align, extra: PlaceExtra::None },
+ layout
+ })
+ }))
}
- fn try_read_place_projection(
+ pub fn mplace_subslice(
&self,
- proj: &mir::PlaceProjection<'tcx>,
- ) -> EvalResult<'tcx, Option<Value>> {
- use rustc::mir::ProjectionElem::*;
- let base = match self.try_read_place(&proj.base)? {
- Some(base) => base,
- None => return Ok(None),
+ base: MPlaceTy<'tcx>,
+ from: u64,
+ to: u64,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let len = base.len();
+ assert!(from <= len - to);
+
+ // Not using layout method because that works with usize, and does not work with slices
+ // (that have count 0 in their layout).
+ let from_offset = match base.layout.fields {
+ layout::FieldPlacement::Array { stride, .. } =>
+ stride * from,
+ _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};
- let base_ty = self.place_ty(&proj.base);
- let base_layout = self.layout_of(base_ty)?;
- match proj.elem {
- Field(field, _) => Ok(Some(self.read_field(base, None, field, base_layout)?.0)),
- // The NullablePointer cases should work fine, need to take care for normal enums
- Downcast(..) |
- Subslice { .. } |
- // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized
- ConstantIndex { .. } | Index(_) |
- // No way to optimize this projection any better than the normal place path
- Deref => Ok(None),
- }
+ let ptr = base.ptr.ptr_offset(from_offset, self)?;
+
+ // Compute extra and new layout
+ let inner_len = len - to - from;
+ let (extra, ty) = match base.layout.ty.sty {
+ ty::Array(inner, _) =>
+ (PlaceExtra::None, self.tcx.mk_array(inner, inner_len)),
+ ty::Slice(..) =>
+ (PlaceExtra::Length(inner_len), base.layout.ty),
+ _ =>
+ bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
+ };
+ let layout = self.layout_of(ty)?;
+
+ Ok(MPlaceTy {
+ mplace: MemPlace { ptr, align: base.align, extra },
+ layout
+ })
+ }
+
+ pub fn mplace_downcast(
+ &self,
+ base: MPlaceTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ // Downcasts only change the layout
+ assert_eq!(base.extra, PlaceExtra::None);
+ Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
+ }
+
+ /// Project into an mplace
+ pub fn mplace_projection(
+ &self,
+ base: MPlaceTy<'tcx>,
+ proj_elem: &mir::PlaceElem<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.mplace_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.mplace_downcast(base, variant)?,
+ Deref => self.deref_operand(base.into())?,
+
+ Index(local) => {
+ let n = *self.frame().locals[local].access()?;
+ let n_layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.read_scalar(OpTy { op: n, layout: n_layout })?;
+ let n = n.to_bits(self.tcx.data_layout.pointer_size)?;
+ self.mplace_field(base, u64::try_from(n).unwrap())?
+ }
+
+ ConstantIndex {
+ offset,
+ min_length,
+ from_end,
+ } => {
+ let n = base.len();
+ assert!(n >= min_length as u64);
+
+ let index = if from_end {
+ n - u64::from(offset)
+ } else {
+ u64::from(offset)
+ };
+
+ self.mplace_field(base, index)?
+ }
+
+ Subslice { from, to } =>
+ self.mplace_subslice(base, u64::from(from), u64::from(to))?,
+ })
}
- /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses.
- pub(super) fn eval_and_read_place(
+ /// Get the place of a field inside the place, and also the field's type.
+ /// Just a convenience function, but used quite a bit.
+ pub fn place_field(
&mut self,
- place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, Value> {
- // Shortcut for things like accessing a fat pointer's field,
- // which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory
- // and returning an `Place::Ptr` to it
- if let Some(val) = self.try_read_place(place)? {
- return Ok(val);
- }
- let place = self.eval_place(place)?;
- self.read_place(place)
+ base: PlaceTy<'tcx>,
+ field: u64,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ // FIXME: We could try to be smarter and avoid allocation for fields that span the
+ // entire place.
+ let mplace = self.force_allocation(base)?;
+ Ok(self.mplace_field(mplace, field)?.into())
}
- pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
- match place {
- Place::Ptr { ptr, align, extra } => {
- assert_eq!(extra, PlaceExtra::None);
- Ok(Value::ByRef(ptr.unwrap_or_err()?, align))
+ pub fn place_downcast(
+ &mut self,
+ base: PlaceTy<'tcx>,
+ variant: usize,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ // Downcast just changes the layout
+ Ok(match base.place {
+ Place::Ptr(mplace) =>
+ self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
+ Place::Local { .. } => {
+ let layout = base.layout.for_variant(&self, variant);
+ PlaceTy { layout, ..base }
}
- Place::Local { frame, local } => self.stack[frame].locals[local].access(),
- }
+ })
}
- pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> {
+ /// Project into a place
+ pub fn place_projection(
+ &mut self,
+ base: PlaceTy<'tcx>,
+ proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
+ ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+ use rustc::mir::ProjectionElem::*;
+ Ok(match *proj_elem {
+ Field(field, _) => self.place_field(base, field.index() as u64)?,
+ Downcast(_, variant) => self.place_downcast(base, variant)?,
+ Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+ // For the other variants, we have to force an allocation.
+ // This matches `operand_projection`.
+ Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+ let mplace = self.force_allocation(base)?;
+ self.mplace_projection(mplace, proj_elem)?.into()
+ }
+ })
+ }
+
+ /// Compute a place. You should only use this if you intend to write into this
+ /// place; for reading, a more efficient alternative is `eval_place_for_read`.
+ pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> {
use rustc::mir::Place::*;
let place = match *mir_place {
- Local(mir::RETURN_PLACE) => self.frame().return_place,
- Local(local) => Place::Local {
- frame: self.cur_frame(),
- local,
+ Local(mir::RETURN_PLACE) => PlaceTy {
+ place: self.frame().return_place,
+ layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?,
+ },
+ Local(local) => PlaceTy {
+ place: Place::Local {
+ frame: self.cur_frame(),
+ local,
+ },
+ layout: self.layout_of_local(self.cur_frame(), local)?,
},
Promoted(ref promoted) => {
let instance = self.frame().instance;
- let val = self.read_global_as_value(GlobalId {
+ let op = self.global_to_op(GlobalId {
instance,
promoted: Some(promoted.0),
})?;
- if let Value::ByRef(ptr, align) = val {
- Place::Ptr {
- ptr: ptr.into(),
- align,
- extra: PlaceExtra::None,
- }
- } else {
- bug!("evaluated promoted and got {:#?}", val);
+ let mplace = op.to_mem_place();
+ let ty = self.monomorphize(promoted.1, self.substs());
+ PlaceTy {
+ place: Place::Ptr(mplace),
+ layout: self.layout_of(ty)?,
}
}
Static(ref static_) => {
- let layout = self.layout_of(self.place_ty(mir_place))?;
+ let ty = self.monomorphize(static_.ty, self.substs());
+ let layout = self.layout_of(ty)?;
let instance = ty::Instance::mono(*self.tcx, static_.def_id);
let cid = GlobalId {
instance,
promoted: None
};
let alloc = Machine::init_static(self, cid)?;
- Place::Ptr {
- ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())),
- align: layout.align,
- extra: PlaceExtra::None,
- }
+ MPlaceTy::from_aligned_ptr(alloc.into(), layout).into()
}
Projection(ref proj) => {
- let ty = self.place_ty(&proj.base);
let place = self.eval_place(&proj.base)?;
- return self.eval_place_projection(place, ty, &proj.elem);
+ self.place_projection(place, &proj.elem)?
}
};
- self.dump_local(place);
+ self.dump_place(place.place);
Ok(place)
}
- pub fn place_field(
+ /// Write a scalar to a place
+ pub fn write_scalar(
&mut self,
- base: Place,
- field: mir::Field,
- mut base_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> {
- match base {
- Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => {
- base_layout = base_layout.for_variant(&self, variant_index);
- }
- _ => {}
- }
- let field_index = field.index();
- let field = base_layout.field(&self, field_index)?;
- let offset = base_layout.fields.offset(field_index);
+ val: impl Into<ScalarMaybeUndef>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ self.write_value(Value::Scalar(val.into()), dest)
+ }
- // Do not allocate in trivial cases
- let (base_ptr, base_align, base_extra) = match base {
- Place::Ptr { ptr, align, extra } => (ptr, align, extra),
+ /// Write a value to a place
+ pub fn write_value(
+ &mut self,
+ src_val: Value,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ trace!("write_value: {:?} <- {:?}", *dest, src_val);
+ // See if we can avoid an allocation. This is the counterpart to `try_read_value`,
+ // but not factored as a separate function.
+ let mplace = match dest.place {
Place::Local { frame, local } => {
- match (self.stack[frame].locals[local].access()?, &base_layout.abi) {
- // in case the field covers the entire type, just return the value
- (Value::Scalar(_), &layout::Abi::Scalar(_)) |
- (Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
- if offset.bytes() == 0 && field.size == base_layout.size => {
- return Ok((base, field))
+ match *self.stack[frame].locals[local].access_mut()? {
+ Operand::Immediate(ref mut dest_val) => {
+ // Yay, we can just change the local directly.
+ *dest_val = src_val;
+ return Ok(());
},
- _ => self.force_allocation(base)?.to_ptr_align_extra(),
+ Operand::Indirect(mplace) => mplace, // already in memory
}
- }
+ },
+ Place::Ptr(mplace) => mplace, // already in memory
};
- let offset = match base_extra {
- PlaceExtra::Vtable(tab) => {
- let (_, align) = self.size_and_align_of_dst(
- base_layout.ty,
- base_ptr.to_value_with_vtable(tab),
- )?;
- offset.abi_align(align)
+ // This is already in memory, write there.
+ let dest = MPlaceTy { mplace, layout: dest.layout };
+ self.write_value_to_mplace(src_val, dest)
+ }
+
+ /// Write a value to memory
+ fn write_value_to_mplace(
+ &mut self,
+ value: Value,
+ dest: MPlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let (ptr, ptr_align) = dest.to_scalar_ptr_align();
+ // Note that it is really important that the type here is the right one, and matches the type things are read at.
+ // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
+ // correct if we never look at this data with the wrong type.
+
+ // Nothing to do for ZSTs, other than checking alignment
+ if dest.layout.size.bytes() == 0 {
+ self.memory.check_align(ptr, ptr_align)?;
+ return Ok(());
+ }
+
+ let ptr = ptr.to_ptr()?;
+ match value {
+ Value::Scalar(scalar) => {
+ self.memory.write_scalar(
+ ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size
+ )
}
- _ => offset,
- };
+ Value::ScalarPair(a_val, b_val) => {
+ let (a, b) = match dest.layout.abi {
+ layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+ _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout)
+ };
+ let (a_size, b_size) = (a.size(&self), b.size(&self));
+ let (a_align, b_align) = (a.align(&self), b.align(&self));
+ let b_offset = a_size.abi_align(b_align);
+ let b_ptr = ptr.offset(b_offset, &self)?.into();
- let ptr = base_ptr.ptr_offset(offset, &self)?;
- let align = base_align.min(base_layout.align).min(field.align);
- let extra = if !field.is_unsized() {
- PlaceExtra::None
- } else {
- match base_extra {
- PlaceExtra::None => bug!("expected fat pointer"),
- PlaceExtra::DowncastVariant(..) => {
- bug!("Rust doesn't support unsized fields in enum variants")
- }
- PlaceExtra::Vtable(_) |
- PlaceExtra::Length(_) => {}
+ self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?;
+ self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size)
}
- base_extra
- };
+ }
+ }
- Ok((Place::Ptr { ptr, align, extra }, field))
+ /// Copy the data from an operand to a place
+ pub fn copy_op(
+ &mut self,
+ src: OpTy<'tcx>,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ assert_eq!(src.layout.size, dest.layout.size,
+ "Size mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+
+ // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
+ let (src_ptr, src_align) = match self.try_read_value(src)? {
+ Ok(src_val) =>
+ // Yay, we got a value that we can write directly. We write with the
+ // *source layout*, because that was used to load, and if they do not match
+ // this is a transmute we want to support.
+ return self.write_value(src_val, PlaceTy { place: *dest, layout: src.layout }),
+ Err(mplace) => mplace.to_scalar_ptr_align(),
+ };
+ // Slow path, this does not fit into an immediate. Just memcpy.
+ trace!("copy_op: {:?} <- {:?}", *dest, *src);
+ let (dest_ptr, dest_align) = self.force_allocation(dest)?.to_scalar_ptr_align();
+ self.memory.copy(
+ src_ptr, src_align,
+ dest_ptr, dest_align,
+ src.layout.size, false
+ )
}
- pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
- let layout = self.layout_of(ty)?;
- Ok(match self.tcx.struct_tail(ty).sty {
- ty::TyDynamic(..) => {
- let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
- Place::Ptr {
- ptr,
- align: layout.align,
- extra: PlaceExtra::Vtable(vtable),
- }
- }
- ty::TyStr | ty::TySlice(_) => {
- let (ptr, len) = self.into_slice(val)?;
- Place::Ptr {
- ptr,
- align: layout.align,
- extra: PlaceExtra::Length(len),
- }
+ /// Make sure that a place is in memory, and return where it is.
+ /// This is essentially `force_to_memplace`.
+ pub fn force_allocation(
+ &mut self,
+ place: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ let mplace = match place.place {
+ Place::Local { frame, local } => {
+ // FIXME: Consider not doing anything for a ZST, and just returning
+ // a fake pointer?
+
+ // We need the layout of the local. We can NOT use the layout we got,
+ // that might e.g. be a downcast variant!
+ let local_layout = self.layout_of_local(frame, local)?;
+ // Make sure it has a place
+ let rval = *self.stack[frame].locals[local].access()?;
+ let mplace = self.allocate_op(OpTy { op: rval, layout: local_layout })?.mplace;
+ // This might have allocated the flag
+ *self.stack[frame].locals[local].access_mut()? =
+ Operand::Indirect(mplace);
+ // done
+ mplace
}
- _ => Place::from_scalar_ptr(self.into_ptr(val)?, layout.align),
- })
+ Place::Ptr(mplace) => mplace
+ };
+ // Return with the original layout, so that the caller can go on
+ Ok(MPlaceTy { mplace, layout: place.layout })
}
- pub fn place_index(
+ pub fn allocate(
&mut self,
- base: Place,
- outer_ty: Ty<'tcx>,
- n: u64,
- ) -> EvalResult<'tcx, Place> {
- // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, len) = base.elem_ty_and_len(outer_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(
- n < len,
- "Tried to access element {} of array/slice with length {}",
- n,
- len
- );
- let ptr = base_ptr.ptr_offset(elem_size * n, &*self)?;
- Ok(Place::Ptr {
- ptr,
- align,
- extra: PlaceExtra::None,
- })
+ layout: TyLayout<'tcx>,
+ kind: MemoryKind<M::MemoryKinds>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
+ let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
+ Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
}
- pub(super) fn place_downcast(
+ /// Make a place for an operand, allocating if needed
+ pub fn allocate_op(
&mut self,
- base: Place,
- variant: usize,
- ) -> EvalResult<'tcx, Place> {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (ptr, align) = base.to_ptr_align();
- let extra = PlaceExtra::DowncastVariant(variant);
- Ok(Place::Ptr { ptr, align, extra })
+ OpTy { op, layout }: OpTy<'tcx>,
+ ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ Ok(match op {
+ Operand::Indirect(mplace) => MPlaceTy { mplace, layout },
+ Operand::Immediate(value) => {
+ // FIXME: Is stack always right here?
+ let ptr = self.allocate(layout, MemoryKind::Stack)?;
+ self.write_value_to_mplace(value, ptr)?;
+ ptr
+ },
+ })
}
- pub fn eval_place_projection(
+ pub fn write_discriminant_value(
&mut self,
- base: Place,
- base_ty: Ty<'tcx>,
- proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
- ) -> EvalResult<'tcx, Place> {
- use rustc::mir::ProjectionElem::*;
- match *proj_elem {
- Field(field, _) => {
- let layout = self.layout_of(base_ty)?;
- Ok(self.place_field(base, field, layout)?.0)
- }
-
- Downcast(_, variant) => {
- self.place_downcast(base, variant)
+ variant_index: usize,
+ dest: PlaceTy<'tcx>,
+ ) -> EvalResult<'tcx> {
+ match dest.layout.variants {
+ layout::Variants::Single { index } => {
+ if index != variant_index {
+ // If the layout of an enum is `Single`, all
+ // other variants are necessarily uninhabited.
+ assert_eq!(dest.layout.for_variant(&self, variant_index).abi,
+ layout::Abi::Uninhabited);
+ }
}
-
- Deref => {
- let val = self.read_place(base)?;
-
- let pointee_type = match base_ty.sty {
- ty::TyRawPtr(ref tam) => tam.ty,
- ty::TyRef(_, ty, _) => ty,
- ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
- _ => bug!("can only deref pointer types"),
- };
-
- trace!("deref to {} on {:?}", pointee_type, val);
-
- self.val_to_place(val, pointee_type)
+ layout::Variants::Tagged { ref tag, .. } => {
+ let discr_val = dest.layout.ty.ty_adt_def().unwrap()
+ .discriminant_for_variant(*self.tcx, variant_index)
+ .val;
+
+ // raw discriminants for enums are isize or bigger during
+ // their computation, but the in-memory tag is the smallest possible
+ // representation
+ let size = tag.value.size(self.tcx.tcx);
+ let shift = 128 - size.bits();
+ let discr_val = (discr_val << shift) >> shift;
+
+ let discr_dest = self.place_field(dest, 0)?;
+ self.write_scalar(Scalar::Bits {
+ bits: discr_val,
+ size: size.bytes() as u8,
+ }, discr_dest)?;
}
-
- Index(local) => {
- let value = self.frame().locals[local].access()?;
- let ty = self.tcx.types.usize;
- let n = self
- .value_to_scalar(ValTy { value, ty })?
- .to_bits(self.tcx.data_layout.pointer_size)?;
- self.place_index(base, base_ty, n as u64)
- }
-
- ConstantIndex {
- offset,
- min_length,
- from_end,
+ layout::Variants::NicheFilling {
+ dataful_variant,
+ ref niche_variants,
+ niche_start,
+ ..
} => {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(n >= min_length as u64);
-
- let index = if from_end {
- n - u64::from(offset)
- } else {
- u64::from(offset)
- };
-
- let ptr = base_ptr.ptr_offset(elem_size * index, &self)?;
- Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None })
+ if variant_index != dataful_variant {
+ let niche_dest =
+ self.place_field(dest, 0)?;
+ let niche_value = ((variant_index - niche_variants.start()) as u128)
+ .wrapping_add(niche_start);
+ self.write_scalar(Scalar::Bits {
+ bits: niche_value,
+ size: niche_dest.layout.size.bytes() as u8,
+ }, niche_dest)?;
+ }
}
+ }
- Subslice { from, to } => {
- // FIXME(solson)
- let base = self.force_allocation(base)?;
- let (base_ptr, align) = base.to_ptr_align();
-
- let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size;
- assert!(u64::from(from) <= n - u64::from(to));
- let ptr = base_ptr.ptr_offset(elem_size * u64::from(from), &self)?;
- // sublicing arrays produces arrays
- let extra = if self.type_is_sized(base_ty) {
- PlaceExtra::None
- } else {
- PlaceExtra::Length(n - u64::from(to) - u64::from(from))
- };
- Ok(Place::Ptr { ptr, align, extra })
+ Ok(())
+ }
+
+ /// Every place can be read from, so we can turn it into an operand
+ #[inline(always)]
+ pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> {
+ let op = match place.place {
+ Place::Ptr(mplace) => {
+ Operand::Indirect(mplace)
}
- }
+ Place::Local { frame, local } =>
+ *self.stack[frame].locals[local].access()?
+ };
+ Ok(OpTy { op, layout: place.layout })
}
- pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
- self.monomorphize(
- place.ty(self.mir(), *self.tcx).to_ty(*self.tcx),
- self.substs(),
- )
+ /// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout)
+ /// or a slice into the specific fixed-size place and layout that is given by the vtable/len.
+ /// This "unpacks" the existential quantifier, so to speak.
+ pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+ trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty);
+ let layout = match mplace.extra {
+ PlaceExtra::Vtable(vtable) => {
+ // the drop function signature
+ let drop_instance = self.read_drop_type_from_vtable(vtable)?;
+ trace!("Found drop fn: {:?}", drop_instance);
+ let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
+ let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
+ // the drop function takes *mut T where T is the type being dropped, so get that
+ let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
+ let layout = self.layout_of(ty)?;
+ // Sanity checks
+ let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
+ assert_eq!(size, layout.size);
+ assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
+ // FIXME: More checks for the vtable? We could make sure it is exactly
+ // the one one would expect for this type.
+ // Done!
+ layout
+ },
+ PlaceExtra::Length(len) => {
+ let ty = self.tcx.mk_array(mplace.layout.field(self, 0)?.ty, len);
+ self.layout_of(ty)?
+ }
+ PlaceExtra::None => bug!("Expected a fat pointer"),
+ };
+ trace!("Unpacked type: {:?}", layout.ty);
+ Ok(MPlaceTy {
+ mplace: MemPlace { extra: PlaceExtra::None, ..*mplace },
+ layout
+ })
}
}
//! The main entry point is the `step` method.
use rustc::mir;
+use rustc::ty::layout::LayoutOf;
+use rustc::mir::interpret::{EvalResult, Scalar};
-use rustc::mir::interpret::EvalResult;
use super::{EvalContext, Machine};
+/// Classify whether an operator is "left-homogeneous", i.e. the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+ use rustc::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr |
+ Offset | Shl | Shr =>
+ true,
+ Eq | Ne | Lt | Le | Gt | Ge =>
+ false,
+ }
+}
+/// Classify whether an operator is "right-homogeneous", i.e. the RHS has the
+/// same type as the LHS.
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+ use rustc::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr |
+ Eq | Ne | Lt | Le | Gt | Ge =>
+ true,
+ Offset | Shl | Shr =>
+ false,
+ }
+}
+
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> {
/// The number of steps between loop detector snapshots.
}
fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
- trace!("{:?}", stmt);
+ debug!("{:?}", stmt);
use rustc::mir::StatementKind::*;
variant_index,
} => {
let dest = self.eval_place(place)?;
- let dest_ty = self.place_ty(place);
- self.write_discriminant_value(dest_ty, dest, variant_index)?;
+ self.write_discriminant_value(variant_index, dest)?;
}
// Mark locals as alive
// Mark locals as dead
StorageDead(local) => {
- let old_val = self.frame_mut().storage_dead(local);
+ let old_val = self.storage_dead(local);
self.deallocate_local(old_val)?;
}
Ok(())
}
+ /// Evaluate an assignment statement.
+ ///
+ /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+ /// type writes its results directly into the memory specified by the place.
+ fn eval_rvalue_into_place(
+ &mut self,
+ rvalue: &mir::Rvalue<'tcx>,
+ place: &mir::Place<'tcx>,
+ ) -> EvalResult<'tcx> {
+ let dest = self.eval_place(place)?;
+
+ use rustc::mir::Rvalue::*;
+ match *rvalue {
+ Use(ref operand) => {
+ // Avoid recomputing the layout
+ let op = self.eval_operand(operand, Some(dest.layout))?;
+ self.copy_op(op, dest)?;
+ }
+
+ BinaryOp(bin_op, ref left, ref right) => {
+ let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None };
+ let left = self.eval_operand_and_read_value(left, layout)?;
+ let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
+ let right = self.eval_operand_and_read_value(right, layout)?;
+ self.binop_ignore_overflow(
+ bin_op,
+ left,
+ right,
+ dest,
+ )?;
+ }
+
+ CheckedBinaryOp(bin_op, ref left, ref right) => {
+ // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+ let left = self.eval_operand_and_read_value(left, None)?;
+ let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
+ let right = self.eval_operand_and_read_value(right, layout)?;
+ self.binop_with_overflow(
+ bin_op,
+ left,
+ right,
+ dest,
+ )?;
+ }
+
+ UnaryOp(un_op, ref operand) => {
+ // The operand always has the same type as the result.
+ let val = self.eval_operand_and_read_value(operand, Some(dest.layout))?;
+ let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?;
+ self.write_scalar(val, dest)?;
+ }
+
+ Aggregate(ref kind, ref operands) => {
+ let (dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
+ self.write_discriminant_value(variant_index, dest)?;
+ if adt_def.is_enum() {
+ (self.place_downcast(dest, variant_index)?, active_field_index)
+ } else {
+ (dest, active_field_index)
+ }
+ }
+ _ => (dest, None)
+ };
+
+ for (i, operand) in operands.iter().enumerate() {
+ let op = self.eval_operand(operand, None)?;
+ // Ignore zero-sized fields.
+ if !op.layout.is_zst() {
+ let field_index = active_field_index.unwrap_or(i);
+ let field_dest = self.place_field(dest, field_index as u64)?;
+ self.copy_op(op, field_dest)?;
+ }
+ }
+ }
+
+ Repeat(ref operand, _) => {
+ let op = self.eval_operand(operand, None)?;
+ let dest = self.force_allocation(dest)?;
+ let length = dest.len();
+
+ if length > 0 {
+ // write the first
+ let first = self.mplace_field(dest, 0)?;
+ self.copy_op(op, first.into())?;
+
+ if length > 1 {
+ // copy the rest
+ let (dest, dest_align) = first.to_scalar_ptr_align();
+ let rest = dest.ptr_offset(first.layout.size, &self)?;
+ self.memory.copy_repeatedly(
+ dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
+ )?;
+ }
+ }
+ }
+
+ Len(ref place) => {
+ // FIXME(CTFE): don't allow computing the length of arrays in const eval
+ let src = self.eval_place(place)?;
+ let mplace = self.force_allocation(src)?;
+ let len = mplace.len();
+ let size = self.memory.pointer_size().bytes() as u8;
+ self.write_scalar(
+ Scalar::Bits {
+ bits: len as u128,
+ size,
+ },
+ dest,
+ )?;
+ }
+
+ Ref(_, _, ref place) => {
+ let src = self.eval_place(place)?;
+ let val = self.force_allocation(src)?.to_ref(&self);
+ self.write_value(val, dest)?;
+ }
+
+ NullaryOp(mir::NullOp::Box, _) => {
+ M::box_alloc(self, dest)?;
+ }
+
+ NullaryOp(mir::NullOp::SizeOf, ty) => {
+ let ty = self.monomorphize(ty, self.substs());
+ let layout = self.layout_of(ty)?;
+ assert!(!layout.is_unsized(),
+ "SizeOf nullary MIR operator called for unsized type");
+ let size = self.memory.pointer_size().bytes() as u8;
+ self.write_scalar(
+ Scalar::Bits {
+ bits: layout.size.bytes() as u128,
+ size,
+ },
+ dest,
+ )?;
+ }
+
+ Cast(kind, ref operand, cast_ty) => {
+ debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty);
+ let src = self.eval_operand(operand, None)?;
+ self.cast(src, kind, dest)?;
+ }
+
+ Discriminant(ref place) => {
+ let place = self.eval_place(place)?;
+ let discr_val = self.read_discriminant_value(self.place_to_op(place)?)?;
+ let size = dest.layout.size.bytes() as u8;
+ self.write_scalar(Scalar::Bits {
+ bits: discr_val,
+ size,
+ }, dest)?;
+ }
+ }
+
+ self.dump_place(*dest);
+
+ Ok(())
+ }
+
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
- trace!("{:?}", terminator.kind);
+ debug!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
self.eval_terminator(terminator)?;
if !self.stack.is_empty() {
- trace!("// {:?}", self.frame().block);
+ debug!("// {:?}", self.frame().block);
}
Ok(())
}
use rustc::mir::BasicBlock;
-use rustc::ty::{self, Ty};
+use rustc::ty::{self, layout::LayoutOf};
use syntax::source_map::Span;
-use rustc::mir::interpret::{EvalResult, Value};
-use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra};
+use rustc::mir::interpret::EvalResult;
+use interpret::{Machine, EvalContext, PlaceTy, PlaceExtra, OpTy, Operand};
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
- pub(crate) fn drop_place(
+ pub(crate) fn drop_in_place(
&mut self,
- place: Place,
+ place: PlaceTy<'tcx>,
instance: ty::Instance<'tcx>,
- ty: Ty<'tcx>,
span: Span,
target: BasicBlock,
) -> EvalResult<'tcx> {
- trace!("drop_place: {:#?}", place);
+ trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
// We take the address of the object. This may well be unaligned, which is fine for us here.
// However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
// by rustc.
- let val = match self.force_allocation(place)? {
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::Vtable(vtable),
- } => ptr.to_value_with_vtable(vtable),
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::Length(len),
- } => ptr.to_value_with_len(len, self.tcx.tcx),
- Place::Ptr {
- ptr,
- align: _,
- extra: PlaceExtra::None,
- } => Value::Scalar(ptr),
- _ => bug!("force_allocation broken"),
- };
- self.drop(val, instance, ty, span, target)
- }
+ let place = self.force_allocation(place)?;
- fn drop(
- &mut self,
- arg: Value,
- instance: ty::Instance<'tcx>,
- ty: Ty<'tcx>,
- span: Span,
- target: BasicBlock,
- ) -> EvalResult<'tcx> {
- trace!("drop: {:#?}, {:?}, {:?}", arg, ty.sty, instance.def);
-
- let instance = match ty.sty {
- ty::TyDynamic(..) => {
- if let Value::ScalarPair(_, vtable) = arg {
- self.read_drop_type_from_vtable(vtable.unwrap_or_err()?.to_ptr()?)?
- } else {
- bug!("expected fat ptr, got {:?}", arg);
- }
+ let (instance, place) = match place.layout.ty.sty {
+ ty::Dynamic(..) => {
+ // Dropping a trait object.
+ let vtable = match place.extra {
+ PlaceExtra::Vtable(vtable) => vtable,
+ _ => bug!("Expected vtable when dropping {:#?}", place),
+ };
+ let place = self.unpack_unsized_mplace(place)?;
+ let instance = self.read_drop_type_from_vtable(vtable)?;
+ (instance, place)
}
- _ => instance,
+ _ => (instance, place),
};
- // the drop function expects a reference to the value
- let valty = ValTy {
- value: arg,
- ty: self.tcx.mk_mut_ptr(ty),
+ let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx);
+ let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
+
+ let arg = OpTy {
+ op: Operand::Immediate(place.to_ref(&self)),
+ layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
};
- let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
+ // This should always be (), but getting it from the sig seems
+ // easier than creating a layout of ().
+ let dest = PlaceTy::null(&self, self.layout_of(fn_sig.output())?);
self.eval_fn_call(
instance,
- Some((Place::undef(), target)),
- &[valty],
+ Some((dest, target)),
+ &[arg],
span,
fn_sig,
)
use rustc::mir;
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{LayoutOf, Size};
+use rustc::ty::layout::LayoutOf;
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
-use super::{EvalContext, Place, Machine, ValTy};
+use rustc::mir::interpret::{EvalResult, Scalar};
+use super::{EvalContext, Machine, Value, OpTy, PlaceTy, ValTy, Operand};
use rustc_data_structures::indexed_vec::Idx;
-use interpret::memory::HasMemory;
mod drop;
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
- self.dump_local(self.frame().return_place);
+ self.dump_place(self.frame().return_place);
self.pop_stack_frame()?
}
ref targets,
..
} => {
- let discr_val = self.eval_operand(discr)?;
- let discr_prim = self.value_to_scalar(discr_val)?;
- let discr_layout = self.layout_of(discr_val.ty).unwrap();
- trace!("SwitchInt({:?}, {:#?})", discr_prim, discr_layout);
+ let discr_val = self.eval_operand(discr, None)?;
+ let discr = self.read_value(discr_val)?;
+ trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
- let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 };
- let res = self.binary_op(mir::BinOp::Eq,
- discr_prim, discr_val.ty,
- const_int, discr_val.ty
+ let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 };
+ let (res, _) = self.binary_op(mir::BinOp::Eq,
+ discr,
+ ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
)?;
- if res.0.to_bits(Size::from_bytes(1))? != 0 {
+ if res.to_bool()? {
target_block = targets[index];
break;
}
None => None,
};
- let func = self.eval_operand(func)?;
- let (fn_def, sig) = match func.ty.sty {
- ty::TyFnPtr(sig) => {
- let fn_ptr = self.value_to_scalar(func)?.to_ptr()?;
+ let func = self.eval_operand(func, None)?;
+ let (fn_def, sig) = match func.layout.ty.sty {
+ ty::FnPtr(sig) => {
+ let fn_ptr = self.read_scalar(func)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.ty(*self.tcx);
match instance_ty.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
let real_sig = instance_ty.fn_sig(*self.tcx);
let sig = self.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
}
(instance, sig)
}
- ty::TyFnDef(def_id, substs) => (
+ ty::FnDef(def_id, substs) => (
self.resolve(def_id, substs)?,
- func.ty.fn_sig(*self.tcx),
+ func.layout.ty.fn_sig(*self.tcx),
),
_ => {
- let msg = format!("can't handle callee of type {:?}", func.ty);
+ let msg = format!("can't handle callee of type {:?}", func.layout.ty);
return err!(Unimplemented(msg));
}
};
- let args = self.operands_to_args(args)?;
+ let args = self.eval_operands(args)?;
let sig = self.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
self.eval_fn_call(
fn_def,
destination,
- &args,
+ &args[..],
terminator.source_info.span,
sig,
)?;
} => {
// FIXME(CTFE): forbid drop in const eval
let place = self.eval_place(location)?;
- let ty = self.place_ty(location);
- let ty = self.tcx.subst_and_normalize_erasing_regions(
- self.substs(),
- ty::ParamEnv::reveal_all(),
- &ty,
- );
+ let ty = place.layout.ty;
trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
- self.drop_place(
+ self.drop_in_place(
place,
instance,
- ty,
terminator.source_info.span,
target,
)?;
target,
..
} => {
- let cond_val = self.eval_operand_to_scalar(cond)?.to_bool()?;
+ let cond_val = self.eval_operand_and_read_value(cond, None)?.to_scalar()?.to_bool()?;
if expected == cond_val {
self.goto_block(target);
} else {
use rustc::mir::interpret::EvalErrorKind::*;
return match *msg {
BoundsCheck { ref len, ref index } => {
- let len = self.eval_operand_to_scalar(len)
- .expect("can't eval len")
+ let len = self.eval_operand_and_read_value(len, None)
+ .expect("can't eval len").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
- let index = self.eval_operand_to_scalar(index)
- .expect("can't eval index")
+ let index = self.eval_operand_and_read_value(index, None)
+ .expect("can't eval index").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
err!(BoundsCheck { len, index })
}
// Permit changing the pointer type of raw pointers and references as well as
// mutability of raw pointers.
// TODO: Should not be allowed when fat pointers are involved.
- (&ty::TyRawPtr(_), &ty::TyRawPtr(_)) => true,
- (&ty::TyRef(_, _, _), &ty::TyRef(_, _, _)) => {
+ (&ty::RawPtr(_), &ty::RawPtr(_)) => true,
+ (&ty::Ref(_, _, _), &ty::Ref(_, _, _)) => {
ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
}
// rule out everything else
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
- ty::TyTuple(tys) if sig.inputs().len() == tys.len() =>
+ ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
return Ok(true)
},
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
- destination: Option<(Place, mir::BasicBlock)>,
- args: &[ValTy<'tcx>],
+ destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ args: &[OpTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
+ if let Some((place, _)) = destination {
+ assert_eq!(place.layout.ty, sig.output());
+ }
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
let (ret, target) = match destination {
Some(dest) => dest,
_ => return err!(Unreachable),
};
- let ty = sig.output();
- let layout = self.layout_of(ty)?;
- M::call_intrinsic(self, instance, args, ret, layout, target)?;
- self.dump_local(ret);
+ M::call_intrinsic(self, instance, args, ret, target)?;
+ self.dump_place(*ret);
Ok(())
}
// FIXME: figure out why we can't just go through the shim
ty::InstanceDef::ClosureOnceShim { .. } => {
- if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
match sig.abi {
// closure as closure once
Abi::RustCall => {
- for (arg_local, &valty) in arg_locals.zip(args) {
+ for (arg_local, &op) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
// non capture closure as fn ptr
// and need to pack arguments
Abi::Rust => {
trace!(
- "arg_locals: {:#?}",
- self.frame().mir.args_iter().collect::<Vec<_>>()
+ "args: {:#?}",
+ self.frame().mir.args_iter().zip(args.iter())
+ .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
);
- trace!("args: {:#?}", args);
let local = arg_locals.nth(1).unwrap();
- for (i, &valty) in args.into_iter().enumerate() {
+ for (i, &op) in args.into_iter().enumerate() {
let dest = self.eval_place(&mir::Place::Local(local).field(
mir::Field::new(i),
- valty.ty,
+ op.layout.ty,
))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
_ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
ty::InstanceDef::CloneShim(..) |
ty::InstanceDef::Item(_) => {
// Push the stack frame, and potentially be entirely done if the call got hooked
- if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span)? {
+ // TODO: Can we make it return the frame to push, instead
+ // of the hook doing half of the work and us doing the argument
+ // initialization?
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
trace!("ABI: {:?}", sig.abi);
trace!(
- "arg_locals: {:#?}",
- self.frame().mir.args_iter().collect::<Vec<_>>()
+ "args: {:#?}",
+ self.frame().mir.args_iter().zip(args.iter())
+ .map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
);
- trace!("args: {:#?}", args);
match sig.abi {
Abi::RustCall => {
assert_eq!(args.len(), 2);
// write first argument
let first_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(first_local))?;
- self.write_value(args[0], dest)?;
+ self.copy_op(args[0], dest)?;
}
// unpack and write all other args
- let layout = self.layout_of(args[1].ty)?;
- if let ty::TyTuple(_) = args[1].ty.sty {
+ let layout = args[1].layout;
+ if let ty::Tuple(_) = layout.ty.sty {
if layout.is_zst() {
// Nothing to do, no need to unpack zsts
return Ok(());
}
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
for (i, arg_local) in arg_locals.enumerate() {
- let field = mir::Field::new(i);
- let (value, layout) = self.read_field(args[1].value, None, field, layout)?;
+ let arg = self.operand_field(args[1], i as u64)?;
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- let valty = ValTy {
- value,
- ty: layout.ty,
- };
- self.write_value(valty, dest)?;
+ self.copy_op(arg, dest)?;
}
} else {
trace!("manual impl of rust-call ABI");
let dest = self.eval_place(
&mir::Place::Local(arg_locals.next().unwrap()),
)?;
- self.write_value(args[1], dest)?;
+ self.copy_op(args[1], dest)?;
}
} else {
bug!(
- "rust-call ABI tuple argument was {:#?}, {:#?}",
- args[1].ty,
+ "rust-call ABI tuple argument was {:#?}",
layout
);
}
}
_ => {
- for (arg_local, &valty) in arg_locals.zip(args) {
+ for (arg_local, &op) in arg_locals.zip(args) {
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
- self.write_value(valty, dest)?;
+ self.copy_op(op, dest)?;
}
}
}
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
- let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
+ let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?;
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
- )?.unwrap_or_err()?.to_ptr()?;
+ )?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
+
+ // We have to patch the self argument, in particular get the layout
+ // expected by the actual function. Cannot just use "field 0" due to
+ // Box<self>.
let mut args = args.to_vec();
- let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
- args[0].ty = ty;
- args[0].value = Value::Scalar(ptr);
+ let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
+ let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
+ args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?;
+ args[0].op = Operand::Immediate(Value::Scalar(ptr.into())); // strip vtable
+ trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(instance, destination, &args, span, sig)
}
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop);
- self.memory.write_ptr_sized_unsigned(vtable, ptr_align, Scalar::Ptr(drop).into())?;
+ self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
- self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits {
+ self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
size: ptr_size.bytes() as u8,
}.into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
- self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits {
+ self.memory.write_ptr_sized(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
size: ptr_size.bytes() as u8,
}.into())?;
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
- self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
+ self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
}
}
) -> EvalResult<'tcx, ty::Instance<'tcx>> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
- let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.unwrap_or_err()?.to_ptr()?;
+ let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
self.memory.get_fn(drop_fn)
}
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
- let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+ let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
- )?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+ )?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
}
}
--- /dev/null
+use std::fmt::Write;
+
+use syntax_pos::symbol::Symbol;
+use rustc::ty::layout::{self, Size, Primitive};
+use rustc::ty::{self, Ty};
+use rustc_data_structures::fx::FxHashSet;
+use rustc::mir::interpret::{
+ Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind
+};
+
+use super::{
+ MPlaceTy, Machine, EvalContext
+};
+
+macro_rules! validation_failure{
+ ($what:expr, $where:expr, $details:expr) => {{
+ let where_ = path_format($where);
+ let where_ = if where_.is_empty() {
+ String::new()
+ } else {
+ format!(" at {}", where_)
+ };
+ err!(ValidationFailure(format!(
+ "encountered {}{}, but expected {}",
+ $what, where_, $details,
+ )))
+ }};
+ ($what:expr, $where:expr) => {{
+ let where_ = path_format($where);
+ let where_ = if where_.is_empty() {
+ String::new()
+ } else {
+ format!(" at {}", where_)
+ };
+ err!(ValidationFailure(format!(
+ "encountered {}{}",
+ $what, where_,
+ )))
+ }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+ Field(Symbol),
+ ClosureVar(Symbol),
+ ArrayElem(usize),
+ TupleElem(usize),
+ Deref,
+ Tag,
+}
+
+// Adding a Deref and making a copy of the path to be put into the queue
+// always go together. This one does it with only one new allocation.
+fn path_clone_and_deref(path: &Vec<PathElem>) -> Vec<PathElem> {
+ let mut new_path = Vec::with_capacity(path.len()+1);
+ new_path.clone_from(path);
+ new_path.push(PathElem::Deref);
+ new_path
+}
+
+/// Format a path
+fn path_format(path: &Vec<PathElem>) -> String {
+ use self::PathElem::*;
+
+ let mut out = String::new();
+ for elem in path.iter() {
+ match elem {
+ Field(name) => write!(out, ".{}", name).unwrap(),
+ ClosureVar(name) => write!(out, ".<closure-var({})>", name).unwrap(),
+ TupleElem(idx) => write!(out, ".{}", idx).unwrap(),
+ ArrayElem(idx) => write!(out, "[{}]", idx).unwrap(),
+ Deref =>
+ // This does not match Rust syntax, but it is more readable for long paths -- and
+ // some of the other items here also are not Rust syntax. Actually we can't
+ // even use the usual syntax because we are just showing the projections,
+ // not the root.
+ write!(out, ".<deref>").unwrap(),
+ Tag => write!(out, ".<enum-tag>").unwrap(),
+ }
+ }
+ out
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+ fn validate_scalar(
+ &self,
+ value: ScalarMaybeUndef,
+ size: Size,
+ scalar: &layout::Scalar,
+ path: &Vec<PathElem>,
+ ty: Ty,
+ ) -> EvalResult<'tcx> {
+ trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty);
+ let (lo, hi) = scalar.valid_range.clone().into_inner();
+
+ let value = match value {
+ ScalarMaybeUndef::Scalar(scalar) => scalar,
+ ScalarMaybeUndef::Undef => return validation_failure!("undefined bytes", path),
+ };
+
+ let bits = match value {
+ Scalar::Bits { bits, size: value_size } => {
+ assert_eq!(value_size as u64, size.bytes());
+ bits
+ },
+ Scalar::Ptr(_) => {
+ let ptr_size = self.memory.pointer_size();
+ let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
+ return if lo > hi {
+ if lo - hi == 1 {
+ // no gap, all values are ok
+ Ok(())
+ } else if hi < ptr_max || lo > 1 {
+ let max = u128::max_value() >> (128 - size.bits());
+ validation_failure!(
+ "pointer",
+ path,
+ format!("something in the range {:?} or {:?}", 0..=lo, hi..=max)
+ )
+ } else {
+ Ok(())
+ }
+ } else if hi < ptr_max || lo > 1 {
+ validation_failure!(
+ "pointer",
+ path,
+ format!("something in the range {:?}", scalar.valid_range)
+ )
+ } else {
+ Ok(())
+ };
+ },
+ };
+
+ // char gets a special treatment, because its number space is not contiguous so `TyLayout`
+ // has no special checks for chars
+ match ty.sty {
+ ty::Char => {
+ debug_assert_eq!(size.bytes(), 4);
+ if ::std::char::from_u32(bits as u32).is_none() {
+ return validation_failure!(
+ "character",
+ path,
+ "a valid unicode codepoint"
+ );
+ }
+ }
+ _ => {},
+ }
+
+ use std::ops::RangeInclusive;
+ let in_range = |bound: RangeInclusive<u128>| bound.contains(&bits);
+ if lo > hi {
+ if in_range(0..=hi) || in_range(lo..=u128::max_value()) {
+ Ok(())
+ } else {
+ validation_failure!(
+ bits,
+ path,
+ format!("something in the range {:?} or {:?}", ..=hi, lo..)
+ )
+ }
+ } else {
+ if in_range(scalar.valid_range.clone()) {
+ Ok(())
+ } else {
+ validation_failure!(
+ bits,
+ path,
+ format!("something in the range {:?}", scalar.valid_range)
+ )
+ }
+ }
+ }
+
+ /// This function checks the memory where `dest` points to. The place must be sized
+ /// (i.e., dest.extra == PlaceExtra::None).
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ /// The `path` may be pushed to, but the part that is present when the function
+ /// starts must not be changed!
+ pub fn validate_mplace(
+ &self,
+ dest: MPlaceTy<'tcx>,
+ path: &mut Vec<PathElem>,
+ seen: &mut FxHashSet<(MPlaceTy<'tcx>)>,
+ todo: &mut Vec<(MPlaceTy<'tcx>, Vec<PathElem>)>,
+ ) -> EvalResult<'tcx> {
+ self.memory.dump_alloc(dest.to_ptr()?.alloc_id);
+ trace!("validate_mplace: {:?}, {:#?}", *dest, dest.layout);
+
+ // Find the right variant. We have to handle this as a prelude, not via
+ // proper recursion with the new inner layout, to be able to later nicely
+ // print the field names of the enum field that is being accessed.
+ let (variant, dest) = match dest.layout.variants {
+ layout::Variants::NicheFilling { niche: ref tag, .. } |
+ layout::Variants::Tagged { ref tag, .. } => {
+ let size = tag.value.size(self);
+ // we first read the tag value as scalar, to be able to validate it
+ let tag_mplace = self.mplace_field(dest, 0)?;
+ let tag_value = self.read_scalar(tag_mplace.into())?;
+ path.push(PathElem::Tag);
+ self.validate_scalar(
+ tag_value, size, tag, &path, tag_mplace.layout.ty
+ )?;
+ path.pop(); // remove the element again
+ // then we read it again to get the index, to continue
+ let variant = self.read_discriminant_as_variant_index(dest.into())?;
+ let inner_dest = self.mplace_downcast(dest, variant)?;
+ // Put the variant projection onto the path, as a field
+ path.push(PathElem::Field(dest.layout.ty.ty_adt_def().unwrap().variants[variant].name));
+ trace!("variant layout: {:#?}", dest.layout);
+ (variant, inner_dest)
+ },
+ layout::Variants::Single { index } => {
+ (index, dest)
+ }
+ };
+
+ // Remember the length, in case we need to truncate
+ let path_len = path.len();
+
+ // Validate all fields
+ match dest.layout.fields {
+ // primitives are unions with zero fields
+ // We still check `layout.fields`, not `layout.abi`, because `layout.abi`
+ // is `Scalar` for newtypes around scalars, but we want to descend through the
+ // fields to get a proper `path`.
+ layout::FieldPlacement::Union(0) => {
+ match dest.layout.abi {
+ // nothing to do, whatever the pointer points to, it is never going to be read
+ layout::Abi::Uninhabited =>
+ return validation_failure!("a value of an uninhabited type", path),
+ // check that the scalar is a valid pointer or that its bit range matches the
+ // expectation.
+ layout::Abi::Scalar(ref scalar_layout) => {
+ let size = scalar_layout.value.size(self);
+ let value = self.read_value(dest.into())?;
+ let scalar = value.to_scalar_or_undef();
+ self.validate_scalar(scalar, size, scalar_layout, &path, dest.layout.ty)?;
+ if scalar_layout.value == Primitive::Pointer {
+ // ignore integer pointers, we can't reason about the final hardware
+ if let Scalar::Ptr(ptr) = scalar.not_undef()? {
+ let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
+ if let Some(AllocType::Static(did)) = alloc_kind {
+ // statics from other crates are already checked.
+ // extern statics should not be validated as they have no body.
+ if !did.is_local() || self.tcx.is_foreign_item(did) {
+ return Ok(());
+ }
+ }
+ if value.layout.ty.builtin_deref(false).is_some() {
+ trace!("Recursing below ptr {:#?}", value);
+ let ptr_place = self.ref_to_mplace(value)?;
+ // we have not encountered this pointer+layout combination before
+ if seen.insert(ptr_place) {
+ todo.push((ptr_place, path_clone_and_deref(path)));
+ }
+ }
+ }
+ }
+ },
+ _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", dest.layout.abi),
+ }
+ }
+ layout::FieldPlacement::Union(_) => {
+ // We can't check unions, their bits are allowed to be anything.
+ // The fields don't need to correspond to any bit pattern of the union's fields.
+ // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389
+ },
+ layout::FieldPlacement::Array { .. } => {
+ for (i, field) in self.mplace_array_fields(dest)?.enumerate() {
+ let field = field?;
+ path.push(PathElem::ArrayElem(i));
+ self.validate_mplace(field, path, seen, todo)?;
+ path.truncate(path_len);
+ }
+ },
+ layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
+ // Fat pointers need special treatment.
+ if dest.layout.ty.builtin_deref(true).is_some() {
+ // This is a fat pointer.
+ let ptr = match self.ref_to_mplace(self.read_value(dest.into())?) {
+ Ok(ptr) => ptr,
+ Err(err) => match err.kind {
+ EvalErrorKind::ReadPointerAsBytes =>
+ return validation_failure!(
+ "fat pointer length is not a valid integer", path
+ ),
+ EvalErrorKind::ReadBytesAsPointer =>
+ return validation_failure!(
+ "fat pointer vtable is not a valid pointer", path
+ ),
+ _ => return Err(err),
+ }
+ };
+ let unpacked_ptr = self.unpack_unsized_mplace(ptr)?;
+ // for safe ptrs, recursively check it
+ if !dest.layout.ty.is_unsafe_ptr() {
+ trace!("Recursing below fat ptr {:?} (unpacked: {:?})", ptr, unpacked_ptr);
+ if seen.insert(unpacked_ptr) {
+ todo.push((unpacked_ptr, path_clone_and_deref(path)));
+ }
+ }
+ } else {
+ // Not a pointer, perform regular aggregate handling below
+ for i in 0..offsets.len() {
+ let field = self.mplace_field(dest, i as u64)?;
+ path.push(self.aggregate_field_path_elem(dest.layout.ty, variant, i));
+ self.validate_mplace(field, path, seen, todo)?;
+ path.truncate(path_len);
+ }
+ // FIXME: For a TyStr, check that this is valid UTF-8.
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn aggregate_field_path_elem(&self, ty: Ty<'tcx>, variant: usize, field: usize) -> PathElem {
+ match ty.sty {
+ // generators and closures.
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
+ let freevar = self.tcx.with_freevars(node_id, |fv| fv[field]);
+ PathElem::ClosureVar(self.tcx.hir.name(freevar.var_id()))
+ }
+
+ // tuples
+ ty::Tuple(_) => PathElem::TupleElem(field),
+
+ // enums
+ ty::Adt(def, ..) if def.is_enum() => {
+ let variant = &def.variants[variant];
+ PathElem::Field(variant.fields[field].ident.name)
+ }
+
+ // other ADTs
+ ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name),
+
+ // nothing else has an aggregate layout
+ _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", ty),
+ }
+ }
+}
#![feature(slice_sort_by_cached_key)]
#![feature(box_patterns)]
#![feature(box_syntax)]
-#![feature(catch_expr)]
#![feature(crate_visibility_modifier)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(step_trait)]
#![feature(slice_concat_ext)]
#![feature(if_while_or_patterns)]
+#![feature(try_from)]
#![recursion_limit="256"]
extern crate rustc_apfloat;
extern crate byteorder;
extern crate core;
+extern crate smallvec;
+
+// Once we can use edition 2018 in the compiler,
+// replace this with real try blocks.
+macro_rules! try_block {
+ ($($inside:tt)*) => (
+ (||{ ::std::ops::Try::from_ok({ $($inside)* }) })()
+ )
+}
mod diagnostics;
shim::provide(providers);
transform::provide(providers);
providers.const_eval = interpret::const_eval_provider;
- providers.const_value_to_allocation = interpret::const_value_to_allocation_provider;
+ providers.const_to_allocation = interpret::const_to_allocation_provider;
providers.check_match = hair::pattern::check_match;
}
&source_ty,
);
match source_ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
if should_monomorphize_locally(self.tcx, &instance) {
is_direct_call: bool,
output: &mut Vec<MonoItem<'tcx>>)
{
- if let ty::TyFnDef(def_id, substs) = ty.sty {
+ if let ty::FnDef(def_id, substs) = ty.sty {
let instance = ty::Instance::resolve(tcx,
ty::ParamEnv::reveal_all(),
def_id,
}
let tail = tcx.struct_tail(ty);
match tail.sty {
- ty::TyForeign(..) => false,
- ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
};
};
match (&source_ty.sty, &target_ty.sty) {
- (&ty::TyRef(_, a, _),
- &ty::TyRef(_, b, _)) |
- (&ty::TyRef(_, a, _),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (&ty::Ref(_, a, _),
+ &ty::Ref(_, b, _)) |
+ (&ty::Ref(_, a, _),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
+ (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
+ &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
ptr_vtable(a, b)
}
- (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
}
- (&ty::TyAdt(source_adt_def, source_substs),
- &ty::TyAdt(target_adt_def, target_substs)) => {
+ (&ty::Adt(source_adt_def, source_substs),
+ &ty::Adt(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
let kind =
assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_regions() &&
!impl_ty.needs_subst() && !impl_ty.has_escaping_regions());
- if let ty::TyDynamic(ref trait_ty, ..) = trait_ty.sty {
+ if let ty::Dynamic(ref trait_ty, ..) = trait_ty.sty {
if let Some(principal) = trait_ty.principal() {
let poly_trait_ref = principal.with_self_ty(tcx, impl_ty);
assert!(!poly_trait_ref.has_escaping_regions());
pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String) {
match t.sty {
- ty::TyBool => output.push_str("bool"),
- ty::TyChar => output.push_str("char"),
- ty::TyStr => output.push_str("str"),
- ty::TyNever => output.push_str("!"),
- ty::TyInt(ast::IntTy::Isize) => output.push_str("isize"),
- ty::TyInt(ast::IntTy::I8) => output.push_str("i8"),
- ty::TyInt(ast::IntTy::I16) => output.push_str("i16"),
- ty::TyInt(ast::IntTy::I32) => output.push_str("i32"),
- ty::TyInt(ast::IntTy::I64) => output.push_str("i64"),
- ty::TyInt(ast::IntTy::I128) => output.push_str("i128"),
- ty::TyUint(ast::UintTy::Usize) => output.push_str("usize"),
- ty::TyUint(ast::UintTy::U8) => output.push_str("u8"),
- ty::TyUint(ast::UintTy::U16) => output.push_str("u16"),
- ty::TyUint(ast::UintTy::U32) => output.push_str("u32"),
- ty::TyUint(ast::UintTy::U64) => output.push_str("u64"),
- ty::TyUint(ast::UintTy::U128) => output.push_str("u128"),
- ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"),
- ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"),
- ty::TyAdt(adt_def, substs) => {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => output.push_str("!"),
+ ty::Int(ast::IntTy::Isize) => output.push_str("isize"),
+ ty::Int(ast::IntTy::I8) => output.push_str("i8"),
+ ty::Int(ast::IntTy::I16) => output.push_str("i16"),
+ ty::Int(ast::IntTy::I32) => output.push_str("i32"),
+ ty::Int(ast::IntTy::I64) => output.push_str("i64"),
+ ty::Int(ast::IntTy::I128) => output.push_str("i128"),
+ ty::Uint(ast::UintTy::Usize) => output.push_str("usize"),
+ ty::Uint(ast::UintTy::U8) => output.push_str("u8"),
+ ty::Uint(ast::UintTy::U16) => output.push_str("u16"),
+ ty::Uint(ast::UintTy::U32) => output.push_str("u32"),
+ ty::Uint(ast::UintTy::U64) => output.push_str("u64"),
+ ty::Uint(ast::UintTy::U128) => output.push_str("u128"),
+ ty::Float(ast::FloatTy::F32) => output.push_str("f32"),
+ ty::Float(ast::FloatTy::F64) => output.push_str("f64"),
+ ty::Adt(adt_def, substs) => {
self.push_def_path(adt_def.did, output);
self.push_type_params(substs, iter::empty(), output);
},
- ty::TyTuple(component_types) => {
+ ty::Tuple(component_types) => {
output.push('(');
for &component_type in component_types {
self.push_type_name(component_type, output);
}
output.push(')');
},
- ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
hir::MutImmutable => output.push_str("const "),
self.push_type_name(inner_type, output);
},
- ty::TyRef(_, inner_type, mutbl) => {
+ ty::Ref(_, inner_type, mutbl) => {
output.push('&');
if mutbl == hir::MutMutable {
output.push_str("mut ");
self.push_type_name(inner_type, output);
},
- ty::TyArray(inner_type, len) => {
+ ty::Array(inner_type, len) => {
output.push('[');
self.push_type_name(inner_type, output);
write!(output, "; {}", len.unwrap_usize(self.tcx)).unwrap();
output.push(']');
},
- ty::TySlice(inner_type) => {
+ ty::Slice(inner_type) => {
output.push('[');
self.push_type_name(inner_type, output);
output.push(']');
},
- ty::TyDynamic(ref trait_data, ..) => {
+ ty::Dynamic(ref trait_data, ..) => {
if let Some(principal) = trait_data.principal() {
self.push_def_path(principal.def_id(), output);
self.push_type_params(principal.skip_binder().substs,
output);
}
},
- ty::TyForeign(did) => self.push_def_path(did, output),
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => {
+ ty::Foreign(did) => self.push_def_path(did, output),
+ ty::FnDef(..) |
+ ty::FnPtr(_) => {
let sig = t.fn_sig(self.tcx);
if sig.unsafety() == hir::Unsafety::Unsafe {
output.push_str("unsafe ");
self.push_type_name(sig.output(), output);
}
},
- ty::TyGenerator(def_id, GeneratorSubsts { ref substs }, _) |
- ty::TyClosure(def_id, ClosureSubsts { ref substs }) => {
+ ty::Generator(def_id, GeneratorSubsts { ref substs }, _) |
+ ty::Closure(def_id, ClosureSubsts { ref substs }) => {
self.push_def_path(def_id, output);
let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id));
let substs = substs.truncate_to(self.tcx, generics);
self.push_type_params(substs, iter::empty(), output);
}
- ty::TyError |
- ty::TyInfer(_) |
- ty::TyProjection(..) |
- ty::TyParam(_) |
- ty::TyGeneratorWitness(_) |
- ty::TyAnon(..) => {
+ ty::Error |
+ ty::Infer(_) |
+ ty::Projection(..) |
+ ty::Param(_) |
+ ty::GeneratorWitness(_) |
+ ty::Anon(..) => {
bug!("DefPathBasedNames: Trying to create type name for \
unexpected type: {:?}", t);
}
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
- if let Some(&ty::TyS { sty: ty::TyGenerator(gen_def_id, substs, _), .. }) = ty {
+ if let Some(&ty::TyS { sty: ty::Generator(gen_def_id, substs, _), .. }) = ty {
let mir = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
return mir.subst(tcx, substs.substs);
}
match self_ty.sty {
_ if is_copy => builder.copy_shim(),
- ty::TyArray(ty, len) => {
+ ty::Array(ty, len) => {
let len = len.unwrap_usize(tcx);
builder.array_shim(dest, src, ty, len)
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
builder.tuple_like_shim(
dest, src,
substs.upvar_tys(def_id, tcx)
)
}
- ty::TyTuple(tys) => builder.tuple_like_shim(dest, src, tys.iter().cloned()),
+ ty::Tuple(tys) => builder.tuple_like_shim(dest, src, tys.iter().cloned()),
_ => {
bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty)
}
let sig = gcx.normalize_erasing_regions(param_env, sig);
let (adt_def, substs) = match sig.output().sty {
- ty::TyAdt(adt_def, substs) => (adt_def, substs),
+ ty::Adt(adt_def, substs) => (adt_def, substs),
_ => bug!("unexpected type for ADT ctor {:?}", sig.output())
};
// A Deref projection may restrict the context, this depends on the type
// being deref'd.
let context = match ty.sty {
- ty::TyRef(re, _, mutbl) => {
+ ty::Ref(re, _, mutbl) => {
let re = match re {
&RegionKind::ReScope(ce) => Some(ce),
&RegionKind::ReErased =>
};
(re, mutbl)
}
- ty::TyRawPtr(_) =>
+ ty::RawPtr(_) =>
// There is no guarantee behind even a mutable raw pointer,
// no write locks are acquired there, so we also don't want to
// release any.
(None, hir::MutImmutable),
- ty::TyAdt(adt, _) if adt.is_box() => (None, hir::MutMutable),
+ ty::Adt(adt, _) if adt.is_box() => (None, hir::MutMutable),
_ => bug!("Deref on a non-pointer type {:?}", ty),
};
// "Intersect" this restriction with proj.base.
}
let base_ty = base.ty(self.mir, self.tcx).to_ty(self.tcx);
match base_ty.sty {
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
self.require_unsafe("dereference of raw pointer",
"raw pointers may be NULL, dangling or unaligned; they can violate \
aliasing rules and cause data races: all of these are undefined \
behavior")
}
- ty::TyAdt(adt, _) => {
+ ty::Adt(adt, _) => {
if adt.is_union() {
if context == PlaceContext::Store ||
context == PlaceContext::AsmOutput ||
use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind};
use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem};
use rustc::mir::visit::{Visitor, PlaceContext};
-use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef};
+use rustc::mir::interpret::{
+ ConstEvalErr, EvalErrorKind, ScalarMaybeUndef, Scalar, GlobalId, EvalResult
+};
use rustc::ty::{TyCtxt, self, Instance};
-use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult};
-use interpret::EvalContext;
-use interpret::CompileTimeEvaluator;
-use interpret::{eval_promoted, mk_borrowck_eval_cx, ValTy};
+use interpret::{EvalContext, CompileTimeEvaluator, eval_promoted, mk_borrowck_eval_cx};
+use interpret::{Value, OpTy, MemoryKind};
use transform::{MirPass, MirSource};
use syntax::source_map::{Span, DUMMY_SP};
use rustc::ty::subst::Substs;
-use rustc_data_structures::indexed_vec::IndexVec;
+use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc::ty::ParamEnv;
use rustc::ty::layout::{
LayoutOf, TyLayout, LayoutError,
}
}
-type Const<'tcx> = (Value, TyLayout<'tcx>, Span);
+type Const<'tcx> = (OpTy<'tcx>, Span);
/// Finds optimization opportunities on the MIR.
struct ConstPropagator<'b, 'a, 'tcx:'a+'b> {
// FIXME: implement
=> {},
- | Panic
+ | Panic { .. }
| BoundsCheck{..}
| Overflow(_)
| OverflowNeg
source_info: SourceInfo,
) -> Option<Const<'tcx>> {
self.ecx.tcx.span = source_info.span;
- match self.ecx.const_to_value(c.literal.val) {
- Ok(val) => {
+ match self.ecx.const_value_to_op(c.literal.val) {
+ Ok(op) => {
let layout = self.tcx.layout_of(self.param_env.and(c.literal.ty)).ok()?;
- Some((val, layout, c.span))
+ Some((OpTy { op, layout }, c.span))
},
Err(error) => {
let (stacktrace, span) = self.ecx.generate_stacktrace(None);
Place::Projection(ref proj) => match proj.elem {
ProjectionElem::Field(field, _) => {
trace!("field proj on {:?}", proj.base);
- let (base, layout, span) = self.eval_place(&proj.base, source_info)?;
- let valty = self.use_ecx(source_info, |this| {
- this.ecx.read_field(base, None, field, layout)
+ let (base, span) = self.eval_place(&proj.base, source_info)?;
+ let res = self.use_ecx(source_info, |this| {
+ this.ecx.operand_field(base, field.index() as u64)
})?;
- Some((valty.0, valty.1, span))
+ Some((res, span))
},
+ // We could get more projections by using e.g. `operand_projection`,
+ // but we do not even have the stack frame set up properly so
+ // an `Index` projection would throw us off-track.
_ => None,
},
Place::Promoted(ref promoted) => {
};
// cannot use `const_eval` here, because that would require having the MIR
// for the current function available, but we're producing said MIR right now
- let (value, _, ty) = self.use_ecx(source_info, |this| {
+ let res = self.use_ecx(source_info, |this| {
eval_promoted(&mut this.ecx, cid, this.mir, this.param_env)
})?;
- let val = (value, ty, source_info.span);
- trace!("evaluated promoted {:?} to {:?}", promoted, val);
- Some(val)
+ trace!("evaluated promoted {:?} to {:?}", promoted, res);
+ Some((res, source_info.span))
},
_ => None,
}
Rvalue::Discriminant(..) => None,
Rvalue::Cast(kind, ref operand, _) => {
- let (value, layout, span) = self.eval_operand(operand, source_info)?;
+ let (op, span) = self.eval_operand(operand, source_info)?;
self.use_ecx(source_info, |this| {
- let dest_ptr = this.ecx.alloc_ptr(place_layout)?;
- let place_align = place_layout.align;
- let dest = ::interpret::Place::from_ptr(dest_ptr, place_align);
- this.ecx.cast(ValTy { value, ty: layout.ty }, kind, place_layout.ty, dest)?;
- Ok((
- Value::ByRef(dest_ptr.into(), place_align),
- place_layout,
- span,
- ))
+ let dest = this.ecx.allocate(place_layout, MemoryKind::Stack)?;
+ this.ecx.cast(op, kind, dest.into())?;
+ Ok((dest.into(), span))
})
}
Rvalue::Len(_) => None,
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some((
- Value::Scalar(Scalar::Bits {
- bits: n as u128,
- size: self.tcx.data_layout.pointer_size.bytes() as u8,
- }.into()),
- self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
+ OpTy::from_scalar_value(
+ Scalar::Bits {
+ bits: n as u128,
+ size: self.tcx.data_layout.pointer_size.bytes() as u8,
+ },
+ self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
+ ),
span,
)))
}
return None;
}
- let val = self.eval_operand(arg, source_info)?;
- let prim = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty })
+ let (arg, _) = self.eval_operand(arg, source_info)?;
+ let val = self.use_ecx(source_info, |this| {
+ let prim = this.ecx.read_scalar(arg)?.not_undef()?;
+ this.ecx.unary_op(op, prim, arg.layout)
})?;
- let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?;
- Some((Value::Scalar(val.into()), place_layout, span))
+ Some((OpTy::from_scalar_value(val, place_layout), span))
}
Rvalue::CheckedBinaryOp(op, ref left, ref right) |
Rvalue::BinaryOp(op, ref left, ref right) => {
}
let r = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: right.0, ty: right.1.ty })
+ this.ecx.read_value(right.0)
})?;
if op == BinOp::Shr || op == BinOp::Shl {
let left_ty = left.ty(self.mir, self.tcx);
.unwrap()
.size
.bits();
- let right_size = right.1.size;
- if r.to_bits(right_size).ok().map_or(false, |b| b >= left_bits as u128) {
+ let right_size = right.0.layout.size;
+ let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size));
+ if r_bits.ok().map_or(false, |b| b >= left_bits as u128) {
let source_scope_local_data = match self.mir.source_scope_local_data {
ClearCrossCrate::Set(ref data) => data,
ClearCrossCrate::Clear => return None,
}
let left = self.eval_operand(left, source_info)?;
let l = self.use_ecx(source_info, |this| {
- this.ecx.value_to_scalar(ValTy { value: left.0, ty: left.1.ty })
+ this.ecx.read_value(left.0)
})?;
trace!("const evaluating {:?} for {:?} and {:?}", op, left, right);
let (val, overflow) = self.use_ecx(source_info, |this| {
- this.ecx.binary_op(op, l, left.1.ty, r, right.1.ty)
+ this.ecx.binary_op(op, l, r)
})?;
let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
Value::ScalarPair(
}
Value::Scalar(val.into())
};
- Some((val, place_layout, span))
+ let res = OpTy {
+ op: ::interpret::Operand::Immediate(val),
+ layout: place_layout,
+ };
+ Some((res, span))
},
}
}
if let TerminatorKind::Assert { expected, msg, cond, .. } = kind {
if let Some(value) = self.eval_operand(cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
- if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 {
+ let expected = Value::Scalar(Scalar::from_bool(*expected).into());
+ if expected != value.0.to_immediate() {
// poison all places this operand references so that further code
// doesn't use the invalid value
match cond {
let len = self
.eval_operand(len, source_info)
.expect("len must be const");
- let len = match len.0 {
+ let len = match len.0.to_immediate() {
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
let index = self
.eval_operand(index, source_info)
.expect("index must be const");
- let index = match index.0 {
+ let index = match index.0.to_immediate() {
Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits, ..
})) => bits,
use rustc::ty::{self, TyCtxt};
use rustc::mir::*;
use rustc::util::nodemap::FxHashMap;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
use util::patch::MirPatch;
mir: &Mir<'tcx>,
id: ast::NodeId,
env: &MoveDataParamEnv<'tcx, 'tcx>)
- -> IdxSetBuf<BasicBlock>
+ -> IdxSet<BasicBlock>
{
debug!("find_dead_unwinds({:?})", mir.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
- let mut dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let mut dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &[], &dead_unwinds,
MaybeInitializedPlaces::new(tcx, mir, &env),
let mut init_data = InitializationData {
live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(),
- dead: IdxSetBuf::new_empty(env.move_data.move_paths.len()),
+ dead: IdxSet::new_empty(env.move_data.move_paths.len()),
};
debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}",
bb, bb_data, init_data.live);
}
struct InitializationData {
- live: IdxSetBuf<MovePathIndex>,
- dead: IdxSetBuf<MovePathIndex>
+ live: IdxSet<MovePathIndex>,
+ dead: IdxSet<MovePathIndex>
}
impl InitializationData {
use util::dump_mir;
use util::liveness::{self, IdentityMap, LivenessMode};
use rustc_data_structures::indexed_vec::Idx;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use std::collections::HashMap;
use std::borrow::Cow;
use std::iter::once;
movable: bool) ->
(liveness::LiveVarSet<Local>,
HashMap<BasicBlock, liveness::LiveVarSet<Local>>) {
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let node_id = tcx.hir.as_local_node_id(source.def_id).unwrap();
// Calculate when MIR locals have live storage. This gives us an upper bound of their
// Find the MIR locals which do not use StorageLive/StorageDead statements.
// The storage of these locals are always live.
- let mut ignored = StorageIgnored(IdxSetBuf::new_filled(mir.local_decls.len()));
+ let mut ignored = StorageIgnored(IdxSet::new_filled(mir.local_decls.len()));
ignored.visit_mir(mir);
// Calculate the MIR locals which have been previously
// MIR types
let allowed_upvars = tcx.erase_regions(&upvars);
let allowed = match interior.sty {
- ty::TyGeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
+ ty::GeneratorWitness(s) => tcx.erase_late_bound_regions(&s),
_ => bug!(),
};
// Get the interior types and substs which typeck computed
let (upvars, interior, movable) = match gen_ty.sty {
- ty::TyGenerator(_, substs, movability) => {
+ ty::Generator(_, substs, movability) => {
(substs.upvar_tys(def_id, tcx).collect(),
substs.witness(def_id, tcx),
movability == hir::GeneratorMovability::Movable)
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
- if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
+ if let ty::FnDef(callee_def_id, substs) = f.ty.sty {
if let Some(instance) = Instance::resolve(self.tcx,
param_env,
callee_def_id,
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
- if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
+ if let ty::FnDef(callee_def_id, substs) = f.ty.sty {
// Don't inline the same function multiple times.
if callsite.callee != callee_def_id {
callsites.push_back(CallSite {
}
TerminatorKind::Call {func: Operand::Constant(ref f), .. } => {
- if let ty::TyFnDef(def_id, _) = f.ty.sty {
+ if let ty::FnDef(def_id, _) = f.ty.sty {
// Don't give intrinsics the extra penalty for calls
let f = tcx.fn_sig(def_id);
if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
assert!(args.next().is_none());
let tuple = Place::Local(tuple);
- let tuple_tys = if let ty::TyTuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
+ let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
s
} else {
bug!("Closure arguments are not passed as a tuple");
use rustc::mir::{Constant, Location, Place, Mir, Operand, ProjectionElem, Rvalue, Local};
use rustc::mir::visit::{MutVisitor, Visitor};
-use rustc::ty::{TyCtxt, TypeVariants};
+use rustc::ty::{TyCtxt, TyKind};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::Idx;
use std::mem;
if let Rvalue::Len(ref place) = *rvalue {
let place_ty = place.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx);
- if let TypeVariants::TyArray(_, len) = place_ty.sty {
+ if let TyKind::Array(_, len) = place_ty.sty {
let span = self.mir.source_info(location).span;
let ty = self.tcx.types.usize;
let constant = Constant { span, ty, literal: len };
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
use rustc::mir::*;
-use rustc::ty::{Slice, Ty, TyCtxt, TypeVariants};
+use rustc::ty::{List, Ty, TyCtxt, TyKind};
use rustc_data_structures::indexed_vec::{Idx};
use transform::{MirPass, MirSource};
use syntax;
source_info,
kind: TerminatorKind::Call {
func: Operand::function_handle(tcx, call_did,
- Slice::empty(), source_info.span),
+ List::empty(), source_info.span),
args: vec![lhs, rhs],
destination: Some((place, bb)),
cleanup: None,
fn sign_of_128bit(ty: Ty) -> Option<bool> {
match ty.sty {
- TypeVariants::TyInt(syntax::ast::IntTy::I128) => Some(true),
- TypeVariants::TyUint(syntax::ast::UintTy::U128) => Some(false),
+ TyKind::Int(syntax::ast::IntTy::I128) => Some(true),
+ TyKind::Uint(syntax::ast::UintTy::U128) => Some(false),
_ => None,
}
}
//! diagnostics as to why a constant rvalue wasn't promoted.
use rustc_data_structures::bitvec::BitArray;
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::fx::FxHashSet;
use rustc::hir;
}
/// Qualify a whole const, static initializer or const fn.
- fn qualify_const(&mut self) -> (Qualif, Lrc<IdxSetBuf<Local>>) {
+ fn qualify_const(&mut self) -> (Qualif, Lrc<IdxSet<Local>>) {
debug!("qualifying {} {:?}", self.mode, self.def_id);
let mir = self.mir;
// Collect all the temps we need to promote.
- let mut promoted_temps = IdxSetBuf::new_empty(self.temp_promotion_state.len());
+ let mut promoted_temps = IdxSet::new_empty(self.temp_promotion_state.len());
for candidate in &self.promotion_candidates {
match *candidate {
(self.qualif, Lrc::new(promoted_temps))
}
+
+ fn is_const_panic_fn(&self, def_id: DefId) -> bool {
+ Some(def_id) == self.tcx.lang_items().panic_fn() ||
+ Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+ }
}
/// Accumulates an Rvalue or Call's effects in self.qualif.
this.add(Qualif::NOT_CONST);
} else {
let base_ty = proj.base.ty(this.mir, this.tcx).to_ty(this.tcx);
- if let ty::TyRawPtr(_) = base_ty.sty {
+ if let ty::RawPtr(_) = base_ty.sty {
if !this.tcx.sess.features_untracked().const_raw_ptr_deref {
emit_feature_err(
&this.tcx.sess.parse_sess, "const_raw_ptr_deref",
if let Place::Projection(ref proj) = *place {
if let ProjectionElem::Deref = proj.elem {
let base_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
- if let ty::TyRef(..) = base_ty.sty {
+ if let ty::Ref(..) = base_ty.sty {
is_reborrow = true;
}
}
if self.mode == Mode::StaticMut {
// Inside a `static mut`, &mut [...] is also allowed.
match ty.sty {
- ty::TyArray(..) | ty::TySlice(_) => forbidden_mut = false,
+ ty::Array(..) | ty::Slice(_) => forbidden_mut = false,
_ => {}
}
- } else if let ty::TyArray(_, len) = ty.sty {
+ } else if let ty::Array(_, len) = ty.sty {
// FIXME(eddyb) the `self.mode == Mode::Fn` condition
// seems unnecessary, given that this is merely a ZST.
if len.unwrap_usize(self.tcx) == 0 && self.mode == Mode::Fn {
}
Rvalue::BinaryOp(op, ref lhs, _) => {
- if let ty::TyRawPtr(_) = lhs.ty(self.mir, self.tcx).sty {
+ if let ty::RawPtr(_) = lhs.ty(self.mir, self.tcx).sty {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
let fn_ty = func.ty(self.mir, self.tcx);
let mut callee_def_id = None;
let (mut is_shuffle, mut is_const_fn) = (false, None);
- if let ty::TyFnDef(def_id, _) = fn_ty.sty {
+ if let ty::FnDef(def_id, _) = fn_ty.sty {
callee_def_id = Some(def_id);
match self.tcx.fn_sig(def_id).abi() {
Abi::RustIntrinsic |
}
}
_ => {
- if self.tcx.is_const_fn(def_id) {
+ if self.tcx.is_const_fn(def_id) || self.is_const_panic_fn(def_id) {
is_const_fn = Some(def_id);
}
}
// Const fn calls.
if let Some(def_id) = is_const_fn {
+ // check the const_panic feature gate or
// find corresponding rustc_const_unstable feature
- if let Some(&attr::Stability {
+ // FIXME: cannot allow this inside `allow_internal_unstable` because that would make
+ // `panic!` insta stable in constants, since the macro is marked with the attr
+ if self.is_const_panic_fn(def_id) {
+ if self.mode == Mode::Fn {
+ // never promote panics
+ self.qualif = Qualif::NOT_CONST;
+ } else if !self.tcx.sess.features_untracked().const_panic {
+ // don't allow panics in constants without the feature gate
+ emit_feature_err(
+ &self.tcx.sess.parse_sess,
+ "const_panic",
+ self.span,
+ GateIssue::Language,
+ &format!("panicking in {}s is unstable", self.mode),
+ );
+ }
+ } else if let Some(&attr::Stability {
rustc_const_unstable: Some(attr::RustcConstUnstable {
feature: ref feature_name
}),
fn mir_const_qualif<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
- -> (u8, Lrc<IdxSetBuf<Local>>) {
+ -> (u8, Lrc<IdxSet<Local>>) {
// NB: This `borrow()` is guaranteed to be valid (i.e., the value
// cannot yet be stolen), because `mir_validated()`, which steals
// from `mir_const(), forces this query to execute before
if mir.return_ty().references_error() {
tcx.sess.delay_span_bug(mir.span, "mir_const_qualif: Mir had errors");
- return (Qualif::NOT_CONST.bits(), Lrc::new(IdxSetBuf::new_empty(0)));
+ return (Qualif::NOT_CONST.bits(), Lrc::new(IdxSet::new_empty(0)));
}
let mut qualifier = Qualifier::new(tcx, def_id, mir, Mode::Const);
use rustc::ty::{self, TyCtxt};
use rustc::mir::{self, Mir, Location};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
let param_env = tcx.param_env(def_id);
let move_data = MoveData::gather_moves(mir, tcx).unwrap();
let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env };
- let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = IdxSet::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &attributes, &dead_unwinds,
MaybeInitializedPlaces::new(tcx, mir, &mdpe),
if let Some(mir::Terminator { ref kind, source_info, .. }) = *terminator {
if let mir::TerminatorKind::Call { func: ref oper, ref args, .. } = *kind {
if let mir::Operand::Constant(ref func) = *oper {
- if let ty::TyFnDef(def_id, _) = func.ty.sty {
+ if let ty::FnDef(def_id, _) = func.ty.sty {
let abi = tcx.fn_sig(def_id).abi();
let name = tcx.item_name(def_id);
if abi == Abi::RustIntrinsic && name == "rustc_peek" {
// no need to transformation
} else {
let place_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx);
- if let ty::TyArray(item_ty, const_size) = place_ty.sty {
+ if let ty::Array(item_ty, const_size) = place_ty.sty {
if let Some(size) = const_size.assert_usize(self.tcx) {
assert!(size <= u32::max_value() as u64,
"uniform array move out doesn't supported
let opt_src_place = items.first().and_then(|x| *x).map(|x| x.2);
let opt_size = opt_src_place.and_then(|src_place| {
let src_ty = src_place.ty(mir, tcx).to_ty(tcx);
- if let ty::TyArray(_, ref size_o) = src_ty.sty {
+ if let ty::Array(_, ref size_o) = src_ty.sty {
size_o.assert_usize(tcx)
} else {
None
ProjectionElem::Field(..) => {
let ty = base.ty(local_decls, tcx).to_ty(tcx);
match ty.sty {
- ty::TyAdt(def, _) if def.repr.packed() => {
+ ty::Adt(def, _) if def.repr.packed() => {
return true
}
_ => {}
o: Origin,
) -> DiagnosticBuilder<'cx> {
let type_name = match (&ty.sty, is_index) {
- (&ty::TyArray(_, _), Some(true)) | (&ty::TyArray(_, _), None) => "array",
- (&ty::TySlice(_), _) => "slice",
+ (&ty::Array(_, _), Some(true)) | (&ty::Array(_, _), None) => "array",
+ (&ty::Slice(_), _) => "slice",
_ => span_bug!(move_from_span, "this path should not cause illegal move"),
};
let mut err = struct_span_err!(
) -> DiagnosticBuilder<'cx> {
let moved_path = moved_path
.map(|mp| format!(": `{}`", mp))
- .unwrap_or("".to_owned());
+ .unwrap_or(String::new());
let err = struct_span_err!(
self,
/// if can_go then succ else drop-block
/// drop-block:
/// if ptr_based {
- /// ptr = cur
+ /// ptr = &mut *cur
/// cur = cur.offset(1)
/// } else {
/// ptr = &mut P[cur]
let one = self.constant_usize(1);
let (ptr_next, cur_next) = if ptr_based {
- (Rvalue::Use(copy(&Place::Local(cur))),
+ (Rvalue::Ref(
+ tcx.types.re_erased,
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ Place::Projection(Box::new(Projection {
+ base: Place::Local(cur),
+ elem: ProjectionElem::Deref,
+ }))
+ ),
Rvalue::BinaryOp(BinOp::Offset, copy(&Place::Local(cur)), one))
} else {
(Rvalue::Ref(
if ptr_based {
let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
let tmp = Place::Local(self.new_temp(tmp_ty));
- // tmp = &P;
+ // tmp = &mut P;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
fn open_drop<'a>(&mut self) -> BasicBlock {
let ty = self.place_ty(self.place);
match ty.sty {
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
// This should only happen for the self argument on the resume function.
// It effetively only contains upvars until the generator transformation runs.
// See librustc_mir/transform/generator.rs for more details.
- ty::TyGenerator(def_id, substs, _) => {
+ ty::Generator(def_id, substs, _) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
}
- ty::TyTuple(tys) => {
+ ty::Tuple(tys) => {
self.open_drop_for_tuple(tys)
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
if def.is_box() {
self.open_drop_for_box(def, substs)
} else {
self.open_drop_for_adt(def, substs)
}
}
- ty::TyDynamic(..) => {
+ ty::Dynamic(..) => {
let unwind = self.unwind; // FIXME(#43234)
let succ = self.succ;
self.complete_drop(Some(DropFlagMode::Deep), succ, unwind)
}
- ty::TyArray(ety, size) => {
+ ty::Array(ety, size) => {
let size = size.assert_usize(self.tcx());
self.open_drop_for_array(ety, size)
},
- ty::TySlice(ety) => self.open_drop_for_array(ety, None),
+ ty::Slice(ety) => self.open_drop_for_array(ety, None),
_ => bug!("open drop from non-ADT `{:?}`", ty)
}
use rustc::mir::Local;
use rustc::mir::*;
use rustc::ty::{item_path, TyCtxt};
-use rustc_data_structures::indexed_set::IdxSetBuf;
+use rustc_data_structures::indexed_set::IdxSet;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc_data_structures::work_queue::WorkQueue;
use std::fs;
use transform::MirSource;
use util::pretty::{dump_enabled, write_basic_block, write_mir_intro};
-pub type LiveVarSet<V> = IdxSetBuf<V>;
+pub type LiveVarSet<V> = IdxSet<V>;
/// This gives the result of the liveness analysis at the boundary of
/// basic blocks. You can use `simulate_block` to obtain the
) where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?;
writeln!(file, "// MIR for `{}`", node_path)?;
writeln!(file, "// source = {:?}", source)?;
};
if tcx.sess.opts.debugging_opts.dump_mir_graphviz {
- let _: io::Result<()> = do catch {
+ let _: io::Result<()> = try_block! {
let mut file =
create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
write_mir_fn_graphviz(tcx, source.def_id, mir, &mut file)?;
use syntax::visit::{self, Visitor};
use syntax_pos::Span;
use errors;
+use errors::Applicability;
struct AstValidator<'a> {
session: &'a Session,
);
match val.node {
ExprKind::Lit(ref v) if v.node.is_numeric() => {
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
place.span.between(val.span),
"if you meant to write a comparison against a negative value, add a \
space in between `<` and `-`",
"< -".to_string(),
+ Applicability::MaybeIncorrect
);
}
_ => {}
e: &'tcx hir::Expr, node_ty: Ty<'tcx>) -> Promotability {
let ty_result = match node_ty.sty {
- ty::TyAdt(def, _) if def.has_dtor(v.tcx) => {
+ ty::Adt(def, _) if def.has_dtor(v.tcx) => {
NotPromotable
}
_ => Promotable
return NotPromotable;
}
match v.tables.node_id_to_type(lhs.hir_id).sty {
- ty::TyRawPtr(_) => {
+ ty::RawPtr(_) => {
assert!(op.node == hir::BinOpKind::Eq || op.node == hir::BinOpKind::Ne ||
op.node == hir::BinOpKind::Le || op.node == hir::BinOpKind::Lt ||
op.node == hir::BinOpKind::Ge || op.node == hir::BinOpKind::Gt);
Some(ref expr) => { struct_result = struct_result & v.check_expr(&expr); },
None => {},
}
- if let ty::TyAdt(adt, ..) = v.tables.expr_ty(e).sty {
+ if let ty::Adt(adt, ..) = v.tables.expr_ty(e).sty {
// unsafe_cell_type doesn't necessarily exist with no_core
if Some(adt.did) == v.tcx.lang_items().unsafe_cell_type() {
return NotPromotable;
impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
fn item_ty_level(&self, item_def_id: DefId) -> Option<AccessLevel> {
let ty_def_id = match self.tcx.type_of(item_def_id).sty {
- ty::TyAdt(adt, _) => adt.did,
- ty::TyForeign(did) => did,
- ty::TyDynamic(ref obj, ..) if obj.principal().is_some() =>
+ ty::Adt(adt, _) => adt.did,
+ ty::Foreign(did) => did,
+ ty::Dynamic(ref obj, ..) if obj.principal().is_some() =>
obj.principal().unwrap().def_id(),
- ty::TyProjection(ref proj) => proj.trait_ref(self.tcx).def_id,
+ ty::Projection(ref proj) => proj.trait_ref(self.tcx).def_id,
_ => return Some(AccessLevel::Public)
};
if let Some(node_id) = self.tcx.hir.as_local_node_id(ty_def_id) {
fn ty(&mut self) -> &mut Self {
let ty = self.ev.tcx.type_of(self.item_def_id);
ty.visit_with(self);
- if let ty::TyFnDef(def_id, _) = ty.sty {
+ if let ty::FnDef(def_id, _) = ty.sty {
if def_id == self.item_def_id {
self.ev.tcx.fn_sig(def_id).visit_with(self);
}
impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let ty_def_id = match ty.sty {
- ty::TyAdt(adt, _) => Some(adt.did),
- ty::TyForeign(did) => Some(did),
- ty::TyDynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
- ty::TyProjection(ref proj) => Some(proj.item_def_id),
- ty::TyFnDef(def_id, ..) |
- ty::TyClosure(def_id, ..) |
- ty::TyGenerator(def_id, ..) |
- ty::TyAnon(def_id, _) => Some(def_id),
+ ty::Adt(adt, _) => Some(adt.did),
+ ty::Foreign(did) => Some(did),
+ ty::Dynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
+ ty::Projection(ref proj) => Some(proj.item_def_id),
+ ty::FnDef(def_id, ..) |
+ ty::Closure(def_id, ..) |
+ ty::Generator(def_id, ..) |
+ ty::Anon(def_id, _) => Some(def_id),
_ => None
};
impl<'a, 'tcx> TypeVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::TyAdt(&ty::AdtDef { did: def_id, .. }, ..) |
- ty::TyFnDef(def_id, ..) |
- ty::TyForeign(def_id) => {
+ ty::Adt(&ty::AdtDef { did: def_id, .. }, ..) |
+ ty::FnDef(def_id, ..) |
+ ty::Foreign(def_id) => {
if !self.item_is_accessible(def_id) {
let msg = format!("type `{}` is private", ty);
self.tcx.sess.span_err(self.span, &msg);
return true;
}
- if let ty::TyFnDef(..) = ty.sty {
+ if let ty::FnDef(..) = ty.sty {
if self.tcx.fn_sig(def_id).visit_with(self) {
return true;
}
}
}
}
- ty::TyDynamic(ref predicates, ..) => {
+ ty::Dynamic(ref predicates, ..) => {
let is_private = predicates.skip_binder().iter().any(|predicate| {
let def_id = match *predicate {
ty::ExistentialPredicate::Trait(trait_ref) => trait_ref.def_id,
return true;
}
}
- ty::TyProjection(ref proj) => {
+ ty::Projection(ref proj) => {
let tcx = self.tcx;
if self.check_trait_ref(proj.trait_ref(tcx)) {
return true;
}
}
- ty::TyAnon(def_id, ..) => {
+ ty::Anon(def_id, ..) => {
for predicate in &self.tcx.predicates_of(def_id).predicates {
let trait_ref = match *predicate {
ty::Predicate::Trait(ref poly_trait_predicate) => {
return true;
}
for subst in trait_ref.substs.iter() {
- // Skip repeated `TyAnon`s to avoid infinite recursion.
+ // Skip repeated `Anon`s to avoid infinite recursion.
if let UnpackedKind::Type(ty) = subst.unpack() {
- if let ty::TyAnon(def_id, ..) = ty.sty {
+ if let ty::Anon(def_id, ..) = ty.sty {
if !self.visited_anon_tys.insert(def_id) {
continue;
}
fn ty(&mut self) -> &mut Self {
let ty = self.tcx.type_of(self.item_def_id);
ty.visit_with(self);
- if let ty::TyFnDef(def_id, _) = ty.sty {
+ if let ty::FnDef(def_id, _) = ty.sty {
if def_id == self.item_def_id {
self.tcx.fn_sig(def_id).visit_with(self);
}
impl<'a, 'tcx: 'a> TypeVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
let ty_def_id = match ty.sty {
- ty::TyAdt(adt, _) => Some(adt.did),
- ty::TyForeign(did) => Some(did),
- ty::TyDynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
- ty::TyProjection(ref proj) => {
+ ty::Adt(adt, _) => Some(adt.did),
+ ty::Foreign(did) => Some(did),
+ ty::Dynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()),
+ ty::Projection(ref proj) => {
if self.required_visibility == ty::Visibility::Invisible {
// Conservatively approximate the whole type alias as public without
// recursing into its components when determining impl publicity.
(Def::Static(self.definitions.local_def_id(item.id), m), ValueNS)
}
ForeignItemKind::Ty => {
- (Def::TyForeign(self.definitions.local_def_id(item.id)), TypeNS)
+ (Def::ForeignTy(self.definitions.local_def_id(item.id)), TypeNS)
}
ForeignItemKind::Macro(_) => unreachable!(),
};
span);
self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion));
}
- Def::Variant(..) | Def::TyAlias(..) | Def::TyForeign(..) => {
+ Def::Variant(..) | Def::TyAlias(..) | Def::ForeignTy(..) => {
self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion));
}
Def::Fn(..) | Def::Static(..) | Def::Const(..) | Def::VariantCtor(..) => {
use self::RibKind::*;
use rustc::hir::map::{Definitions, DefCollector};
-use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr};
+use rustc::hir::{self, PrimTy, Bool, Char, Float, Int, Uint, Str};
use rustc::middle::cstore::CrateStore;
use rustc::session::Session;
use rustc::lint;
Def::Trait(..) | Def::TyAlias(..) | Def::AssociatedTy(..) |
Def::PrimTy(..) | Def::TyParam(..) | Def::SelfTy(..) |
Def::Existential(..) |
- Def::TyForeign(..) => true,
+ Def::ForeignTy(..) => true,
_ => false,
},
PathSource::Trait(AliasPossibility::No) => match def {
fn new() -> PrimitiveTypeTable {
let mut table = PrimitiveTypeTable { primitive_types: FxHashMap() };
- table.intern("bool", TyBool);
- table.intern("char", TyChar);
- table.intern("f32", TyFloat(FloatTy::F32));
- table.intern("f64", TyFloat(FloatTy::F64));
- table.intern("isize", TyInt(IntTy::Isize));
- table.intern("i8", TyInt(IntTy::I8));
- table.intern("i16", TyInt(IntTy::I16));
- table.intern("i32", TyInt(IntTy::I32));
- table.intern("i64", TyInt(IntTy::I64));
- table.intern("i128", TyInt(IntTy::I128));
- table.intern("str", TyStr);
- table.intern("usize", TyUint(UintTy::Usize));
- table.intern("u8", TyUint(UintTy::U8));
- table.intern("u16", TyUint(UintTy::U16));
- table.intern("u32", TyUint(UintTy::U32));
- table.intern("u64", TyUint(UintTy::U64));
- table.intern("u128", TyUint(UintTy::U128));
+ table.intern("bool", Bool);
+ table.intern("char", Char);
+ table.intern("f32", Float(FloatTy::F32));
+ table.intern("f64", Float(FloatTy::F64));
+ table.intern("isize", Int(IntTy::Isize));
+ table.intern("i8", Int(IntTy::I8));
+ table.intern("i16", Int(IntTy::I16));
+ table.intern("i32", Int(IntTy::I32));
+ table.intern("i64", Int(IntTy::I64));
+ table.intern("i128", Int(IntTy::I128));
+ table.intern("str", Str);
+ table.intern("usize", Uint(UintTy::Usize));
+ table.intern("u8", Uint(UintTy::U8));
+ table.intern("u16", Uint(UintTy::U16));
+ table.intern("u32", Uint(UintTy::U32));
+ table.intern("u64", Uint(UintTy::U64));
+ table.intern("u128", Uint(UintTy::U128));
table
}
}
ident.span = ident.span.modern();
+ let mut poisoned = None;
loop {
- let (opt_module, poisoned) = if let Some(node_id) = record_used_id {
+ let opt_module = if let Some(node_id) = record_used_id {
self.hygienic_lexical_parent_with_compatibility_fallback(module, &mut ident.span,
- node_id)
+ node_id, &mut poisoned)
} else {
- (self.hygienic_lexical_parent(module, &mut ident.span), None)
+ self.hygienic_lexical_parent(module, &mut ident.span)
};
module = unwrap_or!(opt_module, break);
let orig_current_module = self.current_module;
}
return Some(LexicalScopeBinding::Item(binding))
}
- _ if poisoned.is_some() => break,
Err(Determined) => continue,
Err(Undetermined) =>
span_bug!(ident.span, "undetermined resolution during main resolution pass"),
None
}
- fn hygienic_lexical_parent_with_compatibility_fallback(
- &mut self, module: Module<'a>, span: &mut Span, node_id: NodeId
- ) -> (Option<Module<'a>>, /* poisoned */ Option<NodeId>)
- {
+ fn hygienic_lexical_parent_with_compatibility_fallback(&mut self, module: Module<'a>,
+ span: &mut Span, node_id: NodeId,
+ poisoned: &mut Option<NodeId>)
+ -> Option<Module<'a>> {
if let module @ Some(..) = self.hygienic_lexical_parent(module, span) {
- return (module, None);
+ return module;
}
// We need to support the next case under a deprecation warning
// The macro is a proc macro derive
if module.expansion.looks_like_proc_macro_derive() {
if parent.expansion.is_descendant_of(span.ctxt().outer()) {
- return (module.parent, Some(node_id));
+ *poisoned = Some(node_id);
+ return module.parent;
}
}
}
}
- (None, None)
+ None
}
fn resolve_ident_in_module(&mut self,
}
}
// Add primitive types to the mix
- if filter_fn(Def::PrimTy(TyBool)) {
+ if filter_fn(Def::PrimTy(Bool)) {
names.extend(
self.primitive_type_table.primitive_types.iter().map(|(name, _)| name)
)
use syntax::tokenstream::{TokenStream, TokenTree, Delimited};
use syntax::util::lev_distance::find_best_match_for_name;
use syntax_pos::{Span, DUMMY_SP};
+use errors::Applicability;
use std::cell::Cell;
use std::mem;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::small_vec::ExpectOne;
crate struct FromPrelude(bool);
crate struct FromExpansion(bool);
let def = def?;
- if path.segments.len() > 1 {
- if kind != MacroKind::Bang {
- if def != Def::NonMacroAttr(NonMacroAttrKind::Tool) &&
- !self.session.features_untracked().proc_macro_path_invoc {
- let msg = format!("non-ident {} paths are unstable", kind.descr());
- emit_feature_err(&self.session.parse_sess, "proc_macro_path_invoc",
- path.span, GateIssue::Language, &msg);
- }
- }
- }
-
match def {
Def::Macro(def_id, macro_kind) => {
self.unused_macros.remove(&def_id);
Def::NonMacroAttr(attr_kind) => {
if kind == MacroKind::Attr {
let features = self.session.features_untracked();
- if attr_kind == NonMacroAttrKind::Tool && !features.tool_attributes {
- feature_err(&self.session.parse_sess, "tool_attributes", path.span,
- GateIssue::Language, "tool attributes are unstable").emit();
- }
if attr_kind == NonMacroAttrKind::Custom {
assert!(path.segments.len() == 1);
let name = path.segments[0].ident.name.as_str();
if let Some(suggestion) = suggestion {
if suggestion != name {
if let MacroKind::Bang = kind {
- err.span_suggestion(span, "you could try the macro", suggestion.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ "you could try the macro",
+ suggestion.to_string(),
+ Applicability::MaybeIncorrect
+ );
} else {
- err.span_suggestion(span, "try", suggestion.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ "try",
+ suggestion.to_string(),
+ Applicability::MaybeIncorrect
+ );
}
} else {
err.help("have you added the `#[macro_use]` on the module/import?");
if let Some(span) = span {
let found_use = if found_use { "" } else { "\n" };
self.session.struct_span_err(err.use_span, err.warn_msg)
- .span_suggestion(
+ .span_suggestion_with_applicability(
span,
"instead, import the procedural macro like any other item",
format!("use {}::{};{}", err.crate_name, err.name, found_use),
+ Applicability::MachineApplicable
).emit();
} else {
self.session.struct_span_err(err.use_span, err.warn_msg)
};
match self.resolve_ident_in_module(module, ident, ns, false, path_span) {
Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, single_import.parent) => continue,
Ok(_) | Err(Undetermined) => return Err(Undetermined),
}
}
path_span,
);
self.current_module = orig_current_module;
+
match result {
Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, glob_import.parent) => continue,
Ok(_) | Err(Undetermined) => return Err(Undetermined),
}
}
let lev_suggestion =
match find_best_match_for_name(names, &ident.as_str(), None) {
Some(name) => format!(". Did you mean to use `{}`?", name),
- None => "".to_owned(),
+ None => String::new(),
};
let msg = match module {
ModuleOrUniformRoot::Module(module) => {
rustc_typeck = { path = "../librustc_typeck" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
-rls-data = "0.16"
+rls-data = "0.18"
rls-span = "0.4"
# FIXME(#40527) should move rustc serialize out of tree
rustc-serialize = "0.3"
pub fn get_expr_data(&self, expr: &ast::Expr) -> Option<Data> {
let hir_node = self.tcx.hir.expect_expr(expr.id);
let ty = self.tables.expr_ty_adjusted_opt(&hir_node);
- if ty.is_none() || ty.unwrap().sty == ty::TyError {
+ if ty.is_none() || ty.unwrap().sty == ty::Error {
return None;
}
match expr.node {
}
};
match self.tables.expr_ty_adjusted(&hir_node).sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
let variant = &def.non_enum_variant();
let index = self.tcx.find_field_index(ident, variant).unwrap();
let sub_span = self.span_utils.span_for_last_ident(expr.span);
ref_id: id_from_def_id(variant.fields[index].did),
}));
}
- ty::TyTuple(..) => None,
+ ty::Tuple(..) => None,
_ => {
debug!("Expected struct or union type, found {:?}", ty);
None
}
ast::ExprKind::Struct(ref path, ..) => {
match self.tables.expr_ty_adjusted(&hir_node).sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
let sub_span = self.span_utils.span_for_last_ident(path.span);
filter!(self.span_utils, sub_span, path.span, None);
let span = self.span_from_span(sub_span.unwrap());
hir::QPath::Resolved(_, ref path) => path.def,
hir::QPath::TypeRelative(..) => {
let ty = hir_ty_to_ty(self.tcx, ty);
- if let ty::TyProjection(proj) = ty.sty {
+ if let ty::Projection(proj) = ty.sty {
return HirDef::AssociatedTy(proj.item_def_id);
}
HirDef::Err
HirDef::Union(def_id) |
HirDef::Enum(def_id) |
HirDef::TyAlias(def_id) |
- HirDef::TyForeign(def_id) |
+ HirDef::ForeignTy(def_id) |
HirDef::TraitAlias(def_id) |
HirDef::AssociatedExistential(def_id) |
HirDef::AssociatedTy(def_id) |
.iter()
.any(|ct| *ct == CrateType::Executable);
let mut out_name = if executable {
- "".to_owned()
+ String::new()
} else {
"lib".to_owned()
};
data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "fuchsia".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
abi_blacklist: super::arm_base::abi_blacklist(),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "hermit".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
arch: "aarch64".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
arch: "arm".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(),
arch: "arm".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
arch: "asmjs".to_string(),
data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "macos".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "dragonfly".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "haiku".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
arch: "x86".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
post_link_args: LinkArgs::new(),
asm_args: Vec::new(),
cpu: "generic".to_string(),
- features: "".to_string(),
+ features: String::new(),
dynamic_linking: false,
only_cdylib: false,
executables: false,
function_sections: true,
dll_prefix: "lib".to_string(),
dll_suffix: ".so".to_string(),
- exe_suffix: "".to_string(),
+ exe_suffix: String::new(),
staticlib_prefix: "lib".to_string(),
staticlib_suffix: ".a".to_string(),
target_family: None,
data_layout: "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16".to_string(),
arch: "msp430".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(),
arch: "powerpc".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
arch: "riscv32".to_string(),
linker_flavor: LinkerFlavor::Ld,
data_layout: "E-m:e-i64:64-n32:64-S128".to_string(),
arch: "sparc64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
// just be confusing.
arch: "sparc64".to_string(),
target_os: "solaris".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "sun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
arch: "wasm32".to_string(),
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
target_os: "emscripten".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
arch: "wasm32".to_string(),
// relatively self-explanatory!
exe_suffix: ".wasm".to_string(),
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".wasm".to_string(),
linker_is_gnu: false,
// This is basically guaranteed to change in the future, don't rely on
// this. Use `not(target_os = "emscripten")` for now.
target_os: "unknown".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
arch: "wasm32".to_string(),
linker: Some("gcc".to_string()),
dynamic_linking: true,
executables: true,
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
- staticlib_prefix: "".to_string(),
+ staticlib_prefix: String::new(),
staticlib_suffix: ".lib".to_string(),
no_default_libraries: true,
target_family: Some("windows".to_string()),
function_sections: true,
dynamic_linking: true,
executables: true,
- dll_prefix: "".to_string(),
+ dll_prefix: String::new(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
- staticlib_prefix: "".to_string(),
+ staticlib_prefix: String::new(),
staticlib_suffix: ".lib".to_string(),
target_family: Some("windows".to_string()),
is_like_windows: true,
data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "macos".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "ios".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "apple".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: TargetOptions {
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "fuchsia".to_string(),
- target_env: "".to_string(),
- target_vendor: "".to_string(),
+ target_env: String::new(),
+ target_vendor: String::new(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "android".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "rumprun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "solaris".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "sun".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "bitrig".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "cloudabi".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "dragonfly".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "freebsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "haiku".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "hermit".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "netbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "openbsd".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "redox".to_string(),
- target_env: "".to_string(),
+ target_env: String::new(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
chalk-engine = { version = "0.7.0", default-features=false }
+smallvec = { version = "0.6.5", features = ["union"] }
use rustc::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use rustc::ty::subst::Kind;
use rustc::ty::{self, TyCtxt};
-use rustc_data_structures::small_vec::SmallVec;
+use smallvec::SmallVec;
use std::fmt::{self, Debug};
use std::marker::PhantomData;
// _ => false,
// },
// Kind::Type(ty) => match ty.sty {
-// ty::TyInfer(ty::InferTy::CanonicalTy(cvar1)) => cvar == cvar1,
+// ty::Infer(ty::InferTy::CanonicalTy(cvar1)) => cvar == cvar1,
// _ => false,
// },
// })
match ty.sty {
// All parameters live for the duration of the
// function.
- ty::TyParam(..) => {}
+ ty::Param(..) => {}
// A projection that we couldn't resolve - it
// might have a destructor.
- ty::TyProjection(..) | ty::TyAnon(..) => {
+ ty::Projection(..) | ty::Anon(..) => {
result.kinds.push(ty.into());
}
}
let result = match ty.sty {
- ty::TyBool
- | ty::TyChar
- | ty::TyInt(_)
- | ty::TyUint(_)
- | ty::TyFloat(_)
- | ty::TyStr
- | ty::TyNever
- | ty::TyForeign(..)
- | ty::TyRawPtr(..)
- | ty::TyRef(..)
- | ty::TyFnDef(..)
- | ty::TyFnPtr(_)
- | ty::TyGeneratorWitness(..) => {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::GeneratorWitness(..) => {
// these types never have a destructor
Ok(DtorckConstraint::empty())
}
- ty::TyArray(ety, _) | ty::TySlice(ety) => {
+ ty::Array(ety, _) | ty::Slice(ety) => {
// single-element containers, behave like their element
dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ety)
}
- ty::TyTuple(tys) => tys
+ ty::Tuple(tys) => tys
.iter()
.map(|ty| dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty))
.collect(),
- ty::TyClosure(def_id, substs) => substs
+ ty::Closure(def_id, substs) => substs
.upvar_tys(def_id, tcx)
.map(|ty| dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty))
.collect(),
- ty::TyGenerator(def_id, substs, _movability) => {
+ ty::Generator(def_id, substs, _movability) => {
// rust-lang/rust#49918: types can be constructed, stored
// in the interior, and sit idle when generator yields
// (and is subsequently dropped).
// its interior).
//
// However, the interior's representation uses things like
- // TyGeneratorWitness that explicitly assume they are not
+ // GeneratorWitness that explicitly assume they are not
// traversed in such a manner. So instead, we will
// simplify things for now by treating all generators as
// if they were like trait objects, where its upvars must
Ok(constraint)
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let DtorckConstraint {
dtorck_types,
outlives,
// Objects must be alive in order for their destructor
// to be called.
- ty::TyDynamic(..) => Ok(DtorckConstraint {
+ ty::Dynamic(..) => Ok(DtorckConstraint {
outlives: vec![ty.into()],
dtorck_types: vec![],
overflows: vec![],
}),
// Types that can't be resolved. Pass them forward.
- ty::TyProjection(..) | ty::TyAnon(..) | ty::TyParam(..) => Ok(DtorckConstraint {
+ ty::Projection(..) | ty::Anon(..) | ty::Param(..) => Ok(DtorckConstraint {
outlives: vec![],
dtorck_types: vec![ty],
overflows: vec![],
}),
- ty::TyInfer(..) | ty::TyError => {
+ ty::Infer(..) | ty::Error => {
// By the time this code runs, all type variables ought to
// be fully resolved.
Err(NoSolution)
extern crate rustc_data_structures;
extern crate syntax;
extern crate syntax_pos;
+extern crate smallvec;
mod chalk_context;
mod dropck_outlives;
WhereClause,
};
use rustc::ty::query::Providers;
-use rustc::ty::{self, Slice, TyCtxt};
+use rustc::ty::{self, List, TyCtxt};
use rustc_data_structures::fx::FxHashSet;
use std::mem;
use syntax::ast;
DefPathData::AssocTypeInImpl(..) => program_clauses_for_associated_type_value(tcx, def_id),
DefPathData::AssocTypeInTrait(..) => program_clauses_for_associated_type_def(tcx, def_id),
DefPathData::TypeNs(..) => program_clauses_for_type_def(tcx, def_id),
- _ => Slice::empty(),
+ _ => List::empty(),
}
}
fn program_clauses_for_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Clauses<'tcx> {
if let ImplPolarity::Negative = tcx.impl_polarity(def_id) {
- return Slice::empty();
+ return List::empty();
}
// Rule Implemented-From-Impl (see rustc guide)
/// Dummy type used for the `Self` of a `TraitRef` created for converting
/// a trait object, and which gets removed in `ExistentialTraitRef`.
/// This type must not appear anywhere in other converted types.
-const TRAIT_OBJECT_DUMMY_SELF: ty::TypeVariants<'static> = ty::TyInfer(ty::FreshTy(0));
+const TRAIT_OBJECT_DUMMY_SELF: ty::TyKind<'static> = ty::Infer(ty::FreshTy(0));
impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o {
pub fn ast_region_to_region(&self,
Err(ErrorReported) => return (tcx.types.err, Def::Err),
}
}
- (&ty::TyParam(_), Def::SelfTy(Some(param_did), None)) |
- (&ty::TyParam(_), Def::TyParam(param_did)) => {
+ (&ty::Param(_), Def::SelfTy(Some(param_did), None)) |
+ (&ty::Param(_), Def::TyParam(param_did)) => {
match self.find_bound_for_assoc_item(param_did, assoc_name, span) {
Ok(bound) => bound,
Err(ErrorReported) => return (tcx.types.err, Def::Err),
)
}
Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) |
- Def::Union(did) | Def::TyForeign(did) => {
+ Def::Union(did) | Def::ForeignTy(did) => {
assert_eq!(opt_self_ty, None);
self.prohibit_generics(path.segments.split_last().unwrap().1);
self.ast_path_to_ty(span, did, path.segments.last().unwrap())
assert_eq!(opt_self_ty, None);
self.prohibit_generics(&path.segments);
match prim_ty {
- hir::TyBool => tcx.types.bool,
- hir::TyChar => tcx.types.char,
- hir::TyInt(it) => tcx.mk_mach_int(it),
- hir::TyUint(uit) => tcx.mk_mach_uint(uit),
- hir::TyFloat(ft) => tcx.mk_mach_float(ft),
- hir::TyStr => tcx.mk_str()
+ hir::Bool => tcx.types.bool,
+ hir::Char => tcx.types.char,
+ hir::Int(it) => tcx.mk_mach_int(it),
+ hir::Uint(uit) => tcx.mk_mach_uint(uit),
+ hir::Float(ft) => tcx.mk_mach_float(ft),
+ hir::Str => tcx.mk_str()
}
}
Def::Err => {
}
hir::TyKind::Rptr(ref region, ref mt) => {
let r = self.ast_region_to_region(region, None);
- debug!("TyRef r={:?}", r);
+ debug!("Ref r={:?}", r);
let t = self.ast_ty_to_ty(&mt.ty);
tcx.mk_ref(r, ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
}
let length_def_id = tcx.hir.local_def_id(length.id);
let substs = Substs::identity_for_item(tcx, length_def_id);
let length = ty::Const::unevaluated(tcx, length_def_id, substs, tcx.types.usize);
- let array_ty = tcx.mk_ty(ty::TyArray(self.ast_ty_to_ty(&ty), length));
+ let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(&ty), length));
self.normalize_ty(ast_ty.span, array_ty)
}
hir::TyKind::Typeof(ref _e) => {
tcx.types.err
}
hir::TyKind::Infer => {
- // TyInfer also appears as the type of arguments or return
+ // Infer also appears as the type of arguments or return
// values in a ExprKind::Closure, or as
// the type of local variables. Both of these cases are
// handled specially and will not descend into this routine.
/// we return `None`.
fn compute_object_lifetime_bound(&self,
span: Span,
- existential_predicates: ty::Binder<&'tcx ty::Slice<ty::ExistentialPredicate<'tcx>>>)
+ existential_predicates: ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>)
-> Option<ty::Region<'tcx>> // if None, use the default
{
let tcx = self.tcx();
PatKind::Lit(ref lt) => {
let ty = self.check_expr(lt);
match ty.sty {
- ty::TypeVariants::TyRef(..) => false,
+ ty::Ref(..) => false,
_ => true,
}
}
// Peel off as many `&` or `&mut` from the discriminant as possible. For example,
// for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches
- // the `Some(5)` which is not of type TyRef.
+ // the `Some(5)` which is not of type Ref.
//
// For each ampersand peeled off, update the binding mode and push the original
// type into the adjustments vector.
expected = loop {
debug!("inspecting {:?} with type {:?}", exp_ty, exp_ty.sty);
match exp_ty.sty {
- ty::TypeVariants::TyRef(_, inner_ty, inner_mutability) => {
- debug!("current discriminant is TyRef, inserting implicit deref");
+ ty::Ref(_, inner_ty, inner_mutability) => {
+ debug!("current discriminant is Ref, inserting implicit deref");
// Preserve the reference type. We'll need it later during HAIR lowering.
pat_adjustments.push(exp_ty);
if let hir::ExprKind::Lit(ref lt) = lt.node {
if let ast::LitKind::ByteStr(_) = lt.node {
let expected_ty = self.structurally_resolved_type(pat.span, expected);
- if let ty::TyRef(_, r_ty, _) = expected_ty.sty {
- if let ty::TySlice(_) = r_ty.sty {
+ if let ty::Ref(_, r_ty, _) = expected_ty.sty {
+ if let ty::Slice(_) = r_ty.sty {
pat_ty = tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_slice(tcx.types.u8))
}
let mut expected_len = elements.len();
if ddpos.is_some() {
// Require known type only when `..` is present
- if let ty::TyTuple(ref tys) =
+ if let ty::Tuple(ref tys) =
self.structurally_resolved_type(pat.span, expected).sty {
expected_len = tys.len();
}
// from all tuple elements isn't trivial.
TypeVariableOrigin::TypeInference(pat.span)));
let element_tys = tcx.mk_type_list(element_tys_iter);
- let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys));
+ let pat_ty = tcx.mk_ty(ty::Tuple(element_tys));
self.demand_eqtype(pat.span, expected, pat_ty);
for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
self.check_pat_walk(elem, &element_tys[i], def_bm, true);
// hack detailed in (*) below.
debug!("check_pat_walk: expected={:?}", expected);
let (rptr_ty, inner_ty) = match expected.sty {
- ty::TyRef(_, r_ty, r_mutbl) if r_mutbl == mutbl => {
+ ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => {
(expected, r_ty)
}
_ => {
PatKind::Slice(ref before, ref slice, ref after) => {
let expected_ty = self.structurally_resolved_type(pat.span, expected);
let (inner_ty, slice_ty) = match expected_ty.sty {
- ty::TyArray(inner_ty, size) => {
+ ty::Array(inner_ty, size) => {
let size = size.unwrap_usize(tcx);
let min_len = before.len() as u64 + after.len() as u64;
if slice.is_none() {
(inner_ty, tcx.types.err)
}
}
- ty::TySlice(inner_ty) => (inner_ty, expected_ty),
+ ty::Slice(inner_ty) => (inner_ty, expected_ty),
_ => {
if !expected_ty.references_error() {
let mut err = struct_span_err!(
tcx.sess, pat.span, E0529,
"expected an array or slice, found `{}`",
expected_ty);
- if let ty::TyRef(_, ty, _) = expected_ty.sty {
+ if let ty::Ref(_, ty, _) = expected_ty.sty {
match ty.sty {
- ty::TyArray(..) | ty::TySlice(..) => {
+ ty::Array(..) | ty::Slice(..) => {
err.help("the semantics of slice patterns changed \
recently; see issue #23121");
}
pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool {
if let PatKind::Binding(..) = inner.node {
if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true) {
- if let ty::TyDynamic(..) = mt.ty.sty {
+ if let ty::Dynamic(..) = mt.ty.sty {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
let type_str = self.ty_to_string(expected);
if subpats.len() == variant.fields.len() ||
subpats.len() < variant.fields.len() && ddpos.is_some() {
let substs = match pat_ty.sty {
- ty::TyAdt(_, substs) => substs,
+ ty::Adt(_, substs) => substs,
ref ty => bug!("unexpected pattern type {:?}", ty),
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) {
let tcx = self.tcx;
let (substs, adt) = match adt_ty.sty {
- ty::TyAdt(adt, substs) => (substs, adt),
+ ty::Adt(adt, substs) => (substs, adt),
_ => span_bug!(span, "struct pattern is not an ADT")
};
let kind_name = adt.variant_descr();
self.fcx.try_overloaded_deref(self.span, source, needs)
.and_then(|InferOk { value: method, obligations: o }| {
obligations.extend(o);
- if let ty::TyRef(region, _, mutbl) = method.sig.output().sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.output().sty {
Some(OverloadedDeref {
region,
mutbl,
use rustc_target::spec::abi;
use syntax::ast::Ident;
use syntax_pos::Span;
+use errors::Applicability;
use rustc::hir;
// If the callee is a bare function or a closure, then we're all set.
match adjusted_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ ty::FnDef(..) | ty::FnPtr(_) => {
let adjustments = autoderef.adjust_steps(Needs::None);
self.apply_adjustments(callee_expr, adjustments);
return Some(CallStep::Builtin(adjusted_ty));
}
- ty::TyClosure(def_id, substs) => {
+ ty::Closure(def_id, substs) => {
assert_eq!(def_id.krate, LOCAL_CRATE);
// Check whether this is a call to a closure where we
// over the top. The simplest fix by far is to just ignore
// this case and deref again, so we wind up with
// `FnMut::call_mut(&mut *x, ())`.
- ty::TyRef(..) if autoderef.step_count() == 0 => {
+ ty::Ref(..) if autoderef.step_count() == 0 => {
return None;
}
let method = self.register_infer_ok_obligations(ok);
let mut autoref = None;
if borrow {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
expected: Expectation<'tcx>)
-> Ty<'tcx> {
let (fn_sig, def_span) = match callee_ty.sty {
- ty::TyFnDef(def_id, _) => {
+ ty::FnDef(def_id, _) => {
(callee_ty.fn_sig(self.tcx), self.tcx.hir.span_if_local(def_id))
}
- ty::TyFnPtr(sig) => (sig, None),
+ ty::FnPtr(sig) => (sig, None),
ref t => {
let mut unit_variant = None;
- if let &ty::TyAdt(adt_def, ..) = t {
+ if let &ty::Adt(adt_def, ..) = t {
if adt_def.is_enum() {
if let hir::ExprKind::Call(ref expr, _) = call_expr.node {
unit_variant = Some(self.tcx.hir.node_to_pretty_string(expr.id))
err.span_label(call_expr.span, "not a function");
if let Some(ref path) = unit_variant {
- err.span_suggestion(call_expr.span,
- &format!("`{}` is a unit variant, you need to write it \
- without the parenthesis", path),
- path.to_string());
+ err.span_suggestion_with_applicability(
+ call_expr.span,
+ &format!("`{}` is a unit variant, you need to write it \
+ without the parenthesis", path),
+ path.to_string(),
+ Applicability::MachineApplicable
+ );
}
if let hir::ExprKind::Call(ref expr, _) = call_expr.node {
}
Ok(match t.sty {
- ty::TySlice(_) | ty::TyStr => Some(PointerKind::Length),
- ty::TyDynamic(ref tty, ..) =>
+ ty::Slice(_) | ty::Str => Some(PointerKind::Length),
+ ty::Dynamic(ref tty, ..) =>
Some(PointerKind::Vtable(tty.principal().map(|p| p.def_id()))),
- ty::TyAdt(def, substs) if def.is_struct() => {
+ ty::Adt(def, substs) if def.is_struct() => {
match def.non_enum_variant().fields.last() {
None => Some(PointerKind::Thin),
Some(f) => {
}
}
}
- ty::TyTuple(fields) => match fields.last() {
+ ty::Tuple(fields) => match fields.last() {
None => Some(PointerKind::Thin),
Some(f) => self.pointer_kind(f, span)?
},
// Pointers to foreign types are thin, despite being unsized
- ty::TyForeign(..) => Some(PointerKind::Thin),
+ ty::Foreign(..) => Some(PointerKind::Thin),
// We should really try to normalize here.
- ty::TyProjection(ref pi) => Some(PointerKind::OfProjection(pi)),
- ty::TyAnon(def_id, substs) => Some(PointerKind::OfAnon(def_id, substs)),
- ty::TyParam(ref p) => Some(PointerKind::OfParam(p)),
+ ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)),
+ ty::Anon(def_id, substs) => Some(PointerKind::OfAnon(def_id, substs)),
+ ty::Param(ref p) => Some(PointerKind::OfParam(p)),
// Insufficient type information.
- ty::TyInfer(_) => None,
+ ty::Infer(_) => None,
- ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) |
- ty::TyFloat(_) | ty::TyArray(..) | ty::TyGeneratorWitness(..) |
- ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyFnDef(..) |
- ty::TyFnPtr(..) | ty::TyClosure(..) | ty::TyGenerator(..) |
- ty::TyAdt(..) | ty::TyNever | ty::TyError => {
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(_) | ty::Array(..) | ty::GeneratorWitness(..) |
+ ty::RawPtr(_) | ty::Ref(..) | ty::FnDef(..) |
+ ty::FnPtr(..) | ty::Closure(..) | ty::Generator(..) |
+ ty::Adt(..) | ty::Never | ty::Error => {
self.tcx.sess.delay_span_bug(
span, &format!("`{:?}` should be sized but is not?", t));
return Err(ErrorReported);
// cases now. We do a more thorough check at the end, once
// inference is more completely known.
match cast_ty.sty {
- ty::TyDynamic(..) | ty::TySlice(..) => {
+ ty::Dynamic(..) | ty::Slice(..) => {
check.report_cast_to_unsized_type(fcx);
Err(ErrorReported)
}
fcx.resolve_type_vars_if_possible(&self.expr_ty),
tstr);
match self.expr_ty.sty {
- ty::TyRef(_, _, mt) => {
+ ty::Ref(_, _, mt) => {
let mtstr = match mt {
hir::MutMutable => "mut ",
hir::MutImmutable => "",
tstr);
}
}
- ty::TyAdt(def, ..) if def.is_box() => {
+ ty::Adt(def, ..) if def.is_box() => {
match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(self.cast_span,
(Some(t_from), Some(t_cast)) => (t_from, t_cast),
// Function item types may need to be reified before casts.
(None, Some(t_cast)) => {
- if let ty::TyFnDef(..) = self.expr_ty.sty {
+ if let ty::FnDef(..) = self.expr_ty.sty {
// Attempt a coercion to a fn pointer type.
let f = self.expr_ty.fn_sig(fcx.tcx);
let res = fcx.try_coerce(self.expr,
(RPtr(p), Int(_)) |
(RPtr(p), Float) => {
match p.ty.sty {
- ty::TypeVariants::TyInt(_) |
- ty::TypeVariants::TyUint(_) |
- ty::TypeVariants::TyFloat(_) => {
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) => {
Err(CastError::NeedDeref)
}
- ty::TypeVariants::TyInfer(t) => {
+ ty::Infer(t) => {
match t {
ty::InferTy::IntVar(_) |
ty::InferTy::FloatVar(_) => Err(CastError::NeedDeref),
// array-ptr-cast.
if m_expr.mutbl == hir::MutImmutable && m_cast.mutbl == hir::MutImmutable {
- if let ty::TyArray(ety, _) = m_expr.ty.sty {
+ if let ty::Array(ety, _) = m_expr.ty.sty {
// Due to the limitations of LLVM global constants,
// region pointers end up pointing at copies of
// vector elements instead of the original values.
);
match expected_ty.sty {
- ty::TyDynamic(ref object_type, ..) => {
+ ty::Dynamic(ref object_type, ..) => {
let sig = object_type
.projection_bounds()
.filter_map(|pb| {
.and_then(|p| self.tcx.lang_items().fn_trait_kind(p.def_id()));
(sig, kind)
}
- ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
- ty::TyFnPtr(sig) => {
+ ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
+ ty::FnPtr(sig) => {
let expected_sig = ExpectedSig {
cause_span: None,
sig: sig.skip_binder().clone(),
// NB: This predicate is created by breaking down a
// `ClosureType: FnFoo()` predicate, where
- // `ClosureType` represents some `TyClosure`. It can't
+ // `ClosureType` represents some `Closure`. It can't
// possibly be referring to the current closure,
- // because we haven't produced the `TyClosure` for
+ // because we haven't produced the `Closure` for
// this closure yet; this is exactly why the other
// code is looking for a self type of a unresolved
// inference variable.
);
let input_tys = match arg_param_ty.sty {
- ty::TyTuple(tys) => tys.into_iter(),
+ ty::Tuple(tys) => tys.into_iter(),
_ => {
return None;
}
trait_ref, self_ty
);
match self_ty.sty {
- ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
+ ty::Infer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref),
_ => None,
}
}
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
- ty::TyRawPtr(mt_b) => {
+ ty::RawPtr(mt_b) => {
return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
}
- ty::TyRef(r_b, ty, mutbl) => {
+ ty::Ref(r_b, ty, mutbl) => {
let mt_b = ty::TypeAndMut { ty, mutbl };
return self.coerce_borrowed_pointer(a, b, r_b, mt_b);
}
}
match a.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
// items to drop the unsafe qualifier.
self.coerce_from_fn_item(a, b)
}
- ty::TyFnPtr(a_f) => {
+ ty::FnPtr(a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
- ty::TyClosure(def_id_a, substs_a) => {
+ ty::Closure(def_id_a, substs_a) => {
// Non-capturing closures are coercible to
// function pointers
self.coerce_closure_to_fn(a, def_id_a, substs_a, b)
// yield.
let (r_a, mt_a) = match a.sty {
- ty::TyRef(r_a, ty, mutbl) => {
+ ty::Ref(r_a, ty, mutbl) => {
let mt_a = ty::TypeAndMut { ty, mutbl };
coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
(r_a, mt_a)
// Now apply the autoref. We have to extract the region out of
// the final ref type we got.
let r_borrow = match ty.sty {
- ty::TyRef(r_borrow, _, _) => r_borrow,
+ ty::Ref(r_borrow, _, _) => r_borrow,
_ => span_bug!(span, "expected a ref type, got {:?}", ty),
};
let mutbl = match mt_b.mutbl {
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let reborrow = match (&source.sty, &target.sty) {
- (&ty::TyRef(_, ty_a, mutbl_a), &ty::TyRef(_, _, mutbl_b)) => {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => {
coerce_mutbls(mutbl_a, mutbl_b)?;
let coercion = Coercion(self.cause.span);
})
}))
}
- (&ty::TyRef(_, ty_a, mt_a), &ty::TyRawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
+ (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
coerce_mutbls(mt_a, mt_b)?;
Some((Adjustment {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
if unsize_did == tr.def_id() {
let sty = &tr.skip_binder().input_types().nth(1).unwrap().sty;
- if let ty::TyTuple(..) = sty {
+ if let ty::Tuple(..) = sty {
debug!("coerce_unsized: found unsized tuple coercion");
has_unsized_tuple_coercion = true;
}
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
{
- if let ty::TyFnPtr(fn_ty_b) = b.sty {
+ if let ty::FnPtr(fn_ty_b) = b.sty {
match (fn_ty_a.unsafety(), fn_ty_b.unsafety()) {
(hir::Unsafety::Normal, hir::Unsafety::Unsafe) => {
let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
match b.sty {
- ty::TyFnPtr(_) => {
+ ty::FnPtr(_) => {
let a_sig = a.fn_sig(self.tcx);
let InferOk { value: a_sig, mut obligations } =
self.normalize_associated_types_in_as_infer_ok(self.cause.span, &a_sig);
let node_id_a = self.tcx.hir.as_local_node_id(def_id_a).unwrap();
match b.sty {
- ty::TyFnPtr(_) if self.tcx.with_freevars(node_id_a, |v| v.is_empty()) => {
+ ty::FnPtr(_) if self.tcx.with_freevars(node_id_a, |v| v.is_empty()) => {
// We coerce the closure, which has fn type
// `extern "rust-call" fn((arg0,arg1,...)) -> _`
// to
debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
let (is_ref, mt_a) = match a.sty {
- ty::TyRef(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
- ty::TyRawPtr(mt) => (false, mt),
+ ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
+ ty::RawPtr(mt) => (false, mt),
_ => {
return self.unify_and(a, b, identity);
}
// Special-case that coercion alone cannot handle:
// Two function item types of differing IDs or Substs.
- if let (&ty::TyFnDef(..), &ty::TyFnDef(..)) = (&prev_ty.sty, &new_ty.sty) {
+ if let (&ty::FnDef(..), &ty::FnDef(..)) = (&prev_ty.sty, &new_ty.sty) {
// Don't reify if the function types have a LUB, i.e. they
// are the same function and their parameters have a LUB.
let lub_ty = self.commit_if_ok(|_| {
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }
] => {
match self.node_ty(expr.hir_id).sty {
- ty::TyRef(_, _, mt_orig) => {
+ ty::Ref(_, _, mt_orig) => {
let mutbl_adj: hir::Mutability = mutbl_adj.into();
// Reborrow that we can safely ignore, because
// the next adjustment can only be a Deref
use rustc::hir::{Item, ItemKind, print};
use rustc::ty::{self, Ty, AssociatedItem};
use rustc::ty::adjustment::AllowTwoPhase;
-use errors::{DiagnosticBuilder, SourceMapper};
+use errors::{Applicability, DiagnosticBuilder, SourceMapper};
use super::method::probe;
// If the expected type is an enum with any variants whose sole
// field is of the found type, suggest such variants. See Issue
// #42764.
- if let ty::TyAdt(expected_adt, substs) = expected.sty {
+ if let ty::Adt(expected_adt, substs) = expected.sty {
let mut compatible_variants = vec![];
for variant in &expected_adt.variants {
if variant.fields.len() == 1 {
}
match (&expected.sty, &checked_ty.sty) {
- (&ty::TyRef(_, exp, _), &ty::TyRef(_, check, _)) => match (&exp.sty, &check.sty) {
- (&ty::TyStr, &ty::TyArray(arr, _)) |
- (&ty::TyStr, &ty::TySlice(arr)) if arr == self.tcx.types.u8 => {
+ (&ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (&exp.sty, &check.sty) {
+ (&ty::Str, &ty::Array(arr, _)) |
+ (&ty::Str, &ty::Slice(arr)) if arr == self.tcx.types.u8 => {
if let hir::ExprKind::Lit(_) = expr.node {
if let Ok(src) = cm.span_to_snippet(sp) {
if src.starts_with("b\"") {
}
}
},
- (&ty::TyArray(arr, _), &ty::TyStr) |
- (&ty::TySlice(arr), &ty::TyStr) if arr == self.tcx.types.u8 => {
+ (&ty::Array(arr, _), &ty::Str) |
+ (&ty::Slice(arr), &ty::Str) if arr == self.tcx.types.u8 => {
if let hir::ExprKind::Lit(_) = expr.node {
if let Ok(src) = cm.span_to_snippet(sp) {
if src.starts_with("\"") {
}
_ => {}
},
- (&ty::TyRef(_, _, mutability), _) => {
+ (&ty::Ref(_, _, mutability), _) => {
// Check if it can work when put into a ref. For example:
//
// ```
}
}
}
- (_, &ty::TyRef(_, checked, _)) => {
+ (_, &ty::Ref(_, checked, _)) => {
// We have `&T`, check if what was expected was `T`. If so,
// we may want to suggest adding a `*`, or removing
// a `&`.
if needs_paren { ")" } else { "" });
match (&expected_ty.sty, &checked_ty.sty) {
- (&ty::TyInt(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Int(ref exp), &ty::Int(ref found)) => {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
(None, _) | (_, None) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
true
}
- (&ty::TyUint(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Uint(ref exp), &ty::Uint(ref found)) => {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
(None, _) | (_, None) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_zero_extend),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_zero_extend),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
true
}
- (&ty::TyInt(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Int(ref exp), &ty::Uint(ref found)) => {
if can_cast {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found > exp - 1 => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, _) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(_, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_zero_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_zero_extend),
+ cast_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
}
true
}
- (&ty::TyUint(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Uint(ref exp), &ty::Int(ref found)) => {
if can_cast {
match (found.bit_width(), exp.bit_width()) {
(Some(found), Some(exp)) if found - 1 > exp => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_truncate),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_truncate),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(None, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ cast_suggestion,
+ Applicability::MachineApplicable // lossy conversion
+ );
}
(None, _) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_usize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_usize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
(_, None) => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}",
- msg,
- depending_on_isize),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, depending_on_isize),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
_ => {
- err.span_suggestion(expr.span,
- &format!("{}, which {}", msg, will_sign_extend),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, which {}", msg, will_sign_extend),
+ cast_suggestion,
+ Applicability::MachineApplicable
+ );
}
}
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyFloat(ref found)) => {
+ (&ty::Float(ref exp), &ty::Float(ref found)) => {
if found.bit_width() < exp.bit_width() {
- err.span_suggestion(expr.span,
- &format!("{} in a lossless way",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{} in a lossless way", msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the closest possible value",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the closest possible value", msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
- (&ty::TyUint(_), &ty::TyFloat(_)) | (&ty::TyInt(_), &ty::TyFloat(_)) => {
+ (&ty::Uint(_), &ty::Float(_)) | (&ty::Int(_), &ty::Float(_)) => {
if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, rounding the float towards zero",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, rounding the float towards zero", msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
err.warn("casting here will cause undefined behavior if the rounded value \
cannot be represented by the target integer type, including \
`Inf` and `NaN` (this is a bug and will be fixed)");
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyUint(ref found)) => {
+ (&ty::Float(ref exp), &ty::Uint(ref found)) => {
// if `found` is `None` (meaning found is `usize`), don't suggest `.into()`
if exp.bit_width() > found.bit_width().unwrap_or(256) {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer",
+ msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer, rounded if \
- necessary",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer, rounded if necessary",
+ msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
- (&ty::TyFloat(ref exp), &ty::TyInt(ref found)) => {
+ (&ty::Float(ref exp), &ty::Int(ref found)) => {
// if `found` is `None` (meaning found is `isize`), don't suggest `.into()`
if exp.bit_width() > found.bit_width().unwrap_or(256) {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer",
- msg),
- into_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer",
+ msg),
+ into_suggestion,
+ Applicability::MachineApplicable
+ );
} else if can_cast {
- err.span_suggestion(expr.span,
- &format!("{}, producing the floating point \
- representation of the integer, rounded if \
- necessary",
- msg),
- cast_suggestion);
+ err.span_suggestion_with_applicability(
+ expr.span,
+ &format!("{}, producing the floating point representation of the \
+ integer, rounded if necessary",
+ msg),
+ cast_suggestion,
+ Applicability::MaybeIncorrect // lossy conversion
+ );
}
true
}
let dtor_self_type = tcx.type_of(drop_impl_did);
let dtor_predicates = tcx.predicates_of(drop_impl_did);
match dtor_self_type.sty {
- ty::TyAdt(adt_def, self_to_impl_substs) => {
+ ty::Adt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(tcx,
drop_impl_did,
dtor_self_type,
match *expected {
Void => match t.sty {
- ty::TyTuple(ref v) if v.is_empty() => {},
+ ty::Tuple(ref v) if v.is_empty() => {},
_ => simple_error(&format!("`{}`", t), "()"),
},
// (The width we pass to LLVM doesn't concern the type checker.)
Integer(signed, bits, _llvm_width) => match (signed, bits, &t.sty) {
- (true, 8, &ty::TyInt(ast::IntTy::I8)) |
- (false, 8, &ty::TyUint(ast::UintTy::U8)) |
- (true, 16, &ty::TyInt(ast::IntTy::I16)) |
- (false, 16, &ty::TyUint(ast::UintTy::U16)) |
- (true, 32, &ty::TyInt(ast::IntTy::I32)) |
- (false, 32, &ty::TyUint(ast::UintTy::U32)) |
- (true, 64, &ty::TyInt(ast::IntTy::I64)) |
- (false, 64, &ty::TyUint(ast::UintTy::U64)) |
- (true, 128, &ty::TyInt(ast::IntTy::I128)) |
- (false, 128, &ty::TyUint(ast::UintTy::U128)) => {},
+ (true, 8, &ty::Int(ast::IntTy::I8)) |
+ (false, 8, &ty::Uint(ast::UintTy::U8)) |
+ (true, 16, &ty::Int(ast::IntTy::I16)) |
+ (false, 16, &ty::Uint(ast::UintTy::U16)) |
+ (true, 32, &ty::Int(ast::IntTy::I32)) |
+ (false, 32, &ty::Uint(ast::UintTy::U32)) |
+ (true, 64, &ty::Int(ast::IntTy::I64)) |
+ (false, 64, &ty::Uint(ast::UintTy::U64)) |
+ (true, 128, &ty::Int(ast::IntTy::I128)) |
+ (false, 128, &ty::Uint(ast::UintTy::U128)) => {},
_ => simple_error(&format!("`{}`", t),
&format!("`{}{n}`",
if signed {"i"} else {"u"},
n = bits)),
},
Float(bits) => match (bits, &t.sty) {
- (32, &ty::TyFloat(ast::FloatTy::F32)) |
- (64, &ty::TyFloat(ast::FloatTy::F64)) => {},
+ (32, &ty::Float(ast::FloatTy::F32)) |
+ (64, &ty::Float(ast::FloatTy::F64)) => {},
_ => simple_error(&format!("`{}`", t),
&format!("`f{n}`", n = bits)),
},
Pointer(ref inner_expected, ref _llvm_type, const_) => {
match t.sty {
- ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
if (mutbl == hir::MutImmutable) != const_ {
simple_error(&format!("`{}`", t),
if const_ {"const pointer"} else {"mut pointer"})
}
Aggregate(_flatten, ref expected_contents) => {
match t.sty {
- ty::TyTuple(contents) => {
+ ty::Tuple(contents) => {
if contents.len() != expected_contents.len() {
simple_error(&format!("tuple with length {}", contents.len()),
&format!("tuple with length {}", expected_contents.len()));
.include_raw_pointers()
.filter_map(|(ty, _)| {
match ty.sty {
- ty::TyDynamic(ref data, ..) => data.principal().map(|p| closure(self, ty, p)),
+ ty::Dynamic(ref data, ..) => data.principal().map(|p| closure(self, ty, p)),
_ => None,
}
})
if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind {
if let Some(ok) = self.try_overloaded_deref(expr.span, source, needs) {
let method = self.register_infer_ok_obligations(ok);
- if let ty::TyRef(region, _, mutbl) = method.sig.output().sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.output().sty {
*deref = OverloadedDeref {
region,
mutbl,
debug!("convert_place_op_to_mutable: method={:?}", method);
self.write_method_call(expr.hir_id, method);
- let (region, mutbl) = if let ty::TyRef(r, _, mutbl) = method.sig.inputs()[0].sty {
+ let (region, mutbl) = if let ty::Ref(r, _, mutbl) = method.sig.inputs()[0].sty {
(r, mutbl)
} else {
span_bug!(expr.span, "input to place op is not a ref?");
})
.any(|trait_pred| {
match trait_pred.skip_binder().self_ty().sty {
- ty::TyDynamic(..) => true,
+ ty::Dynamic(..) => true,
_ => false,
}
})
from_unsafe_deref: reached_raw_pointer,
unsize: false,
};
- if let ty::TyRawPtr(_) = ty.sty {
+ if let ty::RawPtr(_) = ty.sty {
// all the subsequent steps will be from_unsafe_deref
reached_raw_pointer = true;
}
let final_ty = autoderef.maybe_ambiguous_final_ty();
match final_ty.sty {
- ty::TyInfer(ty::TyVar(_)) => {
+ ty::Infer(ty::TyVar(_)) => {
// Ended in an inference variable. If we are doing
// a real method lookup, this is a hard error because it's
// possible that there will be multiple applicable methods.
// just ignore it.
}
}
- ty::TyArray(elem_ty, _) => {
+ ty::Array(elem_ty, _) => {
let dereferences = steps.len() - 1;
steps.push(CandidateStep {
unsize: true,
});
}
- ty::TyError => return None,
+ ty::Error => return None,
_ => (),
}
let lang_items = self.tcx.lang_items();
match self_ty.sty {
- ty::TyDynamic(ref data, ..) => {
+ ty::Dynamic(ref data, ..) => {
if let Some(p) = data.principal() {
self.assemble_inherent_candidates_from_object(self_ty, p);
self.assemble_inherent_impl_candidates_for_type(p.def_id());
}
}
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
self.assemble_inherent_impl_candidates_for_type(def.did);
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
self.assemble_inherent_impl_candidates_for_type(did);
}
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.assemble_inherent_candidates_from_param(self_ty, p);
}
- ty::TyChar => {
+ ty::Char => {
let lang_def_id = lang_items.char_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyStr => {
+ ty::Str => {
let lang_def_id = lang_items.str_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.str_alloc_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TySlice(_) => {
+ ty::Slice(_) => {
let lang_def_id = lang_items.slice_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.slice_u8_alloc_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
let lang_def_id = lang_items.const_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
let lang_def_id = lang_items.mut_ptr_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I8) => {
+ ty::Int(ast::IntTy::I8) => {
let lang_def_id = lang_items.i8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I16) => {
+ ty::Int(ast::IntTy::I16) => {
let lang_def_id = lang_items.i16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I32) => {
+ ty::Int(ast::IntTy::I32) => {
let lang_def_id = lang_items.i32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I64) => {
+ ty::Int(ast::IntTy::I64) => {
let lang_def_id = lang_items.i64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::I128) => {
+ ty::Int(ast::IntTy::I128) => {
let lang_def_id = lang_items.i128_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyInt(ast::IntTy::Isize) => {
+ ty::Int(ast::IntTy::Isize) => {
let lang_def_id = lang_items.isize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U8) => {
+ ty::Uint(ast::UintTy::U8) => {
let lang_def_id = lang_items.u8_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U16) => {
let lang_def_id = lang_items.u16_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U32) => {
+ ty::Uint(ast::UintTy::U32) => {
let lang_def_id = lang_items.u32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U64) => {
+ ty::Uint(ast::UintTy::U64) => {
let lang_def_id = lang_items.u64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::U128) => {
+ ty::Uint(ast::UintTy::U128) => {
let lang_def_id = lang_items.u128_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyUint(ast::UintTy::Usize) => {
+ ty::Uint(ast::UintTy::Usize) => {
let lang_def_id = lang_items.usize_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
let lang_def_id = lang_items.f32_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
let lang_def_id = lang_items.f32_runtime_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
let lang_def_id = lang_items.f64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
match *predicate {
ty::Predicate::Trait(ref trait_predicate) => {
match trait_predicate.skip_binder().trait_ref.self_ty().sty {
- ty::TyParam(ref p) if *p == param_ty => {
+ ty::Param(ref p) if *p == param_ty => {
Some(trait_predicate.to_poly_trait_ref())
}
_ => None,
pick.autoderefs = step.autoderefs;
// Insert a `&*` or `&mut *` if this is a reference type:
- if let ty::TyRef(_, _, mutbl) = step.self_ty.sty {
+ if let ty::Ref(_, _, mutbl) = step.self_ty.sty {
pick.autoderefs += 1;
pick.autoref = Some(mutbl);
}
use syntax::ast;
use syntax::util::lev_distance::find_best_match_for_name;
-use errors::DiagnosticBuilder;
+use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::{Span, FileName};
use rustc::hir;
use rustc::hir::print;
use rustc::infer::type_variable::TypeVariableOrigin;
-use rustc::ty::TyAdt;
+use rustc::ty::Adt;
use std::cmp::Ordering;
match ty.sty {
// Not all of these (e.g. unsafe fns) implement FnOnce
// so we look for these beforehand
- ty::TyClosure(..) |
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => true,
+ ty::Closure(..) |
+ ty::FnDef(..) |
+ ty::FnPtr(_) => true,
// If it's not a simple function, look for things which implement FnOnce
_ => {
let fn_once = match tcx.lang_items().require(FnOnceTraitLangItem) {
let item_kind = if is_method {
"method"
} else if actual.is_enum() {
- if let TyAdt(ref adt_def, _) = actual.sty {
+ if let Adt(ref adt_def, _) = actual.sty {
let names = adt_def.variants.iter().map(|s| &s.name);
suggestion = find_best_match_for_name(names,
&item_name.as_str(),
if let Some(expr) = rcvr_expr {
for (ty, _) in self.autoderef(span, rcvr_ty) {
match ty.sty {
- ty::TyAdt(def, substs) if !def.is_enum() => {
+ ty::Adt(def, substs) if !def.is_enum() => {
let variant = &def.non_enum_variant();
if let Some(index) = self.tcx.find_field_index(item_name, variant) {
let field = &variant.fields[index];
}
if static_sources.len() == 1 {
if let Some(expr) = rcvr_expr {
- err.span_suggestion(expr.span.to(span),
+ err.span_suggestion_with_applicability(expr.span.to(span),
"use associated function syntax instead",
format!("{}::{}",
self.ty_to_string(actual),
- item_name));
+ item_name),
+ Applicability::MachineApplicable);
} else {
err.help(&format!("try with `{}::{}`",
self.ty_to_string(actual), item_name));
-> bool {
fn is_local(ty: Ty) -> bool {
match ty.sty {
- ty::TyAdt(def, _) => def.did.is_local(),
- ty::TyForeign(did) => did.is_local(),
+ ty::Adt(def, _) => def.did.is_local(),
+ ty::Foreign(did) => did.is_local(),
- ty::TyDynamic(ref tr, ..) => tr.principal()
+ ty::Dynamic(ref tr, ..) => tr.principal()
.map_or(false, |p| p.def_id().is_local()),
- ty::TyParam(_) => true,
+ ty::Param(_) => true,
// everything else (primitive types etc.) is effectively
// non-local (there are "edge" cases, e.g. (LocalType,), but
use rustc::ty::fold::TypeFoldable;
use rustc::ty::query::Providers;
use rustc::ty::util::{Representability, IntTypeExt, Discr};
-use errors::{DiagnosticBuilder, DiagnosticId};
+use errors::{Applicability, DiagnosticBuilder, DiagnosticId};
use require_c_abi_if_variadic;
use session::{CompileIncomplete, config, Session};
// Anonymized types found in explicit return types and their
// associated fresh inference variable. Writeback resolves these
// variables to get the concrete type, which can be used to
- // deanonymize TyAnon, after typeck is done with all functions.
+ // deanonymize Anon, after typeck is done with all functions.
anon_types: RefCell<DefIdMap<AnonTypeDecl<'tcx>>>,
/// Each type parameter has an implicit region bound that
/// for examples of where this comes up,.
fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
match fcx.tcx.struct_tail(ty).sty {
- ty::TySlice(_) | ty::TyStr | ty::TyDynamic(..) => {
+ ty::Slice(_) | ty::Str | ty::Dynamic(..) => {
ExpectRvalueLikeUnsized(ty)
}
_ => ExpectHasType(ty)
if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() {
if panic_impl_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() {
- if declared_ret_ty.sty != ty::TyNever {
+ if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_panic_info = match inputs[0].sty {
- ty::TyRef(region, ty, mutbl) => match ty.sty {
- ty::TyAdt(ref adt, _) => {
+ ty::Ref(region, ty, mutbl) => match ty.sty {
+ ty::Adt(ref adt, _) => {
adt.did == panic_info_did &&
mutbl == hir::Mutability::MutImmutable &&
*region != RegionKind::ReStatic
if let Some(alloc_error_handler_did) = fcx.tcx.lang_items().oom() {
if alloc_error_handler_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(alloc_layout_did) = fcx.tcx.lang_items().alloc_layout() {
- if declared_ret_ty.sty != ty::TyNever {
+ if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_alloc_layout = match inputs[0].sty {
- ty::TyAdt(ref adt, _) => {
+ ty::Adt(ref adt, _) => {
adt.did == alloc_layout_did
},
_ => false,
};
let param_env = ty::ParamEnv::reveal_all();
if let Ok(static_) = tcx.const_eval(param_env.and(cid)) {
- let alloc = tcx.const_value_to_allocation(static_);
+ let alloc = tcx.const_to_allocation(static_);
if alloc.relocations.len() != 0 {
let msg = "statics with a custom `#[link_section]` must be a \
simple list of bytes on the wasm target with no \
pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let t = tcx.type_of(def_id);
match t.sty {
- ty::TyAdt(def, substs) if def.is_struct() => {
+ ty::Adt(def, substs) if def.is_struct() => {
let fields = &def.non_enum_variant().fields;
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
return;
}
match e.sty {
- ty::TyParam(_) => { /* struct<T>(T, T, T, T) is ok */ }
+ ty::Param(_) => { /* struct<T>(T, T, T, T) is ok */ }
_ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
_ => {
span_err!(tcx.sess, sp, E0077,
return false;
}
match t.sty {
- ty::TyAdt(def, substs) if def.is_struct() || def.is_union() => {
+ ty::Adt(def, substs) if def.is_struct() || def.is_union() => {
if tcx.adt_def(def.did).repr.align > 0 {
return true;
}
for field in &def.non_enum_variant().fields {
let f = field.ty(tcx, substs);
match f.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if check_packed_inner(tcx, def.did, stack) {
return true;
}
fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("resolve_type_vars_with_obligations(ty={:?})", ty);
- // No TyInfer()? Nothing needs doing.
+ // No Infer()? Nothing needs doing.
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
// feature(never_type) is enabled, unconstrained ints with i32,
// unconstrained floats with f64.
// Fallback becomes very dubious if we have encountered type-checking errors.
- // In that case, fallback to TyError.
+ // In that case, fallback to Error.
// The return value indicates whether fallback has occurred.
fn fallback_if_possible(&self, ty: Ty<'tcx>) -> bool {
use rustc::ty::error::UnconstrainedNumeric::Neither;
let mut self_ty = adjusted_ty;
if unsize {
// We only unsize arrays here.
- if let ty::TyArray(element_ty, _) = adjusted_ty.sty {
+ if let ty::Array(element_ty, _) = adjusted_ty.sty {
self_ty = self.tcx.mk_slice(element_ty);
} else {
continue;
let method = self.register_infer_ok_obligations(ok);
let mut adjustments = autoderef.adjust_steps(needs);
- if let ty::TyRef(region, _, r_mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, r_mutbl) = method.sig.inputs()[0].sty {
let mutbl = match r_mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
let sugg_span = tcx.sess.source_map().end_point(expr_sp);
// remove closing `)` from the span
let sugg_span = sugg_span.shrink_to_lo();
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
sugg_span,
"expected the unit value `()`; create it with empty parentheses",
- String::from("()"));
+ String::from("()"),
+ Applicability::MachineApplicable);
} else {
err.span_label(sp, format!("expected {}{} parameter{}",
if variadic {"at least "} else {""},
let formal_tys = if tuple_arguments == TupleArguments {
let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
match tuple_type.sty {
- ty::TyTuple(arg_types) if arg_types.len() != args.len() => {
+ ty::Tuple(arg_types) if arg_types.len() != args.len() => {
param_count_error(arg_types.len(), args.len(), "E0057", false, false);
expected_arg_tys = &[];
self.err_args(args.len())
}
- ty::TyTuple(arg_types) => {
+ ty::Tuple(arg_types) => {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
- ty::TyTuple(ref tys) => &tys,
+ ty::Tuple(ref tys) => &tys,
_ => &[]
},
None => &[]
// in C but we just error out instead and require explicit casts.
let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
match arg_ty.sty {
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
}
- ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => {
+ ty::Int(ast::IntTy::I8) | ty::Int(ast::IntTy::I16) | ty::Bool => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
}
- ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U8) | ty::Uint(ast::UintTy::U16) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
}
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty);
variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
- ty::TyInt(_) | ty::TyUint(_) => Some(ty),
- ty::TyChar => Some(tcx.types.u8),
- ty::TyRawPtr(..) => Some(tcx.types.usize),
- ty::TyFnDef(..) | ty::TyFnPtr(_) => Some(tcx.types.usize),
+ ty::Int(_) | ty::Uint(_) => Some(ty),
+ ty::Char => Some(tcx.types.u8),
+ ty::RawPtr(..) => Some(tcx.types.usize),
+ ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
_ => None
}
});
ast::LitKind::FloatUnsuffixed(_) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
- ty::TyFloat(_) => Some(ty),
+ ty::Float(_) => Some(ty),
_ => None
}
});
self.tcx.sess.source_map().span_to_snippet(lhs.span),
self.tcx.sess.source_map().span_to_snippet(rhs.span))
{
- err.span_suggestion(expr.span, msg, format!("{} == {}", left, right));
+ err.span_suggestion_with_applicability(
+ expr.span,
+ msg,
+ format!("{} == {}", left, right),
+ Applicability::MaybeIncorrect);
} else {
err.help(msg);
}
let mut autoderef = self.autoderef(expr.span, expr_t);
while let Some((base_t, _)) = autoderef.next() {
match base_t.sty {
- ty::TyAdt(base_def, substs) if !base_def.is_enum() => {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
debug!("struct named {:?}", base_t);
let (ident, def_scope) =
self.tcx.adjust_ident(field, base_def.did, self.body_id);
private_candidate = Some((base_def.did, field_ty));
}
}
- ty::TyTuple(ref tys) => {
+ ty::Tuple(ref tys) => {
let fstr = field.as_str();
if let Ok(index) = fstr.parse::<usize>() {
if fstr == index.to_string() {
let mut err = self.no_such_field_err(field.span, field, expr_t);
match expr_t.sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
+ ty::Adt(def, _) if !def.is_enum() => {
if let Some(suggested_field_name) =
Self::suggest_field_name(def.non_enum_variant(),
&field.as_str(), vec![]) {
}
};
}
- ty::TyRawPtr(..) => {
+ ty::RawPtr(..) => {
let base = self.tcx.hir.node_to_pretty_string(base.id);
let msg = format!("`{}` is a native pointer; try dereferencing it", base);
let suggestion = format!("(*{}).{}", base, field);
let mut err = self.type_error_struct_with_diag(
field.ident.span,
|actual| match ty.sty {
- ty::TyAdt(adt, ..) if adt.is_enum() => {
+ ty::Adt(adt, ..) if adt.is_enum() => {
struct_span_err!(self.tcx.sess, field.ident.span, E0559,
"{} `{}::{}` has no field named `{}`",
kind_name, actual, variant.name, field.ident)
format!("field does not exist - did you mean `{}`?", field_name));
} else {
match ty.sty {
- ty::TyAdt(adt, ..) => {
+ ty::Adt(adt, ..) => {
if adt.is_enum() {
err.span_label(field.ident.span,
format!("`{}::{}` does not have this field",
self.demand_eqtype(span, adt_ty_hint, adt_ty);
let (substs, adt_kind, kind_name) = match &adt_ty.sty{
- &ty::TyAdt(adt, substs) => {
+ &ty::Adt(adt, substs) => {
(substs, adt.adt_kind(), adt.variant_descr())
}
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
displayable_field_names.sort();
let truncated_fields_error = if len <= 3 {
- "".to_string()
+ String::new()
} else {
format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"})
};
}
Def::Variant(..) => {
match ty.sty {
- ty::TyAdt(adt, substs) => {
+ ty::Adt(adt, substs) => {
Some((adt.variant_of_def(def), adt.did, substs))
}
_ => bug!("unexpected type: {:?}", ty.sty)
Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) |
Def::AssociatedTy(..) | Def::SelfTy(..) => {
match ty.sty {
- ty::TyAdt(adt, substs) if !adt.is_enum() => {
+ ty::Adt(adt, substs) if !adt.is_enum() => {
Some((adt.non_enum_variant(), adt.did, substs))
}
_ => None,
};
// Prohibit struct expressions when non exhaustive flag is set.
- if let ty::TyAdt(adt, _) = struct_ty.sty {
+ if let ty::Adt(adt, _) = struct_ty.sty {
if !adt.did.is_local() && adt.is_non_exhaustive() {
span_err!(self.tcx.sess, expr.span, E0639,
"cannot create non-exhaustive {} using struct expression",
if !error_happened {
self.check_expr_has_type_or_error(base_expr, struct_ty);
match struct_ty.sty {
- ty::TyAdt(adt, substs) if adt.is_struct() => {
+ ty::Adt(adt, substs) if adt.is_struct() => {
let fru_field_types = adt.non_enum_variant().fields.iter().map(|f| {
self.normalize_associated_types_in(expr.span, &f.ty(self.tcx, substs))
}).collect();
/// strict, _|_ can appear in the type of an expression that does not,
/// itself, diverge: for example, fn() -> _|_.)
/// Note that inspecting a type's structure *directly* may expose the fact
- /// that there are actually multiple representations for `TyError`, so avoid
+ /// that there are actually multiple representations for `Error`, so avoid
/// that when err needs to be handled differently.
fn check_expr_with_expectation_and_needs(&self,
expr: &'gcx hir::Expr,
hir::ExprKind::Box(ref subexpr) => {
let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| {
match ty.sty {
- ty::TyAdt(def, _) if def.is_box()
+ ty::Adt(def, _) if def.is_box()
=> Expectation::rvalue_hint(self, ty.boxed_ty()),
_ => NoExpectation
}
} else if let Some(ok) = self.try_overloaded_deref(
expr.span, oprnd_t, needs) {
let method = self.register_infer_ok_obligations(ok);
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
hir::UnNot => {
let result = self.check_user_unop(expr, oprnd_t, unop);
// If it's builtin, we can reuse the type, this helps inference.
- if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
+ if !(oprnd_t.is_integral() || oprnd_t.sty == ty::Bool) {
oprnd_t = result;
}
}
hir::ExprKind::AddrOf(mutbl, ref oprnd) => {
let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
match ty.sty {
- ty::TyRef(_, ty, _) | ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
if self.is_place_expr(&oprnd) {
// Places may legitimately have unsized types.
// For example, dereferences of a fat pointer and
hir::ExprKind::Array(ref args) => {
let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
- ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
});
let uty = match expected {
ExpectHasType(uty) => {
match uty.sty {
- ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
}
if element_ty.references_error() {
tcx.types.err
} else if let Ok(count) = count {
- tcx.mk_ty(ty::TyArray(t, count))
+ tcx.mk_ty(ty::Array(t, count))
} else {
tcx.types.err
}
let flds = expected.only_has_type(self).and_then(|ty| {
let ty = self.resolve_type_vars_with_obligations(ty);
match ty.sty {
- ty::TyTuple(ref flds) => Some(&flds[..]),
+ ty::Tuple(ref flds) => Some(&flds[..]),
_ => None
}
});
"cannot index into a value of type `{}`",
base_t);
// Try to give some advice about indexing tuples.
- if let ty::TyTuple(..) = base_t.sty {
+ if let ty::Tuple(..) = base_t.sty {
let mut needs_note = true;
// If the index is an integer, we can show the actual
// fixed expression:
ast::LitIntType::Unsuffixed) = lit.node {
let snip = tcx.sess.source_map().span_to_snippet(base.span);
if let Ok(snip) = snip {
- err.span_suggestion(expr.span,
- "to access tuple elements, use",
- format!("{}.{}", snip, i));
+ err.span_suggestion_with_applicability(
+ expr.span,
+ "to access tuple elements, use",
+ format!("{}.{}", snip, i),
+ Applicability::MachineApplicable);
needs_note = false;
}
}
// In some cases, blocks have just one exit, but other blocks
// can be targeted by multiple breaks. This can happen both
// with labeled blocks as well as when we desugar
- // a `do catch { ... }` expression.
+ // a `try { ... }` expression.
//
// Example 1:
//
hir::ExprKind::Match(..) |
hir::ExprKind::Block(..) => {
let sp = self.tcx.sess.source_map().next_point(cause_span);
- err.span_suggestion(sp,
- "try adding a semicolon",
- ";".to_string());
+ err.span_suggestion_with_applicability(
+ sp,
+ "try adding a semicolon",
+ ";".to_string(),
+ Applicability::MachineApplicable);
}
_ => (),
}
// haven't set a return type at all (and aren't `fn main()` or an impl).
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_nil()) {
(&hir::FunctionRetTy::DefaultReturn(span), true, true, true) => {
- err.span_suggestion(span,
- "try adding a return type",
- format!("-> {} ",
- self.resolve_type_vars_with_obligations(found)));
+ err.span_suggestion_with_applicability(
+ span,
+ "try adding a return type",
+ format!("-> {} ", self.resolve_type_vars_with_obligations(found)),
+ Applicability::MachineApplicable);
}
(&hir::FunctionRetTy::DefaultReturn(span), false, true, true) => {
err.span_label(span, "possibly return type missing here?");
}
let original_span = original_sp(last_stmt.span, blk.span);
let span_semi = original_span.with_lo(original_span.hi() - BytePos(1));
- err.span_suggestion(span_semi, "consider removing this semicolon", "".to_string());
+ err.span_suggestion_with_applicability(
+ span_semi,
+ "consider removing this semicolon",
+ String::new(),
+ Applicability::MachineApplicable);
}
fn def_ids_for_path_segments(&self,
// If no type arguments were provided, we have to infer them.
// This case also occurs as a result of some malformed input, e.g.
// a lifetime argument being given instead of a type paramter.
- // Using inference instead of `TyError` gives better error messages.
+ // Using inference instead of `Error` gives better error messages.
self.var_for_def(span, param)
}
}
let mut types_used = vec![false; own_counts.types];
for leaf_ty in ty.walk() {
- if let ty::TyParam(ty::ParamTy { idx, .. }) = leaf_ty.sty {
+ if let ty::Param(ty::ParamTy { idx, .. }) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
types_used[idx as usize - own_counts.lifetimes] = true;
- } else if let ty::TyError = leaf_ty.sty {
+ } else if let ty::Error = leaf_ty.sty {
// If there is already another error, do not emit
// an error for not using a type Parameter.
assert!(tcx.sess.err_count() > 0);
use super::{FnCtxt, Needs};
use super::method::MethodCallee;
use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::TypeVariants::{TyRef, TyAdt, TyStr, TyUint, TyNever, TyTuple, TyChar, TyArray};
+use rustc::ty::TyKind::{Ref, Adt, Str, Uint, Never, Tuple, Char, Array};
use rustc::ty::adjustment::{Adjustment, Adjust, AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc::infer::type_variable::TypeVariableOrigin;
use errors;
Ok(method) => {
let by_ref_binop = !op.node.is_by_value();
if is_assign == IsAssign::Yes || by_ref_binop {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[0].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
}
}
if by_ref_binop {
- if let ty::TyRef(region, _, mutbl) = method.sig.inputs()[1].sty {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
format!("cannot use `{}=` on type `{}`",
op.node.as_str(), lhs_ty));
let mut suggested_deref = false;
- if let TyRef(_, mut rty, _) = lhs_ty.sty {
+ if let Ref(_, mut rty, _) = lhs_ty.sty {
if {
!self.infcx.type_moves_by_default(self.param_env,
rty,
.is_ok()
} {
if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
- while let TyRef(_, rty_inner, _) = rty.sty {
+ while let Ref(_, rty_inner, _) = rty.sty {
rty = rty_inner;
}
let msg = &format!(
// This has nothing here because it means we did string
// concatenation (e.g. "Hello " += "World!"). This means
// we don't want the note in the else clause to be emitted
- } else if let ty::TyParam(_) = lhs_ty.sty {
+ } else if let ty::Param(_) = lhs_ty.sty {
// FIXME: point to span of param
err.note(&format!(
"`{}` might need a bound for `{}`",
op.node.as_str(),
lhs_ty);
let mut suggested_deref = false;
- if let TyRef(_, mut rty, _) = lhs_ty.sty {
+ if let Ref(_, mut rty, _) = lhs_ty.sty {
if {
!self.infcx.type_moves_by_default(self.param_env,
rty,
.is_ok()
} {
if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
- while let TyRef(_, rty_inner, _) = rty.sty {
+ while let Ref(_, rty_inner, _) = rty.sty {
rty = rty_inner;
}
let msg = &format!(
// This has nothing here because it means we did string
// concatenation (e.g. "Hello " + "World!"). This means
// we don't want the note in the else clause to be emitted
- } else if let ty::TyParam(_) = lhs_ty.sty {
+ } else if let ty::Param(_) = lhs_ty.sty {
// FIXME: point to span of param
err.note(&format!(
"`{}` might need a bound for `{}`",
// If this function returns true it means a note was printed, so we don't need
// to print the normal "implementation of `std::ops::Add` might be missing" note
match (&lhs_ty.sty, &rhs_ty.sty) {
- (&TyRef(_, l_ty, _), &TyRef(_, r_ty, _))
- if l_ty.sty == TyStr && r_ty.sty == TyStr => {
+ (&Ref(_, l_ty, _), &Ref(_, r_ty, _))
+ if l_ty.sty == Str && r_ty.sty == Str => {
if !is_assign {
err.span_label(expr.span,
"`+` can't be used to concatenate two `&str` strings");
}
true
}
- (&TyRef(_, l_ty, _), &TyAdt(..))
- if l_ty.sty == TyStr && &format!("{:?}", rhs_ty) == "std::string::String" => {
+ (&Ref(_, l_ty, _), &Adt(..))
+ if l_ty.sty == Str && &format!("{:?}", rhs_ty) == "std::string::String" => {
err.span_label(expr.span,
"`+` can't be used to concatenate a `&str` with a `String`");
match (
err.span_label(ex.span, format!("cannot apply unary \
operator `{}`", op.as_str()));
match actual.sty {
- TyUint(_) if op == hir::UnNeg => {
+ Uint(_) if op == hir::UnNeg => {
err.note("unsigned values cannot be negated");
},
- TyStr | TyNever | TyChar | TyTuple(_) | TyArray(_,_) => {},
- TyRef(_, ref lty, _) if lty.sty == TyStr => {},
+ Str | Never | Char | Tuple(_) | Array(_,_) => {},
+ Ref(_, ref lty, _) if lty.sty == Str => {},
_ => {
let missing_trait = match op {
hir::UnNeg => "std::ops::Neg",
// For overloaded derefs, base_ty is the input to `Deref::deref`,
        // but it's a reference type using the same region as the output.
let base_ty = self.resolve_expr_type_adjusted(base);
- if let ty::TyRef(r_ptr, _, _) = base_ty.sty {
+ if let ty::Ref(r_ptr, _, _) = base_ty.sty {
self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr);
}
from_ty,
to_ty);
match (&from_ty.sty, &to_ty.sty) {
- /*From:*/ (&ty::TyRef(from_r, from_ty, _),
- /*To: */ &ty::TyRef(to_r, to_ty, _)) => {
+ /*From:*/ (&ty::Ref(from_r, from_ty, _),
+ /*To: */ &ty::Ref(to_r, to_ty, _)) => {
// Target cannot outlive source, naturally.
self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r);
self.walk_cast(cast_expr, from_ty, to_ty);
}
/*From:*/ (_,
- /*To: */ &ty::TyDynamic(.., r)) => {
+ /*To: */ &ty::Dynamic(.., r)) => {
// When T is existentially quantified as a trait
// `Foo+'to`, it must outlive the region bound `'to`.
self.type_must_outlive(infer::RelateObjectBound(cast_expr.span), from_ty, r);
}
- /*From:*/ (&ty::TyAdt(from_def, _),
- /*To: */ &ty::TyAdt(to_def, _)) if from_def.is_box() && to_def.is_box() => {
+ /*From:*/ (&ty::Adt(from_def, _),
+ /*To: */ &ty::Adt(to_def, _)) if from_def.is_box() && to_def.is_box() => {
self.walk_cast(cast_expr, from_ty.boxed_ty(), to_ty.boxed_ty());
}
fn constrain_callee(&mut self, callee_expr: &hir::Expr) {
let callee_ty = self.resolve_node_type(callee_expr.hir_id);
match callee_ty.sty {
- ty::TyFnDef(..) | ty::TyFnPtr(_) => { }
+ ty::FnDef(..) | ty::FnPtr(_) => { }
_ => {
// this should not happen, but it does if the program is
// erroneous
self.ty_to_string(indexed_ty));
let r_index_expr = ty::ReScope(region::Scope::Node(index_expr.hir_id.local_id));
- if let ty::TyRef(r_ptr, r_ty, _) = indexed_ty.sty {
+ if let ty::Ref(r_ptr, r_ty, _) = indexed_ty.sty {
match r_ty.sty {
- ty::TySlice(_) | ty::TyStr => {
+ ty::Slice(_) | ty::Str => {
self.sub_regions(infer::IndexSlice(index_expr.span),
self.tcx.mk_region(r_index_expr), r_ptr);
}
id, mutbl, cmt_borrowed);
let rptr_ty = self.resolve_node_type(id);
- if let ty::TyRef(r, _, _) = rptr_ty.sty {
+ if let ty::Ref(r, _, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl), cmt_borrowed);
}
// Extract the type of the closure.
let (closure_def_id, substs) = match self.node_ty(closure_hir_id).sty {
- ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
- ty::TyGenerator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
- ty::TyError => {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Error => {
// #51714: skip analysis when we have already encountered type errors
return;
}
impl<'tcx> ty::fold::TypeVisitor<'tcx> for CountParams {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
match t.sty {
- ty::TyParam(p) => {
+ ty::Param(p) => {
self.params.insert(p.idx);
t.super_visit_with(self)
}
ty.fold_with(&mut ty::fold::BottomUpFolder {
tcx: fcx.tcx,
fldop: |ty| {
- if let ty::TyAnon(def_id, substs) = ty.sty {
+ if let ty::Anon(def_id, substs) = ty.sty {
trace!("check_existential_types: anon_ty, {:?}, {:?}", def_id, substs);
let generics = tcx.generics_of(def_id);
// only check named existential types
for (subst, param) in substs.iter().zip(&generics.params) {
match subst.unpack() {
ty::subst::UnpackedKind::Type(ty) => match ty.sty {
- ty::TyParam(..) => {},
+ ty::Param(..) => {},
// prevent `fn foo() -> Foo<u32>` from being defining
_ => {
tcx
}
}
} // if is_named_existential_type
- } // if let TyAnon
+ } // if let Anon
ty
},
reg_op: |reg| reg,
use rustc::hir::def_id::{DefId, DefIndex};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::infer::InferCtxt;
+use rustc::ty::adjustment::{Adjust, Adjustment};
+use rustc::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder};
use rustc::ty::subst::UnpackedKind;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::adjustment::{Adjust, Adjustment};
-use rustc::ty::fold::{TypeFoldable, TypeFolder, BottomUpFolder};
use rustc::util::nodemap::DefIdSet;
+use rustc_data_structures::sync::Lrc;
+use std::mem;
use syntax::ast;
use syntax_pos::Span;
-use std::mem;
-use rustc_data_structures::sync::Lrc;
///////////////////////////////////////////////////////////////////////////
// Entry point
);
debug!(
"used_trait_imports({:?}) = {:?}",
- item_def_id,
- used_trait_imports
+ item_def_id, used_trait_imports
);
wbcx.tables.used_trait_imports = used_trait_imports;
debug!(
"writeback: tables for {:?} are {:#?}",
- item_def_id,
- wbcx.tables
+ item_def_id, wbcx.tables
);
self.tcx.alloc_tables(wbcx.tables)
// operating on scalars, we clear the overload.
fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr) {
match e.node {
- hir::ExprKind::Unary(hir::UnNeg, ref inner) |
- hir::ExprKind::Unary(hir::UnNot, ref inner) => {
+ hir::ExprKind::Unary(hir::UnNeg, ref inner)
+ | hir::ExprKind::Unary(hir::UnNot, ref inner) => {
let inner_ty = self.fcx.node_ty(inner.hir_id);
let inner_ty = self.fcx.resolve_type_vars_if_possible(&inner_ty);
match tables.expr_ty_adjusted(&base).sty {
// All valid indexing looks like this
- ty::TyRef(_, base_ty, _) => {
+ ty::Ref(_, base_ty, _) => {
let index_ty = tables.expr_ty_adjusted(&index);
let index_ty = self.fcx.resolve_type_vars_if_possible(&index_ty);
- if base_ty.builtin_index().is_some()
- && index_ty == self.fcx.tcx.types.usize {
+ if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize {
// Remove the method call record
tables.type_dependent_defs_mut().remove(e.hir_id);
tables.node_substs_mut().remove(e.hir_id);
// of size information - we need to get rid of it
// Since this is "after" the other adjustment to be
// discarded, we do an extra `pop()`
- Some(Adjustment { kind: Adjust::Unsize, .. }) => {
+ Some(Adjustment {
+ kind: Adjust::Unsize,
+ ..
+ }) => {
// So the borrow discard actually happens here
a.pop();
- },
+ }
_ => {}
}
});
}
- },
+ }
// Might encounter non-valid indexes at this point, so there
// has to be a fall-through
- _ => {},
+ _ => {}
}
}
}
}
-
///////////////////////////////////////////////////////////////////////////
// Impl of Visitor for Resolver
//
if let Some(&bm) = self.fcx.tables.borrow().pat_binding_modes().get(p.hir_id) {
self.tables.pat_binding_modes_mut().insert(p.hir_id, bm);
} else {
- self.tcx().sess.delay_span_bug(p.span, "missing binding mode");
+ self.tcx()
+ .sess
+ .delay_span_bug(p.span, "missing binding mode");
}
}
hir::PatKind::Struct(_, ref fields, _) => {
};
debug!(
"Upvar capture for {:?} resolved to {:?}",
- upvar_id,
- new_upvar_capture
+ upvar_id, new_upvar_capture
);
self.tables
.upvar_capture_map
fldop: |ty| {
trace!("checking type {:?}: {:#?}", ty, ty.sty);
// find a type parameter
- if let ty::TyParam(..) = ty.sty {
+ if let ty::Param(..) = ty.sty {
// look it up in the substitution list
assert_eq!(anon_defn.substs.len(), generics.params.len());
for (subst, param) in anon_defn.substs.iter().zip(&generics.params) {
if subst == ty {
// found it in the substitution list, replace with the
// parameter from the existential type
- return self
- .tcx()
+ return self.tcx()
.global_tcx()
.mk_ty_param(param.index, param.name);
}
name: p.name,
};
trace!("replace {:?} with {:?}", region, reg);
- return self.tcx().global_tcx()
+ return self.tcx()
+ .global_tcx()
.mk_region(ty::ReEarlyBound(reg));
}
}
}
trace!("anon_defn: {:#?}", anon_defn);
trace!("generics: {:#?}", generics);
- self.tcx().sess
+ self.tcx()
+ .sess
.struct_span_err(
span,
"non-defining existential type use in defining scope",
span,
format!(
"lifetime `{}` is part of concrete type but not used \
- in parameter list of existential type",
+ in parameter list of existential type",
region,
),
)
self.tcx().global_tcx().mk_region(ty::ReStatic)
}
}
- }
+ },
})
};
- let old = self.tables.concrete_existential_types.insert(def_id, definition_ty);
+ if let ty::Anon(defin_ty_def_id, _substs) = definition_ty.sty {
+ if def_id == defin_ty_def_id {
+ // Concrete type resolved to the existential type itself
+ // Force a cycle error
+ self.tcx().at(span).type_of(defin_ty_def_id);
+ }
+ }
+
+ let old = self.tables
+ .concrete_existential_types
+ .insert(def_id, definition_ty);
if let Some(old) = old {
if old != definition_ty {
span_bug!(
span,
"visit_anon_types tried to write \
- different types for the same existential type: {:?}, {:?}, {:?}",
+ different types for the same existential type: {:?}, {:?}, {:?}",
def_id,
definition_ty,
old,
fn visit_field_id(&mut self, node_id: ast::NodeId) {
let hir_id = self.tcx().hir.node_to_hir_id(node_id);
- if let Some(index) = self.fcx.tables.borrow_mut().field_indices_mut().remove(hir_id) {
+ if let Some(index) = self.fcx
+ .tables
+ .borrow_mut()
+ .field_indices_mut()
+ .remove(hir_id)
+ {
self.tables.field_indices_mut().insert(hir_id, index);
}
}
let resolved_adjustment = self.resolve(&adjustment, &span);
debug!(
"Adjustments for node {:?}: {:?}",
- hir_id,
- resolved_adjustment
+ hir_id, resolved_adjustment
);
self.tables
.adjustments_mut()
let resolved_adjustment = self.resolve(&adjustment, &span);
debug!(
"pat_adjustments for node {:?}: {:?}",
- hir_id,
- resolved_adjustment
+ hir_id, resolved_adjustment
);
self.tables
.pat_adjustments_mut()
fn report_error(&self, t: Ty<'tcx>) {
if !self.tcx.sess.has_errors() {
self.infcx
- .need_type_info_err(Some(self.body.id()), self.span.to_span(&self.tcx), t).emit();
+ .need_type_info_err(Some(self.body.id()), self.span.to_span(&self.tcx), t)
+ .emit();
}
}
}
use lint;
use rustc::ty::TyCtxt;
+use errors::Applicability;
use syntax::ast;
use syntax_pos::Span;
let id = tcx.hir.hir_to_node_id(hir_id);
let msg = "unused extern crate";
tcx.struct_span_lint_node(lint, id, span, msg)
- .span_suggestion_short(span, "remove it", "".to_string())
+ .span_suggestion_short_with_applicability(
+ span,
+ "remove it",
+ String::new(),
+ Applicability::MachineApplicable)
.emit();
continue;
}
fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: DefId) {
match tcx.type_of(impl_did).sty {
- ty::TyAdt(..) => {}
+ ty::Adt(..) => {}
_ => {
// Destructors only work on nominal types.
if let Some(impl_node_id) = tcx.hir.as_local_node_id(impl_did) {
(mt_a.ty, mt_b.ty, unsize_trait, None)
};
let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) {
- (&ty::TyRef(r_a, ty_a, mutbl_a), &ty::TyRef(r_b, ty_b, mutbl_b)) => {
+ (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => {
infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b };
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ref(r_b, ty))
}
- (&ty::TyRef(_, ty_a, mutbl_a), &ty::TyRawPtr(mt_b)) => {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => {
let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ptr(ty))
}
- (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => {
+ (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => {
check_mutbl(mt_a, mt_b, &|ty| gcx.mk_imm_ptr(ty))
}
- (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) if def_a.is_struct() &&
+ (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) if def_a.is_struct() &&
def_b.is_struct() => {
if def_a != def_b {
let source_path = gcx.item_path_str(def_a.did);
let self_ty = self.tcx.type_of(def_id);
let lang_items = self.tcx.lang_items();
match self_ty.sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
self.check_def_id(item, def.did);
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
self.check_def_id(item, did);
}
- ty::TyDynamic(ref data, ..) if data.principal().is_some() => {
+ ty::Dynamic(ref data, ..) if data.principal().is_some() => {
self.check_def_id(item, data.principal().unwrap().def_id());
}
- ty::TyChar => {
+ ty::Char => {
self.check_primitive_impl(def_id,
lang_items.char_impl(),
None,
"char",
item.span);
}
- ty::TyStr => {
+ ty::Str => {
self.check_primitive_impl(def_id,
lang_items.str_impl(),
lang_items.str_alloc_impl(),
"str",
item.span);
}
- ty::TySlice(slice_item) if slice_item == self.tcx.types.u8 => {
+ ty::Slice(slice_item) if slice_item == self.tcx.types.u8 => {
self.check_primitive_impl(def_id,
lang_items.slice_u8_impl(),
lang_items.slice_u8_alloc_impl(),
"[u8]",
item.span);
}
- ty::TySlice(_) => {
+ ty::Slice(_) => {
self.check_primitive_impl(def_id,
lang_items.slice_impl(),
lang_items.slice_alloc_impl(),
"[T]",
item.span);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
self.check_primitive_impl(def_id,
lang_items.const_ptr_impl(),
None,
"*const T",
item.span);
}
- ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
+ ty::RawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
self.check_primitive_impl(def_id,
lang_items.mut_ptr_impl(),
None,
"*mut T",
item.span);
}
- ty::TyInt(ast::IntTy::I8) => {
+ ty::Int(ast::IntTy::I8) => {
self.check_primitive_impl(def_id,
lang_items.i8_impl(),
None,
"i8",
item.span);
}
- ty::TyInt(ast::IntTy::I16) => {
+ ty::Int(ast::IntTy::I16) => {
self.check_primitive_impl(def_id,
lang_items.i16_impl(),
None,
"i16",
item.span);
}
- ty::TyInt(ast::IntTy::I32) => {
+ ty::Int(ast::IntTy::I32) => {
self.check_primitive_impl(def_id,
lang_items.i32_impl(),
None,
"i32",
item.span);
}
- ty::TyInt(ast::IntTy::I64) => {
+ ty::Int(ast::IntTy::I64) => {
self.check_primitive_impl(def_id,
lang_items.i64_impl(),
None,
"i64",
item.span);
}
- ty::TyInt(ast::IntTy::I128) => {
+ ty::Int(ast::IntTy::I128) => {
self.check_primitive_impl(def_id,
lang_items.i128_impl(),
None,
"i128",
item.span);
}
- ty::TyInt(ast::IntTy::Isize) => {
+ ty::Int(ast::IntTy::Isize) => {
self.check_primitive_impl(def_id,
lang_items.isize_impl(),
None,
"isize",
item.span);
}
- ty::TyUint(ast::UintTy::U8) => {
+ ty::Uint(ast::UintTy::U8) => {
self.check_primitive_impl(def_id,
lang_items.u8_impl(),
None,
"u8",
item.span);
}
- ty::TyUint(ast::UintTy::U16) => {
+ ty::Uint(ast::UintTy::U16) => {
self.check_primitive_impl(def_id,
lang_items.u16_impl(),
None,
"u16",
item.span);
}
- ty::TyUint(ast::UintTy::U32) => {
+ ty::Uint(ast::UintTy::U32) => {
self.check_primitive_impl(def_id,
lang_items.u32_impl(),
None,
"u32",
item.span);
}
- ty::TyUint(ast::UintTy::U64) => {
+ ty::Uint(ast::UintTy::U64) => {
self.check_primitive_impl(def_id,
lang_items.u64_impl(),
None,
"u64",
item.span);
}
- ty::TyUint(ast::UintTy::U128) => {
+ ty::Uint(ast::UintTy::U128) => {
self.check_primitive_impl(def_id,
lang_items.u128_impl(),
None,
"u128",
item.span);
}
- ty::TyUint(ast::UintTy::Usize) => {
+ ty::Uint(ast::UintTy::Usize) => {
self.check_primitive_impl(def_id,
lang_items.usize_impl(),
None,
"usize",
item.span);
}
- ty::TyFloat(ast::FloatTy::F32) => {
+ ty::Float(ast::FloatTy::F32) => {
self.check_primitive_impl(def_id,
lang_items.f32_impl(),
lang_items.f32_runtime_impl(),
"f32",
item.span);
}
- ty::TyFloat(ast::FloatTy::F64) => {
+ ty::Float(ast::FloatTy::F64) => {
self.check_primitive_impl(def_id,
lang_items.f64_impl(),
lang_items.f64_runtime_impl(),
"f64",
item.span);
}
- ty::TyError => {
+ ty::Error => {
return;
}
_ => {
tcx.specialization_graph_of(trait_def_id);
// check for overlap with the automatic `impl Trait for Trait`
- if let ty::TyDynamic(ref data, ..) = trait_ref.self_ty().sty {
+ if let ty::Dynamic(ref data, ..) = trait_ref.self_ty().sty {
// This is something like impl Trait1 for Trait2. Illegal
// if Trait1 is a supertrait of Trait2 or Trait2 is not object safe.
!trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
- ty::TyAdt(self_def, _) => Some(self_def.did),
- ty::TyForeign(did) => Some(did),
+ ty::Adt(self_def, _) => Some(self_def.did),
+ ty::Foreign(did) => Some(did),
_ => None,
};
impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
match t.sty {
- ty::TyProjection(..) | ty::TyAnon(..) if !self.include_nonconstraining => {
+ ty::Projection(..) | ty::Anon(..) if !self.include_nonconstraining => {
// projections are not injective
return false;
}
- ty::TyParam(data) => {
+ ty::Param(data) => {
self.parameters.push(Parameter::from(data));
}
_ => {}
let main_def_id = tcx.hir.local_def_id(main_id);
let main_t = tcx.type_of(main_def_id);
match main_t.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
match tcx.hir.find(main_id) {
Some(hir_map::NodeItem(it)) => {
match it.node {
let start_def_id = tcx.hir.local_def_id(start_id);
let start_t = tcx.type_of(start_def_id);
match start_t.sty {
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
match tcx.hir.find(start_id) {
Some(hir_map::NodeItem(it)) => {
match it.node {
// Calculating the predicate requirements necessary
// for item_did.
//
- // For field of type &'a T (reference) or TyAdt
+ // For field of type &'a T (reference) or Adt
// (struct/enum/union) there will be outlive
// requirements for adt_def.
let field_ty = self.tcx.type_of(field_def.did);
// a predicate requirement of T: 'a (T outlives 'a).
//
// We also want to calculate potential predicates for the T
- ty::TyRef(region, rty, _) => {
- debug!("TyRef");
+ ty::Ref(region, rty, _) => {
+ debug!("Ref");
insert_outlives_predicate(tcx, rty.into(), region, required_predicates);
}
- // For each TyAdt (struct/enum/union) type `Foo<'a, T>`, we
+ // For each Adt (struct/enum/union) type `Foo<'a, T>`, we
// can load the current set of inferred and explicit
// predicates from `global_inferred_outlives` and filter the
// ones that are TypeOutlives.
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
// First check the inferred predicates
//
// Example 1:
// round we will get `U: 'b`. We then apply the substitution
// `['b => 'a, U => T]` and thus get the requirement that `T:
// 'a` holds for `Foo`.
- debug!("TyAdt");
+ debug!("Adt");
if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did) {
for unsubstituted_predicate in unsubstituted_predicates {
// `unsubstituted_predicate` is `U: 'b` in the
);
}
- ty::TyDynamic(obj, ..) => {
+ ty::Dynamic(obj, ..) => {
// This corresponds to `dyn Trait<..>`. In this case, we should
// use the explicit predicates as well.
// `dyn Trait` at this stage. Therefore when checking explicit
// predicates in `check_explicit_predicates` we need to ignore
// checking the explicit_map for Self type.
- debug!("TyDynamic");
+ debug!("Dynamic");
debug!("field_ty = {}", &field_ty);
debug!("ty in field = {}", &ty);
if let Some(ex_trait_ref) = obj.principal() {
}
}
- ty::TyProjection(obj) => {
+ ty::Projection(obj) => {
// This corresponds to `<T as Foo<'a>>::Bar`. In this case, we should use the
// explicit predicates as well.
- debug!("TyProjection");
+ debug!("Projection");
check_explicit_predicates(
tcx,
&tcx.associated_item(obj.item_def_id).container.id(),
let inferred_start = self.terms_cx.inferred_starts[&id];
let current_item = &CurrentItem { inferred_start };
match tcx.type_of(def_id).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
// in comment at top of module.
}
}
- ty::TyFnDef(..) => {
+ ty::FnDef(..) => {
self.add_constraints_from_sig(current_item,
tcx.fn_sig(def_id),
self.covariant);
variance);
match ty.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
- ty::TyStr | ty::TyNever | ty::TyForeign(..) => {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) |
+ ty::Str | ty::Never | ty::Foreign(..) => {
// leaf type -- noop
}
- ty::TyFnDef(..) |
- ty::TyGenerator(..) |
- ty::TyClosure(..) => {
+ ty::FnDef(..) |
+ ty::Generator(..) |
+ ty::Closure(..) => {
bug!("Unexpected closure type in variance computation");
}
- ty::TyRef(region, ty, mutbl) => {
+ ty::Ref(region, ty, mutbl) => {
let contra = self.contravariant(variance);
self.add_constraints_from_region(current, region, contra);
self.add_constraints_from_mt(current, &ty::TypeAndMut { ty, mutbl }, variance);
}
- ty::TyArray(typ, _) |
- ty::TySlice(typ) => {
+ ty::Array(typ, _) |
+ ty::Slice(typ) => {
self.add_constraints_from_ty(current, typ, variance);
}
- ty::TyRawPtr(ref mt) => {
+ ty::RawPtr(ref mt) => {
self.add_constraints_from_mt(current, mt, variance);
}
- ty::TyTuple(subtys) => {
+ ty::Tuple(subtys) => {
for &subty in subtys {
self.add_constraints_from_ty(current, subty, variance);
}
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
self.add_constraints_from_substs(current, def.did, substs, variance);
}
- ty::TyProjection(ref data) => {
+ ty::Projection(ref data) => {
let tcx = self.tcx();
self.add_constraints_from_trait_ref(current, data.trait_ref(tcx), variance);
}
- ty::TyAnon(_, substs) => {
+ ty::Anon(_, substs) => {
self.add_constraints_from_invariant_substs(current, substs, variance);
}
- ty::TyDynamic(ref data, r) => {
+ ty::Dynamic(ref data, r) => {
// The type `Foo<T+'a>` is contravariant w/r/t `'a`:
let contra = self.contravariant(variance);
self.add_constraints_from_region(current, r, contra);
}
}
- ty::TyParam(ref data) => {
+ ty::Param(ref data) => {
self.add_constraint(current, data.idx, variance);
}
- ty::TyFnPtr(sig) => {
+ ty::FnPtr(sig) => {
self.add_constraints_from_sig(current, sig, variance);
}
- ty::TyError => {
+ ty::Error => {
// we encounter this when walking the trait references for object
- // types, where we use TyError as the Self type
+ // types, where we use Error as the Self type
}
- ty::TyGeneratorWitness(..) |
- ty::TyInfer(..) => {
+ ty::GeneratorWitness(..) |
+ ty::Infer(..) => {
bug!("unexpected type encountered in \
variance inference: {}",
ty);
debug!("id={} variances={:?}", id, variances);
// Functions can have unused type parameters: make those invariant.
- if let ty::TyFnDef(..) = tcx.type_of(def_id).sty {
+ if let ty::FnDef(..) = tcx.type_of(def_id).sty {
for variance in &mut variances {
if *variance == ty::Bivariant {
*variance = ty::Invariant;
.expect("Cannot get impl trait");
match trait_ref.self_ty().sty {
- ty::TypeVariants::TyParam(_) => {},
+ ty::Param(_) => {},
_ => return,
}
let ty = cx.tcx.type_of(def_id);
match ty.sty {
- ty::TyAdt(adt, _) => callback(&match adt.adt_kind() {
+ ty::Adt(adt, _) => callback(&match adt.adt_kind() {
AdtKind::Struct => Def::Struct,
AdtKind::Enum => Def::Enum,
AdtKind::Union => Def::Union,
}),
- ty::TyInt(_) |
- ty::TyUint(_) |
- ty::TyFloat(_) |
- ty::TyStr |
- ty::TyBool |
- ty::TyChar => callback(&move |_: DefId| {
+ ty::Int(_) |
+ ty::Uint(_) |
+ ty::Float(_) |
+ ty::Str |
+ ty::Bool |
+ ty::Char => callback(&move |_: DefId| {
match ty.sty {
- ty::TyInt(x) => Def::PrimTy(hir::TyInt(x)),
- ty::TyUint(x) => Def::PrimTy(hir::TyUint(x)),
- ty::TyFloat(x) => Def::PrimTy(hir::TyFloat(x)),
- ty::TyStr => Def::PrimTy(hir::TyStr),
- ty::TyBool => Def::PrimTy(hir::TyBool),
- ty::TyChar => Def::PrimTy(hir::TyChar),
+ ty::Int(x) => Def::PrimTy(hir::Int(x)),
+ ty::Uint(x) => Def::PrimTy(hir::Uint(x)),
+ ty::Float(x) => Def::PrimTy(hir::Float(x)),
+ ty::Str => Def::PrimTy(hir::Str),
+ ty::Bool => Def::PrimTy(hir::Bool),
+ ty::Char => Def::PrimTy(hir::Char),
_ => unreachable!(),
}
}),
ret.extend(build_impls(cx, did, true));
clean::EnumItem(build_enum(cx, did))
}
- Def::TyForeign(did) => {
+ Def::ForeignTy(did) => {
record_extern_fqn(cx, did, clean::TypeKind::Foreign);
ret.extend(build_impls(cx, did, false));
clean::ForeignTypeItem
let name = if self.name.is_some() {
self.name.expect("No name provided").clean(cx)
} else {
- "".to_string()
+ String::new()
};
// maintain a stack of mod ids, for doc comment path resolution
Some(did) if cx.tcx.lang_items().fn_trait_kind(did).is_some() => {
assert_eq!(types.len(), 1);
let inputs = match types[0].sty {
- ty::TyTuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
+ ty::Tuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
_ => {
return GenericArgs::AngleBracketed {
lifetimes,
let output = None;
// FIXME(#20299) return type comes from a projection now
// match types[1].sty {
- // ty::TyTuple(ref v) if v.is_empty() => None, // -> ()
+ // ty::Tuple(ref v) if v.is_empty() => None, // -> ()
// _ => Some(types[1].clean(cx))
// };
GenericArgs::Parenthesized {
// collect any late bound regions
let mut late_bounds = vec![];
for ty_s in trait_ref.input_types().skip(1) {
- if let ty::TyTuple(ts) = ty_s.sty {
+ if let ty::Tuple(ts) = ty_s.sty {
for &ty_s in ts {
- if let ty::TyRef(ref reg, _, _) = ty_s.sty {
+ if let ty::Ref(ref reg, _, _) = ty_s.sty {
if let &ty::RegionKind::ReLateBound(..) = *reg {
debug!(" hit an ReLateBound {:?}", reg);
if let Some(Lifetime(name)) = reg.clean(cx) {
values: sig.skip_binder().inputs().iter().map(|t| {
Argument {
type_: t.clean(cx),
- name: names.next().map_or("".to_string(), |name| name.to_string()),
+ name: names.next().map_or(String::new(), |name| name.to_string()),
}
}).collect(),
},
let self_arg_ty = *sig.input(0).skip_binder();
if self_arg_ty == self_ty {
decl.inputs.values[0].type_ = Generic(String::from("Self"));
- } else if let ty::TyRef(_, ty, _) = self_arg_ty.sty {
+ } else if let ty::Ref(_, ty, _) = self_arg_ty.sty {
if ty == self_ty {
match decl.inputs.values[0].type_ {
BorrowedRef{ref mut type_, ..} => {
impl Clean<Type> for hir::Ty {
fn clean(&self, cx: &DocContext) -> Type {
use rustc::hir::*;
+
match self.node {
TyKind::Never => Never,
TyKind::Ptr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)),
if let Some(bounds) = cx.impl_trait_bounds.borrow_mut().remove(&did) {
return ImplTrait(bounds);
}
+ } else if let Def::Existential(did) = path.def {
+ // This block is for returned impl trait only.
+ if let Some(node_id) = cx.tcx.hir.as_local_node_id(did) {
+ let item = cx.tcx.hir.expect_item(node_id);
+ if let hir::ItemKind::Existential(ref ty) = item.node {
+ return ImplTrait(ty.bounds.clean(cx));
+ }
+ }
}
let mut alias = None;
TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => {
let mut def = Def::Err;
let ty = hir_ty_to_ty(cx.tcx, self);
- if let ty::TyProjection(proj) = ty.sty {
+ if let ty::Projection(proj) = ty.sty {
def = Def::Trait(proj.trait_ref(cx.tcx).def_id);
}
let trait_path = hir::Path {
impl<'tcx> Clean<Type> for Ty<'tcx> {
fn clean(&self, cx: &DocContext) -> Type {
match self.sty {
- ty::TyNever => Never,
- ty::TyBool => Primitive(PrimitiveType::Bool),
- ty::TyChar => Primitive(PrimitiveType::Char),
- ty::TyInt(int_ty) => Primitive(int_ty.into()),
- ty::TyUint(uint_ty) => Primitive(uint_ty.into()),
- ty::TyFloat(float_ty) => Primitive(float_ty.into()),
- ty::TyStr => Primitive(PrimitiveType::Str),
- ty::TySlice(ty) => Slice(box ty.clean(cx)),
- ty::TyArray(ty, n) => {
+ ty::Never => Never,
+ ty::Bool => Primitive(PrimitiveType::Bool),
+ ty::Char => Primitive(PrimitiveType::Char),
+ ty::Int(int_ty) => Primitive(int_ty.into()),
+ ty::Uint(uint_ty) => Primitive(uint_ty.into()),
+ ty::Float(float_ty) => Primitive(float_ty.into()),
+ ty::Str => Primitive(PrimitiveType::Str),
+ ty::Slice(ty) => Slice(box ty.clean(cx)),
+ ty::Array(ty, n) => {
let mut n = cx.tcx.lift(&n).expect("array lift failed");
if let ConstValue::Unevaluated(def_id, substs) = n.val {
let param_env = cx.tcx.param_env(def_id);
let n = print_const(cx, n);
Array(box ty.clean(cx), n)
}
- ty::TyRawPtr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
- ty::TyRef(r, ty, mutbl) => BorrowedRef {
+ ty::RawPtr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
+ ty::Ref(r, ty, mutbl) => BorrowedRef {
lifetime: r.clean(cx),
mutability: mutbl.clean(cx),
type_: box ty.clean(cx),
},
- ty::TyFnDef(..) |
- ty::TyFnPtr(_) => {
- let ty = cx.tcx.lift(self).expect("TyFnPtr lift failed");
+ ty::FnDef(..) |
+ ty::FnPtr(_) => {
+ let ty = cx.tcx.lift(self).expect("FnPtr lift failed");
let sig = ty.fn_sig(cx.tcx);
BareFunction(box BareFunctionDecl {
unsafety: sig.unsafety(),
abi: sig.abi(),
})
}
- ty::TyAdt(def, substs) => {
+ ty::Adt(def, substs) => {
let did = def.did;
let kind = match def.adt_kind() {
AdtKind::Struct => TypeKind::Struct,
is_generic: false,
}
}
- ty::TyForeign(did) => {
+ ty::Foreign(did) => {
inline::record_extern_fqn(cx, did, TypeKind::Foreign);
let path = external_path(cx, &cx.tcx.item_name(did).as_str(),
None, false, vec![], Substs::empty());
is_generic: false,
}
}
- ty::TyDynamic(ref obj, ref reg) => {
+ ty::Dynamic(ref obj, ref reg) => {
if let Some(principal) = obj.principal() {
let did = principal.def_id();
inline::record_extern_fqn(cx, did, TypeKind::Trait);
Never
}
}
- ty::TyTuple(ref t) => Tuple(t.clean(cx)),
+ ty::Tuple(ref t) => Tuple(t.clean(cx)),
- ty::TyProjection(ref data) => data.clean(cx),
+ ty::Projection(ref data) => data.clean(cx),
- ty::TyParam(ref p) => Generic(p.name.to_string()),
+ ty::Param(ref p) => Generic(p.name.to_string()),
- ty::TyAnon(def_id, substs) => {
+ ty::Anon(def_id, substs) => {
// Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
// by looking up the projections associated with the def_id.
let predicates_of = cx.tcx.predicates_of(def_id);
- let substs = cx.tcx.lift(&substs).expect("TyAnon lift failed");
+ let substs = cx.tcx.lift(&substs).expect("Anon lift failed");
let bounds = predicates_of.instantiate(cx.tcx, substs);
let mut regions = vec![];
let mut has_sized = false;
ImplTrait(bounds)
}
- ty::TyClosure(..) | ty::TyGenerator(..) => Tuple(vec![]), // FIXME(pcwalton)
+ ty::Closure(..) | ty::Generator(..) => Tuple(vec![]), // FIXME(pcwalton)
- ty::TyGeneratorWitness(..) => panic!("TyGeneratorWitness"),
- ty::TyInfer(..) => panic!("TyInfer"),
- ty::TyError => panic!("TyError"),
+ ty::GeneratorWitness(..) => panic!("GeneratorWitness"),
+ ty::Infer(..) => panic!("Infer"),
+ ty::Error => panic!("Error"),
}
}
}
ForeignStaticItem(Static {
type_: ty.clean(cx),
mutability: if mutbl {Mutable} else {Immutable},
- expr: "".to_string(),
+ expr: String::new(),
})
}
hir::ForeignItemKind::Type => {
debug!("converting span {:?} to snippet", self.clean(cx));
let sn = match cx.sess().source_map().span_to_snippet(*self) {
Ok(x) => x.to_string(),
- Err(_) => "".to_string()
+ Err(_) => String::new()
};
debug!("got snippet {}", sn);
sn
let is_generic = match path.def {
Def::PrimTy(p) => match p {
- hir::TyStr => return Primitive(PrimitiveType::Str),
- hir::TyBool => return Primitive(PrimitiveType::Bool),
- hir::TyChar => return Primitive(PrimitiveType::Char),
- hir::TyInt(int_ty) => return Primitive(int_ty.into()),
- hir::TyUint(uint_ty) => return Primitive(uint_ty.into()),
- hir::TyFloat(float_ty) => return Primitive(float_ty.into()),
+ hir::Str => return Primitive(PrimitiveType::Str),
+ hir::Bool => return Primitive(PrimitiveType::Bool),
+ hir::Char => return Primitive(PrimitiveType::Char),
+ hir::Int(int_ty) => return Primitive(int_ty.into()),
+ hir::Uint(uint_ty) => return Primitive(uint_ty.into()),
+ hir::Float(float_ty) => return Primitive(float_ty.into()),
},
Def::SelfTy(..) if path.segments.len() == 1 => {
return Generic(keywords::SelfType.name().to_string());
Def::Struct(i) => (i, TypeKind::Struct),
Def::Union(i) => (i, TypeKind::Union),
Def::Mod(i) => (i, TypeKind::Module),
- Def::TyForeign(i) => (i, TypeKind::Foreign),
+ Def::ForeignTy(i) => (i, TypeKind::Foreign),
Def::Const(i) => (i, TypeKind::Const),
Def::Static(i, _) => (i, TypeKind::Static),
Def::Variant(i) => (cx.tcx.parent_def_id(i).expect("cannot get parent def id"),
feature: self.feature.to_string(),
since: match self.level {
attr::Stable {ref since} => since.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
deprecated_since: match self.rustc_depr {
Some(attr::RustcDeprecation {ref since, ..}) => since.to_string(),
- _=> "".to_string(),
+ _=> String::new(),
},
deprecated_reason: match self.rustc_depr {
Some(ref depr) => depr.reason.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
unstable_reason: match self.level {
attr::Unstable { reason: Some(ref reason), .. } => reason.to_string(),
- _ => "".to_string(),
+ _ => String::new(),
},
issue: match self.level {
attr::Unstable {issue, ..} => Some(issue),
impl Clean<Deprecation> for attr::Deprecation {
fn clean(&self, _: &DocContext) -> Deprecation {
Deprecation {
- since: self.since.as_ref().map_or("".to_string(), |s| s.to_string()),
- note: self.note.as_ref().map_or("".to_string(), |s| s.to_string()),
+ since: self.since.as_ref().map_or(String::new(), |s| s.to_string()),
+ note: self.note.as_ref().map_or(String::new(), |s| s.to_string()),
}
}
}
clean::BorrowedRef{ lifetime: ref l, mutability, type_: ref ty} => {
let lt = match *l {
Some(ref l) => format!("{} ", *l),
- _ => "".to_string(),
+ _ => String::new(),
};
let m = MutableSpace(mutability);
let amp = if f.alternate() {
root_path = page.root_path,
suffix=page.resource_suffix)
} else {
- "".to_owned()
+ String::new()
},
content = *t,
root_path = page.root_path,
css_class = page.css_class,
logo = if layout.logo.is_empty() {
- "".to_string()
+ String::new()
} else {
format!("<a href='{}{}/index.html'>\
<img src='{}' alt='logo' width='100'></a>",
description = page.description,
keywords = page.keywords,
favicon = if layout.favicon.is_empty() {
- "".to_string()
+ String::new()
} else {
format!(r#"<link rel="shortcut icon" href="{}">"#, layout.favicon)
},
local_sources: FxHashMap(),
issue_tracker_base_url: None,
layout: layout::Layout {
- logo: "".to_string(),
- favicon: "".to_string(),
+ logo: String::new(),
+ favicon: String::new(),
external_html: external_html.clone(),
krate: krate.name.clone(),
},
!chr.is_whitespace()
})
}).collect::<Vec<_>>().join("\n"),
- None => "".to_string()
+ None => String::new()
}
}
stab_docs = stab_docs,
docs = MarkdownSummaryLine(doc_value, &myitem.links()),
class = myitem.type_(),
- stab = myitem.stability_class().unwrap_or("".to_string()),
+ stab = myitem.stability_class().unwrap_or(String::new()),
unsafety_flag = unsafety_flag,
href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
title_type = myitem.type_(),
Ok((ty.def, Some(format!("{}.{}", out, item_name))))
} else {
match cx.tcx.type_of(did).sty {
- ty::TyAdt(def, _) => {
+ ty::Adt(def, _) => {
if let Some(item) = if def.is_enum() {
def.all_fields().find(|item| item.ident.name == item_name)
} else {
}
const PRIMITIVES: &[(&str, Def)] = &[
- ("u8", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U8))),
- ("u16", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U16))),
- ("u32", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U32))),
- ("u64", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U64))),
- ("u128", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::U128))),
- ("usize", Def::PrimTy(hir::PrimTy::TyUint(syntax::ast::UintTy::Usize))),
- ("i8", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I8))),
- ("i16", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I16))),
- ("i32", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I32))),
- ("i64", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I64))),
- ("i128", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::I128))),
- ("isize", Def::PrimTy(hir::PrimTy::TyInt(syntax::ast::IntTy::Isize))),
- ("f32", Def::PrimTy(hir::PrimTy::TyFloat(syntax::ast::FloatTy::F32))),
- ("f64", Def::PrimTy(hir::PrimTy::TyFloat(syntax::ast::FloatTy::F64))),
- ("str", Def::PrimTy(hir::PrimTy::TyStr)),
- ("bool", Def::PrimTy(hir::PrimTy::TyBool)),
- ("char", Def::PrimTy(hir::PrimTy::TyChar)),
+ ("u8", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U8))),
+ ("u16", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U16))),
+ ("u32", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U32))),
+ ("u64", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U64))),
+ ("u128", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::U128))),
+ ("usize", Def::PrimTy(hir::PrimTy::Uint(syntax::ast::UintTy::Usize))),
+ ("i8", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I8))),
+ ("i16", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I16))),
+ ("i32", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I32))),
+ ("i64", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I64))),
+ ("i128", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::I128))),
+ ("isize", Def::PrimTy(hir::PrimTy::Int(syntax::ast::IntTy::Isize))),
+ ("f32", Def::PrimTy(hir::PrimTy::Float(syntax::ast::FloatTy::F32))),
+ ("f64", Def::PrimTy(hir::PrimTy::Float(syntax::ast::FloatTy::F64))),
+ ("str", Def::PrimTy(hir::PrimTy::Str)),
+ ("bool", Def::PrimTy(hir::PrimTy::Bool)),
+ ("char", Def::PrimTy(hir::PrimTy::Char)),
];
fn is_primitive(path_str: &str, is_val: bool) -> Option<Def> {
Def::Struct(did) |
Def::Union(did) |
Def::Enum(did) |
- Def::TyForeign(did) |
+ Def::ForeignTy(did) |
Def::TyAlias(did) if !self_is_hidden => {
self.cx.access_levels.borrow_mut().map.insert(did, AccessLevel::Public);
},
name = "serialize"
path = "lib.rs"
crate-type = ["dylib", "rlib"]
+
+[dependencies]
+smallvec = { version = "0.6.5", features = ["union"] }
use std::rc::Rc;
use std::sync::Arc;
+use smallvec::{Array, SmallVec};
+
+impl<A> Encodable for SmallVec<A>
+ where A: Array,
+ A::Item: Encodable
+{
+ fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+ s.emit_seq(self.len(), |s| {
+ for (i, e) in self.iter().enumerate() {
+ s.emit_seq_elt(i, |s| e.encode(s))?;
+ }
+ Ok(())
+ })
+ }
+}
+
+impl<A> Decodable for SmallVec<A>
+ where A: Array,
+ A::Item: Decodable
+{
+ fn decode<D: Decoder>(d: &mut D) -> Result<SmallVec<A>, D::Error> {
+ d.read_seq(|d, len| {
+ let mut vec = SmallVec::with_capacity(len);
+ // FIXME(#48994) - could just be collected into a Result<SmallVec, D::Error>
+ for i in 0..len {
+ vec.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+ }
+ Ok(vec)
+ })
+ }
+}
+
impl<T: Encodable> Encodable for LinkedList<T> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_seq(self.len(), |s| {
pub use self::serialize::{SpecializationError, SpecializedEncoder, SpecializedDecoder};
pub use self::serialize::{UseSpecializedEncodable, UseSpecializedDecodable};
+extern crate smallvec;
+
mod serialize;
mod collection_impls;
#[unstable(feature = "libstd_sys_internals",
reason = "used by the panic! macro",
issue = "0")]
+#[cfg_attr(not(any(stage0, test)), lang = "begin_panic")]
#[inline(never)] #[cold] // avoid code bloat at the call sites as much as possible
pub fn begin_panic<M: Any + Send>(msg: M, file_line_col: &(&'static str, u32, u32)) -> ! {
// Note that this should be the only allocation performed in this code path.
impl Mutex {
pub const fn new() -> Mutex {
Mutex {
+ // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
+ // initializing an SRWLOCK here.
lock: AtomicUsize::new(0),
held: UnsafeCell::new(false),
}
rustc_errors = { path = "../librustc_errors" }
rustc_data_structures = { path = "../librustc_data_structures" }
rustc_target = { path = "../librustc_target" }
+smallvec = { version = "0.6.5", features = ["union"] }
ExprKind::Match(..) => ExprPrecedence::Match,
ExprKind::Closure(..) => ExprPrecedence::Closure,
ExprKind::Block(..) => ExprPrecedence::Block,
- ExprKind::Catch(..) => ExprPrecedence::Catch,
+ ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,
ExprKind::Async(..) => ExprPrecedence::Async,
ExprKind::Assign(..) => ExprPrecedence::Assign,
ExprKind::AssignOp(..) => ExprPrecedence::AssignOp,
/// created during lowering cannot be made the parent of any other
/// preexisting defs.
Async(CaptureBy, NodeId, P<Block>),
- /// A catch block (`catch { ... }`)
- Catch(P<Block>),
+ /// A try block (`try { ... }`)
+ TryBlock(P<Block>),
/// An assignment (`a = foo()`)
Assign(P<Expr>, P<Expr>),
let sym = Ident::with_empty_ctxt(Symbol::gensym(&format!(
"__register_diagnostic_{}", code
)));
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
ecx.item_mod(
span,
span,
),
);
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
P(ast::Item {
ident: *name,
attrs: Vec::new(),
// Use a macro because forwarding to a simple function has type system issues
macro_rules! make_stmts_default {
($me:expr) => {
- $me.make_expr().map(|e| OneVector::one(ast::Stmt {
+ $me.make_expr().map(|e| smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: ast::StmtKind::Expr(e),
- }))
+ }])
}
}
}
fn make_stmts(self: Box<DummyResult>) -> Option<OneVector<ast::Stmt>> {
- Some(OneVector::one(ast::Stmt {
+ Some(smallvec![ast::Stmt {
id: ast::DUMMY_NODE_ID,
node: ast::StmtKind::Expr(DummyResult::raw_expr(self.span)),
span: self.span,
- }))
+ }])
}
fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> {
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
+use std::iter::FromIterator;
use std::{iter, mem};
use std::rc::Rc;
use std::path::PathBuf;
self.expand_fragment(AstFragment::$Kind(ast)).$make_ast()
})*)*
$($(fn $fold_ast_elt(&mut self, ast_elt: <$AstTy as IntoIterator>::Item) -> $AstTy {
- self.expand_fragment(AstFragment::$Kind(OneVector::one(ast_elt))).$make_ast()
+ self.expand_fragment(AstFragment::$Kind(smallvec![ast_elt])).$make_ast()
})*)*
}
let orig_mod_span = krate.module.inner;
- let krate_item = AstFragment::Items(OneVector::one(P(ast::Item {
+ let krate_item = AstFragment::Items(smallvec![P(ast::Item {
attrs: krate.attrs,
span: krate.span,
node: ast::ItemKind::Mod(krate.module),
id: ast::DUMMY_NODE_ID,
vis: respan(krate.span.shrink_to_lo(), ast::VisibilityKind::Public),
tokens: None,
- })));
+ })]);
match self.expand_fragment(krate_item).make_items().pop().map(P::into_inner) {
Some(ast::Item { attrs, node: ast::ItemKind::Mod(module), .. }) => {
None => return,
};
- fragment.visit_with(&mut DisallowModules {
+ fragment.visit_with(&mut DisallowMacros {
span,
parse_sess: self.cx.parse_sess,
});
- struct DisallowModules<'a> {
+ struct DisallowMacros<'a> {
span: Span,
parse_sess: &'a ParseSess,
}
- impl<'ast, 'a> Visitor<'ast> for DisallowModules<'a> {
+ impl<'ast, 'a> Visitor<'ast> for DisallowMacros<'a> {
fn visit_item(&mut self, i: &'ast ast::Item) {
- let name = match i.node {
- ast::ItemKind::Mod(_) => Some("modules"),
- ast::ItemKind::MacroDef(_) => Some("macro definitions"),
- _ => None,
- };
- if let Some(name) = name {
+ if let ast::ItemKind::MacroDef(_) = i.node {
emit_feature_err(
self.parse_sess,
"proc_macro_gen",
self.span,
GateIssue::Language,
- &format!("procedural macros cannot expand to {}", name),
+ &format!("procedural macros cannot expand to macro definitions"),
);
}
visit::walk_item(self, i);
ui
});
- OneVector::many(
+ OneVector::from_iter(
self.fold_unnameable(item).into_iter()
.chain(self.fold_unnameable(use_item)))
} else {
match kind {
AstFragmentKind::Expr => AstFragment::Expr(expr_placeholder()),
AstFragmentKind::OptExpr => AstFragment::OptExpr(Some(expr_placeholder())),
- AstFragmentKind::Items => AstFragment::Items(OneVector::one(P(ast::Item {
+ AstFragmentKind::Items => AstFragment::Items(smallvec![P(ast::Item {
id, span, ident, vis, attrs,
node: ast::ItemKind::Mac(mac_placeholder()),
tokens: None,
- }))),
- AstFragmentKind::TraitItems => AstFragment::TraitItems(OneVector::one(ast::TraitItem {
+ })]),
+ AstFragmentKind::TraitItems => AstFragment::TraitItems(smallvec![ast::TraitItem {
id, span, ident, attrs, generics,
node: ast::TraitItemKind::Macro(mac_placeholder()),
tokens: None,
- })),
- AstFragmentKind::ImplItems => AstFragment::ImplItems(OneVector::one(ast::ImplItem {
+ }]),
+ AstFragmentKind::ImplItems => AstFragment::ImplItems(smallvec![ast::ImplItem {
id, span, ident, vis, attrs, generics,
node: ast::ImplItemKind::Macro(mac_placeholder()),
defaultness: ast::Defaultness::Final,
tokens: None,
- })),
+ }]),
AstFragmentKind::ForeignItems =>
- AstFragment::ForeignItems(OneVector::one(ast::ForeignItem {
+ AstFragment::ForeignItems(smallvec![ast::ForeignItem {
id, span, ident, vis, attrs,
node: ast::ForeignItemKind::Macro(mac_placeholder()),
- })),
+ }]),
AstFragmentKind::Pat => AstFragment::Pat(P(ast::Pat {
id, span, node: ast::PatKind::Mac(mac_placeholder()),
})),
AstFragmentKind::Ty => AstFragment::Ty(P(ast::Ty {
id, span, node: ast::TyKind::Mac(mac_placeholder()),
})),
- AstFragmentKind::Stmts => AstFragment::Stmts(OneVector::one({
+ AstFragmentKind::Stmts => AstFragment::Stmts(smallvec![{
let mac = P((mac_placeholder(), ast::MacStmtStyle::Braces, ThinVec::new()));
ast::Stmt { id, span, node: ast::StmtKind::Mac(mac) }
- })),
+ }]),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> OneVector<P<ast::Item>> {
match item.node {
ast::ItemKind::Mac(_) => return self.remove(item.id).make_items(),
- ast::ItemKind::MacroDef(_) => return OneVector::one(item),
+ ast::ItemKind::MacroDef(_) => return smallvec![item],
_ => {}
}
Ok(..) => {
// Add this input file to the code map to make it available as
// dependency information, but don't enter it's contents
- cx.source_map().new_source_file(file.into(), "".to_string());
+ cx.source_map().new_source_file(file.into(), String::new());
base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::new(bytes))))
}
// This MatcherPos instance is allocated on the stack. All others -- and
// there are frequently *no* others! -- are allocated on the heap.
let mut initial = initial_matcher_pos(ms, parser.span.lo());
- let mut cur_items = OneVector::one(MatcherPosHandle::Ref(&mut initial));
+ let mut cur_items = smallvec![MatcherPosHandle::Ref(&mut initial)];
let mut next_items = Vec::new();
loop {
interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
src: Vec<quoted::TokenTree>)
-> TokenStream {
- let mut stack = OneVector::one(Frame::new(src));
+ let mut stack: OneVector<Frame> = smallvec![Frame::new(src)];
let interpolations = interp.unwrap_or_else(HashMap::new); /* just a convenience */
let mut repeats = Vec::new();
let mut result: Vec<TokenStream> = Vec::new();
// Allows comparing raw pointers during const eval
(active, const_compare_raw_pointers, "1.27.0", Some(53020), None),
+ // Allows panicking during const eval (produces compile-time errors)
+ (active, const_panic, "1.30.0", Some(51999), None),
+
// Allows using #[prelude_import] on glob `use` items.
//
// rustc internal
// `extern "x86-interrupt" fn()`
(active, abi_x86_interrupt, "1.17.0", Some(40180), None),
- // Allows the `catch {...}` expression
- (active, catch_expr, "1.17.0", Some(31436), None),
+ // Allows the `try {...}` expression
+ (active, try_blocks, "1.29.0", Some(31436), None),
// Used to preserve symbols (see llvm.used)
(active, used, "1.18.0", Some(40289), None),
(active, tbm_target_feature, "1.27.0", Some(44839), None),
(active, wasm_target_feature, "1.30.0", Some(44839), None),
- // Allows macro invocations of the form `#[foo::bar]`
- (active, proc_macro_path_invoc, "1.27.0", Some(38356), None),
-
// Allows macro invocations on modules expressions and statements and
// procedural macros to expand to non-items.
(active, proc_macro_mod, "1.27.0", Some(38356), None),
// Access to crate names passed via `--extern` through prelude
(active, extern_prelude, "1.27.0", Some(44660), Some(Edition::Edition2018)),
- // Scoped attributes
- (active, tool_attributes, "1.25.0", Some(44690), None),
// Scoped lints
(active, tool_lints, "1.28.0", Some(44690), None),
(accepted, use_extern_macros, "1.30.0", Some(35896), None),
// Allows keywords to be escaped for use as identifiers
(accepted, raw_identifiers, "1.30.0", Some(48589), None),
+ // Attributes scoped to tools
+ (accepted, tool_attributes, "1.30.0", Some(44690), None),
+ // Allows multi-segment paths in attributes and derives
+ (accepted, proc_macro_path_invoc, "1.30.0", Some(38356), None),
);
// If you change this, please modify src/doc/unstable-book as well. You must
e.span,
"yield syntax is experimental");
}
- ast::ExprKind::Catch(_) => {
- gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental");
+ ast::ExprKind::TryBlock(_) => {
+ gate_feature_post!(&self, try_blocks, e.span, "`try` expression is experimental");
}
ast::ExprKind::IfLet(ref pats, ..) | ast::ExprKind::WhileLet(ref pats, ..) => {
if pats.len() > 1 {
"existential types are unstable"
);
}
-
- ast::ImplItemKind::Type(_) if !ii.generics.params.is_empty() => {
- gate_feature_post!(&self, generic_associated_types, ii.span,
- "generic associated types are unstable");
+ ast::ImplItemKind::Type(_) => {
+ if !ii.generics.params.is_empty() {
+ gate_feature_post!(&self, generic_associated_types, ii.span,
+ "generic associated types are unstable");
+ }
+ if !ii.generics.where_clause.predicates.is_empty() {
+ gate_feature_post!(&self, generic_associated_types, ii.span,
+ "where clauses on associated types are unstable");
+ }
}
_ => {}
}
use util::move_map::MoveMap;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::small_vec::ExpectOne;
pub trait Folder : Sized {
// Any additions to this trait should happen in form
pub fn noop_fold_trait_item<T: Folder>(i: TraitItem, folder: &mut T)
-> OneVector<TraitItem> {
- OneVector::one(TraitItem {
+ smallvec![TraitItem {
id: folder.new_id(i.id),
ident: folder.fold_ident(i.ident),
attrs: fold_attrs(i.attrs, folder),
},
span: folder.new_span(i.span),
tokens: i.tokens,
- })
+ }]
}
pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T)
-> OneVector<ImplItem> {
- OneVector::one(ImplItem {
+ smallvec![ImplItem {
id: folder.new_id(i.id),
vis: folder.fold_vis(i.vis),
ident: folder.fold_ident(i.ident),
},
span: folder.new_span(i.span),
tokens: i.tokens,
- })
+ }]
}
pub fn noop_fold_fn_header<T: Folder>(mut header: FnHeader, folder: &mut T) -> FnHeader {
// fold one item into possibly many items
pub fn noop_fold_item<T: Folder>(i: P<Item>, folder: &mut T) -> OneVector<P<Item>> {
- OneVector::one(i.map(|i| folder.fold_item_simple(i)))
+ smallvec![i.map(|i| folder.fold_item_simple(i))]
}
// fold one item into exactly one item
pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T)
-> OneVector<ForeignItem> {
- OneVector::one(folder.fold_foreign_item_simple(ni))
+ smallvec![folder.fold_foreign_item_simple(ni)]
}
pub fn noop_fold_foreign_item_simple<T: Folder>(ni: ForeignItem, folder: &mut T) -> ForeignItem {
}
ExprKind::Yield(ex) => ExprKind::Yield(ex.map(|x| folder.fold_expr(x))),
ExprKind::Try(ex) => ExprKind::Try(folder.fold_expr(ex)),
- ExprKind::Catch(body) => ExprKind::Catch(folder.fold_block(body)),
+ ExprKind::TryBlock(body) => ExprKind::TryBlock(folder.fold_block(body)),
},
id: folder.new_id(id),
span: folder.new_span(span),
pub fn noop_fold_stmt_kind<T: Folder>(node: StmtKind, folder: &mut T) -> OneVector<StmtKind> {
match node {
- StmtKind::Local(local) => OneVector::one(StmtKind::Local(folder.fold_local(local))),
+ StmtKind::Local(local) => smallvec![StmtKind::Local(folder.fold_local(local))],
StmtKind::Item(item) => folder.fold_item(item).into_iter().map(StmtKind::Item).collect(),
StmtKind::Expr(expr) => {
folder.fold_opt_expr(expr).into_iter().map(StmtKind::Expr).collect()
StmtKind::Semi(expr) => {
folder.fold_opt_expr(expr).into_iter().map(StmtKind::Semi).collect()
}
- StmtKind::Mac(mac) => OneVector::one(StmtKind::Mac(mac.map(|(mac, semi, attrs)| {
+ StmtKind::Mac(mac) => smallvec![StmtKind::Mac(mac.map(|(mac, semi, attrs)| {
(folder.fold_mac(mac), semi, fold_attrs(attrs.into(), folder).into())
- }))),
+ }))],
}
}
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(str_escape)]
+#![feature(try_trait)]
#![feature(unicode_internals)]
-#![feature(catch_expr)]
#![recursion_limit="256"]
extern crate rustc_data_structures;
extern crate rustc_target;
#[macro_use] extern crate scoped_tls;
+#[macro_use]
+extern crate smallvec;
extern crate serialize as rustc_serialize; // used by deriving
ast::ExprKind::WhileLet(..) |
ast::ExprKind::Loop(..) |
ast::ExprKind::ForLoop(..) |
- ast::ExprKind::Catch(..) => false,
+ ast::ExprKind::TryBlock(..) => false,
_ => true,
}
}
if col < len {
(&s[col..len]).to_string()
} else {
- "".to_string()
+ String::new()
}
}
None => s,
// http://www.unicode.org/Public/security/10.0.0/confusables.txt
use syntax_pos::{Span, NO_EXPANSION};
-use errors::DiagnosticBuilder;
+use errors::{Applicability, DiagnosticBuilder};
use super::StringReader;
const UNICODE_ARRAY: &[(char, &str, char)] = &[
let msg =
format!("Unicode character '{}' ({}) looks like '{}' ({}), but it is not",
ch, u_name, ascii_char, ascii_name);
- err.span_suggestion(span, &msg, ascii_char.to_string());
+ err.span_suggestion_with_applicability(
+ span,
+ &msg,
+ ascii_char.to_string(),
+ Applicability::MaybeIncorrect);
true
},
None => {
let mut i = tokens.iter();
// This might be a sign we need a connect method on Iterator.
let b = i.next()
- .map_or("".to_string(), |t| t.to_string());
+ .map_or(String::new(), |t| t.to_string());
i.enumerate().fold(b, |mut b, (i, a)| {
if tokens.len() > 2 && i == tokens.len() - 2 {
b.push_str(", or ");
} else {
err.span_label(self.span, "expected identifier");
if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
- err.span_suggestion(self.span, "remove this comma", "".into());
+ err.span_suggestion(self.span, "remove this comma", String::new());
}
}
err
self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
}
- fn parse_seq_to_before_tokens<T, F>(&mut self,
- kets: &[&token::Token],
- sep: SeqSep,
- expect: TokenExpectType,
- mut f: F)
- -> PResult<'a, Vec<T>>
+ fn parse_seq_to_before_tokens<T, F>(
+ &mut self,
+ kets: &[&token::Token],
+ sep: SeqSep,
+ expect: TokenExpectType,
+ mut f: F,
+ ) -> PResult<'a, Vec<T>>
where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
{
let mut first: bool = true;
let parser_snapshot_before_pat = self.clone();
+ // Once we can use edition 2018 in the compiler,
+ // replace this with real try blocks.
+ macro_rules! try_block {
+ ($($inside:tt)*) => (
+ (||{ ::std::ops::Try::from_ok({ $($inside)* }) })()
+ )
+ }
+
// We're going to try parsing the argument as a pattern (even though it's not
// allowed). This way we can provide better errors to the user.
- let pat_arg: PResult<'a, _> = do catch {
+ let pat_arg: PResult<'a, _> = try_block! {
let pat = self.parse_pat()?;
self.expect(&token::Colon)?;
(pat, self.parse_ty()?)
TokenExpectType::Expect,
|p| p.parse_ty())?;
self.bump(); // `)`
+ let span = lo.to(self.prev_span);
let output = if self.eat(&token::RArrow) {
Some(self.parse_ty_common(false, false)?)
} else {
None
};
- let span = lo.to(self.prev_span);
ParenthesisedArgs { inputs, output, span }.into()
};
BlockCheckMode::Unsafe(ast::UserProvided),
attrs);
}
- if self.is_catch_expr() {
+ if self.is_do_catch_block() {
+ let mut db = self.fatal("found removed `do catch` syntax");
+ db.help("Following RFC #2388, the new non-placeholder syntax is `try`");
+ return Err(db);
+ }
+ if self.is_try_block() {
let lo = self.span;
- assert!(self.eat_keyword(keywords::Do));
- assert!(self.eat_keyword(keywords::Catch));
- return self.parse_catch_expr(lo, attrs);
+ assert!(self.eat_keyword(keywords::Try));
+ return self.parse_try_block(lo, attrs);
}
if self.eat_keyword(keywords::Return) {
if self.token.can_begin_expr() {
err.span_suggestion_short_with_applicability(
self.span,
"remove this comma",
- "".to_owned(),
+ String::new(),
Applicability::MachineApplicable
);
err.note("the base struct must always be the last field");
ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs))
}
- /// Parse a `do catch {...}` expression (`do catch` token already eaten)
- fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>)
+ /// Parse a `try {...}` expression (`try` token already eaten)
+ fn parse_try_block(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>)
-> PResult<'a, P<Expr>>
{
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
- Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs))
+ Ok(self.mk_expr(span_lo.to(body.span), ExprKind::TryBlock(body), attrs))
}
// `match` token already eaten
e.span_suggestion_short_with_applicability(
match_span,
"try removing this `match`",
- "".to_owned(),
+ String::new(),
Applicability::MaybeIncorrect // speculative
);
}
if self.token == token::CloseDelim(token::Brace) {
// If the struct looks otherwise well formed, recover and continue.
if let Some(sp) = comma_sp {
- err.span_suggestion_short(sp, "remove this comma", "".into());
+ err.span_suggestion_short(sp, "remove this comma", String::new());
}
err.emit();
break;
err.multipart_suggestion(
"move the `..` to the end of the field list",
vec![
- (etc_span, "".into()),
+ (etc_span, String::new()),
(self.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
],
);
)
}
- fn is_catch_expr(&mut self) -> bool {
+ fn is_do_catch_block(&mut self) -> bool {
self.token.is_keyword(keywords::Do) &&
self.look_ahead(1, |t| t.is_keyword(keywords::Catch)) &&
self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) &&
+ !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
+ }
+
+ fn is_try_block(&mut self) -> bool {
+ self.token.is_keyword(keywords::Try) &&
+ self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) &&
+
+ self.span.edition() >= Edition::Edition2018 &&
- // prevent `while catch {} {}`, `if catch {} {} else {}`, etc.
+ // prevent `while try {} {}`, `if try {} {} else {}`, etc.
!self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
if token_str == ";" {
let msg = "consider removing this semicolon";
err.span_suggestion_short_with_applicability(
- self.span, msg, "".to_string(), Applicability::MachineApplicable
+ self.span, msg, String::new(), Applicability::MachineApplicable
);
if !items.is_empty() { // Issue #51603
let previous_item = &items[items.len()-1];
}
/// Parse one of the items allowed by the flags.
- /// NB: this function no longer parses the items inside an
- /// extern crate.
fn parse_item_implementation(
&mut self,
attrs: Vec<Attribute>,
self.print_expr_maybe_paren(e, parser::PREC_POSTFIX)?;
self.s.word("?")?
}
- ast::ExprKind::Catch(ref blk) => {
- self.head("do catch")?;
+ ast::ExprKind::TryBlock(ref blk) => {
+ self.head("try")?;
self.s.space()?;
self.print_block_with_attrs(blk, attrs)?
}
cm.new_source_file(PathBuf::from("blork.rs").into(),
"first line.\nsecond line".to_string());
cm.new_source_file(PathBuf::from("empty.rs").into(),
- "".to_string());
+ String::new());
cm.new_source_file(PathBuf::from("blork2.rs").into(),
"first line.\nsecond line".to_string());
cm
use OneVector;
use symbol::{self, Symbol, keywords};
use ThinVec;
+use rustc_data_structures::small_vec::ExpectOne;
enum ShouldPanic {
No,
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
- OneVector::one(P(item))
+ smallvec![P(item)]
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
EntryPointType::OtherMain => folded,
};
- OneVector::one(folded)
+ smallvec![folded]
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
Loop,
Match,
Block,
- Catch,
+ TryBlock,
Struct,
Async,
}
ExprPrecedence::Loop |
ExprPrecedence::Match |
ExprPrecedence::Block |
- ExprPrecedence::Catch |
+ ExprPrecedence::TryBlock |
ExprPrecedence::Async |
ExprPrecedence::Struct => PREC_PAREN,
}
ExprKind::Try(ref subexpression) => {
visitor.visit_expr(subexpression)
}
- ExprKind::Catch(ref body) => {
+ ExprKind::TryBlock(ref body) => {
visitor.visit_block(body)
}
}
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
rustc_data_structures = { path = "../librustc_data_structures" }
-rustc_target = { path = "../librustc_target" }
\ No newline at end of file
+rustc_target = { path = "../librustc_target" }
+smallvec = { version = "0.6.5", features = ["union"] }
use syntax::symbol::Symbol;
use syntax::tokenstream;
use syntax_pos::{MultiSpan, Span, DUMMY_SP};
+use errors::Applicability;
use std::borrow::Cow;
use std::collections::hash_map::Entry;
0 => "{}".to_string(),
_ => format!("{}{{}}", "{} ".repeat(args.len())),
};
- err.span_suggestion(
+ err.span_suggestion_with_applicability(
fmt_sp.shrink_to_lo(),
"you might be missing a string literal to format with",
format!("\"{}\", ", sugg_fmt),
+ Applicability::MaybeIncorrect,
);
err.emit();
return DummyResult::raw_expr(sp);
/// LLVM's `module asm "some assembly here"`. All of LLVM's caveats
/// therefore apply.
-use rustc_data_structures::small_vec::OneVector;
-
use syntax::ast;
use syntax::source_map::respan;
use syntax::ext::base;
None => return DummyResult::any(sp),
};
- MacEager::items(OneVector::one(P(ast::Item {
+ MacEager::items(smallvec![P(ast::Item {
ident: ast::Ident::with_empty_ctxt(Symbol::intern("")),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
vis: respan(sp.shrink_to_lo(), ast::VisibilityKind::Inherited),
span: sp,
tokens: None,
- })))
+ })])
}
extern crate rustc_data_structures;
extern crate rustc_errors as errors;
extern crate rustc_target;
+#[macro_use]
+extern crate smallvec;
mod diagnostics;
#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub enum CompilerDesugaringKind {
QuestionMark,
- Catch,
+ TryBlock,
/// Desugaring of an `impl Trait` in return type position
/// to an `existential type Foo: Trait;` + replacing the
/// `impl Trait` with `Foo`.
Symbol::intern(match self {
CompilerDesugaringKind::Async => "async",
CompilerDesugaringKind::QuestionMark => "?",
- CompilerDesugaringKind::Catch => "do catch",
+ CompilerDesugaringKind::TryBlock => "try block",
CompilerDesugaringKind::ExistentialReturnType => "existential type",
CompilerDesugaringKind::ForLoop => "for loop",
})
// Edition-specific keywords reserved for future use.
(51, Async, "async") // >= 2018 Edition Only
+ (52, Try, "try") // >= 2018 Edition Only
// Special lifetime names
- (52, UnderscoreLifetime, "'_")
- (53, StaticLifetime, "'static")
+ (53, UnderscoreLifetime, "'_")
+ (54, StaticLifetime, "'static")
// Weak keywords, have special meaning only in specific contexts.
- (54, Auto, "auto")
- (55, Catch, "catch")
- (56, Default, "default")
- (57, Dyn, "dyn")
- (58, Union, "union")
- (59, Existential, "existential")
+ (55, Auto, "auto")
+ (56, Catch, "catch")
+ (57, Default, "default")
+ (58, Dyn, "dyn")
+ (59, Union, "union")
+ (60, Existential, "existential")
}
impl Symbol {
fn is_unused_keyword_2018(self) -> bool {
- self == keywords::Async.name()
+ self >= keywords::Async.name() &&
+ self <= keywords::Try.name()
}
}
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
let mbps = if bs.mb_s == 0 {
- "".into()
+ String::new()
} else {
format!(r#", "mib_per_second": {}"#, bs.mb_s)
};
}
#if LLVM_VERSION_GE(7, 0)
- unwrap(Target)->addPassesToEmitFile(*PM, OS, nullptr, FileType, false);
+ buffer_ostream BOS(OS);
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
#else
unwrap(Target)->addPassesToEmitFile(*PM, OS, FileType, false);
#endif
use proc_macro::*;
-#[proc_macro_attribute]
-pub fn attr2mod(_: TokenStream, _: TokenStream) -> TokenStream {
- "mod test {}".parse().unwrap()
-}
-
#[proc_macro_attribute]
pub fn attr2mac1(_: TokenStream, _: TokenStream) -> TokenStream {
"macro_rules! foo1 { (a) => (a) }".parse().unwrap()
"macro foo2(a) { a }".parse().unwrap()
}
-#[proc_macro]
-pub fn mac2mod(_: TokenStream) -> TokenStream {
- "mod test2 {}".parse().unwrap()
-}
-
#[proc_macro]
pub fn mac2mac1(_: TokenStream) -> TokenStream {
"macro_rules! foo3 { (a) => (a) }".parse().unwrap()
#[proc_macro]
pub fn tricky(_: TokenStream) -> TokenStream {
"fn foo() {
- mod test {}
macro_rules! foo { (a) => (a) }
}".parse().unwrap()
}
use foo::*;
-#[attr2mod]
-//~^ ERROR: cannot expand to modules
-pub fn a() {}
#[attr2mac1]
//~^ ERROR: cannot expand to macro definitions
pub fn a() {}
//~^ ERROR: cannot expand to macro definitions
pub fn a() {}
-mac2mod!(); //~ ERROR: cannot expand to modules
mac2mac1!(); //~ ERROR: cannot expand to macro definitions
mac2mac2!(); //~ ERROR: cannot expand to macro definitions
tricky!();
-//~^ ERROR: cannot expand to modules
-//~| ERROR: cannot expand to macro definitions
+//~^ ERROR: cannot expand to macro definitions
fn main() {}
// aux-build:proc-macro-gates.rs
// gate-test-proc_macro_non_items
-// gate-test-proc_macro_path_invoc
// gate-test-proc_macro_mod line
// gate-test-proc_macro_expr
// gate-test-proc_macro_mod
use foo::*;
-#[foo::a] //~ ERROR: non-ident attribute macro paths are unstable
-fn _test() {}
-
fn _test_inner() {
#![a] // OK
}
#[a] //~ ERROR: custom attributes cannot be applied to modules
-//~| ERROR: procedural macros cannot expand to modules
mod _test2 {}
mod _test2_inner {
#![a] //~ ERROR: custom attributes cannot be applied to modules
- //~| ERROR: procedural macros cannot expand to modules
}
#[a = y] //~ ERROR: must only be followed by a delimiter token
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn main() {
+ let _: Option<()> = do catch {};
+ //~^ ERROR found removed `do catch` syntax
+ //~^^ HELP Following RFC #2388, the new non-placeholder syntax is `try`
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only --edition 2018
+
+fn main() {
+ let try = "foo"; //~ error: expected pattern, found reserved keyword `try`
+}
fn expand(cx: &mut ExtCtxt, _: syntax_pos::Span, _: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
- MacEager::items(OneVector::many(vec![
+ MacEager::items(OneVector::from_vec(vec![
quote_item!(cx, struct Struct1;).unwrap(),
quote_item!(cx, struct Struct2;).unwrap()
]))
// aux-build:derive-b.rs
// ignore-stage1
-#![feature(proc_macro_path_invoc, unrestricted_attribute_tokens)]
+#![feature(unrestricted_attribute_tokens)]
extern crate derive_b;
// aux-build:issue-42708.rs
// ignore-stage1
-#![feature(decl_macro, proc_macro_path_invoc)]
+#![feature(decl_macro)]
#![allow(unused)]
extern crate issue_42708;
// aux-build:issue-50061.rs
// ignore-stage1
-#![feature(proc_macro_path_invoc, decl_macro)]
+#![feature(decl_macro)]
extern crate issue_50061;
]
}
- // Tests TyBool
+ // Tests Bool
pub type FooBool = bool;
- // Tests TyChar
+ // Tests Char
pub type FooChar = char;
- // Tests TyInt (does not test all variants of IntTy)
+ // Tests Int (does not test all variants of IntTy)
pub type FooInt = isize;
- // Tests TyUint (does not test all variants of UintTy)
+ // Tests Uint (does not test all variants of UintTy)
pub type FooUint = usize;
- // Tests TyFloat (does not test all variants of FloatTy)
+ // Tests Float (does not test all variants of FloatTy)
pub type FooFloat = f64;
- // Tests TyStr
+ // Tests Str
pub type FooStr = str;
- // Tests TyArray
+ // Tests Array
pub type FooArray = [u8; 1];
- // Tests TySlice
+ // Tests Slice
pub type FooSlice = [u8];
// Tests Box (of u8)
pub type FooBox = Box<u8>;
- // Tests TyRawPtr
+ // Tests RawPtr
pub type FooPtr = *const u8;
- // Tests TyRef
+ // Tests Ref
pub type FooRef = &'static u8;
- // Tests TyFnPtr
+ // Tests FnPtr
pub type FooFnPtr = fn(u8) -> bool;
- // Tests TyDynamic
+ // Tests Dynamic
pub trait FooTrait {
fn foo_method(&self) -> usize;
}
VarB(usize, usize)
}
- // Tests TyTuple
+ // Tests Tuple
pub type FooNil = ();
pub type FooTuple = (u8, i8, bool);
- // Skipping TyParam
+ // Skipping Param
- // Skipping TyInfer
+ // Skipping Infer
- // Skipping TyError
+ // Skipping Error
}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-struct catch {}
-
-pub fn main() {
- let catch_result: Option<_> = do catch {
- let x = 5;
- x
- };
- assert_eq!(catch_result, Some(5));
-
- let mut catch = true;
- while catch { catch = false; }
- assert_eq!(catch, false);
-
- catch = if catch { false } else { true };
- assert_eq!(catch, true);
-
- match catch {
- _ => {}
- };
-
- let catch_err: Result<_, i32> = do catch {
- Err(22)?;
- 1
- };
- assert_eq!(catch_err, Err(22));
-
- let catch_okay: Result<i32, i32> = do catch {
- if false { Err(25)?; }
- Ok::<(), i32>(())?;
- 28
- };
- assert_eq!(catch_okay, Ok(28));
-
- let catch_from_loop: Result<i32, i32> = do catch {
- for i in 0..10 {
- if i < 5 { Ok::<i32, i32>(i)?; } else { Err(i)?; }
- }
- 22
- };
- assert_eq!(catch_from_loop, Err(5));
-
- let cfg_init;
- let _res: Result<(), ()> = do catch {
- cfg_init = 5;
- };
- assert_eq!(cfg_init, 5);
-
- let cfg_init_2;
- let _res: Result<(), ()> = do catch {
- cfg_init_2 = 6;
- Err(())?;
- };
- assert_eq!(cfg_init_2, 6);
-
- let my_string = "test".to_string();
- let res: Result<&str, ()> = do catch {
- // Unfortunately, deref doesn't fire here (#49356)
- &my_string[..]
- };
- assert_eq!(res, Ok("test"));
-
- let my_opt: Option<_> = do catch { () };
- assert_eq!(my_opt, Some(()));
-
- let my_opt: Option<_> = do catch { };
- assert_eq!(my_opt, Some(()));
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(catch_expr)]
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
fn main() {
let mut a = 0;
let () = {
- let _: Result<(), ()> = do catch {
+ let _: Result<(), ()> = try {
let _ = Err(())?;
return
};
// Scoped attributes should not trigger an unused attributes lint.
-#![feature(tool_attributes)]
#![deny(unused_attributes)]
fn main() {
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+struct catch {}
+
+pub fn main() {
+ let catch_result: Option<_> = try {
+ let x = 5;
+ x
+ };
+ assert_eq!(catch_result, Some(5));
+
+ let mut catch = true;
+ while catch { catch = false; }
+ assert_eq!(catch, false);
+
+ catch = if catch { false } else { true };
+ assert_eq!(catch, true);
+
+ match catch {
+ _ => {}
+ };
+
+ let catch_err: Result<_, i32> = try {
+ Err(22)?;
+ 1
+ };
+ assert_eq!(catch_err, Err(22));
+
+ let catch_okay: Result<i32, i32> = try {
+ if false { Err(25)?; }
+ Ok::<(), i32>(())?;
+ 28
+ };
+ assert_eq!(catch_okay, Ok(28));
+
+ let catch_from_loop: Result<i32, i32> = try {
+ for i in 0..10 {
+ if i < 5 { Ok::<i32, i32>(i)?; } else { Err(i)?; }
+ }
+ 22
+ };
+ assert_eq!(catch_from_loop, Err(5));
+
+ let cfg_init;
+ let _res: Result<(), ()> = try {
+ cfg_init = 5;
+ };
+ assert_eq!(cfg_init, 5);
+
+ let cfg_init_2;
+ let _res: Result<(), ()> = try {
+ cfg_init_2 = 6;
+ Err(())?;
+ };
+ assert_eq!(cfg_init_2, 6);
+
+ let my_string = "test".to_string();
+ let res: Result<&str, ()> = try {
+ // Unfortunately, deref doesn't fire here (#49356)
+ &my_string[..]
+ };
+ assert_eq!(res, Ok("test"));
+
+ let my_opt: Option<_> = try { () };
+ assert_eq!(my_opt, Some(()));
+
+ let my_opt: Option<_> = try { };
+ assert_eq!(my_opt, Some(()));
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2015
+
+fn main() {
+ let try = 2;
+ struct try { try: u32 };
+ let try: try = try { try };
+ assert_eq!(try.try, 2);
+}
//
// error: internal compiler error: get_unique_type_id_of_type() -
// unexpected type: closure,
-// TyClosure(syntax::ast::DefId{krate: 0, node: 66},
+// Closure(syntax::ast::DefId{krate: 0, node: 66},
// ReScope(63))
//
// This is a regression test for issue #17021.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "foo"]
+
+pub trait Foo {}
+pub trait Foo2 {}
+
+pub struct Bar;
+
+impl Foo for Bar {}
+impl Foo2 for Bar {}
+
+// @!has foo/fn.foo.html '//section[@id="main"]//pre' "x: &\'x impl Foo"
+// @!has foo/fn.foo.html '//section[@id="main"]//pre' "-> &\'x impl Foo {"
+pub fn foo<'x>(x: &'x impl Foo) -> &'x impl Foo {
+ x
+}
+
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' "x: &\'x impl Foo"
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' '-> impl Foo2 {'
+pub fn foo2<'x>(_x: &'x impl Foo) -> impl Foo2 {
+ Bar
+}
+
+// @!has foo/fn.foo_foo.html '//section[@id="main"]//pre' '-> impl Foo + Foo2 {'
+pub fn foo_foo() -> impl Foo + Foo2 {
+ Bar
+}
+
+// @!has foo/fn.foo2.html '//section[@id="main"]//pre' "x: &'x (impl Foo + Foo2)"
+pub fn foo_foo_foo<'x>(_x: &'x (impl Foo + Foo2)) {
+}
// aux-build:generate-mod.rs
-#![feature(proc_macro_gen, proc_macro_path_invoc)]
-
extern crate generate_mod;
struct FromOutside;
//~| WARN this was previously accepted
struct Z;
+fn inner_block() {
+ #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ //~| WARN cannot find type `OuterDerive` in this scope
+ //~| WARN this was previously accepted
+ //~| WARN this was previously accepted
+ struct InnerZ;
+}
+
#[derive(generate_mod::CheckDeriveLint)] // OK, lint is suppressed
struct W;
error[E0412]: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:21:1
+ --> $DIR/generate-mod.rs:19:1
|
LL | generate_mod::check!(); //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `Outer` in this scope
- --> $DIR/generate-mod.rs:21:1
+ --> $DIR/generate-mod.rs:19:1
|
LL | generate_mod::check!(); //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:24:1
+ --> $DIR/generate-mod.rs:22:1
|
LL | #[generate_mod::check_attr] //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `OuterAttr` in this scope
- --> $DIR/generate-mod.rs:24:1
+ --> $DIR/generate-mod.rs:22:1
|
LL | #[generate_mod::check_attr] //~ ERROR cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
warning: cannot find type `FromOutside` in this scope
- --> $DIR/generate-mod.rs:28:10
+ --> $DIR/generate-mod.rs:26:10
|
LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
= note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
warning: cannot find type `OuterDerive` in this scope
- --> $DIR/generate-mod.rs:28:10
+ --> $DIR/generate-mod.rs:26:10
|
LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+warning: cannot find type `FromOutside` in this scope
+ --> $DIR/generate-mod.rs:33:14
+ |
+LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+
+warning: cannot find type `OuterDerive` in this scope
+ --> $DIR/generate-mod.rs:33:14
+ |
+LL | #[derive(generate_mod::CheckDerive)] //~ WARN cannot find type `FromOutside` in this scope
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+
error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0412`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+
+// Bastion of the Turbofish
+// ------------------------
+// Beware travellers, lest you venture into waters callous and unforgiving,
+// where hope must be abandoned, ere it is cruelly torn from you. For here
+// stands the bastion of the Turbofish: an impenetrable fortress holding
+// unshaking against those who would dare suggest the supererogation of the
+// Turbofish.
+//
+// Once I was young and foolish and had the impudence to imagine that I could
+// shake free from the coils by which that creature had us tightly bound. I
+// dared to suggest that there was a better way: a brighter future, in which
+// Rustaceans both new and old could be rid of that vile beast. But alas! In
+// my foolhardiness my ignorance was unveiled and my dreams were dashed
+// unforgivingly against the rock of syntactic ambiguity.
+//
+// This humble program, small and insignificant though it might seem,
+// demonstrates that to which we had previously cast a blind eye: an ambiguity
+// in permitting generic arguments to be provided without the consent of the
+// Great Turbofish. Should you be so naïve as to try to revolt against its
+// mighty clutches, here shall its wrath be indomitably displayed. This
+// program must pass for all eternity, fundamentally at odds with an impetuous
+// rebellion against the Turbofish.
+//
+// My heart aches in sorrow, for I know I am defeated. Let this be a warning
+// to all those who come after. Here stands the bastion of the Turbofish.
+
+// See https://github.com/rust-lang/rust/pull/53562
+// and https://github.com/rust-lang/rfcs/pull/2527
+// for context.
+
+fn main() {
+ let (oh, woe, is, me) = ("the", "Turbofish", "remains", "undefeated");
+ let _: (bool, bool) = (oh<woe, is>(me));
+}
+++ /dev/null
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:33:13
- |
-LL | let k = &mut i;
- | ------ borrow of `i` occurs here
-...
-LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-LL | };
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | - borrow later used here
-
-error[E0382]: use of moved value: `k`
- --> $DIR/catch-bad-lifetime.rs:35:26
- |
-LL | Err(k) ?;
- | - value moved here
-...
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | ^ value used here after move
- |
- = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:36:9
- |
-LL | let k = &mut i;
- | ------ borrow of `i` occurs here
-...
-LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-LL |
-LL | let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
- | - borrow later used here
-
-error: aborting due to 3 previous errors
-
-Some errors occurred: E0382, E0506.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-// This test checks that borrows made and returned inside catch blocks are properly constrained
-pub fn main() {
- {
- // Test that borrows returned from a catch block must be valid for the lifetime of the
- // result variable
- let _result: Result<(), &str> = do catch {
- let my_string = String::from("");
- let my_str: & str = & my_string;
- //~^ ERROR `my_string` does not live long enough
- Err(my_str) ?;
- Err("") ?;
- };
- }
-
- {
- // Test that borrows returned from catch blocks freeze their referent
- let mut i = 5;
- let k = &mut i;
- let mut j: Result<(), &mut i32> = do catch {
- Err(k) ?;
- i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- };
- ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- i = 40; //~ ERROR cannot assign to `i` because it is borrowed
-
- let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
- *i_ptr = 50;
- }
-}
-
+++ /dev/null
-error[E0597]: `my_string` does not live long enough
- --> $DIR/catch-bad-lifetime.rs:20:35
- |
-LL | let my_str: & str = & my_string;
- | ^^^^^^^^^ borrowed value does not live long enough
-...
-LL | };
- | - `my_string` dropped here while still borrowed
-LL | }
- | - borrowed value needs to live until here
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:33:13
- |
-LL | let k = &mut i;
- | - borrow of `i` occurs here
-...
-LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-
-error[E0382]: use of moved value: `k`
- --> $DIR/catch-bad-lifetime.rs:35:26
- |
-LL | Err(k) ?;
- | - value moved here
-...
-LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
- | ^ value used here after move
- |
- = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-bad-lifetime.rs:36:9
- |
-LL | let k = &mut i;
- | - borrow of `i` occurs here
-...
-LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^^ assignment to borrowed `i` occurs here
-
-error: aborting due to 4 previous errors
-
-Some errors occurred: E0382, E0506, E0597.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-pub fn main() {
- let res: Result<u32, i32> = do catch {
- Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
- 5
- };
-
- let res: Result<i32, i32> = do catch {
- "" //~ ERROR type mismatch
- };
-
- let res: Result<i32, i32> = do catch { }; //~ ERROR type mismatch
-
- let res: () = do catch { }; //~ the trait bound `(): std::ops::Try` is not satisfied
-
- let res: i32 = do catch { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
-}
+++ /dev/null
-error[E0277]: the trait bound `i32: std::convert::From<&str>` is not satisfied
- --> $DIR/catch-bad-type.rs:15:9
- |
-LL | Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
- | ^^^^^^^^ the trait `std::convert::From<&str>` is not implemented for `i32`
- |
- = help: the following implementations were found:
- <i32 as std::convert::From<bool>>
- <i32 as std::convert::From<i16>>
- <i32 as std::convert::From<i8>>
- <i32 as std::convert::From<u16>>
- <i32 as std::convert::From<u8>>
- = note: required by `std::convert::From::from`
-
-error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == &str`
- --> $DIR/catch-bad-type.rs:20:9
- |
-LL | "" //~ ERROR type mismatch
- | ^^ expected i32, found &str
- |
- = note: expected type `i32`
- found type `&str`
-
-error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == ()`
- --> $DIR/catch-bad-type.rs:23:44
- |
-LL | let res: Result<i32, i32> = do catch { }; //~ ERROR type mismatch
- | ^ expected i32, found ()
- |
- = note: expected type `i32`
- found type `()`
-
-error[E0277]: the trait bound `(): std::ops::Try` is not satisfied
- --> $DIR/catch-bad-type.rs:25:28
- |
-LL | let res: () = do catch { }; //~ the trait bound `(): std::ops::Try` is not satisfied
- | ^^^ the trait `std::ops::Try` is not implemented for `()`
- |
- = note: required by `std::ops::Try::from_ok`
-
-error[E0277]: the trait bound `i32: std::ops::Try` is not satisfied
- --> $DIR/catch-bad-type.rs:27:29
- |
-LL | let res: i32 = do catch { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
- | ^^^^^ the trait `std::ops::Try` is not implemented for `i32`
- |
- = note: required by `std::ops::Try::from_ok`
-
-error: aborting due to 5 previous errors
-
-Some errors occurred: E0271, E0277.
-For more information about an error, try `rustc --explain E0271`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn foo() -> Option<()> { Some(()) }
-
-fn main() {
- let _: Option<f32> = do catch {
- foo()?;
- 42
- //~^ ERROR type mismatch
- };
-
- let _: Option<i32> = do catch {
- foo()?;
- };
- //~^ ERROR type mismatch
-}
+++ /dev/null
-error[E0271]: type mismatch resolving `<std::option::Option<f32> as std::ops::Try>::Ok == {integer}`
- --> $DIR/catch-block-type-error.rs:18:9
- |
-LL | 42
- | ^^
- | |
- | expected f32, found integral variable
- | help: use a float literal: `42.0`
- |
- = note: expected type `f32`
- found type `{integer}`
-
-error[E0271]: type mismatch resolving `<std::option::Option<i32> as std::ops::Try>::Ok == ()`
- --> $DIR/catch-block-type-error.rs:24:5
- |
-LL | };
- | ^ expected i32, found ()
- |
- = note: expected type `i32`
- found type `()`
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0271`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn main() {
- match do catch { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `do`
-}
+++ /dev/null
-error: expected expression, found reserved keyword `do`
- --> $DIR/catch-in-match.rs:14:11
- |
-LL | match do catch { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `do`
- | ^^ expected expression
-
-error: aborting due to previous error
-
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn main() {
- while do catch { false } {} //~ ERROR expected expression, found reserved keyword `do`
-}
+++ /dev/null
-error: expected expression, found reserved keyword `do`
- --> $DIR/catch-in-while.rs:14:11
- |
-LL | while do catch { false } {} //~ ERROR expected expression, found reserved keyword `do`
- | ^^ expected expression
-
-error: aborting due to previous error
-
+++ /dev/null
-error[E0382]: borrow of moved value: `x`
- --> $DIR/catch-maybe-bad-lifetime.rs:33:24
- |
-LL | ::std::mem::drop(x);
- | - value moved here
-LL | };
-LL | println!("{}", x); //~ ERROR use of moved value: `x`
- | ^ value borrowed here after move
- |
- = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0382`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-// This test checks that borrows made and returned inside catch blocks are properly constrained
-pub fn main() {
- {
- // Test that a borrow which *might* be returned still freezes its referent
- let mut i = 222;
- let x: Result<&i32, ()> = do catch {
- Err(())?;
- &i
- };
- x.ok().cloned();
- i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- let _ = i;
- }
-
- {
- let x = String::new();
- let _y: Result<(), ()> = do catch {
- Err(())?;
- ::std::mem::drop(x);
- };
- println!("{}", x); //~ ERROR use of moved value: `x`
- }
-
- {
- // Test that a borrow which *might* be assigned to an outer variable still freezes
- // its referent
- let mut i = 222;
- let j;
- let x: Result<(), ()> = do catch {
- Err(())?;
- j = &i;
- };
- i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- let _ = i;
- }
-}
-
+++ /dev/null
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-maybe-bad-lifetime.rs:23:9
- |
-LL | &i
- | - borrow of `i` occurs here
-...
-LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^ assignment to borrowed `i` occurs here
-
-error[E0382]: use of moved value: `x`
- --> $DIR/catch-maybe-bad-lifetime.rs:33:24
- |
-LL | ::std::mem::drop(x);
- | - value moved here
-LL | };
-LL | println!("{}", x); //~ ERROR use of moved value: `x`
- | ^ value used here after move
- |
- = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
-
-error[E0506]: cannot assign to `i` because it is borrowed
- --> $DIR/catch-maybe-bad-lifetime.rs:45:9
- |
-LL | j = &i;
- | - borrow of `i` occurs here
-LL | };
-LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
- | ^^^^^ assignment to borrowed `i` occurs here
-
-error: aborting due to 3 previous errors
-
-Some errors occurred: E0382, E0506.
-For more information about an error, try `rustc --explain E0382`.
+++ /dev/null
-error[E0381]: borrow of possibly uninitialized variable: `cfg_res`
- --> $DIR/catch-opt-init.rs:23:5
- |
-LL | assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
- | ^^^^^^^^^^^^^^^^^^^^^^^ use of possibly uninitialized `cfg_res`
- |
- = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0381`.
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(catch_expr)]
-
-fn use_val<T: Sized>(_x: T) {}
-
-pub fn main() {
- let cfg_res;
- let _: Result<(), ()> = do catch {
- Err(())?;
- cfg_res = 5;
- Ok::<(), ()>(())?;
- use_val(cfg_res);
- };
- assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
-}
-
+++ /dev/null
-error[E0381]: use of possibly uninitialized variable: `cfg_res`
- --> $DIR/catch-opt-init.rs:23:16
- |
-LL | assert_eq!(cfg_res, 5); //~ ERROR use of possibly uninitialized variable
- | ^^^^^^^ use of possibly uninitialized `cfg_res`
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0381`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_panic)]
+
+fn main() {}
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:15:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic.rs:15:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:18:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic.rs:18:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic.rs:21:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic.rs:21:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![no_std]
+#![crate_type = "lib"]
+#![feature(const_panic)]
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:15:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic_libcore.rs:15:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:18:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic_libcore.rs:18:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore.rs:21:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic_libcore.rs:21:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "bin"]
+#![feature(lang_items)]
+#![feature(panic_implementation)]
+#![feature(const_panic)]
+#![no_main]
+#![no_std]
+
+use core::panic::PanicInfo;
+
+const Z: () = panic!("cheese");
+//~^ ERROR this constant cannot be used
+
+const Y: () = unreachable!();
+//~^ ERROR this constant cannot be used
+
+const X: () = unimplemented!();
+//~^ ERROR this constant cannot be used
+
+#[lang = "eh_personality"]
+fn eh() {}
+#[lang = "eh_unwind_resume"]
+fn eh_unwind_resume() {}
+
+#[panic_implementation]
+fn panic(_info: &PanicInfo) -> ! {
+ loop {}
+}
--- /dev/null
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:20:1
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'cheese', $DIR/const_panic_libcore_main.rs:20:15
+ |
+ = note: #[deny(const_err)] on by default
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:23:1
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^--------------^
+ | |
+ | the evaluated program panicked at 'internal error: entered unreachable code', $DIR/const_panic_libcore_main.rs:23:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: this constant cannot be used
+ --> $DIR/const_panic_libcore_main.rs:26:1
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^----------------^
+ | |
+ | the evaluated program panicked at 'not yet implemented', $DIR/const_panic_libcore_main.rs:26:15
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
LL | const Z2: i32 = unsafe { *(42 as *const i32) }; //~ ERROR cannot be used
| ^^^^^^^^^^^^^^^^^^^^^^^^^-------------------^^^
| |
- | tried to access memory with alignment 2, but alignment 4 is required
+ | a memory access tried to interpret some bytes as a pointer
error: this constant cannot be used
--> $DIR/const_raw_ptr_ops.rs:27:1
LL | | Union { usize: &BAR }.foo,
LL | | Union { usize: &BAR }.bar,
LL | | )};
- | |___^ type validation failed: encountered 5 at (*.1).TAG, but expected something in the range 42..=99
+ | |___^ type validation failed: encountered 5 at .1.<deref>.<enum-tag>, but expected something in the range 42..=99
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {}
+
+const Z: () = panic!("cheese");
+//~^ ERROR panicking in constants is unstable
+
+const Y: () = unreachable!();
+//~^ ERROR panicking in constants is unstable
+
+const X: () = unimplemented!();
+//~^ ERROR panicking in constants is unstable
--- /dev/null
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:13:15
+ |
+LL | const Z: () = panic!("cheese");
+ | ^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:19:15
+ |
+LL | const X: () = unimplemented!();
+ | ^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error[E0658]: panicking in constants is unstable (see issue #51999)
+ --> $DIR/feature-gate-const_panic.rs:16:15
+ |
+LL | const Y: () = unreachable!();
+ | ^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(const_panic)] to the crate attributes to enable
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[repr(usize)]
-#[derive(Copy, Clone)]
-enum Enum {
- A = 0,
-}
-
-union Foo {
- a: &'static u8,
- b: Enum,
-}
-
-// A pointer is guaranteed non-null
-const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b};
-//~^ ERROR this constant likely exhibits undefined behavior
-
-fn main() {
-}
+++ /dev/null
-error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/ub-enum-ptr.rs:23:1
- |
-LL | const BAD_ENUM: Enum = unsafe { Foo { a: &1 }.b};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .TAG, but expected something in the range 0..=0
- |
- = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0080`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[repr(usize)]
+#[derive(Copy, Clone)]
+enum Enum {
+ A = 0,
+}
+union TransmuteEnum {
+ a: &'static u8,
+ b: Enum,
+}
+
+// A pointer is guaranteed non-null
+const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// Invalid enum discriminant
+#[repr(usize)]
+#[derive(Copy, Clone)]
+enum Enum2 {
+ A = 2,
+}
+union TransmuteEnum2 {
+ a: usize,
+ b: Enum2,
+}
+const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// Invalid enum field content (mostly to test printing of paths for enum tuple
+// variants and tuples).
+union TransmuteChar {
+ a: u32,
+ b: char,
+}
+// Need to create something which does not clash with enum layout optimizations.
+const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b }));
+//~^ ERROR this constant likely exhibits undefined behavior
+
+fn main() {
+}
--- /dev/null
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:22:1
+ |
+LL | const BAD_ENUM: Enum = unsafe { TransmuteEnum { a: &1 }.b };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered pointer at .<enum-tag>, but expected something in the range 0..=0
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:35:1
+ |
+LL | const BAD_ENUM2 : Enum2 = unsafe { TransmuteEnum2 { a: 0 }.b };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 0 at .<enum-tag>, but expected something in the range 2..=2
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/ub-enum.rs:45:1
+ |
+LL | const BAD_ENUM_CHAR : Option<(char, char)> = Some(('x', unsafe { TransmuteChar { a: !0 }.b }));
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered character at .Some.0.1, but expected a valid unicode codepoint
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0080`.
|
note: ...which requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: [u8; _] }`...
note: ...which requires const-evaluating `Foo::bytes::{{constant}}`...
- --> $SRC_DIR/libcore/mem.rs:323:14
+ --> $SRC_DIR/libcore/mem.rs:LL:COL
|
LL | unsafe { intrinsics::size_of::<T>() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: ...which again requires computing layout of `Foo`, completing the cycle
note: cycle used when const-evaluating `Foo::bytes::{{constant}}`
- --> $SRC_DIR/libcore/mem.rs:323:14
+ --> $SRC_DIR/libcore/mem.rs:LL:COL
|
LL | unsafe { intrinsics::size_of::<T>() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
// Unresolved multi-segment attributes are not treated as custom.
-#![feature(custom_attribute, proc_macro_path_invoc)]
+#![feature(custom_attribute)]
mod existent {}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(existential_type)]
+// compile-pass
+mod my_mod {
+ use std::fmt::Debug;
+
+ pub existential type Foo: Debug;
+ pub existential type Foot: Debug;
+
+ pub fn get_foo() -> Foo {
+ 5i32
+ }
+
+ pub fn get_foot() -> Foot {
+ get_foo()
+ }
+}
+
+fn main() {
+ let _: my_mod::Foot = my_mod::get_foot();
+}
+
-error[E0391]: cycle detected when normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: Foo }`
+error[E0391]: cycle detected when processing `Foo`
--> $DIR/no_inferrable_concrete_type.rs:16:1
|
LL | existential type Foo: Copy; //~ cycle detected
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
- = note: ...which again requires normalizing `ParamEnvAnd { param_env: ParamEnv { caller_bounds: [], reveal: All }, value: Foo }`, completing the cycle
+note: ...which requires processing `bar`...
+ --> $DIR/no_inferrable_concrete_type.rs:19:23
+ |
+LL | fn bar(x: Foo) -> Foo { x }
+ | ^^^^^
+ = note: ...which again requires processing `Foo`, completing the cycle
error: aborting due to previous error
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-tidy-linelength
-
-// Test that `#[rustc_*]` attributes are gated by `rustc_attrs` feature gate.
-
-#[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
-#[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
-
-fn main() {}
+++ /dev/null
-error[E0658]: the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
- --> $DIR/feature-gate-rustc-attrs-1.rs:15:1
- |
-LL | #[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
- | ^^^^^^^^^^^^^^^^^
- |
- = help: add #![feature(rustc_attrs)] to the crate attributes to enable
-
-error[E0658]: the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
- --> $DIR/feature-gate-rustc-attrs-1.rs:16:1
- |
-LL | #[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
- | ^^^^^^^^^^^^^^
- |
- = help: add #![feature(rustc_attrs)] to the crate attributes to enable
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub mod foo {
- pub use bar::Bar;
- //~^ ERROR unresolved import `bar`
-
- pub mod bar {
- pub struct Bar;
- }
-}
-
-fn main() {
- let _ = foo::Bar;
-}
+++ /dev/null
-error[E0432]: unresolved import `bar`
- --> $DIR/feature-gate-uniform-paths.rs:12:13
- |
-LL | pub use bar::Bar;
- | ^^^ Did you mean `self::bar`?
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0432`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(custom_attribute)]
-
-#[my_attr(a b c d)]
-//~^ ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
-//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
-//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
-fn main() {}
+++ /dev/null
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:13
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:15
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
- --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:17
- |
-LL | #[my_attr(a b c d)]
- | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
-
-error: aborting due to 3 previous errors
-
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub fn main() {
- let catch_result = do catch { //~ ERROR `catch` expression is experimental
- let x = 5;
- x
- };
- assert_eq!(catch_result, 5);
-}
+++ /dev/null
-error[E0658]: `catch` expression is experimental (see issue #31436)
- --> $DIR/feature-gate-catch_expr.rs:12:24
- |
-LL | let catch_result = do catch { //~ ERROR `catch` expression is experimental
- | ________________________^
-LL | | let x = 5;
-LL | | x
-LL | | };
- | |_____^
- |
- = help: add #![feature(catch_expr)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
}
struct Foo;
+
impl PointerFamily<u32> for Foo {
type Pointer<usize> = Box<usize>;
//~^ ERROR generic associated types are unstable
//~^ ERROR where clauses on associated types are unstable
}
+impl Bar for Foo {
+ type Assoc where Self: Sized = Foo;
+ //~^ ERROR where clauses on associated types are unstable
+}
fn main() {}
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: generic associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:23:5
+ --> $DIR/feature-gate-generic_associated_types.rs:24:5
|
LL | type Pointer<usize> = Box<usize>;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: generic associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:25:5
+ --> $DIR/feature-gate-generic_associated_types.rs:26:5
|
LL | type Pointer2<u32> = Box<u32>;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
error[E0658]: where clauses on associated types are unstable (see issue #44265)
- --> $DIR/feature-gate-generic_associated_types.rs:30:5
+ --> $DIR/feature-gate-generic_associated_types.rs:31:5
|
LL | type Assoc where Self: Sized;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= help: add #![feature(generic_associated_types)] to the crate attributes to enable
-error: aborting due to 6 previous errors
+error[E0658]: where clauses on associated types are unstable (see issue #44265)
+ --> $DIR/feature-gate-generic_associated_types.rs:36:5
+ |
+LL | type Assoc where Self: Sized = Foo;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(generic_associated_types)] to the crate attributes to enable
+
+error: aborting due to 7 previous errors
For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+
+// Test that `#[rustc_*]` attributes are gated by `rustc_attrs` feature gate.
+
+#[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
+#[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
+
+fn main() {}
--- /dev/null
+error[E0658]: the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
+ --> $DIR/feature-gate-rustc-attrs-1.rs:15:1
+ |
+LL | #[rustc_variance] //~ ERROR the `#[rustc_variance]` attribute is just used for rustc unit tests and will never be stable
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(rustc_attrs)] to the crate attributes to enable
+
+error[E0658]: the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable (see issue #29642)
+ --> $DIR/feature-gate-rustc-attrs-1.rs:16:1
+ |
+LL | #[rustc_error] //~ ERROR the `#[rustc_error]` attribute is just used for rustc unit tests and will never be stable
+ | ^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(rustc_attrs)] to the crate attributes to enable
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
+++ /dev/null
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- #[rustfmt::skip] //~ ERROR tool attributes are unstable
- let x = 3
- ;
-}
+++ /dev/null
-error[E0658]: tool attributes are unstable (see issue #44690)
- --> $DIR/feature-gate-tool_attributes.rs:12:7
- |
-LL | #[rustfmt::skip] //~ ERROR tool attributes are unstable
- | ^^^^^^^^^^^^^
- |
- = help: add #![feature(tool_attributes)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+pub fn main() {
+ let try_result: Option<_> = try { //~ ERROR `try` expression is experimental
+ let x = 5;
+ x
+ };
+ assert_eq!(try_result, Some(5));
+}
--- /dev/null
+error[E0658]: `try` expression is experimental (see issue #31436)
+ --> $DIR/feature-gate-try_blocks.rs:14:33
+ |
+LL | let try_result: Option<_> = try { //~ ERROR `try` expression is experimental
+ | _________________________________^
+LL | | let x = 5;
+LL | | x
+LL | | };
+ | |_____^
+ |
+ = help: add #![feature(try_blocks)] to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub mod foo {
+ pub use bar::Bar;
+ //~^ ERROR unresolved import `bar`
+
+ pub mod bar {
+ pub struct Bar;
+ }
+}
+
+fn main() {
+ let _ = foo::Bar;
+}
--- /dev/null
+error[E0432]: unresolved import `bar`
+ --> $DIR/feature-gate-uniform-paths.rs:12:13
+ |
+LL | pub use bar::Bar;
+ | ^^^ Did you mean `self::bar`?
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0432`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(custom_attribute)]
+
+#[my_attr(a b c d)]
+//~^ ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
+//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
+//~| ERROR expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
+fn main() {}
--- /dev/null
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `b`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:13
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `c`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:15
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: expected one of `(`, `)`, `,`, `::`, or `=`, found `d`
+ --> $DIR/feature-gate-unrestricted-attribute-tokens.rs:13:17
+ |
+LL | #[my_attr(a b c d)]
+ | ^ expected one of `(`, `)`, `,`, `::`, or `=` here
+
+error: aborting due to 3 previous errors
+
LL | fn hash(&self, hasher: &mut impl Hasher) {}
| ^^^^^^^^^^^ expected generic parameter, found `impl Trait`
|
- ::: $SRC_DIR/libcore/hash/mod.rs:185:13
+ ::: $SRC_DIR/libcore/hash/mod.rs:LL:COL
|
LL | fn hash<H: Hasher>(&self, state: &mut H);
| - declaration in trait here
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-pass
+
+mod m {
+ pub struct S(u8);
+
+ use S as Z;
+}
+
+use m::*;
+
+fn main() {}
// except according to those terms.
static S : u64 = { { panic!("foo"); 0 } };
-//~^ ERROR calls in statics are limited
+//~^ ERROR panicking in statics is unstable
fn main() {
println!("{:?}", S);
-error[E0015]: calls in statics are limited to constant functions, tuple structs and tuple variants
+error[E0658]: panicking in statics is unstable (see issue #51999)
--> $DIR/issue-32829.rs:11:22
|
LL | static S : u64 = { { panic!("foo"); 0 } };
| ^^^^^^^^^^^^^^
|
+ = help: add #![feature(const_panic)] to the crate attributes to enable
= note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
error: aborting due to previous error
-For more information about this error, try `rustc --explain E0015`.
+For more information about this error, try `rustc --explain E0658`.
// aux-build:lints-in-foreign-macros.rs
// compile-pass
-#![warn(unused_imports)]
+#![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
+#![warn(missing_docs)]
#[macro_use]
extern crate lints_in_foreign_macros;
mod b { bar!(); }
mod c { baz!(use std::string::ToString;); } //~ WARN: unused import
mod d { baz2!(use std::string::ToString;); } //~ WARN: unused import
+baz!(pub fn undocumented() {}); //~ WARN: missing documentation for a function
+baz2!(pub fn undocumented2() {}); //~ WARN: missing documentation for a function
fn main() {}
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:20:16
+ --> $DIR/lints-in-foreign-macros.rs:21:16
|
LL | () => {use std::string::ToString;} //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
note: lint level defined here
--> $DIR/lints-in-foreign-macros.rs:14:9
|
-LL | #![warn(unused_imports)]
+LL | #![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
| ^^^^^^^^^^^^^^
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:25:18
+ --> $DIR/lints-in-foreign-macros.rs:26:18
|
LL | mod c { baz!(use std::string::ToString;); } //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
warning: unused import: `std::string::ToString`
- --> $DIR/lints-in-foreign-macros.rs:26:19
+ --> $DIR/lints-in-foreign-macros.rs:27:19
|
LL | mod d { baz2!(use std::string::ToString;); } //~ WARN: unused import
| ^^^^^^^^^^^^^^^^^^^^^
+warning: missing documentation for crate
+ --> $DIR/lints-in-foreign-macros.rs:14:1
+ |
+LL | / #![warn(unused_imports)] //~ missing documentation for crate [missing_docs]
+LL | | #![warn(missing_docs)]
+LL | |
+LL | | #[macro_use]
+... |
+LL | |
+LL | | fn main() {}
+ | |____________^
+ |
+note: lint level defined here
+ --> $DIR/lints-in-foreign-macros.rs:15:9
+ |
+LL | #![warn(missing_docs)]
+ | ^^^^^^^^^^^^
+
+warning: missing documentation for a function
+ --> $DIR/lints-in-foreign-macros.rs:28:6
+ |
+LL | baz!(pub fn undocumented() {}); //~ WARN: missing documentation for a function
+ | ^^^^^^^^^^^^^^^^^^^^^
+
+warning: missing documentation for a function
+ --> $DIR/lints-in-foreign-macros.rs:29:7
+ |
+LL | baz2!(pub fn undocumented2() {}); //~ WARN: missing documentation for a function
+ | ^^^^^^^^^^^^^^^^^^^^^^
+
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes, custom_attribute)]
+#![feature(custom_attribute)]
type A = rustfmt; //~ ERROR expected type, found tool module `rustfmt`
type B = rustfmt::skip; //~ ERROR expected type, found tool attribute `rustfmt::skip`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes)]
-
#[derive(rustfmt::skip)] //~ ERROR expected a macro, found tool attribute
struct S;
error: expected a macro, found tool attribute
- --> $DIR/tool-attributes-misplaced-2.rs:13:10
+ --> $DIR/tool-attributes-misplaced-2.rs:11:10
|
LL | #[derive(rustfmt::skip)] //~ ERROR expected a macro, found tool attribute
| ^^^^^^^^^^^^^
error: expected a macro, found tool attribute
- --> $DIR/tool-attributes-misplaced-2.rs:17:5
+ --> $DIR/tool-attributes-misplaced-2.rs:15:5
|
LL | rustfmt::skip!(); //~ ERROR expected a macro, found tool attribute
| ^^^^^^^^^^^^^
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(tool_attributes, proc_macro_path_invoc)]
-
mod rustfmt {}
#[rustfmt::skip] //~ ERROR failed to resolve. Could not find `skip` in `rustfmt`
error[E0433]: failed to resolve. Could not find `skip` in `rustfmt`
- --> $DIR/tool-attributes-shadowing.rs:15:12
+ --> $DIR/tool-attributes-shadowing.rs:13:12
|
LL | #[rustfmt::skip] //~ ERROR failed to resolve. Could not find `skip` in `rustfmt`
| ^^^^ Could not find `skip` in `rustfmt`
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+#![inline(never)]
+fn do_something_with<T>(_x: T) {}
+
+// This test checks that borrows made and returned inside try blocks are properly constrained
+pub fn main() {
+ {
+ // Test that borrows returned from a try block must be valid for the lifetime of the
+ // result variable
+ let result: Result<(), &str> = try {
+ let my_string = String::from("");
+ let my_str: & str = & my_string;
+ //~^ ERROR `my_string` does not live long enough
+ Err(my_str) ?;
+ Err("") ?;
+ };
+ do_something_with(result);
+ }
+
+ {
+ // Test that borrows returned from try blocks freeze their referent
+ let mut i = 5;
+ let k = &mut i;
+ let mut j: Result<(), &mut i32> = try {
+ Err(k) ?;
+ i = 10; //~ ERROR cannot assign to `i` because it is borrowed
+ };
+ ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ i = 40; //~ ERROR cannot assign to `i` because it is borrowed
+
+ let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
+ *i_ptr = 50;
+ }
+}
+
--- /dev/null
+error[E0597]: `my_string` does not live long enough
+ --> $DIR/try-block-bad-lifetime.rs:25:33
+ |
+LL | let my_str: & str = & my_string;
+ | ^^^^^^^^^^^ borrowed value does not live long enough
+...
+LL | };
+ | - `my_string` dropped here while still borrowed
+LL | do_something_with(result);
+ | ------ borrow later used here
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-bad-lifetime.rs:39:13
+ |
+LL | let k = &mut i;
+ | ------ borrow of `i` occurs here
+...
+LL | i = 10; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^^ assignment to borrowed `i` occurs here
+LL | };
+LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ | - borrow later used here
+
+error[E0382]: use of moved value: `k`
+ --> $DIR/try-block-bad-lifetime.rs:41:26
+ |
+LL | Err(k) ?;
+ | - value moved here
+...
+LL | ::std::mem::drop(k); //~ ERROR use of moved value: `k`
+ | ^ value used here after move
+ |
+ = note: move occurs because `k` has type `&mut i32`, which does not implement the `Copy` trait
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-bad-lifetime.rs:42:9
+ |
+LL | let k = &mut i;
+ | ------ borrow of `i` occurs here
+...
+LL | i = 40; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^^ assignment to borrowed `i` occurs here
+LL |
+LL | let i_ptr = if let Err(i_ptr) = j { i_ptr } else { panic ! ("") };
+ | - borrow later used here
+
+error: aborting due to 4 previous errors
+
+Some errors occurred: E0382, E0506, E0597.
+For more information about an error, try `rustc --explain E0382`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+pub fn main() {
+ let res: Result<u32, i32> = try {
+ Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
+ 5
+ };
+
+ let res: Result<i32, i32> = try {
+ "" //~ ERROR type mismatch
+ };
+
+ let res: Result<i32, i32> = try { }; //~ ERROR type mismatch
+
+ let res: () = try { }; //~ the trait bound `(): std::ops::Try` is not satisfied
+
+ let res: i32 = try { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
+}
--- /dev/null
+error[E0277]: the trait bound `i32: std::convert::From<&str>` is not satisfied
+ --> $DIR/try-block-bad-type.rs:17:9
+ |
+LL | Err("")?; //~ ERROR the trait bound `i32: std::convert::From<&str>` is not satisfied
+ | ^^^^^^^^ the trait `std::convert::From<&str>` is not implemented for `i32`
+ |
+ = help: the following implementations were found:
+ <i32 as std::convert::From<bool>>
+ <i32 as std::convert::From<i16>>
+ <i32 as std::convert::From<i8>>
+ <i32 as std::convert::From<u16>>
+ <i32 as std::convert::From<u8>>
+ = note: required by `std::convert::From::from`
+
+error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == &str`
+ --> $DIR/try-block-bad-type.rs:22:9
+ |
+LL | "" //~ ERROR type mismatch
+ | ^^ expected i32, found &str
+ |
+ = note: expected type `i32`
+ found type `&str`
+
+error[E0271]: type mismatch resolving `<std::result::Result<i32, i32> as std::ops::Try>::Ok == ()`
+ --> $DIR/try-block-bad-type.rs:25:39
+ |
+LL | let res: Result<i32, i32> = try { }; //~ ERROR type mismatch
+ | ^ expected i32, found ()
+ |
+ = note: expected type `i32`
+ found type `()`
+
+error[E0277]: the trait bound `(): std::ops::Try` is not satisfied
+ --> $DIR/try-block-bad-type.rs:27:23
+ |
+LL | let res: () = try { }; //~ the trait bound `(): std::ops::Try` is not satisfied
+ | ^^^ the trait `std::ops::Try` is not implemented for `()`
+ |
+ = note: required by `std::ops::Try::from_ok`
+
+error[E0277]: the trait bound `i32: std::ops::Try` is not satisfied
+ --> $DIR/try-block-bad-type.rs:29:24
+ |
+LL | let res: i32 = try { 5 }; //~ ERROR the trait bound `i32: std::ops::Try` is not satisfied
+ | ^^^^^ the trait `std::ops::Try` is not implemented for `i32`
+ |
+ = note: required by `std::ops::Try::from_ok`
+
+error: aborting due to 5 previous errors
+
+Some errors occurred: E0271, E0277.
+For more information about an error, try `rustc --explain E0271`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2015
+
+pub fn main() {
+ let try_result: Option<_> = try {
+ //~^ ERROR expected struct, variant or union type, found macro `try`
+ let x = 5; //~ ERROR expected identifier, found keyword
+ x
+ };
+ assert_eq!(try_result, Some(5));
+}
--- /dev/null
+error: expected identifier, found keyword `let`
+ --> $DIR/try-block-in-edition2015.rs:16:9
+ |
+LL | let try_result: Option<_> = try {
+ | --- while parsing this struct
+LL | //~^ ERROR expected struct, variant or union type, found macro `try`
+LL | let x = 5; //~ ERROR expected identifier, found keyword
+ | ^^^ expected identifier, found keyword
+
+error[E0574]: expected struct, variant or union type, found macro `try`
+ --> $DIR/try-block-in-edition2015.rs:14:33
+ |
+LL | let try_result: Option<_> = try {
+ | ^^^ did you mean `try!(...)`?
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0574`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn main() {
+ match try { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `try`
+}
--- /dev/null
+error: expected expression, found reserved keyword `try`
+ --> $DIR/try-block-in-match.rs:16:11
+ |
+LL | match try { false } { _ => {} } //~ ERROR expected expression, found reserved keyword `try`
+ | ^^^ expected expression
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn main() {
+ while try { false } {} //~ ERROR expected expression, found reserved keyword `try`
+}
--- /dev/null
+error: expected expression, found reserved keyword `try`
+ --> $DIR/try-block-in-while.rs:16:11
+ |
+LL | while try { false } {} //~ ERROR expected expression, found reserved keyword `try`
+ | ^^^ expected expression
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+#![inline(never)]
+fn do_something_with<T>(_x: T) {}
+
+// This test checks that borrows made and returned inside try blocks are properly constrained
+pub fn main() {
+ {
+ // Test that a borrow which *might* be returned still freezes its referent
+ let mut i = 222;
+ let x: Result<&i32, ()> = try {
+ Err(())?;
+ &i
+ };
+ i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ let _ = i;
+ do_something_with(x);
+ }
+
+ {
+ let x = String::new();
+ let _y: Result<(), ()> = try {
+ Err(())?;
+ ::std::mem::drop(x);
+ };
+ println!("{}", x); //~ ERROR borrow of moved value: `x`
+ }
+
+ {
+ // Test that a borrow which *might* be assigned to an outer variable still freezes
+ // its referent
+ let mut i = 222;
+ let mut j = &-1;
+ let _x: Result<(), ()> = try {
+ Err(())?;
+ j = &i;
+ };
+ i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ let _ = i;
+ do_something_with(j);
+ }
+}
+
--- /dev/null
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-maybe-bad-lifetime.rs:27:9
+ |
+LL | &i
+ | -- borrow of `i` occurs here
+LL | };
+LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^ assignment to borrowed `i` occurs here
+LL | let _ = i;
+LL | do_something_with(x);
+ | - borrow later used here
+
+error[E0382]: borrow of moved value: `x`
+ --> $DIR/try-block-maybe-bad-lifetime.rs:38:24
+ |
+LL | ::std::mem::drop(x);
+ | - value moved here
+LL | };
+LL | println!("{}", x); //~ ERROR borrow of moved value: `x`
+ | ^ value borrowed here after move
+ |
+ = note: move occurs because `x` has type `std::string::String`, which does not implement the `Copy` trait
+
+error[E0506]: cannot assign to `i` because it is borrowed
+ --> $DIR/try-block-maybe-bad-lifetime.rs:50:9
+ |
+LL | j = &i;
+ | -- borrow of `i` occurs here
+LL | };
+LL | i = 0; //~ ERROR cannot assign to `i` because it is borrowed
+ | ^^^^^ assignment to borrowed `i` occurs here
+LL | let _ = i;
+LL | do_something_with(j);
+ | - borrow later used here
+
+error: aborting due to 3 previous errors
+
+Some errors occurred: E0382, E0506.
+For more information about an error, try `rustc --explain E0382`.
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn use_val<T: Sized>(_x: T) {}
+
+pub fn main() {
+ let cfg_res;
+ let _: Result<(), ()> = try {
+ Err(())?;
+ cfg_res = 5;
+ Ok::<(), ()>(())?;
+ use_val(cfg_res);
+ };
+ assert_eq!(cfg_res, 5); //~ ERROR borrow of possibly uninitialized variable: `cfg_res`
+}
+
--- /dev/null
+error[E0381]: borrow of possibly uninitialized variable: `cfg_res`
+ --> $DIR/try-block-opt-init.rs:25:5
+ |
+LL | assert_eq!(cfg_res, 5); //~ ERROR borrow of possibly uninitialized variable: `cfg_res`
+ | ^^^^^^^^^^^^^^^^^^^^^^^ use of possibly uninitialized `cfg_res`
+ |
+ = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info)
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0381`.
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --edition 2018
+
+#![feature(try_blocks)]
+
+fn foo() -> Option<()> { Some(()) }
+
+fn main() {
+ let _: Option<f32> = try {
+ foo()?;
+ 42
+ //~^ ERROR type mismatch
+ };
+
+ let _: Option<i32> = try {
+ foo()?;
+ };
+ //~^ ERROR type mismatch
+}
--- /dev/null
+error[E0271]: type mismatch resolving `<std::option::Option<f32> as std::ops::Try>::Ok == {integer}`
+ --> $DIR/try-block-type-error.rs:20:9
+ |
+LL | 42
+ | ^^
+ | |
+ | expected f32, found integral variable
+ | help: use a float literal: `42.0`
+ |
+ = note: expected type `f32`
+ found type `{integer}`
+
+error[E0271]: type mismatch resolving `<std::option::Option<i32> as std::ops::Try>::Ok == ()`
+ --> $DIR/try-block-type-error.rs:26:5
+ |
+LL | };
+ | ^ expected i32, found ()
+ |
+ = note: expected type `i32`
+ found type `()`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0271`.
--> $DIR/unboxed-closure-sugar-wrong-trait.rs:15:13
|
LL | fn f<F:Trait(isize) -> isize>(x: F) {}
- | ^^^^^^^^^^^^^^^^ unexpected type argument
+ | ^^^^^^^ unexpected type argument
error[E0220]: associated type `Output` not found for `Trait`
--> $DIR/unboxed-closure-sugar-wrong-trait.rs:15:24
// normalize-stderr-test "allocation \d+" -> "allocation N"
// normalize-stderr-test "size \d+" -> "size N"
+union BoolTransmute {
+ val: u8,
+ bl: bool,
+}
+
#[repr(C)]
#[derive(Copy, Clone)]
struct SliceRepr {
bad: BadSliceRepr,
slice: &'static [u8],
str: &'static str,
+ my_str: &'static Str,
}
#[repr(C)]
}
trait Trait {}
+impl Trait for bool {}
+
+struct Str(str);
// OK
const A: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.str};
-// should lint
+// bad str
const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
-// bad
+//~^ ERROR this constant likely exhibits undefined behavior
+// bad str
const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
//~^ ERROR this constant likely exhibits undefined behavior
+// bad str in Str
+const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
+//~^ ERROR this constant likely exhibits undefined behavior
// OK
const A2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 1 } }.slice};
-// should lint
+// bad slice
const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
-// bad
-const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
+//~^ ERROR this constant likely exhibits undefined behavior
+// bad slice
+const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
-// bad
+// bad trait object
const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
//~^ ERROR this constant likely exhibits undefined behavior
+// bad data *inside* the trait object
+const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
+//~^ ERROR this constant likely exhibits undefined behavior
+
+// bad data *inside* the slice
+const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
+//~^ ERROR this constant likely exhibits undefined behavior
+
fn main() {
}
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:72:1
+ --> $DIR/union-ub-fat-ptr.rs:79:1
+ |
+LL | const B: &str = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.str};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:82:1
|
LL | const C: &str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.str};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:80:1
+ --> $DIR/union-ub-fat-ptr.rs:85:1
|
-LL | const C2: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered length is not a valid integer
+LL | const C2: &Str = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.my_str};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:84:1
+ --> $DIR/union-ub-fat-ptr.rs:91:1
+ |
+LL | const B2: &[u8] = unsafe { SliceTransmute { repr: SliceRepr { ptr: &42, len: 999 } }.slice};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:94:1
+ |
+LL | const C3: &[u8] = unsafe { SliceTransmute { bad: BadSliceRepr { ptr: &42, len: &3 } }.slice};
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer length is not a valid integer
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:98:1
|
LL | const D: &Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ tried to access memory with alignment N, but alignment N is required
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:87:1
+ --> $DIR/union-ub-fat-ptr.rs:101:1
|
LL | const E: &Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access at offset N, outside bounds of allocation N which has size N
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ a memory access tried to interpret some bytes as a pointer
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
error[E0080]: this constant likely exhibits undefined behavior
- --> $DIR/union-ub-fat-ptr.rs:90:1
+ --> $DIR/union-ub-fat-ptr.rs:104:1
|
LL | const F: &Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered vtable address is not a pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered fat pointer vtable is not a valid pointer
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:108:1
+ |
+LL | const G: &Trait = &unsafe { BoolTransmute { val: 3 }.bl };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .<deref>, but expected something in the range 0..=1
+ |
+ = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
+
+error[E0080]: this constant likely exhibits undefined behavior
+ --> $DIR/union-ub-fat-ptr.rs:112:1
+ |
+LL | const H: &[bool] = &[unsafe { BoolTransmute { val: 3 }.bl }];
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered 3 at .<deref>[0], but expected something in the range 0..=1
|
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
-error: aborting due to 5 previous errors
+error: aborting due to 10 previous errors
For more information about this error, try `rustc --explain E0080`.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(proc_macro_path_invoc)]
-
#[foo::bar] //~ ERROR failed to resolve. Use of undeclared type or module `foo`
fn main() {}
error[E0433]: failed to resolve. Use of undeclared type or module `foo`
- --> $DIR/unknown-tool-name.rs:13:3
+ --> $DIR/unknown-tool-name.rs:11:3
|
LL | #[foo::bar] //~ ERROR failed to resolve. Use of undeclared type or module `foo`
| ^^^ Use of undeclared type or module `foo`
-Subproject commit f05a1038b59cd4217e58b3aef7a0751a0efd01e4
+Subproject commit dda656652e2e1a8d615a712d7f7482c25fa0a9c2
let mut strs: Vec<String> = nv.splitn(2, '=').map(str::to_owned).collect();
match strs.len() {
- 1 => (strs.pop().unwrap(), "".to_owned()),
+ 1 => (strs.pop().unwrap(), String::new()),
2 => {
let end = strs.pop().unwrap();
(strs.pop().unwrap(), end)
normalized = normalized.replace("\\n", "\n");
}
+ // If there are `$SRC_DIR` normalizations with line and column numbers, then replace them
+ // with placeholders as we do not want tests needing updated when compiler source code
+ // changes.
+ // eg. $SRC_DIR/libcore/mem.rs:323:14 becomes $SRC_DIR/libcore/mem.rs:LL:COL
+ normalized = Regex::new("SRC_DIR(.+):\\d+:\\d+").unwrap()
+ .replace_all(&normalized, "SRC_DIR$1:LL:COL").into_owned();
+
normalized = normalized.replace("\\\\", "\\") // denormalize for paths on windows
.replace("\\", "/") // normalize for paths on windows
.replace("\r\n", "\n") // normalize for linebreaks on windows
// add it to the set of known library features so we can still generate docs.
lib_features.insert("compiler_builtins_lib".to_owned(), Feature {
level: Status::Unstable,
- since: "".to_owned(),
+ since: String::new(),
has_gate_test: false,
tracking_issue: None,
});